applied-ai-018 committed
Commit b90def7 · verified · Parent: 9eff8f1

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. ckpts/universal/global_step80/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  5. venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h +466 -0
  6. venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h +1717 -0
  7. venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h +697 -0
  8. venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h +489 -0
  9. venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h +295 -0
  10. venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h +752 -0
  11. venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h +58 -0
  12. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h +33 -0
  13. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h +221 -0
  14. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h +145 -0
  15. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h +2058 -0
  16. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_util.h +460 -0
  17. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/basic_decimal.h +492 -0
  18. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h +211 -0
  19. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h +95 -0
  20. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_block_counter.h +570 -0
  21. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h +515 -0
  22. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_stream_utils.h +529 -0
  23. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h +370 -0
  24. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h +43 -0
  25. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h +112 -0
  26. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_reader.h +273 -0
  27. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h +286 -0
  28. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h +89 -0
  29. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h +34 -0
  30. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h +0 -0
  31. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx2.h +28 -0
  32. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h +28 -0
  33. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h +0 -0
  34. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h +88 -0
  35. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h +241 -0
  36. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h +411 -0
  37. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h +114 -0
  38. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h +36 -0
  39. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h +29 -0
  40. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h +115 -0
  41. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/double_conversion.h +32 -0
  42. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/endian.h +245 -0
  43. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h +656 -0
  44. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/functional.h +160 -0
  45. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h +944 -0
  46. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h +118 -0
  47. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h +452 -0
  48. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h +568 -0
  49. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h +35 -0
  50. venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h +259 -0
ckpts/universal/global_step80/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42787a47c14038affaa6997674816506ae6d10dd7e8b3dfc35c90a52026fc982
+size 33555627
ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5dbb776214980e309ca63828409e3f40bbc566805152479a4e13703a48fd21f
+size 33555612
ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b67284a4e140d271ba46aba22965ed84ae00a8e87ce91f82f3cc8589c90b46ff
+size 33555627
ckpts/universal/global_step80/zero/8.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3d24f58bab28f666390d91d4f38b3f780aa4bf48b071b64ce8a911c47b71332
+size 33555533
venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h ADDED
@@ -0,0 +1,466 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Eager evaluation convenience APIs for invoking common functions, including
+// necessary memory allocations
+
+#pragma once
+
+#include <vector>
+
+#include "arrow/compute/function_options.h"
+#include "arrow/datum.h"
+#include "arrow/result.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+
+namespace compute {
+
+class ExecContext;
+
+// ----------------------------------------------------------------------
+// Aggregate functions
+
+/// \addtogroup compute-concrete-options
+/// @{
+
+/// \brief Control general scalar aggregate kernel behavior
+///
+/// By default, null values are ignored (skip_nulls = true).
+class ARROW_EXPORT ScalarAggregateOptions : public FunctionOptions {
+ public:
+  explicit ScalarAggregateOptions(bool skip_nulls = true, uint32_t min_count = 1);
+  static constexpr char const kTypeName[] = "ScalarAggregateOptions";
+  static ScalarAggregateOptions Defaults() { return ScalarAggregateOptions{}; }
+
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control count aggregate kernel behavior.
+///
+/// By default, only non-null values are counted.
+class ARROW_EXPORT CountOptions : public FunctionOptions {
+ public:
+  enum CountMode {
+    /// Count only non-null values.
+    ONLY_VALID = 0,
+    /// Count only null values.
+    ONLY_NULL,
+    /// Count both non-null and null values.
+    ALL,
+  };
+  explicit CountOptions(CountMode mode = CountMode::ONLY_VALID);
+  static constexpr char const kTypeName[] = "CountOptions";
+  static CountOptions Defaults() { return CountOptions{}; }
+
+  CountMode mode;
+};
+
+/// \brief Control Mode kernel behavior
+///
+/// Returns top-n common values and counts.
+/// By default, returns the most common value and count.
+class ARROW_EXPORT ModeOptions : public FunctionOptions {
+ public:
+  explicit ModeOptions(int64_t n = 1, bool skip_nulls = true, uint32_t min_count = 0);
+  static constexpr char const kTypeName[] = "ModeOptions";
+  static ModeOptions Defaults() { return ModeOptions{}; }
+
+  int64_t n = 1;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control Delta Degrees of Freedom (ddof) of Variance and Stddev kernel
+///
+/// The divisor used in calculations is N - ddof, where N is the number of elements.
+/// By default, ddof is zero, and population variance or stddev is returned.
+class ARROW_EXPORT VarianceOptions : public FunctionOptions {
+ public:
+  explicit VarianceOptions(int ddof = 0, bool skip_nulls = true, uint32_t min_count = 0);
+  static constexpr char const kTypeName[] = "VarianceOptions";
+  static VarianceOptions Defaults() { return VarianceOptions{}; }
+
+  int ddof = 0;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control Quantile kernel behavior
+///
+/// By default, returns the median value.
+class ARROW_EXPORT QuantileOptions : public FunctionOptions {
+ public:
+  /// Interpolation method to use when quantile lies between two data points
+  enum Interpolation {
+    LINEAR = 0,
+    LOWER,
+    HIGHER,
+    NEAREST,
+    MIDPOINT,
+  };
+
+  explicit QuantileOptions(double q = 0.5, enum Interpolation interpolation = LINEAR,
+                           bool skip_nulls = true, uint32_t min_count = 0);
+
+  explicit QuantileOptions(std::vector<double> q,
+                           enum Interpolation interpolation = LINEAR,
+                           bool skip_nulls = true, uint32_t min_count = 0);
+
+  static constexpr char const kTypeName[] = "QuantileOptions";
+  static QuantileOptions Defaults() { return QuantileOptions{}; }
+
+  /// probability level of quantile must be between 0 and 1 inclusive
+  std::vector<double> q;
+  enum Interpolation interpolation;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control TDigest approximate quantile kernel behavior
+///
+/// By default, returns the median value.
+class ARROW_EXPORT TDigestOptions : public FunctionOptions {
+ public:
+  explicit TDigestOptions(double q = 0.5, uint32_t delta = 100,
+                          uint32_t buffer_size = 500, bool skip_nulls = true,
+                          uint32_t min_count = 0);
+  explicit TDigestOptions(std::vector<double> q, uint32_t delta = 100,
+                          uint32_t buffer_size = 500, bool skip_nulls = true,
+                          uint32_t min_count = 0);
+  static constexpr char const kTypeName[] = "TDigestOptions";
+  static TDigestOptions Defaults() { return TDigestOptions{}; }
+
+  /// probability level of quantile must be between 0 and 1 inclusive
+  std::vector<double> q;
+  /// compression parameter, default 100
+  uint32_t delta;
+  /// input buffer size, default 500
+  uint32_t buffer_size;
+  /// If true (the default), null values are ignored. Otherwise, if any value is null,
+  /// emit null.
+  bool skip_nulls;
+  /// If less than this many non-null values are observed, emit null.
+  uint32_t min_count;
+};
+
+/// \brief Control Index kernel behavior
+class ARROW_EXPORT IndexOptions : public FunctionOptions {
+ public:
+  explicit IndexOptions(std::shared_ptr<Scalar> value);
+  // Default constructor for serialization
+  IndexOptions();
+  static constexpr char const kTypeName[] = "IndexOptions";
+
+  std::shared_ptr<Scalar> value;
+};
+
+/// \brief Configure a grouped aggregation
+struct ARROW_EXPORT Aggregate {
+  Aggregate() = default;
+
+  Aggregate(std::string function, std::shared_ptr<FunctionOptions> options,
+            std::vector<FieldRef> target, std::string name = "")
+      : function(std::move(function)),
+        options(std::move(options)),
+        target(std::move(target)),
+        name(std::move(name)) {}
+
+  Aggregate(std::string function, std::shared_ptr<FunctionOptions> options,
+            FieldRef target, std::string name = "")
+      : Aggregate(std::move(function), std::move(options),
+                  std::vector<FieldRef>{std::move(target)}, std::move(name)) {}
+
+  Aggregate(std::string function, FieldRef target, std::string name)
+      : Aggregate(std::move(function), /*options=*/NULLPTR,
+                  std::vector<FieldRef>{std::move(target)}, std::move(name)) {}
+
+  Aggregate(std::string function, std::string name)
+      : Aggregate(std::move(function), /*options=*/NULLPTR,
+                  /*target=*/std::vector<FieldRef>{}, std::move(name)) {}
+
+  /// the name of the aggregation function
+  std::string function;
+
+  /// options for the aggregation function
+  std::shared_ptr<FunctionOptions> options;
+
+  /// zero or more fields to which aggregations will be applied
+  std::vector<FieldRef> target;
+
+  /// optional output field name for aggregations
+  std::string name;
+};
+
+/// @}
+
+/// \brief Count values in an array.
+///
+/// \param[in] options counting options, see CountOptions for more information
+/// \param[in] datum to count
+/// \param[in] ctx the function execution context, optional
+/// \return out resulting datum
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Count(const Datum& datum,
+                    const CountOptions& options = CountOptions::Defaults(),
+                    ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the mean of a numeric array.
+///
+/// \param[in] value datum to compute the mean, expecting Array
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed mean as a DoubleScalar
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Mean(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the product of values of a numeric array.
+///
+/// \param[in] value datum to compute product of, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed sum as a Scalar
+///
+/// \since 6.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Product(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Sum values of a numeric array.
+///
+/// \param[in] value datum to sum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed sum as a Scalar
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Sum(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the first value of an array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed first as Scalar
+///
+/// \since 13.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> First(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the last value of an array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed last as a Scalar
+///
+/// \since 13.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Last(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the min / max of a numeric array
+///
+/// This function returns both the min and max as a struct scalar, with type
+/// struct<min: T, max: T>, where T is the input type
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as a struct<min: T, max: T> scalar
+///
+/// \since 1.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> MinMax(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Test whether any element in a boolean array evaluates to true.
+///
+/// This function returns true if any of the elements in the array evaluates
+/// to true and false otherwise. Null values are ignored by default.
+/// If null values are taken into account by setting ScalarAggregateOptions
+/// parameter skip_nulls = false then Kleene logic is used.
+/// See KleeneOr for more details on Kleene logic.
+///
+/// \param[in] value input datum, expecting a boolean array
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as a BooleanScalar
+///
+/// \since 3.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Any(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Test whether all elements in a boolean array evaluate to true.
+///
+/// This function returns true if all of the elements in the array evaluate
+/// to true and false otherwise. Null values are ignored by default.
+/// If null values are taken into account by setting ScalarAggregateOptions
+/// parameter skip_nulls = false then Kleene logic is used.
+/// See KleeneAnd for more details on Kleene logic.
+///
+/// \param[in] value input datum, expecting a boolean array
+/// \param[in] options see ScalarAggregateOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as a BooleanScalar
+
+/// \since 3.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> All(
+    const Datum& value,
+    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
+    ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the modal (most common) value of a numeric array
+///
+/// This function returns top-n most common values and number of times they occur as
+/// an array of `struct<mode: T, count: int64>`, where T is the input type.
+/// Values with larger counts are returned before smaller ones.
+/// If there are more than one values with same count, smaller value is returned first.
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see ModeOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as an array of struct<mode: T, count: int64>
+///
+/// \since 2.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Mode(const Datum& value,
+                   const ModeOptions& options = ModeOptions::Defaults(),
+                   ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the standard deviation of a numeric array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see VarianceOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed standard deviation as a DoubleScalar
+///
+/// \since 2.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Stddev(const Datum& value,
+                     const VarianceOptions& options = VarianceOptions::Defaults(),
+                     ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the variance of a numeric array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see VarianceOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return datum of the computed variance as a DoubleScalar
+///
+/// \since 2.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Variance(const Datum& value,
+                       const VarianceOptions& options = VarianceOptions::Defaults(),
+                       ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the quantiles of a numeric array
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see QuantileOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as an array
+///
+/// \since 4.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Quantile(const Datum& value,
+                       const QuantileOptions& options = QuantileOptions::Defaults(),
+                       ExecContext* ctx = NULLPTR);
+
+/// \brief Calculate the approximate quantiles of a numeric array with T-Digest algorithm
+///
+/// \param[in] value input datum, expecting Array or ChunkedArray
+/// \param[in] options see TDigestOptions for more information
+/// \param[in] ctx the function execution context, optional
+/// \return resulting datum as an array
+///
+/// \since 4.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> TDigest(const Datum& value,
+                      const TDigestOptions& options = TDigestOptions::Defaults(),
+                      ExecContext* ctx = NULLPTR);
+
+/// \brief Find the first index of a value in an array.
+///
+/// \param[in] value The array to search.
+/// \param[in] options The array to search for. See IndexOptions.
+/// \param[in] ctx the function execution context, optional
+/// \return out a Scalar containing the index (or -1 if not found).
+///
+/// \since 5.0.0
+/// \note API not yet finalized
+ARROW_EXPORT
+Result<Datum> Index(const Datum& value, const IndexOptions& options,
+                    ExecContext* ctx = NULLPTR);
+
+} // namespace compute
+} // namespace arrow
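The declarations above are thin, eagerly-evaluated wrappers over the compute function registry. A minimal usage sketch follows; it is not part of this commit, and it assumes an installed Arrow C++ library to compile and link against:

#include <iostream>
#include <memory>

#include "arrow/api.h"
#include "arrow/compute/api_aggregate.h"

arrow::Status RunAggregates() {
  // Build a small int64 array with one null entry.
  arrow::Int64Builder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1, 2, 3}));
  ARROW_RETURN_NOT_OK(builder.AppendNull());
  std::shared_ptr<arrow::Array> values;
  ARROW_RETURN_NOT_OK(builder.Finish(&values));

  // Sum skips nulls by default (ScalarAggregateOptions::Defaults()).
  ARROW_ASSIGN_OR_RAISE(arrow::Datum sum, arrow::compute::Sum(values));
  std::cout << "sum: " << sum.scalar()->ToString() << std::endl;

  // MinMax returns a single struct<min: T, max: T> scalar.
  ARROW_ASSIGN_OR_RAISE(arrow::Datum minmax, arrow::compute::MinMax(values));
  std::cout << "min/max: " << minmax.scalar()->ToString() << std::endl;

  // Count only the null entries by switching the CountMode.
  arrow::compute::CountOptions count_nulls(arrow::compute::CountOptions::ONLY_NULL);
  ARROW_ASSIGN_OR_RAISE(arrow::Datum nulls, arrow::compute::Count(values, count_nulls));
  std::cout << "nulls: " << nulls.scalar()->ToString() << std::endl;
  return arrow::Status::OK();
}

int main() {
  arrow::Status st = RunAggregates();
  if (!st.ok()) {
    std::cerr << st.ToString() << std::endl;
    return 1;
  }
  return 0;
}

Every helper returns Result<Datum>, so failures surface as Status values (propagated here by the ARROW_* macros) rather than as exceptions.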
venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h ADDED
@@ -0,0 +1,1717 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Eager evaluation convenience APIs for invoking common functions, including
+// necessary memory allocations
+
+#pragma once
+
+#include <optional>
+#include <string>
+#include <utility>
+
+#include "arrow/compute/function_options.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/datum.h"
+#include "arrow/result.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace compute {
+
+/// \addtogroup compute-concrete-options
+///
+/// @{
+
+class ARROW_EXPORT ArithmeticOptions : public FunctionOptions {
+ public:
+  explicit ArithmeticOptions(bool check_overflow = false);
+  static constexpr char const kTypeName[] = "ArithmeticOptions";
+  bool check_overflow;
+};
+
+class ARROW_EXPORT ElementWiseAggregateOptions : public FunctionOptions {
+ public:
+  explicit ElementWiseAggregateOptions(bool skip_nulls = true);
+  static constexpr char const kTypeName[] = "ElementWiseAggregateOptions";
+  static ElementWiseAggregateOptions Defaults() { return ElementWiseAggregateOptions{}; }
+  bool skip_nulls;
+};
+
+/// Rounding and tie-breaking modes for round compute functions.
+/// Additional details and examples are provided in compute.rst.
+enum class RoundMode : int8_t {
+  /// Round to nearest integer less than or equal in magnitude (aka "floor")
+  DOWN,
+  /// Round to nearest integer greater than or equal in magnitude (aka "ceil")
+  UP,
+  /// Get the integral part without fractional digits (aka "trunc")
+  TOWARDS_ZERO,
+  /// Round negative values with DOWN rule
+  /// and positive values with UP rule (aka "away from zero")
+  TOWARDS_INFINITY,
+  /// Round ties with DOWN rule (also called "round half towards negative infinity")
+  HALF_DOWN,
+  /// Round ties with UP rule (also called "round half towards positive infinity")
+  HALF_UP,
+  /// Round ties with TOWARDS_ZERO rule (also called "round half away from infinity")
+  HALF_TOWARDS_ZERO,
+  /// Round ties with TOWARDS_INFINITY rule (also called "round half away from zero")
+  HALF_TOWARDS_INFINITY,
+  /// Round ties to nearest even integer
+  HALF_TO_EVEN,
+  /// Round ties to nearest odd integer
+  HALF_TO_ODD,
+};
+
+class ARROW_EXPORT RoundOptions : public FunctionOptions {
+ public:
+  explicit RoundOptions(int64_t ndigits = 0,
+                        RoundMode round_mode = RoundMode::HALF_TO_EVEN);
+  static constexpr char const kTypeName[] = "RoundOptions";
+  static RoundOptions Defaults() { return RoundOptions(); }
+  /// Rounding precision (number of digits to round to)
+  int64_t ndigits;
+  /// Rounding and tie-breaking mode
+  RoundMode round_mode;
+};
+
+class ARROW_EXPORT RoundBinaryOptions : public FunctionOptions {
+ public:
+  explicit RoundBinaryOptions(RoundMode round_mode = RoundMode::HALF_TO_EVEN);
+  static constexpr char const kTypeName[] = "RoundBinaryOptions";
+  static RoundBinaryOptions Defaults() { return RoundBinaryOptions(); }
+  /// Rounding and tie-breaking mode
+  RoundMode round_mode;
+};
+
+enum class CalendarUnit : int8_t {
+  NANOSECOND,
+  MICROSECOND,
+  MILLISECOND,
+  SECOND,
+  MINUTE,
+  HOUR,
+  DAY,
+  WEEK,
+  MONTH,
+  QUARTER,
+  YEAR
+};
+
+class ARROW_EXPORT RoundTemporalOptions : public FunctionOptions {
+ public:
+  explicit RoundTemporalOptions(int multiple = 1, CalendarUnit unit = CalendarUnit::DAY,
+                                bool week_starts_monday = true,
+                                bool ceil_is_strictly_greater = false,
+                                bool calendar_based_origin = false);
+  static constexpr char const kTypeName[] = "RoundTemporalOptions";
+  static RoundTemporalOptions Defaults() { return RoundTemporalOptions(); }
+
+  /// Number of units to round to
+  int multiple;
+  /// The unit used for rounding of time
+  CalendarUnit unit;
+  /// What day does the week start with (Monday=true, Sunday=false)
+  bool week_starts_monday;
+  /// Enable this flag to return a rounded value that is strictly greater than the input.
+  /// For example: ceiling 1970-01-01T00:00:00 to 3 hours would yield 1970-01-01T03:00:00
+  /// if set to true and 1970-01-01T00:00:00 if set to false.
+  /// This applies for ceiling only.
+  bool ceil_is_strictly_greater;
+  /// By default time is rounded to a multiple of units since 1970-01-01T00:00:00.
+  /// By setting calendar_based_origin to true, time will be rounded to a number
+  /// of units since the last greater calendar unit.
+  /// For example: rounding to a multiple of days since the beginning of the month or
+  /// to hours since the beginning of the day.
+  /// Exceptions: week and quarter are not used as greater units, therefore days will
+  /// be rounded to the beginning of the month not week. Greater unit of week is year.
+  /// Note that ceiling and rounding might change sorting order of an array near greater
+  /// unit change. For example rounding YYYY-mm-dd 23:00:00 to 5 hours will ceil and
+  /// round to YYYY-mm-dd+1 01:00:00 and floor to YYYY-mm-dd 20:00:00. On the other hand
+  /// YYYY-mm-dd+1 00:00:00 will ceil, round and floor to YYYY-mm-dd+1 00:00:00. This
+  /// can break the order of an already ordered array.
+  bool calendar_based_origin;
+};
+
+class ARROW_EXPORT RoundToMultipleOptions : public FunctionOptions {
+ public:
+  explicit RoundToMultipleOptions(double multiple = 1.0,
+                                  RoundMode round_mode = RoundMode::HALF_TO_EVEN);
+  explicit RoundToMultipleOptions(std::shared_ptr<Scalar> multiple,
+                                  RoundMode round_mode = RoundMode::HALF_TO_EVEN);
+  static constexpr char const kTypeName[] = "RoundToMultipleOptions";
+  static RoundToMultipleOptions Defaults() { return RoundToMultipleOptions(); }
+  /// Rounding scale (multiple to round to).
+  ///
+  /// Should be a positive numeric scalar of a type compatible with the
+  /// argument to be rounded. The cast kernel is used to convert the rounding
+  /// multiple to match the result type.
+  std::shared_ptr<Scalar> multiple;
+  /// Rounding and tie-breaking mode
+  RoundMode round_mode;
+};
+
+/// Options for var_args_join.
+class ARROW_EXPORT JoinOptions : public FunctionOptions {
+ public:
+  /// How to handle null values. (A null separator always results in a null output.)
+  enum NullHandlingBehavior {
+    /// A null in any input results in a null in the output.
+    EMIT_NULL,
+    /// Nulls in inputs are skipped.
+    SKIP,
+    /// Nulls in inputs are replaced with the replacement string.
+    REPLACE,
+  };
+  explicit JoinOptions(NullHandlingBehavior null_handling = EMIT_NULL,
+                       std::string null_replacement = "");
+  static constexpr char const kTypeName[] = "JoinOptions";
+  static JoinOptions Defaults() { return JoinOptions(); }
+  NullHandlingBehavior null_handling;
+  std::string null_replacement;
+};
+
+class ARROW_EXPORT MatchSubstringOptions : public FunctionOptions {
+ public:
+  explicit MatchSubstringOptions(std::string pattern, bool ignore_case = false);
+  MatchSubstringOptions();
+  static constexpr char const kTypeName[] = "MatchSubstringOptions";
+
+  /// The exact substring (or regex, depending on kernel) to look for inside input values.
+  std::string pattern;
+  /// Whether to perform a case-insensitive match.
+  bool ignore_case;
+};
+
+class ARROW_EXPORT SplitOptions : public FunctionOptions {
+ public:
+  explicit SplitOptions(int64_t max_splits = -1, bool reverse = false);
+  static constexpr char const kTypeName[] = "SplitOptions";
+
+  /// Maximum number of splits allowed, or unlimited when -1
+  int64_t max_splits;
+  /// Start splitting from the end of the string (only relevant when max_splits != -1)
+  bool reverse;
+};
+
+class ARROW_EXPORT SplitPatternOptions : public FunctionOptions {
+ public:
+  explicit SplitPatternOptions(std::string pattern, int64_t max_splits = -1,
+                               bool reverse = false);
+  SplitPatternOptions();
+  static constexpr char const kTypeName[] = "SplitPatternOptions";
+
+  /// The exact substring to split on.
+  std::string pattern;
+  /// Maximum number of splits allowed, or unlimited when -1
+  int64_t max_splits;
+  /// Start splitting from the end of the string (only relevant when max_splits != -1)
+  bool reverse;
+};
+
+class ARROW_EXPORT ReplaceSliceOptions : public FunctionOptions {
+ public:
+  explicit ReplaceSliceOptions(int64_t start, int64_t stop, std::string replacement);
+  ReplaceSliceOptions();
+  static constexpr char const kTypeName[] = "ReplaceSliceOptions";
+
+  /// Index to start slicing at
+  int64_t start;
+  /// Index to stop slicing at
+  int64_t stop;
+  /// String to replace the slice with
+  std::string replacement;
+};
+
+class ARROW_EXPORT ReplaceSubstringOptions : public FunctionOptions {
+ public:
+  explicit ReplaceSubstringOptions(std::string pattern, std::string replacement,
+                                   int64_t max_replacements = -1);
+  ReplaceSubstringOptions();
+  static constexpr char const kTypeName[] = "ReplaceSubstringOptions";
+
+  /// Pattern to match, literal, or regular expression depending on which kernel is used
+  std::string pattern;
+  /// String to replace the pattern with
+  std::string replacement;
+  /// Max number of substrings to replace (-1 means unbounded)
+  int64_t max_replacements;
+};
+
+class ARROW_EXPORT ExtractRegexOptions : public FunctionOptions {
+ public:
+  explicit ExtractRegexOptions(std::string pattern);
+  ExtractRegexOptions();
+  static constexpr char const kTypeName[] = "ExtractRegexOptions";
+
+  /// Regular expression with named capture fields
+  std::string pattern;
+};
+
+/// Options for IsIn and IndexIn functions
+class ARROW_EXPORT SetLookupOptions : public FunctionOptions {
+ public:
+  /// How to handle null values.
+  enum NullMatchingBehavior {
+    /// MATCH, any null in `value_set` is successfully matched in
+    /// the input.
+    MATCH,
+    /// SKIP, any null in `value_set` is ignored and nulls in the input
+    /// produce null (IndexIn) or false (IsIn) values in the output.
+    SKIP,
+    /// EMIT_NULL, any null in `value_set` is ignored and nulls in the
+    /// input produce null (IndexIn and IsIn) values in the output.
+    EMIT_NULL,
+    /// INCONCLUSIVE, null values are regarded as unknown values, which is
+    /// sql-compatible. nulls in the input produce null (IndexIn and IsIn)
+    /// values in the output. Besides, if `value_set` contains a null,
+    /// non-null unmatched values in the input also produce null values
+    /// (IndexIn and IsIn) in the output.
+    INCONCLUSIVE
+  };
+
+  explicit SetLookupOptions(Datum value_set, NullMatchingBehavior = MATCH);
+  SetLookupOptions();
+
+  // DEPRECATED(will be removed after removing of skip_nulls)
+  explicit SetLookupOptions(Datum value_set, bool skip_nulls);
+
+  static constexpr char const kTypeName[] = "SetLookupOptions";
+
+  /// The set of values to look up input values into.
+  Datum value_set;
+
+  NullMatchingBehavior null_matching_behavior;
+
+  // DEPRECATED(will be removed after removing of skip_nulls)
+  NullMatchingBehavior GetNullMatchingBehavior() const;
+
+  // DEPRECATED(use null_matching_behavior instead)
+  /// Whether nulls in `value_set` count for lookup.
+  ///
+  /// If true, any null in `value_set` is ignored and nulls in the input
+  /// produce null (IndexIn) or false (IsIn) values in the output.
+  /// If false, any null in `value_set` is successfully matched in
+  /// the input.
+  std::optional<bool> skip_nulls;
+};
+
+/// Options for struct_field function
+class ARROW_EXPORT StructFieldOptions : public FunctionOptions {
+ public:
+  explicit StructFieldOptions(std::vector<int> indices);
+  explicit StructFieldOptions(std::initializer_list<int>);
+  explicit StructFieldOptions(FieldRef field_ref);
+  StructFieldOptions();
+  static constexpr char const kTypeName[] = "StructFieldOptions";
+
+  /// The FieldRef specifying what to extract from struct or union.
+  FieldRef field_ref;
+};
+
+class ARROW_EXPORT StrptimeOptions : public FunctionOptions {
+ public:
+  explicit StrptimeOptions(std::string format, TimeUnit::type unit,
+                           bool error_is_null = false);
+  StrptimeOptions();
+  static constexpr char const kTypeName[] = "StrptimeOptions";
+
+  /// The desired format string.
+  std::string format;
+  /// The desired time resolution
+  TimeUnit::type unit;
+  /// Return null on parsing errors if true or raise if false
+  bool error_is_null;
+};
+
+class ARROW_EXPORT StrftimeOptions : public FunctionOptions {
+ public:
+  explicit StrftimeOptions(std::string format, std::string locale = "C");
+  StrftimeOptions();
+
+  static constexpr char const kTypeName[] = "StrftimeOptions";
+
+  static constexpr const char* kDefaultFormat = "%Y-%m-%dT%H:%M:%S";
+
+  /// The desired format string.
+  std::string format;
+  /// The desired output locale string.
+  std::string locale;
+};
+
+class ARROW_EXPORT PadOptions : public FunctionOptions {
+ public:
+  explicit PadOptions(int64_t width, std::string padding = " ");
+  PadOptions();
+  static constexpr char const kTypeName[] = "PadOptions";
+
+  /// The desired string length.
+  int64_t width;
+  /// What to pad the string with. Should be one codepoint (Unicode)/byte (ASCII).
+  std::string padding;
+};
+
+class ARROW_EXPORT TrimOptions : public FunctionOptions {
+ public:
+  explicit TrimOptions(std::string characters);
+  TrimOptions();
+  static constexpr char const kTypeName[] = "TrimOptions";
+
+  /// The individual characters to be trimmed from the string.
+  std::string characters;
+};
+
+class ARROW_EXPORT SliceOptions : public FunctionOptions {
+ public:
+  explicit SliceOptions(int64_t start, int64_t stop = std::numeric_limits<int64_t>::max(),
+                        int64_t step = 1);
+  SliceOptions();
+  static constexpr char const kTypeName[] = "SliceOptions";
+  int64_t start, stop, step;
+};
+
+class ARROW_EXPORT ListSliceOptions : public FunctionOptions {
+ public:
+  explicit ListSliceOptions(int64_t start, std::optional<int64_t> stop = std::nullopt,
+                            int64_t step = 1,
+                            std::optional<bool> return_fixed_size_list = std::nullopt);
+  ListSliceOptions();
+  static constexpr char const kTypeName[] = "ListSliceOptions";
+  /// The start of list slicing.
+  int64_t start;
+  /// Optional stop of list slicing. If not set, then slice to end. (NotImplemented)
+  std::optional<int64_t> stop;
+  /// Slicing step
+  int64_t step;
+  // Whether to return a FixedSizeListArray. If true _and_ stop is after
+  // a list element's length, nulls will be appended to create the requested slice size.
+  // Default of `nullopt` will return whatever type it got in.
+  std::optional<bool> return_fixed_size_list;
+};
+
+class ARROW_EXPORT NullOptions : public FunctionOptions {
+ public:
+  explicit NullOptions(bool nan_is_null = false);
+  static constexpr char const kTypeName[] = "NullOptions";
+  static NullOptions Defaults() { return NullOptions{}; }
+
+  bool nan_is_null;
+};
+
+enum CompareOperator : int8_t {
+  EQUAL,
+  NOT_EQUAL,
+  GREATER,
+  GREATER_EQUAL,
+  LESS,
+  LESS_EQUAL,
+};
+
+struct ARROW_EXPORT CompareOptions {
+  explicit CompareOptions(CompareOperator op) : op(op) {}
+  CompareOptions() : CompareOptions(CompareOperator::EQUAL) {}
+  enum CompareOperator op;
+};
+
+class ARROW_EXPORT MakeStructOptions : public FunctionOptions {
+ public:
+  MakeStructOptions(std::vector<std::string> n, std::vector<bool> r,
+                    std::vector<std::shared_ptr<const KeyValueMetadata>> m);
+  explicit MakeStructOptions(std::vector<std::string> n);
+  MakeStructOptions();
+  static constexpr char const kTypeName[] = "MakeStructOptions";
+
+  /// Names for wrapped columns
+  std::vector<std::string> field_names;
+
+  /// Nullability bits for wrapped columns
+  std::vector<bool> field_nullability;
+
+  /// Metadata attached to wrapped columns
+  std::vector<std::shared_ptr<const KeyValueMetadata>> field_metadata;
+};
+
+struct ARROW_EXPORT DayOfWeekOptions : public FunctionOptions {
+ public:
+  explicit DayOfWeekOptions(bool count_from_zero = true, uint32_t week_start = 1);
+  static constexpr char const kTypeName[] = "DayOfWeekOptions";
+  static DayOfWeekOptions Defaults() { return DayOfWeekOptions(); }
+
+  /// Number days from 0 if true and from 1 if false
+  bool count_from_zero;
+  /// What day does the week start with (Monday=1, Sunday=7).
+  /// The numbering is unaffected by the count_from_zero parameter.
+  uint32_t week_start;
+};
+
+/// Used to control timestamp timezone conversion and handling ambiguous/nonexistent
+/// times.
+struct ARROW_EXPORT AssumeTimezoneOptions : public FunctionOptions {
+ public:
+  /// \brief How to interpret ambiguous local times that can be interpreted as
+  /// multiple instants (normally two) due to DST shifts.
+  ///
+  /// AMBIGUOUS_EARLIEST emits the earliest instant amongst possible interpretations.
+  /// AMBIGUOUS_LATEST emits the latest instant amongst possible interpretations.
+  enum Ambiguous { AMBIGUOUS_RAISE, AMBIGUOUS_EARLIEST, AMBIGUOUS_LATEST };
+
+  /// \brief How to handle local times that do not exist due to DST shifts.
+  ///
+  /// NONEXISTENT_EARLIEST emits the instant "just before" the DST shift instant
+  /// in the given timestamp precision (for example, for a nanoseconds precision
+  /// timestamp, this is one nanosecond before the DST shift instant).
+  /// NONEXISTENT_LATEST emits the DST shift instant.
+  enum Nonexistent { NONEXISTENT_RAISE, NONEXISTENT_EARLIEST, NONEXISTENT_LATEST };
+
+  explicit AssumeTimezoneOptions(std::string timezone,
+                                 Ambiguous ambiguous = AMBIGUOUS_RAISE,
+                                 Nonexistent nonexistent = NONEXISTENT_RAISE);
+  AssumeTimezoneOptions();
+  static constexpr char const kTypeName[] = "AssumeTimezoneOptions";
+
+  /// Timezone to convert timestamps from
+  std::string timezone;
+
+  /// How to interpret ambiguous local times (due to DST shifts)
+  Ambiguous ambiguous;
+  /// How to interpret nonexistent local times (due to DST shifts)
+  Nonexistent nonexistent;
+};
+
+struct ARROW_EXPORT WeekOptions : public FunctionOptions {
+ public:
+  explicit WeekOptions(bool week_starts_monday = true, bool count_from_zero = false,
+                       bool first_week_is_fully_in_year = false);
+  static constexpr char const kTypeName[] = "WeekOptions";
+  static WeekOptions Defaults() { return WeekOptions{}; }
+  static WeekOptions ISODefaults() {
+    return WeekOptions{/*week_starts_monday*/ true,
+                       /*count_from_zero=*/false,
+                       /*first_week_is_fully_in_year=*/false};
+  }
+  static WeekOptions USDefaults() {
+    return WeekOptions{/*week_starts_monday*/ false,
+                       /*count_from_zero=*/false,
+                       /*first_week_is_fully_in_year=*/false};
+  }
+
+  /// What day does the week start with (Monday=true, Sunday=false)
+  bool week_starts_monday;
+  /// Dates from current year that fall into last ISO week of the previous year return
+  /// 0 if true and 52 or 53 if false.
+  bool count_from_zero;
+  /// Must the first week be fully in January (true), or is a week that begins on
+  /// December 29, 30, or 31 considered to be the first week of the new year (false)?
+  bool first_week_is_fully_in_year;
+};
+
+struct ARROW_EXPORT Utf8NormalizeOptions : public FunctionOptions {
+ public:
+  enum Form { NFC, NFKC, NFD, NFKD };
+
+  explicit Utf8NormalizeOptions(Form form = NFC);
+  static Utf8NormalizeOptions Defaults() { return Utf8NormalizeOptions(); }
+  static constexpr char const kTypeName[] = "Utf8NormalizeOptions";
+
+  /// The Unicode normalization form to apply
+  Form form;
+};
+
+class ARROW_EXPORT RandomOptions : public FunctionOptions {
+ public:
+  enum Initializer { SystemRandom, Seed };
+
+  static RandomOptions FromSystemRandom() { return RandomOptions{SystemRandom, 0}; }
+  static RandomOptions FromSeed(uint64_t seed) { return RandomOptions{Seed, seed}; }
+
+  RandomOptions(Initializer initializer, uint64_t seed);
+  RandomOptions();
+  static constexpr char const kTypeName[] = "RandomOptions";
+  static RandomOptions Defaults() { return RandomOptions(); }
+
+  /// The type of initialization for random number generation - system or provided seed.
+  Initializer initializer;
+  /// The seed value used to initialize the random number generation.
+  uint64_t seed;
+};
+
+/// Options for map_lookup function
+class ARROW_EXPORT MapLookupOptions : public FunctionOptions {
+ public:
+  enum Occurrence {
+    /// Return the first matching value
+    FIRST,
+    /// Return the last matching value
+    LAST,
+    /// Return all matching values
+    ALL
+  };
+
+  explicit MapLookupOptions(std::shared_ptr<Scalar> query_key, Occurrence occurrence);
+  MapLookupOptions();
+
+  constexpr static char const kTypeName[] = "MapLookupOptions";
+
+  /// The key to lookup in the map
+  std::shared_ptr<Scalar> query_key;
+
+  /// Whether to return the first, last, or all matching values
+  Occurrence occurrence;
+};
+
+/// @}
+
+/// \brief Get the absolute value of a value.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the value transformed
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise absolute value
+ARROW_EXPORT
+Result<Datum> AbsoluteValue(const Datum& arg,
+                            ArithmeticOptions options = ArithmeticOptions(),
+                            ExecContext* ctx = NULLPTR);
+
+/// \brief Add two values together. Array values must be the same length. If
+/// either addend is null the result will be null.
+///
+/// \param[in] left the first addend
+/// \param[in] right the second addend
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise sum
+ARROW_EXPORT
+Result<Datum> Add(const Datum& left, const Datum& right,
+                  ArithmeticOptions options = ArithmeticOptions(),
+                  ExecContext* ctx = NULLPTR);
+
+/// \brief Subtract two values. Array values must be the same length. If the
+/// minuend or subtrahend is null the result will be null.
+///
+/// \param[in] left the value subtracted from (minuend)
+/// \param[in] right the value by which the minuend is reduced (subtrahend)
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise difference
+ARROW_EXPORT
+Result<Datum> Subtract(const Datum& left, const Datum& right,
+                       ArithmeticOptions options = ArithmeticOptions(),
+                       ExecContext* ctx = NULLPTR);
+
+/// \brief Multiply two values. Array values must be the same length. If either
+/// factor is null the result will be null.
+///
+/// \param[in] left the first factor
+/// \param[in] right the second factor
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise product
+ARROW_EXPORT
+Result<Datum> Multiply(const Datum& left, const Datum& right,
+                       ArithmeticOptions options = ArithmeticOptions(),
+                       ExecContext* ctx = NULLPTR);
+
+/// \brief Divide two values. Array values must be the same length. If either
+/// argument is null the result will be null. For integer types, if there is
+/// a zero divisor, an error will be raised.
+///
+/// \param[in] left the dividend
+/// \param[in] right the divisor
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise quotient
+ARROW_EXPORT
+Result<Datum> Divide(const Datum& left, const Datum& right,
+                     ArithmeticOptions options = ArithmeticOptions(),
+                     ExecContext* ctx = NULLPTR);
+
+/// \brief Negate values.
+///
+/// If argument is null the result will be null.
+///
+/// \param[in] arg the value negated
+/// \param[in] options arithmetic options (overflow handling), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise negation
+ARROW_EXPORT
+Result<Datum> Negate(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                     ExecContext* ctx = NULLPTR);
+
+/// \brief Raise the values of base array to the power of the exponent array values.
+/// Array values must be the same length. If either base or exponent is null the result
+/// will be null.
+///
+/// \param[in] left the base
+/// \param[in] right the exponent
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise base value raised to the power of exponent
+ARROW_EXPORT
+Result<Datum> Power(const Datum& left, const Datum& right,
+                    ArithmeticOptions options = ArithmeticOptions(),
+                    ExecContext* ctx = NULLPTR);
+
+/// \brief Raise Euler's number to the power of specified exponent, element-wise.
+/// If the exponent value is null the result will be null.
+///
+/// \param[in] arg the exponent
+/// \param[in] ctx the function execution context, optional
+/// \return the element-wise Euler's number raised to the power of exponent
+ARROW_EXPORT
+Result<Datum> Exp(const Datum& arg, ExecContext* ctx = NULLPTR);
+
+/// \brief Left shift the left array by the right array. Array values must be the
+/// same length. If either operand is null, the result will be null.
+///
+/// \param[in] left the value to shift
+/// \param[in] right the value to shift by
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise left value shifted left by the right value
+ARROW_EXPORT
+Result<Datum> ShiftLeft(const Datum& left, const Datum& right,
+                        ArithmeticOptions options = ArithmeticOptions(),
+                        ExecContext* ctx = NULLPTR);
+
+/// \brief Right shift the left array by the right array. Array values must be the
+/// same length. If either operand is null, the result will be null. Performs a
+/// logical shift for unsigned values, and an arithmetic shift for signed values.
+///
+/// \param[in] left the value to shift
+/// \param[in] right the value to shift by
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise left value shifted right by the right value
+ARROW_EXPORT
+Result<Datum> ShiftRight(const Datum& left, const Datum& right,
+                         ArithmeticOptions options = ArithmeticOptions(),
+                         ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the sine of the array values.
+/// \param[in] arg The values to compute the sine for.
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise sine of the values
+ARROW_EXPORT
+Result<Datum> Sin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                  ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the cosine of the array values.
+/// \param[in] arg The values to compute the cosine for.
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise cosine of the values
+ARROW_EXPORT
+Result<Datum> Cos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                  ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the inverse sine (arcsine) of the array values.
+/// \param[in] arg The values to compute the inverse sine for.
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+/// \return the elementwise inverse sine of the values
+ARROW_EXPORT
+Result<Datum> Asin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
+                   ExecContext* ctx = NULLPTR);
+
+/// \brief Compute the inverse cosine (arccosine) of the array values.
+/// \param[in] arg The values to compute the inverse cosine for.
+/// \param[in] options arithmetic options (enable/disable overflow checking), optional
+/// \param[in] ctx the function execution context, optional
+ /// \return the elementwise inverse cosine of the values
741
+ ARROW_EXPORT
742
+ Result<Datum> Acos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
743
+ ExecContext* ctx = NULLPTR);
744
+
745
+ /// \brief Compute the tangent of the array values.
746
+ /// \param[in] arg The values to compute the tangent for.
747
+ /// \param[in] options arithmetic options (enable/disable overflow checking), optional
748
+ /// \param[in] ctx the function execution context, optional
749
+ /// \return the elementwise tangent of the values
750
+ ARROW_EXPORT
751
+ Result<Datum> Tan(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
752
+ ExecContext* ctx = NULLPTR);
753
+
754
+ /// \brief Compute the inverse tangent (arctangent) of the array values.
755
+ /// \param[in] arg The values to compute the inverse tangent for.
756
+ /// \param[in] ctx the function execution context, optional
757
+ /// \return the elementwise inverse tangent of the values
758
+ ARROW_EXPORT
759
+ Result<Datum> Atan(const Datum& arg, ExecContext* ctx = NULLPTR);
760
+
761
+ /// \brief Compute the inverse tangent (arctangent) of y/x, using the
762
+ /// argument signs to determine the correct quadrant.
763
+ /// \param[in] y The y-values to compute the inverse tangent for.
764
+ /// \param[in] x The x-values to compute the inverse tangent for.
765
+ /// \param[in] ctx the function execution context, optional
766
+ /// \return the elementwise inverse tangent of the values
767
+ ARROW_EXPORT
768
+ Result<Datum> Atan2(const Datum& y, const Datum& x, ExecContext* ctx = NULLPTR);
769
+
770
+ /// \brief Get the natural log of a value.
771
+ ///
772
+ /// If argument is null the result will be null.
773
+ ///
774
+ /// \param[in] arg The values to compute the logarithm for.
775
+ /// \param[in] options arithmetic options (overflow handling), optional
776
+ /// \param[in] ctx the function execution context, optional
777
+ /// \return the elementwise natural log
778
+ ARROW_EXPORT
779
+ Result<Datum> Ln(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
780
+ ExecContext* ctx = NULLPTR);
781
+
782
+ /// \brief Get the log base 10 of a value.
783
+ ///
784
+ /// If argument is null the result will be null.
785
+ ///
786
+ /// \param[in] arg The values to compute the logarithm for.
787
+ /// \param[in] options arithmetic options (overflow handling), optional
788
+ /// \param[in] ctx the function execution context, optional
789
+ /// \return the elementwise log base 10
790
+ ARROW_EXPORT
791
+ Result<Datum> Log10(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
792
+ ExecContext* ctx = NULLPTR);
793
+
794
+ /// \brief Get the log base 2 of a value.
795
+ ///
796
+ /// If argument is null the result will be null.
797
+ ///
798
+ /// \param[in] arg The values to compute the logarithm for.
799
+ /// \param[in] options arithmetic options (overflow handling), optional
800
+ /// \param[in] ctx the function execution context, optional
801
+ /// \return the elementwise log base 2
802
+ ARROW_EXPORT
803
+ Result<Datum> Log2(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
804
+ ExecContext* ctx = NULLPTR);
805
+
806
+ /// \brief Get the natural log of (1 + value).
807
+ ///
808
+ /// If argument is null the result will be null.
809
+ /// This function may be more accurate than Log(1 + value) for values close to zero.
810
+ ///
811
+ /// \param[in] arg The values to compute the logarithm for.
812
+ /// \param[in] options arithmetic options (overflow handling), optional
813
+ /// \param[in] ctx the function execution context, optional
814
+ /// \return the elementwise natural log
815
+ ARROW_EXPORT
816
+ Result<Datum> Log1p(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
817
+ ExecContext* ctx = NULLPTR);
818
+
819
+ /// \brief Get the log of a value to the given base.
820
+ ///
821
+ /// If argument is null the result will be null.
822
+ ///
823
+ /// \param[in] arg The values to compute the logarithm for.
824
+ /// \param[in] base The given base.
825
+ /// \param[in] options arithmetic options (overflow handling), optional
826
+ /// \param[in] ctx the function execution context, optional
827
+ /// \return the elementwise log to the given base
828
+ ARROW_EXPORT
829
+ Result<Datum> Logb(const Datum& arg, const Datum& base,
830
+ ArithmeticOptions options = ArithmeticOptions(),
831
+ ExecContext* ctx = NULLPTR);
832
+
833
+ /// \brief Get the square-root of a value.
834
+ ///
835
+ /// If argument is null the result will be null.
836
+ ///
837
+ /// \param[in] arg The values to compute the square-root for.
838
+ /// \param[in] options arithmetic options (overflow handling), optional
839
+ /// \param[in] ctx the function execution context, optional
840
+ /// \return the elementwise square-root
841
+ ARROW_EXPORT
842
+ Result<Datum> Sqrt(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
843
+ ExecContext* ctx = NULLPTR);
844
+
845
+ /// \brief Round down to the largest integer less than or equal to the
847
+ /// argument (i.e. toward negative infinity).
847
+ ///
848
+ /// If argument is null the result will be null.
849
+ ///
850
+ /// \param[in] arg the value to round
851
+ /// \param[in] ctx the function execution context, optional
852
+ /// \return the rounded value
853
+ ARROW_EXPORT
854
+ Result<Datum> Floor(const Datum& arg, ExecContext* ctx = NULLPTR);
855
+
856
+ /// \brief Round up to the smallest integer greater than or equal to the
858
+ /// argument (i.e. toward positive infinity).
858
+ ///
859
+ /// If argument is null the result will be null.
860
+ ///
861
+ /// \param[in] arg the value to round
862
+ /// \param[in] ctx the function execution context, optional
863
+ /// \return the rounded value
864
+ ARROW_EXPORT
865
+ Result<Datum> Ceil(const Datum& arg, ExecContext* ctx = NULLPTR);
866
+
867
+ /// \brief Get the integral part without fractional digits.
868
+ ///
869
+ /// If argument is null the result will be null.
870
+ ///
871
+ /// \param[in] arg the value to truncate
872
+ /// \param[in] ctx the function execution context, optional
873
+ /// \return the truncated value
874
+ ARROW_EXPORT
875
+ Result<Datum> Trunc(const Datum& arg, ExecContext* ctx = NULLPTR);
876
+
877
+ /// \brief Find the element-wise maximum of any number of arrays or scalars.
878
+ /// Array values must be the same length.
879
+ ///
880
+ /// \param[in] args arrays or scalars to operate on.
881
+ /// \param[in] options options for handling nulls, optional
882
+ /// \param[in] ctx the function execution context, optional
883
+ /// \return the element-wise maximum
884
+ ARROW_EXPORT
885
+ Result<Datum> MaxElementWise(
886
+ const std::vector<Datum>& args,
887
+ ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(),
888
+ ExecContext* ctx = NULLPTR);
889
+
890
+ /// \brief Find the element-wise minimum of any number of arrays or scalars.
891
+ /// Array values must be the same length.
892
+ ///
893
+ /// \param[in] args arrays or scalars to operate on.
894
+ /// \param[in] options options for handling nulls, optional
895
+ /// \param[in] ctx the function execution context, optional
896
+ /// \return the element-wise minimum
897
+ ARROW_EXPORT
898
+ Result<Datum> MinElementWise(
899
+ const std::vector<Datum>& args,
900
+ ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(),
901
+ ExecContext* ctx = NULLPTR);
902
+
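A short sketch of calling the element-wise aggregates above (the input Datums are assumed to wrap equal-length arrays or scalars):

    #include <arrow/api.h>
    #include <arrow/compute/api.h>

    // Element-wise maximum; with the default options nulls are skipped,
    // so max(null, 5) yields 5 rather than null.
    arrow::Result<arrow::Datum> PairMax(const arrow::Datum& a, const arrow::Datum& b) {
      return arrow::compute::MaxElementWise(
          {a, b}, arrow::compute::ElementWiseAggregateOptions::Defaults());
    }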
903
+ /// \brief Get the sign of a value. Array values can be of arbitrary length. If argument
904
+ /// is null the result will be null.
905
+ ///
906
+ /// \param[in] arg the value to extract sign from
907
+ /// \param[in] ctx the function execution context, optional
908
+ /// \return the element-wise sign function
909
+ ARROW_EXPORT
910
+ Result<Datum> Sign(const Datum& arg, ExecContext* ctx = NULLPTR);
911
+
912
+ /// \brief Round a value to a given precision.
913
+ ///
914
+ /// If arg is null the result will be null.
915
+ ///
916
+ /// \param[in] arg the value to be rounded
917
+ /// \param[in] options rounding options (rounding mode and number of digits), optional
918
+ /// \param[in] ctx the function execution context, optional
919
+ /// \return the element-wise rounded value
920
+ ARROW_EXPORT
921
+ Result<Datum> Round(const Datum& arg, RoundOptions options = RoundOptions::Defaults(),
922
+ ExecContext* ctx = NULLPTR);
923
+
924
+ /// \brief Round a value to a given precision.
925
+ ///
926
+ /// If arg1 is null the result will be null.
927
+ /// If arg2 is null the result will be null. If arg2 is negative, the rounding
928
+ /// place shifts to the left (thus -1 rounds to the nearest ten); if positive,
929
+ /// it shifts to the right (thus +1 rounds to the nearest tenth).
931
+ ///
932
+ /// \param[in] arg1 the value to be rounded
933
+ /// \param[in] arg2 the number of digits to round to
934
+ /// \param[in] options rounding options, optional
935
+ /// \param[in] ctx the function execution context, optional
936
+ /// \return the element-wise rounded value
937
+ ARROW_EXPORT
938
+ Result<Datum> RoundBinary(const Datum& arg1, const Datum& arg2,
939
+ RoundBinaryOptions options = RoundBinaryOptions::Defaults(),
940
+ ExecContext* ctx = NULLPTR);
941
+
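To make the sign convention above concrete, a small sketch (the wrapper function is hypothetical; `values` is assumed to wrap a floating-point array):

    #include <arrow/api.h>
    #include <arrow/compute/api.h>

    // Rounds each value to the nearest ten: the scalar -1 shifts the rounding
    // place one digit left of the decimal point, e.g. 1234.5 -> 1230.
    arrow::Result<arrow::Datum> RoundToTens(const arrow::Datum& values) {
      return arrow::compute::RoundBinary(values, arrow::Datum(int32_t{-1}));
    }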
942
+ /// \brief Round a value to a given multiple.
943
+ ///
944
+ /// If argument is null the result will be null.
945
+ ///
946
+ /// \param[in] arg the value to round
947
+ /// \param[in] options rounding options (rounding mode and multiple), optional
948
+ /// \param[in] ctx the function execution context, optional
949
+ /// \return the element-wise rounded value
950
+ ARROW_EXPORT
951
+ Result<Datum> RoundToMultiple(
952
+ const Datum& arg, RoundToMultipleOptions options = RoundToMultipleOptions::Defaults(),
953
+ ExecContext* ctx = NULLPTR);
954
+
955
+ /// \brief Ceil a temporal value to a given frequency
956
+ ///
957
+ /// If argument is null the result will be null.
958
+ ///
959
+ /// \param[in] arg the temporal value to ceil
960
+ /// \param[in] options temporal rounding options, optional
961
+ /// \param[in] ctx the function execution context, optional
962
+ /// \return the element-wise rounded value
963
+ ///
964
+ /// \since 7.0.0
965
+ /// \note API not yet finalized
966
+ ARROW_EXPORT
967
+ Result<Datum> CeilTemporal(
968
+ const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
969
+ ExecContext* ctx = NULLPTR);
970
+
971
+ /// \brief Floor a temporal value to a given frequency
972
+ ///
973
+ /// If argument is null the result will be null.
974
+ ///
975
+ /// \param[in] arg the temporal value to floor
976
+ /// \param[in] options temporal rounding options, optional
977
+ /// \param[in] ctx the function execution context, optional
978
+ /// \return the element-wise rounded value
979
+ ///
980
+ /// \since 7.0.0
981
+ /// \note API not yet finalized
982
+ ARROW_EXPORT
983
+ Result<Datum> FloorTemporal(
984
+ const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
985
+ ExecContext* ctx = NULLPTR);
986
+
987
+ /// \brief Round a temporal value to a given frequency
988
+ ///
989
+ /// If argument is null the result will be null.
990
+ ///
991
+ /// \param[in] arg the temporal value to round
992
+ /// \param[in] options temporal rounding options, optional
993
+ /// \param[in] ctx the function execution context, optional
994
+ /// \return the element-wise rounded value
995
+ ///
996
+ /// \since 7.0.0
997
+ /// \note API not yet finalized
998
+ ARROW_EXPORT
999
+ Result<Datum> RoundTemporal(
1000
+ const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
1001
+ ExecContext* ctx = NULLPTR);
1002
+
1003
+ /// \brief Invert the values of a boolean datum
1004
+ /// \param[in] value datum to invert
1005
+ /// \param[in] ctx the function execution context, optional
1006
+ /// \return the resulting datum
1007
+ ///
1008
+ /// \since 1.0.0
1009
+ /// \note API not yet finalized
1010
+ ARROW_EXPORT
1011
+ Result<Datum> Invert(const Datum& value, ExecContext* ctx = NULLPTR);
1012
+
1013
+ /// \brief Element-wise AND of two boolean datums which always propagates nulls
1014
+ /// (null and false is null).
1015
+ ///
1016
+ /// \param[in] left left operand
1017
+ /// \param[in] right right operand
1018
+ /// \param[in] ctx the function execution context, optional
1019
+ /// \return the resulting datum
1020
+ ///
1021
+ /// \since 1.0.0
1022
+ /// \note API not yet finalized
1023
+ ARROW_EXPORT
1024
+ Result<Datum> And(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
1025
+
1026
+ /// \brief Element-wise AND of two boolean datums with a Kleene truth table
1027
+ /// (null and false is false).
1028
+ ///
1029
+ /// \param[in] left left operand
1030
+ /// \param[in] right right operand
1031
+ /// \param[in] ctx the function execution context, optional
1032
+ /// \return the resulting datum
1033
+ ///
1034
+ /// \since 1.0.0
1035
+ /// \note API not yet finalized
1036
+ ARROW_EXPORT
1037
+ Result<Datum> KleeneAnd(const Datum& left, const Datum& right,
1038
+ ExecContext* ctx = NULLPTR);
1039
+
1040
+ /// \brief Element-wise OR of two boolean datums which always propagates nulls
1041
+ /// (null or true is null).
1042
+ ///
1043
+ /// \param[in] left left operand
1044
+ /// \param[in] right right operand
1045
+ /// \param[in] ctx the function execution context, optional
1046
+ /// \return the resulting datum
1047
+ ///
1048
+ /// \since 1.0.0
1049
+ /// \note API not yet finalized
1050
+ ARROW_EXPORT
1051
+ Result<Datum> Or(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
1052
+
1053
+ /// \brief Element-wise OR of two boolean datums with a Kleene truth table
1054
+ /// (null or true is true).
1055
+ ///
1056
+ /// \param[in] left left operand
1057
+ /// \param[in] right right operand
1058
+ /// \param[in] ctx the function execution context, optional
1059
+ /// \return the resulting datum
1060
+ ///
1061
+ /// \since 1.0.0
1062
+ /// \note API not yet finalized
1063
+ ARROW_EXPORT
1064
+ Result<Datum> KleeneOr(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
1065
+
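The difference between the null-propagating and Kleene variants is easiest to see side by side; a minimal sketch (array contents are illustrative):

    #include <vector>
    #include <arrow/api.h>
    #include <arrow/compute/api.h>

    arrow::Result<arrow::Datum> KleeneDemo() {
      arrow::BooleanBuilder lb, rb;
      ARROW_RETURN_NOT_OK(lb.AppendValues(std::vector<bool>{true, false}));
      ARROW_RETURN_NOT_OK(lb.AppendNull());  // left  = [true, false, null]
      ARROW_RETURN_NOT_OK(rb.AppendValues(std::vector<bool>{false, false, false}));
      ARROW_ASSIGN_OR_RAISE(auto left, lb.Finish());
      ARROW_ASSIGN_OR_RAISE(auto right, rb.Finish());
      // And(left, right)       -> [false, false, null]   (null propagates)
      // KleeneAnd(left, right) -> [false, false, false]  (null AND false is false)
      return arrow::compute::KleeneAnd(left, right);
    }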
1066
+ /// \brief Element-wise XOR of two boolean datums
1067
+ /// \param[in] left left operand
1068
+ /// \param[in] right right operand
1069
+ /// \param[in] ctx the function execution context, optional
1070
+ /// \return the resulting datum
1071
+ ///
1072
+ /// \since 1.0.0
1073
+ /// \note API not yet finalized
1074
+ ARROW_EXPORT
1075
+ Result<Datum> Xor(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
1076
+
1077
+ /// \brief Element-wise AND NOT of two boolean datums which always propagates nulls
1078
+ /// (null and not true is null).
1079
+ ///
1080
+ /// \param[in] left left operand
1081
+ /// \param[in] right right operand
1082
+ /// \param[in] ctx the function execution context, optional
1083
+ /// \return the resulting datum
1084
+ ///
1085
+ /// \since 3.0.0
1086
+ /// \note API not yet finalized
1087
+ ARROW_EXPORT
1088
+ Result<Datum> AndNot(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);
1089
+
1090
+ /// \brief Element-wise AND NOT of two boolean datums with a Kleene truth table
1091
+ /// (false and not null is false, null and not true is false).
1092
+ ///
1093
+ /// \param[in] left left operand
1094
+ /// \param[in] right right operand
1095
+ /// \param[in] ctx the function execution context, optional
1096
+ /// \return the resulting datum
1097
+ ///
1098
+ /// \since 3.0.0
1099
+ /// \note API not yet finalized
1100
+ ARROW_EXPORT
1101
+ Result<Datum> KleeneAndNot(const Datum& left, const Datum& right,
1102
+ ExecContext* ctx = NULLPTR);
1103
+
1104
+ /// \brief IsIn returns true for each element of `values` that is contained in
1105
+ /// `value_set`
1106
+ ///
1107
+ /// Behaviour of nulls is governed by SetLookupOptions::skip_nulls.
1108
+ ///
1109
+ /// \param[in] values array-like input to look up in value_set
1110
+ /// \param[in] options SetLookupOptions
1111
+ /// \param[in] ctx the function execution context, optional
1112
+ /// \return the resulting datum
1113
+ ///
1114
+ /// \since 1.0.0
1115
+ /// \note API not yet finalized
1116
+ ARROW_EXPORT
1117
+ Result<Datum> IsIn(const Datum& values, const SetLookupOptions& options,
1118
+ ExecContext* ctx = NULLPTR);
1119
+ ARROW_EXPORT
1120
+ Result<Datum> IsIn(const Datum& values, const Datum& value_set,
1121
+ ExecContext* ctx = NULLPTR);
1122
+
1123
+ /// \brief IndexIn examines each slot in the values against a value_set array.
1124
+ /// If the value is not found in value_set, null will be output.
1125
+ /// If found, the index of occurrence within value_set (ignoring duplicates)
1126
+ /// will be output.
1127
+ ///
1128
+ /// For example given values = [99, 42, 3, null] and
1129
+ /// value_set = [3, 3, 99], the output will be [2, null, 0, null]
1130
+ ///
1131
+ /// Behaviour of nulls is governed by SetLookupOptions::skip_nulls.
1132
+ ///
1133
+ /// \param[in] values array-like input
1134
+ /// \param[in] options SetLookupOptions
1135
+ /// \param[in] ctx the function execution context, optional
1136
+ /// \return the resulting datum
1137
+ ///
1138
+ /// \since 1.0.0
1139
+ /// \note API not yet finalized
1140
+ ARROW_EXPORT
1141
+ Result<Datum> IndexIn(const Datum& values, const SetLookupOptions& options,
1142
+ ExecContext* ctx = NULLPTR);
1143
+ ARROW_EXPORT
1144
+ Result<Datum> IndexIn(const Datum& values, const Datum& value_set,
1145
+ ExecContext* ctx = NULLPTR);
1146
+
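The IndexIn example in the comment above translates directly to code; a sketch:

    #include <arrow/api.h>
    #include <arrow/compute/api.h>

    arrow::Result<arrow::Datum> LookupDemo() {
      arrow::Int32Builder vb, sb;
      ARROW_RETURN_NOT_OK(vb.AppendValues({99, 42, 3}));
      ARROW_RETURN_NOT_OK(vb.AppendNull());              // values    = [99, 42, 3, null]
      ARROW_RETURN_NOT_OK(sb.AppendValues({3, 3, 99}));  // value_set = [3, 3, 99]
      ARROW_ASSIGN_OR_RAISE(auto values, vb.Finish());
      ARROW_ASSIGN_OR_RAISE(auto value_set, sb.Finish());
      // Yields [2, null, 0, null], matching the doc comment above.
      return arrow::compute::IndexIn(values,
                                     arrow::compute::SetLookupOptions(value_set));
    }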
1147
+ /// \brief IsValid returns true for each element of `values` that is not null,
1148
+ /// false otherwise
1149
+ ///
1150
+ /// \param[in] values input to examine for validity
1151
+ /// \param[in] ctx the function execution context, optional
1152
+ /// \return the resulting datum
1153
+ ///
1154
+ /// \since 1.0.0
1155
+ /// \note API not yet finalized
1156
+ ARROW_EXPORT
1157
+ Result<Datum> IsValid(const Datum& values, ExecContext* ctx = NULLPTR);
1158
+
1159
+ /// \brief IsNull returns true for each element of `values` that is null,
1160
+ /// false otherwise
1161
+ ///
1162
+ /// \param[in] values input to examine for nullity
1163
+ /// \param[in] options NullOptions
1164
+ /// \param[in] ctx the function execution context, optional
1165
+ /// \return the resulting datum
1166
+ ///
1167
+ /// \since 1.0.0
1168
+ /// \note API not yet finalized
1169
+ ARROW_EXPORT
1170
+ Result<Datum> IsNull(const Datum& values, NullOptions options = NullOptions::Defaults(),
1171
+ ExecContext* ctx = NULLPTR);
1172
+
1173
+ /// \brief IsNan returns true for each element of `values` that is NaN,
1174
+ /// false otherwise
1175
+ ///
1176
+ /// \param[in] values input to look for NaN
1177
+ /// \param[in] ctx the function execution context, optional
1178
+ /// \return the resulting datum
1179
+ ///
1180
+ /// \since 3.0.0
1181
+ /// \note API not yet finalized
1182
+ ARROW_EXPORT
1183
+ Result<Datum> IsNan(const Datum& values, ExecContext* ctx = NULLPTR);
1184
+
1185
+ /// \brief IfElse returns elements chosen from `left` or `right`
1186
+ /// depending on `cond`. `null` values in `cond` will be promoted to the result
1187
+ ///
1188
+ /// \param[in] cond `Boolean` condition Scalar/Array
1189
+ /// \param[in] left Scalar/Array
1190
+ /// \param[in] right Scalar/Array
1191
+ /// \param[in] ctx the function execution context, optional
1192
+ ///
1193
+ /// \return the resulting datum
1194
+ ///
1195
+ /// \since 5.0.0
1196
+ /// \note API not yet finalized
1197
+ ARROW_EXPORT
1198
+ Result<Datum> IfElse(const Datum& cond, const Datum& left, const Datum& right,
1199
+ ExecContext* ctx = NULLPTR);
1200
+
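A sketch of IfElse with a scalar fallback (the inputs are assumptions; scalar Datums broadcast against array inputs):

    #include <arrow/api.h>
    #include <arrow/compute/api.h>

    // Picks `values` where `cond` is true and -1 where it is false; a null
    // condition produces a null output slot, as documented above.
    arrow::Result<arrow::Datum> FallbackDemo(const arrow::Datum& cond,
                                             const arrow::Datum& values) {
      return arrow::compute::IfElse(cond, values, arrow::Datum(int32_t{-1}));
    }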
1201
+ /// \brief CaseWhen behaves like a switch/case or if-else if-else statement: for
1202
+ /// each row, select the first value for which the corresponding condition is
1203
+ /// true, or (if given) select the 'else' value, else emit null. Note that a
1204
+ /// null condition is the same as false.
1205
+ ///
1206
+ /// \param[in] cond Conditions (Boolean)
1207
+ /// \param[in] cases Values (any type), along with an optional 'else' value.
1208
+ /// \param[in] ctx the function execution context, optional
1209
+ ///
1210
+ /// \return the resulting datum
1211
+ ///
1212
+ /// \since 5.0.0
1213
+ /// \note API not yet finalized
1214
+ ARROW_EXPORT
1215
+ Result<Datum> CaseWhen(const Datum& cond, const std::vector<Datum>& cases,
1216
+ ExecContext* ctx = NULLPTR);
1217
+
1218
+ /// \brief Year returns year for each element of `values`
1219
+ ///
1220
+ /// \param[in] values input to extract year from
1221
+ /// \param[in] ctx the function execution context, optional
1222
+ /// \return the resulting datum
1223
+ ///
1224
+ /// \since 5.0.0
1225
+ /// \note API not yet finalized
1226
+ ARROW_EXPORT
1227
+ Result<Datum> Year(const Datum& values, ExecContext* ctx = NULLPTR);
1228
+
1229
+ /// \brief IsLeapYear returns if a year is a leap year for each element of `values`
1230
+ ///
1231
+ /// \param[in] values input to extract leap year indicator from
1232
+ /// \param[in] ctx the function execution context, optional
1233
+ /// \return the resulting datum
1234
+ ///
1235
+ /// \since 8.0.0
1236
+ /// \note API not yet finalized
1237
+ ARROW_EXPORT
1238
+ Result<Datum> IsLeapYear(const Datum& values, ExecContext* ctx = NULLPTR);
1239
+
1240
+ /// \brief Month returns month for each element of `values`.
1241
+ /// Month is encoded as January=1, December=12
1242
+ ///
1243
+ /// \param[in] values input to extract month from
1244
+ /// \param[in] ctx the function execution context, optional
1245
+ /// \return the resulting datum
1246
+ ///
1247
+ /// \since 5.0.0
1248
+ /// \note API not yet finalized
1249
+ ARROW_EXPORT
1250
+ Result<Datum> Month(const Datum& values, ExecContext* ctx = NULLPTR);
1251
+
1252
+ /// \brief Day returns day number for each element of `values`
1253
+ ///
1254
+ /// \param[in] values input to extract day from
1255
+ /// \param[in] ctx the function execution context, optional
1256
+ /// \return the resulting datum
1257
+ ///
1258
+ /// \since 5.0.0
1259
+ /// \note API not yet finalized
1260
+ ARROW_EXPORT
1261
+ Result<Datum> Day(const Datum& values, ExecContext* ctx = NULLPTR);
1262
+
1263
+ /// \brief YearMonthDay returns a struct containing the Year, Month and Day value for
1264
+ /// each element of `values`.
1265
+ ///
1266
+ /// \param[in] values input to extract (year, month, day) struct from
1267
+ /// \param[in] ctx the function execution context, optional
1268
+ /// \return the resulting datum
1269
+ ///
1270
+ /// \since 7.0.0
1271
+ /// \note API not yet finalized
1272
+ ARROW_EXPORT
1273
+ Result<Datum> YearMonthDay(const Datum& values, ExecContext* ctx = NULLPTR);
1274
+
1275
+ /// \brief DayOfWeek returns number of the day of the week value for each element of
1276
+ /// `values`.
1277
+ ///
1278
+ /// By default week starts on Monday denoted by 0 and ends on Sunday denoted
1279
+ /// by 6. Start day of the week (Monday=1, Sunday=7) and numbering base (0 or 1) can be
1280
+ /// set using DayOfWeekOptions.
1281
+ ///
1282
+ /// \param[in] values input to extract number of the day of the week from
1283
+ /// \param[in] options for setting start of the week and day numbering
1284
+ /// \param[in] ctx the function execution context, optional
1285
+ /// \return the resulting datum
1286
+ ///
1287
+ /// \since 5.0.0
1288
+ /// \note API not yet finalized
1289
+ ARROW_EXPORT Result<Datum> DayOfWeek(const Datum& values,
1290
+ DayOfWeekOptions options = DayOfWeekOptions(),
1291
+ ExecContext* ctx = NULLPTR);
1292
+
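A sketch of the options above (the timestamp input is assumed to be built elsewhere; the constructor arguments follow the documented Monday=1 ... Sunday=7 convention):

    #include <arrow/api.h>
    #include <arrow/compute/api.h>

    // Numbers the days 1..7 with the week starting on Sunday
    // (count_from_zero=false; week_start=7 means Sunday).
    arrow::Result<arrow::Datum> SundayBasedDayOfWeek(const arrow::Datum& timestamps) {
      arrow::compute::DayOfWeekOptions options(/*count_from_zero=*/false,
                                               /*week_start=*/7);
      return arrow::compute::DayOfWeek(timestamps, options);
    }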
1293
+ /// \brief DayOfYear returns number of day of the year for each element of `values`.
1294
+ /// January 1st maps to day number 1, February 1st to 32, etc.
1295
+ ///
1296
+ /// \param[in] values input to extract number of day of the year from
1297
+ /// \param[in] ctx the function execution context, optional
1298
+ /// \return the resulting datum
1299
+ ///
1300
+ /// \since 5.0.0
1301
+ /// \note API not yet finalized
1302
+ ARROW_EXPORT Result<Datum> DayOfYear(const Datum& values, ExecContext* ctx = NULLPTR);
1303
+
1304
+ /// \brief ISOYear returns ISO year number for each element of `values`.
1305
+ /// First week of an ISO year has the majority (4 or more) of its days in January.
1306
+ ///
1307
+ /// \param[in] values input to extract ISO year from
1308
+ /// \param[in] ctx the function execution context, optional
1309
+ /// \return the resulting datum
1310
+ ///
1311
+ /// \since 5.0.0
1312
+ /// \note API not yet finalized
1313
+ ARROW_EXPORT
1314
+ Result<Datum> ISOYear(const Datum& values, ExecContext* ctx = NULLPTR);
1315
+
1316
+ /// \brief USYear returns US epidemiological year number for each element of `values`.
1317
+ /// First week of US epidemiological year has the majority (4 or more) of its
1318
+ /// days in January. Last week of US epidemiological year has the year's last
1319
+ /// Wednesday in it. US epidemiological week starts on Sunday.
1320
+ ///
1321
+ /// \param[in] values input to extract US epidemiological year from
1322
+ /// \param[in] ctx the function execution context, optional
1323
+ /// \return the resulting datum
1324
+ ///
1325
+ /// \since 8.0.0
1326
+ /// \note API not yet finalized
1327
+ ARROW_EXPORT
1328
+ Result<Datum> USYear(const Datum& values, ExecContext* ctx = NULLPTR);
1329
+
1330
+ /// \brief ISOWeek returns ISO week of year number for each element of `values`.
1331
+ /// First ISO week has the majority (4 or more) of its days in January.
1332
+ /// ISO week starts on Monday. Year can have 52 or 53 weeks.
1333
+ /// Week numbering starts with 1.
1334
+ ///
1335
+ /// \param[in] values input to extract ISO week of year from
1336
+ /// \param[in] ctx the function execution context, optional
1337
+ /// \return the resulting datum
1338
+ ///
1339
+ /// \since 5.0.0
1340
+ /// \note API not yet finalized
1341
+ ARROW_EXPORT Result<Datum> ISOWeek(const Datum& values, ExecContext* ctx = NULLPTR);
1342
+
1343
+ /// \brief USWeek returns US week of year number for each element of `values`.
1344
+ /// First US week has the majority (4 or more) of its days in January.
1345
+ /// US week starts on Sunday. Year can have 52 or 53 weeks.
1346
+ /// Week numbering starts with 1.
1347
+ ///
1348
+ /// \param[in] values input to extract US week of year from
1349
+ /// \param[in] ctx the function execution context, optional
1350
+ /// \return the resulting datum
1351
+ ///
1352
+ /// \since 6.0.0
1353
+ /// \note API not yet finalized
1354
+ ARROW_EXPORT Result<Datum> USWeek(const Datum& values, ExecContext* ctx = NULLPTR);
1355
+
1356
+ /// \brief Week returns week of year number for each element of `values`.
1357
+ /// First ISO week has the majority (4 or more) of its days in January.
1358
+ /// Year can have 52 or 53 weeks. Week numbering can start with 0 or 1
1359
+ /// depending on DayOfWeekOptions.count_from_zero.
1360
+ ///
1361
+ /// \param[in] values input to extract week of year from
1362
+ /// \param[in] options for setting numbering start
1363
+ /// \param[in] ctx the function execution context, optional
1364
+ /// \return the resulting datum
1365
+ ///
1366
+ /// \since 6.0.0
1367
+ /// \note API not yet finalized
1368
+ ARROW_EXPORT Result<Datum> Week(const Datum& values, WeekOptions options = WeekOptions(),
1369
+ ExecContext* ctx = NULLPTR);
1370
+
1371
+ /// \brief ISOCalendar returns a (ISO year, ISO week, ISO day of week) struct for
1372
+ /// each element of `values`.
1373
+ /// ISO week starts on Monday denoted by 1 and ends on Sunday denoted by 7.
1374
+ ///
1375
+ /// \param[in] values input to extract ISO calendar struct from
1376
+ /// \param[in] ctx the function execution context, optional
1377
+ /// \return the resulting datum
1378
+ ///
1379
+ /// \since 5.0.0
1380
+ /// \note API not yet finalized
1381
+ ARROW_EXPORT Result<Datum> ISOCalendar(const Datum& values, ExecContext* ctx = NULLPTR);
1382
+
1383
+ /// \brief Quarter returns the quarter of year number for each element of `values`
1384
+ /// First quarter maps to 1 and fourth quarter maps to 4.
1385
+ ///
1386
+ /// \param[in] values input to extract quarter of year from
1387
+ /// \param[in] ctx the function execution context, optional
1388
+ /// \return the resulting datum
1389
+ ///
1390
+ /// \since 5.0.0
1391
+ /// \note API not yet finalized
1392
+ ARROW_EXPORT Result<Datum> Quarter(const Datum& values, ExecContext* ctx = NULLPTR);
1393
+
1394
+ /// \brief Hour returns hour value for each element of `values`
1395
+ ///
1396
+ /// \param[in] values input to extract hour from
1397
+ /// \param[in] ctx the function execution context, optional
1398
+ /// \return the resulting datum
1399
+ ///
1400
+ /// \since 5.0.0
1401
+ /// \note API not yet finalized
1402
+ ARROW_EXPORT
1403
+ Result<Datum> Hour(const Datum& values, ExecContext* ctx = NULLPTR);
1404
+
1405
+ /// \brief Minute returns minutes value for each element of `values`
1406
+ ///
1407
+ /// \param[in] values input to extract minutes from
1408
+ /// \param[in] ctx the function execution context, optional
1409
+ /// \return the resulting datum
1410
+ ///
1411
+ /// \since 5.0.0
1412
+ /// \note API not yet finalized
1413
+ ARROW_EXPORT
1414
+ Result<Datum> Minute(const Datum& values, ExecContext* ctx = NULLPTR);
1415
+
1416
+ /// \brief Second returns seconds value for each element of `values`
1417
+ ///
1418
+ /// \param[in] values input to extract seconds from
1419
+ /// \param[in] ctx the function execution context, optional
1420
+ /// \return the resulting datum
1421
+ ///
1422
+ /// \since 5.0.0
1423
+ /// \note API not yet finalized
1424
+ ARROW_EXPORT
1425
+ Result<Datum> Second(const Datum& values, ExecContext* ctx = NULLPTR);
1426
+
1427
+ /// \brief Millisecond returns number of milliseconds since the last full second
1428
+ /// for each element of `values`
1429
+ ///
1430
+ /// \param[in] values input to extract milliseconds from
1431
+ /// \param[in] ctx the function execution context, optional
1432
+ /// \return the resulting datum
1433
+ ///
1434
+ /// \since 5.0.0
1435
+ /// \note API not yet finalized
1436
+ ARROW_EXPORT
1437
+ Result<Datum> Millisecond(const Datum& values, ExecContext* ctx = NULLPTR);
1438
+
1439
+ /// \brief Microsecond returns number of microseconds since the last full millisecond
1440
+ /// for each element of `values`
1441
+ ///
1442
+ /// \param[in] values input to extract microseconds from
1443
+ /// \param[in] ctx the function execution context, optional
1444
+ /// \return the resulting datum
1445
+ ///
1446
+ /// \since 5.0.0
1447
+ /// \note API not yet finalized
1448
+ ARROW_EXPORT
1449
+ Result<Datum> Microsecond(const Datum& values, ExecContext* ctx = NULLPTR);
1450
+
1451
+ /// \brief Nanosecond returns number of nanoseconds since the last full microsecond
1452
+ /// for each element of `values`
1453
+ ///
1454
+ /// \param[in] values input to extract nanoseconds from
1455
+ /// \param[in] ctx the function execution context, optional
1456
+ /// \return the resulting datum
1457
+ ///
1458
+ /// \since 5.0.0
1459
+ /// \note API not yet finalized
1460
+ ARROW_EXPORT
1461
+ Result<Datum> Nanosecond(const Datum& values, ExecContext* ctx = NULLPTR);
1462
+
1463
+ /// \brief Subsecond returns the fraction of the second elapsed since the last full
1464
+ /// second, as a float, for each element of `values`
1465
+ ///
1466
+ /// \param[in] values input to extract subsecond from
1467
+ /// \param[in] ctx the function execution context, optional
1468
+ /// \return the resulting datum
1469
+ ///
1470
+ /// \since 5.0.0
1471
+ /// \note API not yet finalized
1472
+ ARROW_EXPORT Result<Datum> Subsecond(const Datum& values, ExecContext* ctx = NULLPTR);
1473
+
1474
+ /// \brief Format timestamps according to a format string
1475
+ ///
1476
+ /// Return formatted time strings according to the format string
1477
+ /// `StrftimeOptions::format` and to the locale specifier `StrftimeOptions::locale`.
1478
+ ///
1479
+ /// \param[in] values input timestamps
1480
+ /// \param[in] options for setting format string and locale
1481
+ /// \param[in] ctx the function execution context, optional
1482
+ /// \return the resulting datum
1483
+ ///
1484
+ /// \since 6.0.0
1485
+ /// \note API not yet finalized
1486
+ ARROW_EXPORT Result<Datum> Strftime(const Datum& values, StrftimeOptions options,
1487
+ ExecContext* ctx = NULLPTR);
1488
+
1489
+ /// \brief Parse timestamps according to a format string
1490
+ ///
1491
+ /// Return parsed timestamps according to the format string
1492
+ /// `StrptimeOptions::format` at time resolution `StrptimeOptions::unit`. Parse errors
1493
+ /// are raised or turned into nulls depending on the `StrptimeOptions::error_is_null` setting.
1494
+ ///
1495
+ /// \param[in] values input strings
1496
+ /// \param[in] options for setting format string, unit and error_is_null
1497
+ /// \param[in] ctx the function execution context, optional
1498
+ /// \return the resulting datum
1499
+ ///
1500
+ /// \since 8.0.0
1501
+ /// \note API not yet finalized
1502
+ ARROW_EXPORT Result<Datum> Strptime(const Datum& values, StrptimeOptions options,
1503
+ ExecContext* ctx = NULLPTR);
1504
+
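A round-trip sketch for the two functions above (the format strings and sample value are illustrative):

    #include <arrow/api.h>
    #include <arrow/compute/api.h>

    arrow::Result<arrow::Datum> ParseAndFormat() {
      arrow::StringBuilder b;
      ARROW_RETURN_NOT_OK(b.AppendValues({"2023-01-15 08:30:00"}));
      ARROW_ASSIGN_OR_RAISE(auto strings, b.Finish());
      // Parse to second-resolution timestamps; with error_is_null=true a
      // malformed string becomes null instead of raising an error.
      ARROW_ASSIGN_OR_RAISE(
          auto ts,
          arrow::compute::Strptime(
              strings, arrow::compute::StrptimeOptions(
                           "%Y-%m-%d %H:%M:%S", arrow::TimeUnit::SECOND,
                           /*error_is_null=*/true)));
      // Format back out, keeping only the date part.
      return arrow::compute::Strftime(ts, arrow::compute::StrftimeOptions("%Y-%m-%d"));
    }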
1505
+ /// \brief Converts naive timestamps (without a timezone) to timestamps with a
1506
+ /// timezone, interpreting each element of `values` as a local time in the
1507
+ /// specified timezone
1508
+ ///
1509
+ /// \param[in] values input to convert
1510
+ /// \param[in] options for setting source timezone, exception and ambiguous timestamp
1511
+ /// handling.
1512
+ /// \param[in] ctx the function execution context, optional
1513
+ /// \return the resulting datum
1514
+ ///
1515
+ /// \since 6.0.0
1516
+ /// \note API not yet finalized
1517
+ ARROW_EXPORT Result<Datum> AssumeTimezone(const Datum& values,
1518
+ AssumeTimezoneOptions options,
1519
+ ExecContext* ctx = NULLPTR);
1520
+
1521
+ /// \brief IsDaylightSavings returns whether daylight saving time is in effect
1522
+ /// for each element of `values`
1523
+ ///
1524
+ /// \param[in] values input to extract daylight savings indicator from
1525
+ /// \param[in] ctx the function execution context, optional
1526
+ /// \return the resulting datum
1527
+ ///
1528
+ /// \since 8.0.0
1529
+ /// \note API not yet finalized
1530
+ ARROW_EXPORT Result<Datum> IsDaylightSavings(const Datum& values,
1531
+ ExecContext* ctx = NULLPTR);
1532
+
1533
+ /// \brief LocalTimestamp converts a timezone-aware timestamp to a timezone-naive local timestamp
1534
+ ///
1535
+ /// \param[in] values input to convert to local time
1536
+ /// \param[in] ctx the function execution context, optional
1537
+ /// \return the resulting datum
1538
+ ///
1539
+ /// \since 12.0.0
1540
+ /// \note API not yet finalized
1541
+ ARROW_EXPORT Result<Datum> LocalTimestamp(const Datum& values,
1542
+ ExecContext* ctx = NULLPTR);
1543
+
1544
+ /// \brief Years Between finds the number of years between two values
1545
+ ///
1546
+ /// \param[in] left input treated as the start time
1547
+ /// \param[in] right input treated as the end time
1548
+ /// \param[in] ctx the function execution context, optional
1549
+ /// \return the resulting datum
1550
+ ///
1551
+ /// \since 8.0.0
1552
+ /// \note API not yet finalized
1553
+ ARROW_EXPORT Result<Datum> YearsBetween(const Datum& left, const Datum& right,
1554
+ ExecContext* ctx = NULLPTR);
1555
+
1556
+ /// \brief Quarters Between finds the number of quarters between two values
1557
+ ///
1558
+ /// \param[in] left input treated as the start time
1559
+ /// \param[in] right input treated as the end time
1560
+ /// \param[in] ctx the function execution context, optional
1561
+ /// \return the resulting datum
1562
+ ///
1563
+ /// \since 8.0.0
1564
+ /// \note API not yet finalized
1565
+ ARROW_EXPORT Result<Datum> QuartersBetween(const Datum& left, const Datum& right,
1566
+ ExecContext* ctx = NULLPTR);
1567
+
1568
+ /// \brief Months Between finds the number of months between two values
1569
+ ///
1570
+ /// \param[in] left input treated as the start time
1571
+ /// \param[in] right input treated as the end time
1572
+ /// \param[in] ctx the function execution context, optional
1573
+ /// \return the resulting datum
1574
+ ///
1575
+ /// \since 8.0.0
1576
+ /// \note API not yet finalized
1577
+ ARROW_EXPORT Result<Datum> MonthsBetween(const Datum& left, const Datum& right,
1578
+ ExecContext* ctx = NULLPTR);
1579
+
1580
+ /// \brief Weeks Between finds the number of weeks between two values
1581
+ ///
1582
+ /// \param[in] left input treated as the start time
1583
+ /// \param[in] right input treated as the end time
1584
+ /// \param[in] ctx the function execution context, optional
1585
+ /// \return the resulting datum
1586
+ ///
1587
+ /// \since 8.0.0
1588
+ /// \note API not yet finalized
1589
+ ARROW_EXPORT Result<Datum> WeeksBetween(const Datum& left, const Datum& right,
1590
+ ExecContext* ctx = NULLPTR);
1591
+
1592
+ /// \brief Month Day Nano Between finds the number of months, days, and nanoseconds
1593
+ /// between two values
1594
+ ///
1595
+ /// \param[in] left input treated as the start time
1596
+ /// \param[in] right input treated as the end time
1597
+ /// \param[in] ctx the function execution context, optional
1598
+ /// \return the resulting datum
1599
+ ///
1600
+ /// \since 8.0.0
1601
+ /// \note API not yet finalized
1602
+ ARROW_EXPORT Result<Datum> MonthDayNanoBetween(const Datum& left, const Datum& right,
1603
+ ExecContext* ctx = NULLPTR);
1604
+
1605
+ /// \brief DayTime Between finds the number of days and milliseconds between two values
1606
+ ///
1607
+ /// \param[in] left input treated as the start time
1608
+ /// \param[in] right input treated as the end time
1609
+ /// \param[in] ctx the function execution context, optional
1610
+ /// \return the resulting datum
1611
+ ///
1612
+ /// \since 8.0.0
1613
+ /// \note API not yet finalized
1614
+ ARROW_EXPORT Result<Datum> DayTimeBetween(const Datum& left, const Datum& right,
1615
+ ExecContext* ctx = NULLPTR);
1616
+
1617
+ /// \brief Days Between finds the number of days between two values
1618
+ ///
1619
+ /// \param[in] left input treated as the start time
1620
+ /// \param[in] right input treated as the end time
1621
+ /// \param[in] ctx the function execution context, optional
1622
+ /// \return the resulting datum
1623
+ ///
1624
+ /// \since 8.0.0
1625
+ /// \note API not yet finalized
1626
+ ARROW_EXPORT Result<Datum> DaysBetween(const Datum& left, const Datum& right,
1627
+ ExecContext* ctx = NULLPTR);
1628
+
1629
+ /// \brief Hours Between finds the number of hours between two values
1630
+ ///
1631
+ /// \param[in] left input treated as the start time
1632
+ /// \param[in] right input treated as the end time
1633
+ /// \param[in] ctx the function execution context, optional
1634
+ /// \return the resulting datum
1635
+ ///
1636
+ /// \since 8.0.0
1637
+ /// \note API not yet finalized
1638
+ ARROW_EXPORT Result<Datum> HoursBetween(const Datum& left, const Datum& right,
1639
+ ExecContext* ctx = NULLPTR);
1640
+
1641
+ /// \brief Minutes Between finds the number of minutes between two values
1642
+ ///
1643
+ /// \param[in] left input treated as the start time
1644
+ /// \param[in] right input treated as the end time
1645
+ /// \param[in] ctx the function execution context, optional
1646
+ /// \return the resulting datum
1647
+ ///
1648
+ /// \since 8.0.0
1649
+ /// \note API not yet finalized
1650
+ ARROW_EXPORT Result<Datum> MinutesBetween(const Datum& left, const Datum& right,
1651
+ ExecContext* ctx = NULLPTR);
1652
+
1653
+ /// \brief Seconds Between finds the number of seconds between two values
1654
+ ///
1655
+ /// \param[in] left input treated as the start time
1656
+ /// \param[in] right input treated as the end time
1657
+ /// \param[in] ctx the function execution context, optional
1658
+ /// \return the resulting datum
1659
+ ///
1660
+ /// \since 8.0.0
1661
+ /// \note API not yet finalized
1662
+ ARROW_EXPORT Result<Datum> SecondsBetween(const Datum& left, const Datum& right,
1663
+ ExecContext* ctx = NULLPTR);
1664
+
1665
+ /// \brief Milliseconds Between finds the number of milliseconds between two values
1666
+ ///
1667
+ /// \param[in] left input treated as the start time
1668
+ /// \param[in] right input treated as the end time
1669
+ /// \param[in] ctx the function execution context, optional
1670
+ /// \return the resulting datum
1671
+ ///
1672
+ /// \since 8.0.0
1673
+ /// \note API not yet finalized
1674
+ ARROW_EXPORT Result<Datum> MillisecondsBetween(const Datum& left, const Datum& right,
1675
+ ExecContext* ctx = NULLPTR);
1676
+
1677
+ /// \brief Microseconds Between finds the number of microseconds between two values
1678
+ ///
1679
+ /// \param[in] left input treated as the start time
1680
+ /// \param[in] right input treated as the end time
1681
+ /// \param[in] ctx the function execution context, optional
1682
+ /// \return the resulting datum
1683
+ ///
1684
+ /// \since 8.0.0
1685
+ /// \note API not yet finalized
1686
+ ARROW_EXPORT Result<Datum> MicrosecondsBetween(const Datum& left, const Datum& right,
1687
+ ExecContext* ctx = NULLPTR);
1688
+
1689
+ /// \brief Nanoseconds Between finds the number of nanoseconds between two values
1690
+ ///
1691
+ /// \param[in] left input treated as the start time
1692
+ /// \param[in] right input treated as the end time
1693
+ /// \param[in] ctx the function execution context, optional
1694
+ /// \return the resulting datum
1695
+ ///
1696
+ /// \since 8.0.0
1697
+ /// \note API not yet finalized
1698
+ ARROW_EXPORT Result<Datum> NanosecondsBetween(const Datum& left, const Datum& right,
1699
+ ExecContext* ctx = NULLPTR);
1700
+
1701
+ /// \brief Finds either the FIRST, LAST, or ALL items with a key that matches the given
1702
+ /// query key in a map.
1703
+ ///
1704
+ /// Returns an array of items for FIRST and LAST, and an array of lists of items for ALL.
1705
+ ///
1706
+ /// \param[in] map to look in
1707
+ /// \param[in] options to pass a query key and choose which matching keys to return
1708
+ /// (FIRST, LAST or ALL)
1709
+ /// \param[in] ctx the function execution context, optional
1710
+ /// \return the resulting datum
1711
+ ///
1712
+ /// \since 8.0.0
1713
+ /// \note API not yet finalized
1714
+ ARROW_EXPORT Result<Datum> MapLookup(const Datum& map, MapLookupOptions options,
1715
+ ExecContext* ctx = NULLPTR);
1716
+ } // namespace compute
1717
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h ADDED
@@ -0,0 +1,697 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <utility>
22
+
23
+ #include "arrow/compute/function_options.h"
24
+ #include "arrow/compute/ordering.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/type_fwd.h"
27
+
28
+ namespace arrow {
29
+ namespace compute {
30
+
31
+ class ExecContext;
32
+
33
+ /// \addtogroup compute-concrete-options
34
+ /// @{
35
+
36
+ class ARROW_EXPORT FilterOptions : public FunctionOptions {
37
+ public:
38
+ /// Configure the action taken when a slot of the selection mask is null
39
+ enum NullSelectionBehavior {
40
+ /// The corresponding filtered value will be removed in the output.
41
+ DROP,
42
+ /// The corresponding filtered value will be null in the output.
43
+ EMIT_NULL,
44
+ };
45
+
46
+ explicit FilterOptions(NullSelectionBehavior null_selection = DROP);
47
+ static constexpr char const kTypeName[] = "FilterOptions";
48
+ static FilterOptions Defaults() { return FilterOptions(); }
49
+
50
+ NullSelectionBehavior null_selection_behavior = DROP;
51
+ };
52
+
53
+ class ARROW_EXPORT TakeOptions : public FunctionOptions {
54
+ public:
55
+ explicit TakeOptions(bool boundscheck = true);
56
+ static constexpr char const kTypeName[] = "TakeOptions";
57
+ static TakeOptions BoundsCheck() { return TakeOptions(true); }
58
+ static TakeOptions NoBoundsCheck() { return TakeOptions(false); }
59
+ static TakeOptions Defaults() { return BoundsCheck(); }
60
+
61
+ bool boundscheck = true;
62
+ };
63
+
64
+ /// \brief Options for the dictionary encode function
65
+ class ARROW_EXPORT DictionaryEncodeOptions : public FunctionOptions {
66
+ public:
67
+ /// Configure how null values will be encoded
68
+ enum NullEncodingBehavior {
69
+ /// The null value will be added to the dictionary with a proper index.
70
+ ENCODE,
71
+ /// The null value will be masked in the indices array.
72
+ MASK
73
+ };
74
+
75
+ explicit DictionaryEncodeOptions(NullEncodingBehavior null_encoding = MASK);
76
+ static constexpr char const kTypeName[] = "DictionaryEncodeOptions";
77
+ static DictionaryEncodeOptions Defaults() { return DictionaryEncodeOptions(); }
78
+
79
+ NullEncodingBehavior null_encoding_behavior = MASK;
80
+ };
81
+
82
+ /// \brief Options for the run-end encode function
83
+ class ARROW_EXPORT RunEndEncodeOptions : public FunctionOptions {
84
+ public:
85
+ explicit RunEndEncodeOptions(std::shared_ptr<DataType> run_end_type = int32());
86
+ static constexpr char const kTypeName[] = "RunEndEncodeOptions";
87
+ static RunEndEncodeOptions Defaults() { return RunEndEncodeOptions(); }
88
+
89
+ std::shared_ptr<DataType> run_end_type;
90
+ };
91
+
92
+ class ARROW_EXPORT ArraySortOptions : public FunctionOptions {
93
+ public:
94
+ explicit ArraySortOptions(SortOrder order = SortOrder::Ascending,
95
+ NullPlacement null_placement = NullPlacement::AtEnd);
96
+ static constexpr char const kTypeName[] = "ArraySortOptions";
97
+ static ArraySortOptions Defaults() { return ArraySortOptions(); }
98
+
99
+ /// Sorting order
100
+ SortOrder order;
101
+ /// Whether nulls and NaNs are placed at the start or at the end
102
+ NullPlacement null_placement;
103
+ };
104
+
105
+ class ARROW_EXPORT SortOptions : public FunctionOptions {
106
+ public:
107
+ explicit SortOptions(std::vector<SortKey> sort_keys = {},
108
+ NullPlacement null_placement = NullPlacement::AtEnd);
109
+ explicit SortOptions(const Ordering& ordering);
110
+ static constexpr char const kTypeName[] = "SortOptions";
111
+ static SortOptions Defaults() { return SortOptions(); }
112
+ /// Convenience method to create an Ordering from these SortOptions
113
+ ///
114
+ /// Note: Both classes contain the exact same information. However,
115
+ /// SortOptions should only be used in a "function options" context while Ordering
116
+ /// is used more generally.
117
+ Ordering AsOrdering() && { return Ordering(std::move(sort_keys), null_placement); }
118
+ Ordering AsOrdering() const& { return Ordering(sort_keys, null_placement); }
119
+
120
+ /// Column key(s) to order by and how to order by these sort keys.
121
+ std::vector<SortKey> sort_keys;
122
+ /// Whether nulls and NaNs are placed at the start or at the end
123
+ NullPlacement null_placement;
124
+ };
125
+
126
+ /// \brief SelectK options
127
+ class ARROW_EXPORT SelectKOptions : public FunctionOptions {
128
+ public:
129
+ explicit SelectKOptions(int64_t k = -1, std::vector<SortKey> sort_keys = {});
130
+ static constexpr char const kTypeName[] = "SelectKOptions";
131
+ static SelectKOptions Defaults() { return SelectKOptions(); }
132
+
133
+ static SelectKOptions TopKDefault(int64_t k, std::vector<std::string> key_names = {}) {
134
+ std::vector<SortKey> keys;
135
+ for (const auto& name : key_names) {
136
+ keys.emplace_back(SortKey(name, SortOrder::Descending));
137
+ }
138
+ if (key_names.empty()) {
139
+ keys.emplace_back(SortKey("not-used", SortOrder::Descending));
140
+ }
141
+ return SelectKOptions{k, keys};
142
+ }
143
+ static SelectKOptions BottomKDefault(int64_t k,
144
+ std::vector<std::string> key_names = {}) {
145
+ std::vector<SortKey> keys;
146
+ for (const auto& name : key_names) {
147
+ keys.emplace_back(SortKey(name, SortOrder::Ascending));
148
+ }
149
+ if (key_names.empty()) {
150
+ keys.emplace_back(SortKey("not-used", SortOrder::Ascending));
151
+ }
152
+ return SelectKOptions{k, keys};
153
+ }
154
+
155
+ /// The number of elements to keep (`k`).
156
+ int64_t k;
157
+ /// Column key(s) to order by and how to order by these sort keys.
158
+ std::vector<SortKey> sort_keys;
159
+ };
160
+
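A sketch of building the options above for a top-k query (the consuming kernel, e.g. SelectKUnstable declared later in this header, is assumed; "score" is a hypothetical column name):

    #include <arrow/compute/api_vector.h>

    // Keeps the 5 rows with the largest "score" values; TopKDefault fills in
    // descending sort keys for the given column names.
    arrow::compute::SelectKOptions top5 =
        arrow::compute::SelectKOptions::TopKDefault(/*k=*/5, {"score"});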
161
+ /// \brief Rank options
162
+ class ARROW_EXPORT RankOptions : public FunctionOptions {
163
+ public:
164
+ /// Configure how ties between equal values are handled
165
+ enum Tiebreaker {
166
+ /// Ties get the smallest possible rank in sorted order.
167
+ Min,
168
+ /// Ties get the largest possible rank in sorted order.
169
+ Max,
170
+ /// Ranks are assigned in order of when ties appear in the input.
171
+ /// This ensures the ranks are a stable permutation of the input.
172
+ First,
173
+ /// The ranks span a dense [1, M] interval where M is the number
174
+ /// of distinct values in the input.
175
+ Dense
176
+ };
177
+
178
+ explicit RankOptions(std::vector<SortKey> sort_keys = {},
179
+ NullPlacement null_placement = NullPlacement::AtEnd,
180
+ Tiebreaker tiebreaker = RankOptions::First);
181
+ /// Convenience constructor for array inputs
182
+ explicit RankOptions(SortOrder order,
183
+ NullPlacement null_placement = NullPlacement::AtEnd,
184
+ Tiebreaker tiebreaker = RankOptions::First)
185
+ : RankOptions({SortKey("", order)}, null_placement, tiebreaker) {}
186
+
187
+ static constexpr char const kTypeName[] = "RankOptions";
188
+ static RankOptions Defaults() { return RankOptions(); }
189
+
190
+ /// Column key(s) to order by and how to order by these sort keys.
191
+ std::vector<SortKey> sort_keys;
192
+ /// Whether nulls and NaNs are placed at the start or at the end
193
+ NullPlacement null_placement;
194
+ /// Tiebreaker for dealing with equal values in ranks
195
+ Tiebreaker tiebreaker;
196
+ };
197
+
198
+ /// \brief Partitioning options for NthToIndices
199
+ class ARROW_EXPORT PartitionNthOptions : public FunctionOptions {
200
+ public:
201
+ explicit PartitionNthOptions(int64_t pivot,
202
+ NullPlacement null_placement = NullPlacement::AtEnd);
203
+ PartitionNthOptions() : PartitionNthOptions(0) {}
204
+ static constexpr char const kTypeName[] = "PartitionNthOptions";
205
+
206
+ /// The index into the equivalent sorted array of the partition pivot element.
207
+ int64_t pivot;
208
+ /// Whether nulls and NaNs are partitioned at the start or at the end
209
+ NullPlacement null_placement;
210
+ };
211
+
+ /// \brief Options for cumulative functions
+ /// \note Also aliased as CumulativeSumOptions for backward compatibility
+ class ARROW_EXPORT CumulativeOptions : public FunctionOptions {
+  public:
+   explicit CumulativeOptions(bool skip_nulls = false);
+   explicit CumulativeOptions(double start, bool skip_nulls = false);
+   explicit CumulativeOptions(std::shared_ptr<Scalar> start, bool skip_nulls = false);
+   static constexpr char const kTypeName[] = "CumulativeOptions";
+   static CumulativeOptions Defaults() { return CumulativeOptions(); }
+
+   /// Optional starting value for the cumulative operation; the default depends
+   /// on the operation and input type.
+   /// - sum: 0
+   /// - prod: 1
+   /// - min: maximum of the input type
+   /// - max: minimum of the input type
+   /// - mean: start is ignored because it has no meaning for mean
+   std::optional<std::shared_ptr<Scalar>> start;
+
+   /// If true, nulls in the input are ignored and produce a corresponding null output.
+   /// When false, the first null encountered is propagated through the remaining output.
+   bool skip_nulls = false;
+ };
+ using CumulativeSumOptions = CumulativeOptions;  // For backward compatibility
+
+ /// \brief Options for pairwise functions
+ class ARROW_EXPORT PairwiseOptions : public FunctionOptions {
+  public:
+   explicit PairwiseOptions(int64_t periods = 1);
+   static constexpr char const kTypeName[] = "PairwiseOptions";
+   static PairwiseOptions Defaults() { return PairwiseOptions(); }
+
+   /// Periods to shift for applying the binary operation, accepts negative values.
+   int64_t periods = 1;
+ };
+
+ /// @}
+
+ /// \brief Filter with a boolean selection filter
+ ///
+ /// The output will be populated with values from the input at positions
+ /// where the selection filter is not 0. Nulls in the filter will be handled
+ /// based on options.null_selection_behavior.
+ ///
+ /// For example given values = ["a", "b", "c", null, "e", "f"] and
+ /// filter = [0, 1, 1, 0, null, 1], the output will be
+ /// (null_selection_behavior == DROP)      = ["b", "c", "f"]
+ /// (null_selection_behavior == EMIT_NULL) = ["b", "c", null, "f"]
+ ///
+ /// \param[in] values array to filter
+ /// \param[in] filter indicates which values should be kept in the output
+ /// \param[in] options configures null_selection_behavior
+ /// \param[in] ctx the function execution context, optional
+ /// \return the resulting datum
+ ARROW_EXPORT
+ Result<Datum> Filter(const Datum& values, const Datum& filter,
+                      const FilterOptions& options = FilterOptions::Defaults(),
+                      ExecContext* ctx = NULLPTR);
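+
+ // A minimal usage sketch (assumes `values` and `filter` are
+ // std::shared_ptr<arrow::Array> instances built by the caller, e.g. via an
+ // ArrayBuilder; names are illustrative only):
+ //
+ //   namespace cp = arrow::compute;
+ //   cp::FilterOptions opts(cp::FilterOptions::EMIT_NULL);
+ //   arrow::Result<arrow::Datum> out =
+ //       cp::Filter(arrow::Datum(values), arrow::Datum(filter), opts);
+ //   if (out.ok()) {
+ //     std::shared_ptr<arrow::Array> filtered = out->make_array();
+ //   }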
+
+ namespace internal {
+
+ // These internal functions are implemented in kernels/vector_selection.cc
+
+ /// \brief Return the number of selected indices in the boolean filter
+ ///
+ /// \param filter a plain or run-end encoded boolean array with or without nulls
+ /// \param null_selection how to handle nulls in the filter
+ ARROW_EXPORT
+ int64_t GetFilterOutputSize(const ArraySpan& filter,
+                             FilterOptions::NullSelectionBehavior null_selection);
+
+ /// \brief Compute uint64 selection indices for use with Take given a boolean
+ /// filter
+ ///
+ /// \param filter a plain or run-end encoded boolean array with or without nulls
+ /// \param null_selection how to handle nulls in the filter
+ ARROW_EXPORT
+ Result<std::shared_ptr<ArrayData>> GetTakeIndices(
+     const ArraySpan& filter, FilterOptions::NullSelectionBehavior null_selection,
+     MemoryPool* memory_pool = default_memory_pool());
+
+ }  // namespace internal
+
+ /// \brief ReplaceWithMask replaces each value in the array corresponding
+ /// to a true value in the mask with the next element from `replacements`.
+ ///
+ /// \param[in] values Array input to replace
+ /// \param[in] mask Array or Scalar of Boolean mask values
+ /// \param[in] replacements The replacement values to draw from. There must
+ /// be as many replacement values as true values in the mask.
+ /// \param[in] ctx the function execution context, optional
+ ///
+ /// \return the resulting datum
+ ///
+ /// \since 5.0.0
+ /// \note API not yet finalized
+ ARROW_EXPORT
+ Result<Datum> ReplaceWithMask(const Datum& values, const Datum& mask,
+                               const Datum& replacements, ExecContext* ctx = NULLPTR);
+
+ /// \brief FillNullForward fills null values in the forward direction
+ ///
+ /// The output array will be of the same type as the input values
+ /// array, with null values replaced in the forward direction.
+ ///
+ /// For example given values = ["a", "b", "c", null, null, "f"],
+ /// the output will be = ["a", "b", "c", "c", "c", "f"]
+ ///
+ /// \param[in] values datum whose null values to fill
+ /// \param[in] ctx the function execution context, optional
+ /// \return the resulting datum
+ ARROW_EXPORT
+ Result<Datum> FillNullForward(const Datum& values, ExecContext* ctx = NULLPTR);
+
+ /// \brief FillNullBackward fills null values in the backward direction
+ ///
+ /// The output array will be of the same type as the input values
+ /// array, with null values replaced in the backward direction.
+ ///
+ /// For example given values = ["a", "b", "c", null, null, "f"],
+ /// the output will be = ["a", "b", "c", "f", "f", "f"]
+ ///
+ /// \param[in] values datum whose null values to fill
+ /// \param[in] ctx the function execution context, optional
+ /// \return the resulting datum
+ ARROW_EXPORT
+ Result<Datum> FillNullBackward(const Datum& values, ExecContext* ctx = NULLPTR);
+
+ /// \brief Take from an array of values at indices in another array
+ ///
+ /// The output array will be of the same type as the input values
+ /// array, with elements taken from the values array at the given
+ /// indices. If an index is null then the taken element will be null.
+ ///
+ /// For example given values = ["a", "b", "c", null, "e", "f"] and
+ /// indices = [2, 1, null, 3], the output will be
+ /// = [values[2], values[1], null, values[3]]
+ /// = ["c", "b", null, null]
+ ///
+ /// \param[in] values datum from which to take
+ /// \param[in] indices which values to take
+ /// \param[in] options options
+ /// \param[in] ctx the function execution context, optional
+ /// \return the resulting datum
+ ARROW_EXPORT
+ Result<Datum> Take(const Datum& values, const Datum& indices,
+                    const TakeOptions& options = TakeOptions::Defaults(),
+                    ExecContext* ctx = NULLPTR);
+
+ /// \brief Take with Array inputs and output
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> Take(const Array& values, const Array& indices,
+                                     const TakeOptions& options = TakeOptions::Defaults(),
+                                     ExecContext* ctx = NULLPTR);
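+
+ // Usage sketch for the Array overload (assumes `values` and `indices` are
+ // arrow::Array references built by the caller; `indices` must be an
+ // integer-typed array):
+ //
+ //   arrow::Result<std::shared_ptr<arrow::Array>> taken =
+ //       arrow::compute::Take(values, indices);
+ //   // With values = ["a", "b", "c", null, "e", "f"] and indices = [2, 1, null, 3]
+ //   // this yields ["c", "b", null, null], per the example above.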
+
+ /// \brief Drop Null from an array of values
+ ///
+ /// The output array will be of the same type as the input values
+ /// array, with elements taken from the values array without nulls.
+ ///
+ /// For example given values = ["a", "b", "c", null, "e", "f"],
+ /// the output will be = ["a", "b", "c", "e", "f"]
+ ///
+ /// \param[in] values datum from which to take
+ /// \param[in] ctx the function execution context, optional
+ /// \return the resulting datum
+ ARROW_EXPORT
+ Result<Datum> DropNull(const Datum& values, ExecContext* ctx = NULLPTR);
+
+ /// \brief DropNull with Array inputs and output
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> DropNull(const Array& values, ExecContext* ctx = NULLPTR);
+
+ /// \brief Return indices that partition an array around the n-th sorted element.
+ ///
+ /// Find the index of the n-th (0-based) smallest value and perform an indirect
+ /// partition of the array around that element. Output indices[0 ~ n-1]
+ /// hold values no greater than the n-th element, and indices[n+1 ~ end]
+ /// hold values no less than the n-th element. Elements in each partition
+ /// are not sorted. Nulls will be partitioned to the end of the output.
+ /// Output is not guaranteed to be stable.
+ ///
+ /// \param[in] values array to be partitioned
+ /// \param[in] n pivot array around sorted n-th element
+ /// \param[in] ctx the function execution context, optional
+ /// \return offsets indices that would partition an array
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> NthToIndices(const Array& values, int64_t n,
+                                             ExecContext* ctx = NULLPTR);
+
+ /// \brief Return indices that partition an array around the n-th sorted element.
+ ///
+ /// This overload takes a PartitionNthOptions specifying the pivot index
+ /// and the null handling.
+ ///
+ /// \param[in] values array to be partitioned
+ /// \param[in] options options including pivot index and null handling
+ /// \param[in] ctx the function execution context, optional
+ /// \return offsets indices that would partition an array
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> NthToIndices(const Array& values,
+                                             const PartitionNthOptions& options,
+                                             ExecContext* ctx = NULLPTR);
+
+ /// \brief Return indices that would select the first `k` elements.
+ ///
+ /// Perform an indirect sort of the datum, keeping only the first `k` elements. The output
+ /// array will contain indices such that the item indicated by the k-th index will be in
+ /// the position it would be if the datum were sorted by `options.sort_keys`. However,
+ /// indices of null values will not be part of the output. The sort is not guaranteed to
+ /// be stable.
+ ///
+ /// \param[in] datum datum to be partitioned
+ /// \param[in] options options
+ /// \param[in] ctx the function execution context, optional
+ /// \return a datum with the same schema as the input
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> SelectKUnstable(const Datum& datum,
+                                                const SelectKOptions& options,
+                                                ExecContext* ctx = NULLPTR);
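+
+ // Usage sketch for a bottom-k query on a table (the column name "score" is
+ // hypothetical; `table` is a Datum wrapping an arrow::Table):
+ //
+ //   auto options = arrow::compute::SelectKOptions::BottomKDefault(
+ //       /*k=*/5, {"score"});
+ //   arrow::Result<std::shared_ptr<arrow::Array>> bottom5_indices =
+ //       arrow::compute::SelectKUnstable(table, options);
+ //   // The resulting indices can then be passed to Take to materialize rows.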
+
+ /// \brief Return the indices that would sort an array.
+ ///
+ /// Perform an indirect sort of array. The output array will contain
+ /// indices that would sort an array, which would be the same length
+ /// as input. Nulls will be stably partitioned to the end of the output
+ /// regardless of order.
+ ///
+ /// For example given array = [null, 1, 3.3, null, 2, 5.3] and
+ /// order = SortOrder::Descending, the output will be [5, 2, 4, 1, 0, 3].
+ ///
+ /// \param[in] array array to sort
+ /// \param[in] order ascending or descending
+ /// \param[in] ctx the function execution context, optional
+ /// \return offsets indices that would sort an array
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> SortIndices(const Array& array,
+                                            SortOrder order = SortOrder::Ascending,
+                                            ExecContext* ctx = NULLPTR);
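+
+ // Sketch of the common sort-via-indices pattern (assumes `array` is an
+ // arrow::Array reference; names are illustrative only):
+ //
+ //   namespace cp = arrow::compute;
+ //   arrow::Result<std::shared_ptr<arrow::Array>> sort_ids =
+ //       cp::SortIndices(array, cp::SortOrder::Descending);
+ //   if (sort_ids.ok()) {
+ //     // Materialize the sorted array by gathering at the computed indices.
+ //     auto sorted = cp::Take(array, **sort_ids);
+ //   }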
+
+ /// \brief Return the indices that would sort an array.
+ ///
+ /// This overload takes an ArraySortOptions specifying the sort order
+ /// and the null handling.
+ ///
+ /// \param[in] array array to sort
+ /// \param[in] options options including sort order and null handling
+ /// \param[in] ctx the function execution context, optional
+ /// \return offsets indices that would sort an array
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> SortIndices(const Array& array,
+                                            const ArraySortOptions& options,
+                                            ExecContext* ctx = NULLPTR);
+
+ /// \brief Return the indices that would sort a chunked array.
+ ///
+ /// Perform an indirect sort of chunked array. The output array will
+ /// contain indices that would sort a chunked array, which would be
+ /// the same length as input. Nulls will be stably partitioned to the
+ /// end of the output regardless of order.
+ ///
+ /// For example given chunked_array = [[null, 1], [3.3], [null, 2, 5.3]]
+ /// and order = SortOrder::Descending, the output will be [5, 2, 4, 1, 0, 3].
+ ///
+ /// \param[in] chunked_array chunked array to sort
+ /// \param[in] order ascending or descending
+ /// \param[in] ctx the function execution context, optional
+ /// \return offsets indices that would sort an array
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
+                                            SortOrder order = SortOrder::Ascending,
+                                            ExecContext* ctx = NULLPTR);
+
+ /// \brief Return the indices that would sort a chunked array.
+ ///
+ /// This overload takes an ArraySortOptions specifying the sort order
+ /// and the null handling.
+ ///
+ /// \param[in] chunked_array chunked array to sort
+ /// \param[in] options options including sort order and null handling
+ /// \param[in] ctx the function execution context, optional
+ /// \return offsets indices that would sort an array
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
+                                            const ArraySortOptions& options,
+                                            ExecContext* ctx = NULLPTR);
+
+ /// \brief Return the indices that would sort an input in the
+ /// specified order. Input is one of array, chunked array, record batch
+ /// or table.
+ ///
+ /// Perform an indirect sort of input. The output array will contain
+ /// indices that would sort an input, which would be the same length
+ /// as input. Nulls will be stably partitioned to the start or to the end
+ /// of the output depending on SortOptions::null_placement.
+ ///
+ /// For example given input (table) = {
+ /// "column1": [[null,   1], [   3, null, 2, 1]],
+ /// "column2": [[   5],      [3, null, null, 5, 5]],
+ /// } and options = {
+ /// {"column1", SortOrder::Ascending},
+ /// {"column2", SortOrder::Descending},
+ /// }, the output will be [5, 1, 4, 2, 0, 3].
+ ///
+ /// \param[in] datum array, chunked array, record batch or table to sort
+ /// \param[in] options options
+ /// \param[in] ctx the function execution context, optional
+ /// \return offsets indices that would sort a table
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> SortIndices(const Datum& datum, const SortOptions& options,
+                                            ExecContext* ctx = NULLPTR);
+
+ /// \brief Compute unique elements from an array-like object
+ ///
+ /// Note that if a null occurs in the input, it will NOT be included in the output.
+ ///
+ /// \param[in] datum array-like input
+ /// \param[in] ctx the function execution context, optional
+ /// \return result as Array
+ ///
+ /// \since 1.0.0
+ /// \note API not yet finalized
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> Unique(const Datum& datum, ExecContext* ctx = NULLPTR);
+
+ // Constants for accessing the output of ValueCounts
+ ARROW_EXPORT extern const char kValuesFieldName[];
+ ARROW_EXPORT extern const char kCountsFieldName[];
+ ARROW_EXPORT extern const int32_t kValuesFieldIndex;
+ ARROW_EXPORT extern const int32_t kCountsFieldIndex;
+
+ /// \brief Return counts of unique elements from an array-like object.
+ ///
+ /// Note that the counts do not include counts for nulls in the array. These can be
+ /// obtained separately from metadata.
+ ///
+ /// For floating point arrays there is no attempt to normalize -0.0, 0.0 and NaN values,
+ /// which can lead to unexpected results if the input Array has these values.
+ ///
+ /// \param[in] value array-like input
+ /// \param[in] ctx the function execution context, optional
+ /// \return counts An array of <input type "Values", int64_t "Counts"> structs.
+ ///
+ /// \since 1.0.0
+ /// \note API not yet finalized
+ ARROW_EXPORT
+ Result<std::shared_ptr<StructArray>> ValueCounts(const Datum& value,
+                                                  ExecContext* ctx = NULLPTR);
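+
+ // Usage sketch (assumes `arr` is a Datum-convertible array; the field
+ // accessors use the kValuesFieldIndex/kCountsFieldIndex constants declared
+ // above):
+ //
+ //   arrow::Result<std::shared_ptr<arrow::StructArray>> counts =
+ //       arrow::compute::ValueCounts(arrow::Datum(arr));
+ //   if (counts.ok()) {
+ //     auto values = (*counts)->field(arrow::compute::kValuesFieldIndex);
+ //     auto occurrences = (*counts)->field(arrow::compute::kCountsFieldIndex);
+ //   }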
+
+ /// \brief Dictionary-encode values in an array-like object
+ ///
+ /// Any nulls encountered in the dictionary will be handled according to the
+ /// specified null encoding behavior.
+ ///
+ /// For example, given values ["a", "b", null, "a", null] the output will be
+ /// (null_encoding == ENCODE) Indices: [0, 1, 2, 0, 2] / Dict: ["a", "b", null]
+ /// (null_encoding == MASK)   Indices: [0, 1, null, 0, null] / Dict: ["a", "b"]
+ ///
+ /// If the input is already dictionary encoded this function is a no-op unless
+ /// it needs to modify the null_encoding (TODO)
+ ///
+ /// \param[in] data array-like input
+ /// \param[in] options configures null encoding behavior
+ /// \param[in] ctx the function execution context, optional
+ /// \return result with same shape and type as input
+ ///
+ /// \since 1.0.0
+ /// \note API not yet finalized
+ ARROW_EXPORT
+ Result<Datum> DictionaryEncode(
+     const Datum& data,
+     const DictionaryEncodeOptions& options = DictionaryEncodeOptions::Defaults(),
+     ExecContext* ctx = NULLPTR);
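+
+ // Usage sketch (assumes `arr` is a std::shared_ptr<arrow::Array> and that
+ // DictionaryEncodeOptions is constructible from its null-encoding enum, as
+ // the doc example above suggests; MASK matches that example):
+ //
+ //   arrow::compute::DictionaryEncodeOptions opts(
+ //       arrow::compute::DictionaryEncodeOptions::MASK);
+ //   arrow::Result<arrow::Datum> encoded =
+ //       arrow::compute::DictionaryEncode(arrow::Datum(arr), opts);
+ //   // encoded->make_array() is a DictionaryArray of indices plus dictionary.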
+
+ /// \brief Run-end-encode values in an array-like object
+ ///
+ /// The returned run-end encoded type uses the same value type as the input
+ /// and the run-end type defined in the options.
+ ///
+ /// \param[in] value array-like input
+ /// \param[in] options configures encoding behavior
+ /// \param[in] ctx the function execution context, optional
+ /// \return result with same shape but run-end encoded
+ ///
+ /// \since 12.0.0
+ /// \note API not yet finalized
+ ARROW_EXPORT
+ Result<Datum> RunEndEncode(
+     const Datum& value,
+     const RunEndEncodeOptions& options = RunEndEncodeOptions::Defaults(),
+     ExecContext* ctx = NULLPTR);
+
+ /// \brief Decode a Run-End Encoded array to a plain array
+ ///
+ /// The output data type is the same as the values array type of the run-end
+ /// encoded input.
+ ///
+ /// \param[in] value run-end-encoded input
+ /// \param[in] ctx the function execution context, optional
+ /// \return plain array resulting from decoding the run-end encoded input
+ ///
+ /// \since 12.0.0
+ /// \note API not yet finalized
+ ARROW_EXPORT
+ Result<Datum> RunEndDecode(const Datum& value, ExecContext* ctx = NULLPTR);
+
+ /// \brief Compute the cumulative sum of an array-like object
+ ///
+ /// \param[in] values array-like input
+ /// \param[in] options configures cumulative sum behavior
+ /// \param[in] check_overflow whether to check for overflow; if true, return Invalid
+ /// status on overflow, otherwise wrap around on overflow
+ /// \param[in] ctx the function execution context, optional
+ ARROW_EXPORT
+ Result<Datum> CumulativeSum(
+     const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
+     bool check_overflow = false, ExecContext* ctx = NULLPTR);
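+
+ // Usage sketch (assumes `arr` is an array-like Datum input; the start and
+ // skip_nulls arguments map onto the CumulativeOptions constructors declared
+ // earlier in this header):
+ //
+ //   arrow::compute::CumulativeOptions opts(/*start=*/100.0, /*skip_nulls=*/true);
+ //   arrow::Result<arrow::Datum> cumsum = arrow::compute::CumulativeSum(
+ //       arrow::Datum(arr), opts, /*check_overflow=*/true);
+ //   // With arr = [1, 2, null, 4] this yields [101, 103, null, 107].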
+
+ /// \brief Compute the cumulative product of an array-like object
+ ///
+ /// \param[in] values array-like input
+ /// \param[in] options configures cumulative prod behavior
+ /// \param[in] check_overflow whether to check for overflow; if true, return Invalid
+ /// status on overflow, otherwise wrap around on overflow
+ /// \param[in] ctx the function execution context, optional
+ ARROW_EXPORT
+ Result<Datum> CumulativeProd(
+     const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
+     bool check_overflow = false, ExecContext* ctx = NULLPTR);
+
+ /// \brief Compute the cumulative max of an array-like object
+ ///
+ /// \param[in] values array-like input
+ /// \param[in] options configures cumulative max behavior
+ /// \param[in] ctx the function execution context, optional
+ ARROW_EXPORT
+ Result<Datum> CumulativeMax(
+     const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
+     ExecContext* ctx = NULLPTR);
+
+ /// \brief Compute the cumulative min of an array-like object
+ ///
+ /// \param[in] values array-like input
+ /// \param[in] options configures cumulative min behavior
+ /// \param[in] ctx the function execution context, optional
+ ARROW_EXPORT
+ Result<Datum> CumulativeMin(
+     const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
+     ExecContext* ctx = NULLPTR);
+
+ /// \brief Compute the cumulative mean of an array-like object
+ ///
+ /// \param[in] values array-like input
+ /// \param[in] options configures cumulative mean behavior; `start` is ignored
+ /// \param[in] ctx the function execution context, optional
+ ARROW_EXPORT
+ Result<Datum> CumulativeMean(
+     const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
+     ExecContext* ctx = NULLPTR);
+
+ /// \brief Return the first order difference of an array.
+ ///
+ /// Computes the first order difference of an array, i.e.
+ ///   output[i] = input[i] - input[i - p]  if i >= p
+ ///   output[i] = null                     otherwise
+ /// where p is the period. For example, with p = 1,
+ ///   Diff([1, 4, 9, 10, 15]) = [null, 3, 5, 1, 5].
+ /// With p = 2,
+ ///   Diff([1, 4, 9, 10, 15]) = [null, null, 8, 6, 6]
+ /// p can also be negative, in which case the diff is computed in
+ /// the opposite direction.
+ /// \param[in] array array input
+ /// \param[in] options options, specifying the period
+ /// \param[in] check_overflow whether to return error on overflow
+ /// \param[in] ctx the function execution context, optional
+ /// \return result as array
+ ARROW_EXPORT
+ Result<std::shared_ptr<Array>> PairwiseDiff(const Array& array,
+                                             const PairwiseOptions& options,
+                                             bool check_overflow = false,
+                                             ExecContext* ctx = NULLPTR);
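+
+ // Usage sketch matching the p = 2 example above (assumes `array` is a
+ // numeric arrow::Array reference):
+ //
+ //   arrow::compute::PairwiseOptions opts(/*periods=*/2);
+ //   arrow::Result<std::shared_ptr<arrow::Array>> diffed =
+ //       arrow::compute::PairwiseDiff(array, opts);
+ //   // Diff([1, 4, 9, 10, 15]) with p = 2 yields [null, null, 8, 6, 6].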
+
+ }  // namespace compute
+ }  // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h ADDED
@@ -0,0 +1,489 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // NOTE: API is EXPERIMENTAL and will change without going through a
+ // deprecation cycle
+
+ #pragma once
+
+ #include <atomic>
+ #include <cstdint>
+ #include <limits>
+ #include <memory>
+ #include <optional>
+ #include <string>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/array/data.h"
+ #include "arrow/compute/expression.h"
+ #include "arrow/compute/type_fwd.h"
+ #include "arrow/datum.h"
+ #include "arrow/result.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/type_fwd.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+ namespace compute {
+
+ // It seems like 64K might be a good default chunksize to use for execution
+ // based on the experience of other query processing systems. The current
+ // default is not to chunk contiguous arrays, though, but this may change in
+ // the future once parallel execution is implemented
+ static constexpr int64_t kDefaultExecChunksize = UINT16_MAX;
+
+ /// \brief Context for expression-global variables and options used by
+ /// function evaluation
+ class ARROW_EXPORT ExecContext {
+  public:
+   // If no function registry passed, the default is used.
+   explicit ExecContext(MemoryPool* pool = default_memory_pool(),
+                        ::arrow::internal::Executor* executor = NULLPTR,
+                        FunctionRegistry* func_registry = NULLPTR);
+
+   /// \brief The MemoryPool used for allocations, default is
+   /// default_memory_pool().
+   MemoryPool* memory_pool() const { return pool_; }
+
+   const ::arrow::internal::CpuInfo* cpu_info() const;
+
+   /// \brief An Executor which may be used to parallelize execution.
+   ::arrow::internal::Executor* executor() const { return executor_; }
+
+   /// \brief The FunctionRegistry for looking up functions by name and
+   /// selecting kernels for execution. Defaults to the library-global function
+   /// registry provided by GetFunctionRegistry.
+   FunctionRegistry* func_registry() const { return func_registry_; }
+
+   // \brief Set maximum length unit of work for kernel execution. Larger
+   // contiguous array inputs will be split into smaller chunks, and, if
+   // possible and enabled, processed in parallel. The default chunksize is
+   // INT64_MAX, so contiguous arrays are not split.
+   void set_exec_chunksize(int64_t chunksize) { exec_chunksize_ = chunksize; }
+
+   // \brief Maximum length for ExecBatch data chunks processed by
+   // kernels. Contiguous array inputs with longer length will be split into
+   // smaller chunks.
+   int64_t exec_chunksize() const { return exec_chunksize_; }
+
+   /// \brief Set whether to use multiple threads for function execution. This
+   /// is not yet used.
+   void set_use_threads(bool use_threads = true) { use_threads_ = use_threads; }
+
+   /// \brief If true, then utilize multiple threads where relevant for function
+   /// execution. This is not yet used.
+   bool use_threads() const { return use_threads_; }
+
+   // Set the preallocation strategy for kernel execution as it relates to
+   // chunked execution. For chunked execution, whether via ChunkedArray inputs
+   // or splitting larger Array arguments into smaller pieces, contiguous
+   // allocation (if permitted by the kernel) will allocate one large array to
+   // write output into, yielding it to the caller at the end. If this option is
+   // set to off, then preallocations will be performed independently for each
+   // chunk of execution
+   //
+   // TODO: At some point we might want to limit the size of contiguous
+   // preallocations. For example, even if the exec_chunksize is 64K or less, we
+   // might limit contiguous allocations to 1M records, say.
+   void set_preallocate_contiguous(bool preallocate) {
+     preallocate_contiguous_ = preallocate;
+   }
+
+   /// \brief If contiguous preallocations should be used when doing chunked
+   /// execution as specified by exec_chunksize(). See
+   /// set_preallocate_contiguous() for more information.
+   bool preallocate_contiguous() const { return preallocate_contiguous_; }
+
+  private:
+   MemoryPool* pool_;
+   ::arrow::internal::Executor* executor_;
+   FunctionRegistry* func_registry_;
+   int64_t exec_chunksize_ = std::numeric_limits<int64_t>::max();
+   bool preallocate_contiguous_ = true;
+   bool use_threads_ = true;
+ };
+
+ // TODO: Consider standardizing on uint16 selection vectors and only use them
+ // when we can ensure that each value is 64K length or smaller
+
+ /// \brief Container for an array of value selection indices that were
+ /// materialized from a filter.
+ ///
+ /// Columnar query engines (see e.g. [1]) have found that rather than
+ /// materializing filtered data, the filter can instead be converted to an
+ /// array of the "on" indices and then "fusing" these indices in operator
+ /// implementations. This is especially relevant for aggregations but also
+ /// applies to scalar operations.
+ ///
+ /// We are not yet using this so this is mostly a placeholder for now.
+ ///
+ /// [1]: http://cidrdb.org/cidr2005/papers/P19.pdf
+ class ARROW_EXPORT SelectionVector {
+  public:
+   explicit SelectionVector(std::shared_ptr<ArrayData> data);
+
+   explicit SelectionVector(const Array& arr);
+
+   /// \brief Create SelectionVector from boolean mask
+   static Result<std::shared_ptr<SelectionVector>> FromMask(const BooleanArray& arr);
+
+   const int32_t* indices() const { return indices_; }
+   int32_t length() const;
+
+  private:
+   std::shared_ptr<ArrayData> data_;
+   const int32_t* indices_;
+ };
+
+ /// An index to represent that a batch does not belong to an ordered stream
+ constexpr int64_t kUnsequencedIndex = -1;
+
+ /// \brief A unit of work for kernel execution. It contains a collection of
+ /// Array and Scalar values and an optional SelectionVector indicating that
+ /// there is an unmaterialized filter that either must be materialized, or (if
+ /// the kernel supports it) pushed down into the kernel implementation.
+ ///
+ /// ExecBatch is semantically similar to RecordBatch in that in a SQL context
+ /// it represents a collection of records, but constant "columns" are
+ /// represented by Scalar values rather than having to be converted into arrays
+ /// with repeated values.
+ ///
+ /// TODO: Datum uses arrow/util/variant.h which may be a bit heavier-weight
+ /// than is desirable for this class. Microbenchmarks would help determine for
+ /// sure. See ARROW-8928.
+
+ /// \addtogroup acero-internals
+ /// @{
+
+ struct ARROW_EXPORT ExecBatch {
+   ExecBatch() = default;
+   ExecBatch(std::vector<Datum> values, int64_t length)
+       : values(std::move(values)), length(length) {}
+
+   explicit ExecBatch(const RecordBatch& batch);
+
+   /// \brief Infer the ExecBatch length from values.
+   static Result<int64_t> InferLength(const std::vector<Datum>& values);
+
+   /// Creates an ExecBatch with length-validation.
+   ///
+   /// If any value is given, then all values must have a common length. If the given
+   /// length is negative, then the length of the ExecBatch is set to this common length,
+   /// or to 1 if no values are given. Otherwise, the given length must equal the common
+   /// length, if any value is given.
+   static Result<ExecBatch> Make(std::vector<Datum> values, int64_t length = -1);
+
+   Result<std::shared_ptr<RecordBatch>> ToRecordBatch(
+       std::shared_ptr<Schema> schema, MemoryPool* pool = default_memory_pool()) const;
+
+   /// The values representing positional arguments to be passed to a kernel's
+   /// exec function for processing.
+   std::vector<Datum> values;
+
+   /// A deferred filter represented as an array of indices into the values.
+   ///
+   /// For example, the filter [true, true, false, true] would be represented as
+   /// the selection vector [0, 1, 3]. When the selection vector is set,
+   /// ExecBatch::length is equal to the length of this array.
+   std::shared_ptr<SelectionVector> selection_vector;
+
+   /// A predicate Expression guaranteed to evaluate to true for all rows in this batch.
+   Expression guarantee = literal(true);
+
+   /// The semantic length of the ExecBatch. When the values are all scalars,
+   /// the length should be set to 1 for non-aggregate kernels, otherwise the
+   /// length is taken from the array values, except when there is a selection
+   /// vector. When there is a selection vector set, the length of the batch is
+   /// the length of the selection. Aggregate kernels can have an ExecBatch
+   /// formed by projecting just the partition columns from a batch, in which
+   /// case it would have scalar rows with length greater than 1.
+   ///
+   /// If the array values are of length 0 then the length is 0 regardless of
+   /// whether any values are Scalar.
+   int64_t length = 0;
+
+   /// \brief index of this batch in a sorted stream of batches
+   ///
+   /// This index must be strictly monotonic starting at 0 without gaps, or
+   /// it can be set to kUnsequencedIndex if there is no meaningful order
+   int64_t index = kUnsequencedIndex;
+
+   /// \brief The sum of bytes in each buffer referenced by the batch
+   ///
+   /// Note: Scalars are not counted
+   /// Note: Some values may reference only part of a buffer, for
+   ///       example, an array with an offset. The actual data
+   ///       visible to this batch will be smaller than the total
+   ///       buffer size in this case.
+   int64_t TotalBufferSize() const;
+
+   /// \brief Return the value at the i-th index
+   template <typename index_type>
+   inline const Datum& operator[](index_type i) const {
+     return values[i];
+   }
+
+   bool Equals(const ExecBatch& other) const;
+
+   /// \brief A convenience for the number of values / arguments.
+   int num_values() const { return static_cast<int>(values.size()); }
+
+   ExecBatch Slice(int64_t offset, int64_t length) const;
+
+   Result<ExecBatch> SelectValues(const std::vector<int>& ids) const;
+
+   /// \brief A convenience for returning the types from the batch.
+   std::vector<TypeHolder> GetTypes() const {
+     std::vector<TypeHolder> result;
+     for (const auto& value : this->values) {
+       result.emplace_back(value.type());
+     }
+     return result;
+   }
+
+   std::string ToString() const;
+ };
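+
+ // Construction sketch (assumes `arr` is a std::shared_ptr<arrow::Array> and
+ // `scalar` a std::shared_ptr<arrow::Scalar>; Make validates a common length
+ // as documented above):
+ //
+ //   arrow::Result<arrow::compute::ExecBatch> batch =
+ //       arrow::compute::ExecBatch::Make({arrow::Datum(arr), arrow::Datum(scalar)});
+ //   // batch->length equals arr->length(); the scalar value is broadcast
+ //   // logically rather than materialized as a repeated array.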
+
+ inline bool operator==(const ExecBatch& l, const ExecBatch& r) { return l.Equals(r); }
+ inline bool operator!=(const ExecBatch& l, const ExecBatch& r) { return !l.Equals(r); }
+
+ ARROW_EXPORT void PrintTo(const ExecBatch&, std::ostream*);
+
+ /// @}
+
+ /// \defgroup compute-internals Utilities for calling functions, useful for those
+ /// extending the function registry
+ ///
+ /// @{
+
+ struct ExecValue {
+   ArraySpan array = {};
+   const Scalar* scalar = NULLPTR;
+
+   ExecValue(Scalar* scalar)  // NOLINT implicit conversion
+       : scalar(scalar) {}
+
+   ExecValue(ArraySpan array)  // NOLINT implicit conversion
+       : array(std::move(array)) {}
+
+   ExecValue(const ArrayData& array) {  // NOLINT implicit conversion
+     this->array.SetMembers(array);
+   }
+
+   ExecValue() = default;
+   ExecValue(const ExecValue& other) = default;
+   ExecValue& operator=(const ExecValue& other) = default;
+   ExecValue(ExecValue&& other) = default;
+   ExecValue& operator=(ExecValue&& other) = default;
+
+   int64_t length() const { return this->is_array() ? this->array.length : 1; }
+
+   bool is_array() const { return this->scalar == NULLPTR; }
+   bool is_scalar() const { return !this->is_array(); }
+
+   void SetArray(const ArrayData& array) {
+     this->array.SetMembers(array);
+     this->scalar = NULLPTR;
+   }
+
+   void SetScalar(const Scalar* scalar) { this->scalar = scalar; }
+
+   template <typename ExactType>
+   const ExactType& scalar_as() const {
+     return ::arrow::internal::checked_cast<const ExactType&>(*this->scalar);
+   }
+
+   /// XXX: here temporarily for compatibility with datum, see
+   /// e.g. MakeStructExec in scalar_nested.cc
+   int64_t null_count() const {
+     if (this->is_array()) {
+       return this->array.GetNullCount();
+     } else {
+       return this->scalar->is_valid ? 0 : 1;
+     }
+   }
+
+   const DataType* type() const {
+     if (this->is_array()) {
+       return array.type;
+     } else {
+       return scalar->type.get();
+     }
+   }
+ };
+
+ struct ARROW_EXPORT ExecResult {
+   // The default value of the variant is ArraySpan
+   std::variant<ArraySpan, std::shared_ptr<ArrayData>> value;
+
+   int64_t length() const {
+     if (this->is_array_span()) {
+       return this->array_span()->length;
+     } else {
+       return this->array_data()->length;
+     }
+   }
+
+   const DataType* type() const {
+     if (this->is_array_span()) {
+       return this->array_span()->type;
+     } else {
+       return this->array_data()->type.get();
+     }
+   }
+
+   const ArraySpan* array_span() const { return &std::get<ArraySpan>(this->value); }
+   ArraySpan* array_span_mutable() { return &std::get<ArraySpan>(this->value); }
+
+   bool is_array_span() const { return this->value.index() == 0; }
+
+   const std::shared_ptr<ArrayData>& array_data() const {
+     return std::get<std::shared_ptr<ArrayData>>(this->value);
+   }
+   ArrayData* array_data_mutable() {
+     return std::get<std::shared_ptr<ArrayData>>(this->value).get();
+   }
+
+   bool is_array_data() const { return this->value.index() == 1; }
+ };
+
+ /// \brief A "lightweight" column batch object which contains no
+ /// std::shared_ptr objects and does not have any memory ownership
+ /// semantics. Can represent a view onto an "owning" ExecBatch.
+ struct ARROW_EXPORT ExecSpan {
+   ExecSpan() = default;
+   ExecSpan(const ExecSpan& other) = default;
+   ExecSpan& operator=(const ExecSpan& other) = default;
+   ExecSpan(ExecSpan&& other) = default;
+   ExecSpan& operator=(ExecSpan&& other) = default;
+
+   explicit ExecSpan(std::vector<ExecValue> values, int64_t length)
+       : length(length), values(std::move(values)) {}
+
+   explicit ExecSpan(const ExecBatch& batch) {
+     this->length = batch.length;
+     this->values.resize(batch.values.size());
+     for (size_t i = 0; i < batch.values.size(); ++i) {
+       const Datum& in_value = batch[i];
+       ExecValue* out_value = &this->values[i];
+       if (in_value.is_array()) {
+         out_value->SetArray(*in_value.array());
+       } else {
+         out_value->SetScalar(in_value.scalar().get());
+       }
+     }
+   }
+
+   /// \brief Return the value at the i-th index
+   template <typename index_type>
+   inline const ExecValue& operator[](index_type i) const {
+     return values[i];
+   }
+
+   /// \brief A convenience for the number of values / arguments.
+   int num_values() const { return static_cast<int>(values.size()); }
+
+   std::vector<TypeHolder> GetTypes() const {
+     std::vector<TypeHolder> result;
+     for (const auto& value : this->values) {
+       result.emplace_back(value.type());
+     }
+     return result;
+   }
+
+   ExecBatch ToExecBatch() const {
+     ExecBatch result;
+     result.length = this->length;
+     for (const ExecValue& value : this->values) {
+       if (value.is_array()) {
+         result.values.push_back(value.array.ToArrayData());
+       } else {
+         result.values.push_back(value.scalar->GetSharedPtr());
+       }
+     }
+     return result;
+   }
+
+   int64_t length = 0;
+   std::vector<ExecValue> values;
+ };
+
+ /// \defgroup compute-call-function One-shot calls to compute functions
+ ///
+ /// @{
+
+ /// \brief One-shot invoker for all types of functions.
+ ///
+ /// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs,
+ /// and wrapping of outputs.
+ ARROW_EXPORT
+ Result<Datum> CallFunction(const std::string& func_name, const std::vector<Datum>& args,
+                            const FunctionOptions* options, ExecContext* ctx = NULLPTR);
+
+ /// \brief Variant of CallFunction which uses a function's default options.
+ ///
+ /// NB: Some functions require FunctionOptions be provided.
+ ARROW_EXPORT
+ Result<Datum> CallFunction(const std::string& func_name, const std::vector<Datum>& args,
+                            ExecContext* ctx = NULLPTR);
+
+ /// \brief One-shot invoker for all types of functions.
+ ///
+ /// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs,
+ /// and wrapping of outputs.
+ ARROW_EXPORT
+ Result<Datum> CallFunction(const std::string& func_name, const ExecBatch& batch,
+                            const FunctionOptions* options, ExecContext* ctx = NULLPTR);
+
+ /// \brief Variant of CallFunction which uses a function's default options.
+ ///
+ /// NB: Some functions require FunctionOptions be provided.
+ ARROW_EXPORT
+ Result<Datum> CallFunction(const std::string& func_name, const ExecBatch& batch,
+                            ExecContext* ctx = NULLPTR);
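+
+ // Invocation sketch (assumes the function name "add" is registered in the
+ // default registry, as the arithmetic kernels are in a standard Arrow build;
+ // `lhs` and `rhs` are Datum-convertible arrays or scalars):
+ //
+ //   arrow::Result<arrow::Datum> sum = arrow::compute::CallFunction(
+ //       "add", {arrow::Datum(lhs), arrow::Datum(rhs)});
+ //   // Comparable to the concrete wrappers in api_scalar.h, but resolved by name.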
+
+ /// @}
+
+ /// \defgroup compute-function-executor One-shot calls to obtain function executors
+ ///
+ /// @{
+
+ /// \brief One-shot executor provider for all types of functions.
+ ///
+ /// This function creates and initializes a `FunctionExecutor` appropriate
+ /// for the given function name, input types and function options.
+ ARROW_EXPORT
+ Result<std::shared_ptr<FunctionExecutor>> GetFunctionExecutor(
+     const std::string& func_name, std::vector<TypeHolder> in_types,
+     const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR);
+
+ /// \brief One-shot executor provider for all types of functions.
+ ///
+ /// This function creates and initializes a `FunctionExecutor` appropriate
+ /// for the given function name, input types (taken from the Datum arguments)
+ /// and function options.
+ ARROW_EXPORT
+ Result<std::shared_ptr<FunctionExecutor>> GetFunctionExecutor(
+     const std::string& func_name, const std::vector<Datum>& args,
+     const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR);
+
+ /// @}
+
+ }  // namespace compute
+ }  // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h ADDED
@@ -0,0 +1,295 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+ #include <utility>
+ #include <variant>
+ #include <vector>
+
+ #include "arrow/compute/type_fwd.h"
+ #include "arrow/datum.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/small_vector.h"
+
+ namespace arrow {
+ namespace compute {
+
+ /// \defgroup expression-core Expressions to describe data transformations
+ ///
+ /// @{
+
+ /// An unbound expression which maps a single Datum to another Datum.
+ /// An expression is one of
+ /// - A literal Datum.
+ /// - A reference to a single (potentially nested) field of the input Datum.
+ /// - A call to a compute function, with arguments specified by other Expressions.
+ class ARROW_EXPORT Expression {
+  public:
+   struct Call {
+     std::string function_name;
+     std::vector<Expression> arguments;
+     std::shared_ptr<FunctionOptions> options;
+     // Cached hash value
+     size_t hash;
+
+     // post-Bind properties:
+     std::shared_ptr<Function> function;
+     const Kernel* kernel = NULLPTR;
+     std::shared_ptr<KernelState> kernel_state;
+     TypeHolder type;
+
+     void ComputeHash();
+   };
+
+   std::string ToString() const;
+   bool Equals(const Expression& other) const;
+   size_t hash() const;
+   struct Hash {
+     size_t operator()(const Expression& expr) const { return expr.hash(); }
+   };
+
+   /// Bind this expression to the given input type, looking up Kernels and field types.
+   /// Some expression simplification may be performed and implicit casts will be inserted.
+   /// Any state necessary for execution will be initialized and returned.
+   Result<Expression> Bind(const TypeHolder& in, ExecContext* = NULLPTR) const;
+   Result<Expression> Bind(const Schema& in_schema, ExecContext* = NULLPTR) const;
+
+   // XXX someday
+   // Clone all KernelState in this bound expression. If any function referenced by this
+   // expression has mutable KernelState, it is not safe to execute or apply simplification
+   // passes to it (or copies of it!) from multiple threads. Cloning state produces new
+   // KernelStates where necessary to ensure that Expressions may be manipulated safely
+   // on multiple threads.
+   // Result<ExpressionState> CloneState() const;
+   // Status SetState(ExpressionState);
+
+   /// Return true if all of an expression's field references have explicit types
+   /// and all of its functions' kernels are looked up.
+   bool IsBound() const;
+
+   /// Return true if this expression is composed only of Scalar literals, field
+   /// references, and calls to ScalarFunctions.
+   bool IsScalarExpression() const;
+
+   /// Return true if this expression is literal and entirely null.
+   bool IsNullLiteral() const;
+
+   /// Return true if this expression could evaluate to true. Will return true for any
+   /// unbound or non-boolean Expressions. IsSatisfiable does not (currently) do any
+   /// canonicalization or simplification of the expression, so even Expressions
+   /// which are unsatisfiable may spuriously return `true` here. This function is
+   /// intended for use in predicate pushdown where a filter expression is simplified
+   /// by a guarantee, so it assumes that trying to simplify again would be redundant.
+   bool IsSatisfiable() const;
+
+   // XXX someday
+   // Result<PipelineGraph> GetPipelines();
+
+   bool is_valid() const { return impl_ != NULLPTR; }
+
+   /// Access a Call or return nullptr if this expression is not a call
+   const Call* call() const;
+   /// Access a Datum or return nullptr if this expression is not a literal
+   const Datum* literal() const;
+   /// Access a FieldRef or return nullptr if this expression is not a field_ref
+   const FieldRef* field_ref() const;
+
+   /// The type to which this expression will evaluate
+   const DataType* type() const;
+   // XXX someday
+   // NullGeneralization::type nullable() const;
+
+   struct Parameter {
+     FieldRef ref;
+
+     // post-bind properties
+     TypeHolder type;
+     ::arrow::internal::SmallVector<int, 2> indices;
+   };
+   const Parameter* parameter() const;
+
+   Expression() = default;
+   explicit Expression(Call call);
+   explicit Expression(Datum literal);
+   explicit Expression(Parameter parameter);
+
+  private:
+   using Impl = std::variant<Datum, Parameter, Call>;
+   std::shared_ptr<Impl> impl_;
+
+   ARROW_FRIEND_EXPORT friend bool Identical(const Expression& l, const Expression& r);
+ };
+
+ inline bool operator==(const Expression& l, const Expression& r) { return l.Equals(r); }
+ inline bool operator!=(const Expression& l, const Expression& r) { return !l.Equals(r); }
+
+ ARROW_EXPORT void PrintTo(const Expression&, std::ostream*);
+
+ // Factories
+
+ ARROW_EXPORT
+ Expression literal(Datum lit);
+
+ template <typename Arg>
+ Expression literal(Arg&& arg) {
+   return literal(Datum(std::forward<Arg>(arg)));
+ }
+
+ ARROW_EXPORT
+ Expression field_ref(FieldRef ref);
+
+ ARROW_EXPORT
+ Expression call(std::string function, std::vector<Expression> arguments,
+                 std::shared_ptr<FunctionOptions> options = NULLPTR);
+
+ template <typename Options, typename = typename std::enable_if<
+                                 std::is_base_of<FunctionOptions, Options>::value>::type>
+ Expression call(std::string function, std::vector<Expression> arguments,
+                 Options options) {
+   return call(std::move(function), std::move(arguments),
+               std::make_shared<Options>(std::move(options)));
+ }
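+
+ // Expression-building sketch using the factories above (the field name "x"
+ // is illustrative; the expression stays unbound until Bind is called):
+ //
+ //   namespace cp = arrow::compute;
+ //   cp::Expression expr = cp::call("add", {cp::field_ref("x"), cp::literal(3)});
+ //   // Binding against a schema resolves field types and looks up kernels:
+ //   //   ARROW_ASSIGN_OR_RAISE(expr, expr.Bind(*schema));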
+
+ /// Assemble a list of all fields referenced by an Expression at any depth.
+ ARROW_EXPORT
+ std::vector<FieldRef> FieldsInExpression(const Expression&);
+
+ /// Check if the expression references any fields.
+ ARROW_EXPORT
+ bool ExpressionHasFieldRefs(const Expression&);
+
+ struct ARROW_EXPORT KnownFieldValues;
+
+ /// Assemble a mapping from field references to known values. This derives known values
+ /// from "equal" and "is_null" Expressions referencing a field and a literal.
+ ARROW_EXPORT
+ Result<KnownFieldValues> ExtractKnownFieldValues(
+     const Expression& guaranteed_true_predicate);
+
+ /// @}
+
+ /// \defgroup expression-passes Functions for modification of Expressions
+ ///
+ /// @{
+ ///
+ /// These transform bound expressions. Some transforms utilize a guarantee, which is
+ /// provided as an Expression which is guaranteed to evaluate to true. The
+ /// guaranteed_true_predicate need not be bound, but canonicalization is currently
+ /// deferred to producers of guarantees. For example in order to be recognized as a
+ /// guarantee on a field value, an Expression must be a call to "equal" with field_ref LHS
+ /// and literal RHS. Flipping the arguments, "is_in" with a one-long value_set, ... or
+ /// other semantically identical Expressions will not be recognized.
+
+ /// Weak canonicalization which establishes guarantees for subsequent passes. Even
+ /// equivalent Expressions may result in different canonicalized expressions.
+ /// TODO this could be a strong canonicalization
+ ARROW_EXPORT
+ Result<Expression> Canonicalize(Expression, ExecContext* = NULLPTR);
+
+ /// Simplify Expressions based on literal arguments (for example, add(null, x) will always
+ /// be null so replace the call with a null literal). Includes early evaluation of all
+ /// calls whose arguments are entirely literal.
+ ARROW_EXPORT
+ Result<Expression> FoldConstants(Expression);
+
+ /// Simplify an Expression by replacing the fields it references with their known values.
+ ARROW_EXPORT
+ Result<Expression> ReplaceFieldsWithKnownValues(const KnownFieldValues& known_values,
+                                                 Expression);
+
+ /// Simplify an expression by replacing subexpressions based on a guarantee:
+ /// a boolean expression which is guaranteed to evaluate to `true`. For example, this is
+ /// used to remove redundant function calls from a filter expression or to replace a
+ /// reference to a constant-value field with a literal.
+ ARROW_EXPORT
+ Result<Expression> SimplifyWithGuarantee(Expression,
+                                          const Expression& guaranteed_true_predicate);
+
+ /// Replace all named field refs (e.g. "x" or "x.y") with field paths (e.g. [0] or [1,3])
+ ///
+ /// This isn't usually needed and does not offer any simplification by itself. However,
+ /// it can be useful to normalize an expression to paths to make it simpler to work with.
+ ARROW_EXPORT Result<Expression> RemoveNamedRefs(Expression expression);
+
+ /// @}
+
+ // Execution
+
+ /// Create an ExecBatch suitable for passing to ExecuteScalarExpression() from a
+ /// RecordBatch which may have missing or incorrectly ordered columns.
+ /// Missing fields will be replaced with null scalars.
+ ARROW_EXPORT Result<ExecBatch> MakeExecBatch(const Schema& full_schema,
+                                              const Datum& partial,
+                                              Expression guarantee = literal(true));
+
+ /// Execute a scalar expression against the provided state and input ExecBatch. This
+ /// expression must be bound.
+ ARROW_EXPORT
+ Result<Datum> ExecuteScalarExpression(const Expression&, const ExecBatch& input,
+                                       ExecContext* = NULLPTR);
+
+ /// Convenience function for invoking against a RecordBatch
+ ARROW_EXPORT
+ Result<Datum> ExecuteScalarExpression(const Expression&, const Schema& full_schema,
+                                       const Datum& partial_input, ExecContext* = NULLPTR);
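+
+ // Evaluation sketch (assumes `expr` was bound against `schema` as in the
+ // earlier expression-building sketch, `batch` is a RecordBatch matching that
+ // schema, and the code runs inside a Status- or Result-returning function):
+ //
+ //   namespace cp = arrow::compute;
+ //   ARROW_ASSIGN_OR_RAISE(cp::ExecBatch input,
+ //                         cp::MakeExecBatch(*schema, arrow::Datum(batch)));
+ //   ARROW_ASSIGN_OR_RAISE(arrow::Datum result,
+ //                         cp::ExecuteScalarExpression(expr, input));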
+
+ // Serialization
+
+ ARROW_EXPORT
+ Result<std::shared_ptr<Buffer>> Serialize(const Expression&);
+
+ ARROW_EXPORT
+ Result<Expression> Deserialize(std::shared_ptr<Buffer>);
+
+ /// \defgroup expression-convenience Helpers for convenient expression creation
+ ///
+ /// @{
+
+ ARROW_EXPORT Expression project(std::vector<Expression> values,
+                                 std::vector<std::string> names);
+
+ ARROW_EXPORT Expression equal(Expression lhs, Expression rhs);
+
+ ARROW_EXPORT Expression not_equal(Expression lhs, Expression rhs);
+
+ ARROW_EXPORT Expression less(Expression lhs, Expression rhs);
+
+ ARROW_EXPORT Expression less_equal(Expression lhs, Expression rhs);
+
+ ARROW_EXPORT Expression greater(Expression lhs, Expression rhs);
+
+ ARROW_EXPORT Expression greater_equal(Expression lhs, Expression rhs);
+
+ ARROW_EXPORT Expression is_null(Expression lhs, bool nan_is_null = false);
+
+ ARROW_EXPORT Expression is_valid(Expression lhs);
+
+ ARROW_EXPORT Expression and_(Expression lhs, Expression rhs);
+ ARROW_EXPORT Expression and_(const std::vector<Expression>&);
+ ARROW_EXPORT Expression or_(Expression lhs, Expression rhs);
+ ARROW_EXPORT Expression or_(const std::vector<Expression>&);
+ ARROW_EXPORT Expression not_(Expression operand);
+
+ /// @}
+
+ }  // namespace compute
+ }  // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h ADDED
@@ -0,0 +1,752 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // NOTE: API is EXPERIMENTAL and will change without going through a
19
+ // deprecation cycle
20
+
21
+ #pragma once
22
+
23
+ #include <cstddef>
24
+ #include <cstdint>
25
+ #include <functional>
26
+ #include <memory>
27
+ #include <string>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ #include "arrow/buffer.h"
32
+ #include "arrow/compute/exec.h"
33
+ #include "arrow/datum.h"
34
+ #include "arrow/memory_pool.h"
35
+ #include "arrow/result.h"
36
+ #include "arrow/status.h"
37
+ #include "arrow/type.h"
38
+ #include "arrow/util/macros.h"
39
+ #include "arrow/util/visibility.h"
40
+
41
+ // macOS defines PREALLOCATE as a preprocessor macro in the header sys/vnode.h.
42
+ // No other BSD seems to do so. The name is used as an identifier in MemAllocation enum.
43
+ #if defined(__APPLE__) && defined(PREALLOCATE)
44
+ #undef PREALLOCATE
45
+ #endif
46
+
47
+ namespace arrow {
48
+ namespace compute {
49
+
50
+ class FunctionOptions;
51
+
52
+ /// \brief Base class for opaque kernel-specific state. For example, if there
53
+ /// is some kind of initialization required.
54
+ struct ARROW_EXPORT KernelState {
55
+ virtual ~KernelState() = default;
56
+ };
57
+
58
+ /// \brief Context/state for the execution of a particular kernel.
59
+ class ARROW_EXPORT KernelContext {
60
+ public:
61
+ // Can pass optional backreference; not used consistently for the
62
+ // moment but will be made so in the future
63
+ explicit KernelContext(ExecContext* exec_ctx, const Kernel* kernel = NULLPTR)
64
+ : exec_ctx_(exec_ctx), kernel_(kernel) {}
65
+
66
+ /// \brief Allocate buffer from the context's memory pool. The contents are
67
+ /// not initialized.
68
+ Result<std::shared_ptr<ResizableBuffer>> Allocate(int64_t nbytes);
69
+
70
+ /// \brief Allocate buffer for bitmap from the context's memory pool. Like
71
+ /// Allocate, the contents of the buffer are not initialized but the last
72
+ /// byte is preemptively zeroed to help avoid ASAN or valgrind issues.
73
+ Result<std::shared_ptr<ResizableBuffer>> AllocateBitmap(int64_t num_bits);
74
+
75
+ /// \brief Assign the active KernelState to be utilized for each stage of
76
+ /// kernel execution. Ownership and memory lifetime of the KernelState must
77
+ /// be minded separately.
78
+ void SetState(KernelState* state) { state_ = state; }
79
+
80
+ // Set kernel that is being invoked since some kernel
81
+ // implementations will examine the kernel state.
82
+ void SetKernel(const Kernel* kernel) { kernel_ = kernel; }
83
+
84
+ KernelState* state() { return state_; }
85
+
86
+ /// \brief Configuration related to function execution that is to be shared
87
+ /// across multiple kernels.
88
+ ExecContext* exec_context() { return exec_ctx_; }
89
+
90
+ /// \brief The memory pool to use for allocations. For now, it uses the
91
+ /// MemoryPool contained in the ExecContext used to create the KernelContext.
92
+ MemoryPool* memory_pool() { return exec_ctx_->memory_pool(); }
93
+
94
+ const Kernel* kernel() const { return kernel_; }
95
+
96
+ private:
97
+ ExecContext* exec_ctx_;
98
+ KernelState* state_ = NULLPTR;
99
+ const Kernel* kernel_ = NULLPTR;
100
+ };
101
+
102
+ /// \brief A type-checking interface to permit customizable validation rules
103
+ /// for use with InputType and KernelSignature. This is for scenarios where the
104
+ /// acceptance is not an exact type instance, such as a TIMESTAMP type for a
105
+ /// specific TimeUnit, but permitting any time zone.
106
+ struct ARROW_EXPORT TypeMatcher {
107
+ virtual ~TypeMatcher() = default;
108
+
109
+ /// \brief Return true if this matcher accepts the data type.
110
+ virtual bool Matches(const DataType& type) const = 0;
111
+
112
+ /// \brief A human-interpretable string representation of what the type
113
+ /// matcher checks for, usable when printing KernelSignature or formatting
114
+ /// error messages.
115
+ virtual std::string ToString() const = 0;
116
+
117
+ /// \brief Return true if this TypeMatcher contains the same matching rule as
118
+ /// the other. Currently depends on RTTI.
119
+ virtual bool Equals(const TypeMatcher& other) const = 0;
120
+ };
121
+
122
+ namespace match {
123
+
124
+ /// \brief Match any DataType instance having the same DataType::id.
125
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> SameTypeId(Type::type type_id);
126
+
127
+ /// \brief Match any TimestampType instance having the same unit, but the time
128
+ /// zones can be different.
129
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> TimestampTypeUnit(TimeUnit::type unit);
130
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> Time32TypeUnit(TimeUnit::type unit);
131
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> Time64TypeUnit(TimeUnit::type unit);
132
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> DurationTypeUnit(TimeUnit::type unit);
133
+
134
+ /// \brief Match any integer type
135
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> Integer();
136
+
137
+ /// Match types using the 32-bit varbinary representation
138
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> BinaryLike();
139
+
140
+ /// Match types using the 64-bit varbinary representation
141
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> LargeBinaryLike();
142
+
143
+ /// Match any fixed-size binary type
144
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> FixedSizeBinaryLike();
145
+
146
+ /// \brief Match any primitive type (boolean or any type representable as a C
147
+ /// type)
148
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> Primitive();
149
+
150
+ /// \brief Match any integer type that can be used as a run-end in run-end
151
+ /// encoded arrays
152
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndInteger();
153
+
154
+ /// \brief Match run-end encoded types that use any valid run-end type and
155
+ /// encode specific value types
156
+ ///
157
+ /// @param[in] value_type_matcher a matcher that is applied to the values field
158
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(
159
+ std::shared_ptr<TypeMatcher> value_type_matcher);
160
+
161
+ /// \brief Match run-end encoded types that use any valid run-end type and
162
+ /// encode specific value types
163
+ ///
164
+ /// @param[in] value_type_id a type id that the type of the values field should match
165
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(Type::type value_type_id);
166
+
167
+ /// \brief Match run-end encoded types that encode specific run-end and value types
168
+ ///
169
+ /// @param[in] run_end_type_matcher a matcher that is applied to the run_ends field
170
+ /// @param[in] value_type_matcher a matcher that is applied to the values field
171
+ ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(
172
+ std::shared_ptr<TypeMatcher> run_end_type_matcher,
173
+ std::shared_ptr<TypeMatcher> value_type_matcher);
174
+
175
+ } // namespace match
176
+
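As a hedged illustration of the factories above: a matcher constrains only the
aspect it was built for, so a unit matcher accepts any time zone.

auto ms_matcher = arrow::compute::match::TimestampTypeUnit(arrow::TimeUnit::MILLI);
bool match_tz = ms_matcher->Matches(*arrow::timestamp(arrow::TimeUnit::MILLI, "UTC"));  // true
bool match_ns = ms_matcher->Matches(*arrow::timestamp(arrow::TimeUnit::NANO));          // false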
177
+ /// \brief An object used for type-checking arguments to be passed to a kernel
178
+ /// and stored in a KernelSignature. The type-checking rule can be supplied
179
+ /// either with an exact DataType instance or a custom TypeMatcher.
180
+ class ARROW_EXPORT InputType {
181
+ public:
182
+ /// \brief The kind of type-checking rule that the InputType contains.
183
+ enum Kind {
184
+ /// \brief Accept any value type.
185
+ ANY_TYPE,
186
+
187
+ /// \brief A fixed arrow::DataType and will only exact match having this
188
+ /// exact type (e.g. same TimestampType unit, same decimal scale and
189
+ /// precision, or same nested child types).
190
+ EXACT_TYPE,
191
+
192
+ /// \brief Uses a TypeMatcher implementation to check the type.
193
+ USE_TYPE_MATCHER
194
+ };
195
+
196
+ /// \brief Accept any value type
197
+ InputType() : kind_(ANY_TYPE) {}
198
+
199
+ /// \brief Accept an exact value type.
200
+ InputType(std::shared_ptr<DataType> type) // NOLINT implicit construction
201
+ : kind_(EXACT_TYPE), type_(std::move(type)) {}
202
+
203
+ /// \brief Use the passed TypeMatcher to type check.
204
+ InputType(std::shared_ptr<TypeMatcher> type_matcher) // NOLINT implicit construction
205
+ : kind_(USE_TYPE_MATCHER), type_matcher_(std::move(type_matcher)) {}
206
+
207
+ /// \brief Match any type with the given Type::type. Uses a TypeMatcher for
208
+ /// its implementation.
209
+ InputType(Type::type type_id) // NOLINT implicit construction
210
+ : InputType(match::SameTypeId(type_id)) {}
211
+
212
+ InputType(const InputType& other) { CopyInto(other); }
213
+
214
+ void operator=(const InputType& other) { CopyInto(other); }
215
+
216
+ InputType(InputType&& other) { MoveInto(std::forward<InputType>(other)); }
217
+
218
+ void operator=(InputType&& other) { MoveInto(std::forward<InputType>(other)); }
219
+
220
+ /// \brief Match any input (array or scalar of any type)
221
+ static InputType Any() { return InputType(); }
222
+
223
+ /// \brief Return true if this input type matches the same type cases as the
224
+ /// other.
225
+ bool Equals(const InputType& other) const;
226
+
227
+ bool operator==(const InputType& other) const { return this->Equals(other); }
228
+
229
+ bool operator!=(const InputType& other) const { return !(*this == other); }
230
+
231
+ /// \brief Return hash code.
232
+ size_t Hash() const;
233
+
234
+ /// \brief Render a human-readable string representation.
235
+ std::string ToString() const;
236
+
237
+ /// \brief Return true if the Datum matches this argument kind in
238
+ /// type (and only allows scalar or array-like Datums).
239
+ bool Matches(const Datum& value) const;
240
+
241
+ /// \brief Return true if the type matches this InputType
242
+ bool Matches(const DataType& type) const;
243
+
244
+ /// \brief The type matching rule that this InputType uses.
245
+ Kind kind() const { return kind_; }
246
+
247
+ /// \brief For InputType::EXACT_TYPE kind, the exact type that this InputType
248
+ /// must match. Otherwise this function should not be used and will assert in
249
+ /// debug builds.
250
+ const std::shared_ptr<DataType>& type() const;
251
+
252
+ /// \brief For InputType::USE_TYPE_MATCHER, the TypeMatcher to be used for
253
+ /// checking the type of a value. Otherwise this function should not be used
254
+ /// and will assert in debug builds.
255
+ const TypeMatcher& type_matcher() const;
256
+
257
+ private:
258
+ void CopyInto(const InputType& other) {
259
+ this->kind_ = other.kind_;
260
+ this->type_ = other.type_;
261
+ this->type_matcher_ = other.type_matcher_;
262
+ }
263
+
264
+ void MoveInto(InputType&& other) {
265
+ this->kind_ = other.kind_;
266
+ this->type_ = std::move(other.type_);
267
+ this->type_matcher_ = std::move(other.type_matcher_);
268
+ }
269
+
270
+ Kind kind_;
271
+
272
+ // For EXACT_TYPE Kind
273
+ std::shared_ptr<DataType> type_;
274
+
275
+ // For USE_TYPE_MATCHER Kind
276
+ std::shared_ptr<TypeMatcher> type_matcher_;
277
+ };
278
+
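A short sketch of the three Kinds side by side (illustrative values only):

arrow::compute::InputType any_input;                         // ANY_TYPE: accepts everything
arrow::compute::InputType exact_input(arrow::int32());       // EXACT_TYPE: int32 only
arrow::compute::InputType ts_input(arrow::Type::TIMESTAMP);  // USE_TYPE_MATCHER via
                                                             // match::SameTypeId: any
                                                             // timestamp unit/time zone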
279
+ /// \brief Container to capture both exact and input-dependent output types.
280
+ class ARROW_EXPORT OutputType {
281
+ public:
282
+ /// \brief An enum indicating whether the value type is an invariant fixed
283
+ /// value or one that's computed by a kernel-defined resolver function.
284
+ enum ResolveKind { FIXED, COMPUTED };
285
+
286
+ /// Type resolution function. Given input types, return output type. This
287
+ /// function MAY use the kernel state to decide the output type based on
288
+ /// the FunctionOptions.
289
+ ///
290
+ /// This function SHOULD _not_ be used to check for arity; that is to be
291
+ /// performed one or more layers above.
292
+ using Resolver =
293
+ std::function<Result<TypeHolder>(KernelContext*, const std::vector<TypeHolder>&)>;
294
+
295
+ /// \brief Output an exact type
296
+ OutputType(std::shared_ptr<DataType> type) // NOLINT implicit construction
297
+ : kind_(FIXED), type_(std::move(type)) {}
298
+
299
+ /// \brief Output a computed type depending on actual input types
300
+ template <typename Fn>
301
+ OutputType(Fn resolver) // NOLINT implicit construction
302
+ : kind_(COMPUTED), resolver_(std::move(resolver)) {}
303
+
304
+ OutputType(const OutputType& other) {
305
+ this->kind_ = other.kind_;
306
+ this->type_ = other.type_;
307
+ this->resolver_ = other.resolver_;
308
+ }
309
+
310
+ OutputType(OutputType&& other) {
311
+ this->kind_ = other.kind_;
312
+ this->type_ = std::move(other.type_);
313
+ this->resolver_ = std::move(other.resolver_);
314
+ }
315
+
316
+ OutputType& operator=(const OutputType&) = default;
317
+ OutputType& operator=(OutputType&&) = default;
318
+
319
+ /// \brief Return the type of the expected output value of the kernel given
320
+ /// the input argument types. The resolver may make use of state information
321
+ /// kept in the KernelContext.
322
+ Result<TypeHolder> Resolve(KernelContext* ctx,
323
+ const std::vector<TypeHolder>& args) const;
324
+
325
+ /// \brief The exact output value type for the FIXED kind.
326
+ const std::shared_ptr<DataType>& type() const;
327
+
328
+ /// \brief For use with the COMPUTED resolution strategy. It may be more
329
+ /// convenient to invoke the resolver through OutputType::Resolve than to
330
+ /// call it directly.
331
+ const Resolver& resolver() const;
332
+
333
+ /// \brief Render a human-readable string representation.
334
+ std::string ToString() const;
335
+
336
+ /// \brief Return the kind of type resolution of this output type, whether
337
+ /// fixed/invariant or computed by a resolver.
338
+ ResolveKind kind() const { return kind_; }
339
+
340
+ private:
341
+ ResolveKind kind_;
342
+
343
+ // For FIXED resolution
344
+ std::shared_ptr<DataType> type_;
345
+
346
+ // For COMPUTED resolution
347
+ Resolver resolver_ = NULLPTR;
348
+ };
349
+
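A hedged sketch of the COMPUTED strategy: a resolver that echoes the first
input's type, the common "output type equals input type" pattern for unary
kernels.

arrow::compute::OutputType same_as_first_input(
    [](arrow::compute::KernelContext*, const std::vector<arrow::TypeHolder>& in)
        -> arrow::Result<arrow::TypeHolder> { return in[0]; });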
350
+ /// \brief Holds the input types and output type of the kernel.
351
+ ///
352
+ /// VarArgs functions with minimum N arguments should pass up to N input types to be
353
+ /// used to validate the input types of a function invocation. The first N-1 types
354
+ /// will be matched against the first N-1 arguments, and the last type will be
355
+ /// matched against the remaining arguments.
356
+ class ARROW_EXPORT KernelSignature {
357
+ public:
358
+ KernelSignature(std::vector<InputType> in_types, OutputType out_type,
359
+ bool is_varargs = false);
360
+
361
+ /// \brief Convenience ctor since make_shared can be awkward
362
+ static std::shared_ptr<KernelSignature> Make(std::vector<InputType> in_types,
363
+ OutputType out_type,
364
+ bool is_varargs = false);
365
+
366
+ /// \brief Return true if the signature is compatible with the list of input
367
+ /// value descriptors.
368
+ bool MatchesInputs(const std::vector<TypeHolder>& types) const;
369
+
370
+ /// \brief Returns true if the input types of each signature are
371
+ /// equal. Well-formed functions should have a deterministic output type
372
+ /// given input types, but currently it is the responsibility of the
373
+ /// developer to ensure this.
374
+ bool Equals(const KernelSignature& other) const;
375
+
376
+ bool operator==(const KernelSignature& other) const { return this->Equals(other); }
377
+
378
+ bool operator!=(const KernelSignature& other) const { return !(*this == other); }
379
+
380
+ /// \brief Compute a hash code for the signature
381
+ size_t Hash() const;
382
+
383
+ /// \brief The input types for the kernel. For VarArgs functions, this should
384
+ /// generally contain a single validator to use for validating all of the
385
+ /// function arguments.
386
+ const std::vector<InputType>& in_types() const { return in_types_; }
387
+
388
+ /// \brief The output type for the kernel. Use Resolve to return the
389
+ /// exact output given input argument types, since many kernels'
390
+ /// output types depend on their input types (or their type
391
+ /// metadata).
392
+ const OutputType& out_type() const { return out_type_; }
393
+
394
+ /// \brief Render a human-readable string representation
395
+ std::string ToString() const;
396
+
397
+ bool is_varargs() const { return is_varargs_; }
398
+
399
+ private:
400
+ std::vector<InputType> in_types_;
401
+ OutputType out_type_;
402
+ bool is_varargs_;
403
+
404
+ // For caching the hash code after it's computed the first time
405
+ mutable uint64_t hash_code_;
406
+ };
407
+
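A hedged example of the varargs rule documented above: with is_varargs = true,
the last (here, only) InputType validates every remaining argument.

// Signature of a varargs function taking one or more int64 arguments and
// returning int64; the single validator is reused for all arguments.
auto sig = arrow::compute::KernelSignature::Make(
    {arrow::int64()}, arrow::int64(), /*is_varargs=*/true);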
408
+ /// \brief A function may contain multiple variants of a kernel for a given
409
+ /// type combination for different SIMD levels. Based on the active system's
410
+ /// CPU info or the user's preferences, we can elect to use one over the other.
411
+ struct SimdLevel {
412
+ enum type { NONE = 0, SSE4_2, AVX, AVX2, AVX512, NEON, MAX };
413
+ };
414
+
415
+ /// \brief The strategy to use for propagating or otherwise populating the
416
+ /// validity bitmap of a kernel output.
417
+ struct NullHandling {
418
+ enum type {
419
+ /// Compute the output validity bitmap by intersecting the validity bitmaps
420
+ /// of the arguments using bitwise-and operations. This means that values
421
+ /// in the output are valid/non-null only if the corresponding values in
422
+ /// all input arguments were valid/non-null. Kernels generally need not
423
+ /// touch the bitmap thereafter, but a kernel's exec function is permitted
424
+ /// to alter the bitmap after the null intersection is computed if it needs
425
+ /// to.
426
+ INTERSECTION,
427
+
428
+ /// Kernel expects a pre-allocated buffer to write the result bitmap
429
+ /// into. The preallocated memory is not zeroed (except for the last byte),
430
+ /// so the kernel should ensure to completely populate the bitmap.
431
+ COMPUTED_PREALLOCATE,
432
+
433
+ /// Kernel allocates and sets the validity bitmap of the output.
434
+ COMPUTED_NO_PREALLOCATE,
435
+
436
+ /// Kernel output is never null and a validity bitmap does not need to be
437
+ /// allocated.
438
+ OUTPUT_NOT_NULL
439
+ };
440
+ };
441
+
442
+ /// \brief The preference for memory preallocation of fixed-width type outputs
443
+ /// in kernel execution.
444
+ struct MemAllocation {
445
+ enum type {
446
+ // For data types that support pre-allocation (i.e. fixed-width), the
447
+ // kernel expects to be provided a pre-allocated data buffer to write
448
+ // into. Non-fixed-width types must always allocate their own data
449
+ // buffers. The allocation is made with the same length as the execution batch,
450
+ // so vector kernels yielding differently sized output should not use this.
451
+ //
452
+ // It is valid for the data to not be preallocated but the validity bitmap
453
+ // is (or is computed using the intersection/bitwise-and method).
454
+ //
455
+ // For variable-size output types like BinaryType or StringType, or for
456
+ // nested types, this option has no effect.
457
+ PREALLOCATE,
458
+
459
+ // The kernel is responsible for allocating its own data buffer for
460
+ // fixed-width type outputs.
461
+ NO_PREALLOCATE
462
+ };
463
+ };
464
+
465
+ struct Kernel;
466
+
467
+ /// \brief Arguments to pass to a KernelInit function. A struct is used to help
468
+ /// avoid API breakage should the arguments passed need to be expanded.
469
+ struct KernelInitArgs {
470
+ /// \brief A pointer to the kernel being initialized. The init function may
471
+ /// depend on the kernel's KernelSignature or other data contained there.
472
+ const Kernel* kernel;
473
+
474
+ /// \brief The types of the input arguments that the kernel is
475
+ /// about to be executed against.
476
+ const std::vector<TypeHolder>& inputs;
477
+
478
+ /// \brief Opaque options specific to this kernel. May be nullptr for functions
479
+ /// that do not require options.
480
+ const FunctionOptions* options;
481
+ };
482
+
483
+ /// \brief Common initializer function for all kernel types.
484
+ using KernelInit = std::function<Result<std::unique_ptr<KernelState>>(
485
+ KernelContext*, const KernelInitArgs&)>;
486
+
487
+ /// \brief Base type for kernels. Contains the function signature and
488
+ /// optionally the state initialization function, along with some common
489
+ /// attributes
490
+ struct ARROW_EXPORT Kernel {
491
+ Kernel() = default;
492
+
493
+ Kernel(std::shared_ptr<KernelSignature> sig, KernelInit init)
494
+ : signature(std::move(sig)), init(std::move(init)) {}
495
+
496
+ Kernel(std::vector<InputType> in_types, OutputType out_type, KernelInit init)
497
+ : Kernel(KernelSignature::Make(std::move(in_types), std::move(out_type)),
498
+ std::move(init)) {}
499
+
500
+ /// \brief The "signature" of the kernel containing the InputType input
501
+ /// argument validators and OutputType output type resolver.
502
+ std::shared_ptr<KernelSignature> signature;
503
+
504
+ /// \brief Create a new KernelState for invocations of this kernel, e.g. to
505
+ /// set up any options or state relevant for execution.
506
+ KernelInit init;
507
+
508
+ /// \brief Create a vector of new KernelState for invocations of this kernel.
509
+ static Status InitAll(KernelContext*, const KernelInitArgs&,
510
+ std::vector<std::unique_ptr<KernelState>>*);
511
+
512
+ /// \brief Indicates whether execution can benefit from parallelization
513
+ /// (splitting large chunks into smaller chunks and using multiple
514
+ /// threads). Some kernels may not support parallel execution at
515
+ /// all. Synchronization and concurrency-related issues are currently the
516
+ /// responsibility of the Kernel's implementation.
517
+ bool parallelizable = true;
518
+
519
+ /// \brief Indicates the level of SIMD instruction support in the host CPU is
520
+ /// required to use the function. The intention is for functions to be able to
521
+ /// contain multiple kernels with the same signature but different levels of SIMD,
522
+ /// so that the most optimized kernel supported on a host's processor can be chosen.
523
+ SimdLevel::type simd_level = SimdLevel::NONE;
524
+
525
+ // Additional kernel-specific data
526
+ std::shared_ptr<KernelState> data;
527
+ };
528
+
529
+ /// \brief The scalar kernel execution API that must be implemented for SCALAR
530
+ /// kernel types. This includes both stateless and stateful kernels. Kernels
531
+ /// depending on some execution state access that state via subclasses of
532
+ /// KernelState set on the KernelContext object. Implementations should
533
+ /// endeavor to write into pre-allocated memory if they are able, though for
534
+ /// some kernels (e.g. in cases when a builder like StringBuilder must be
535
+ /// employed) this may not be possible.
536
+ using ArrayKernelExec = Status (*)(KernelContext*, const ExecSpan&, ExecResult*);
537
+
538
+ /// \brief Kernel data structure for implementations of ScalarFunction. In
539
+ /// addition to the members found in Kernel, contains the null handling
540
+ /// and memory pre-allocation preferences.
541
+ struct ARROW_EXPORT ScalarKernel : public Kernel {
542
+ ScalarKernel() = default;
543
+
544
+ ScalarKernel(std::shared_ptr<KernelSignature> sig, ArrayKernelExec exec,
545
+ KernelInit init = NULLPTR)
546
+ : Kernel(std::move(sig), std::move(init)), exec(exec) {}
547
+
548
+ ScalarKernel(std::vector<InputType> in_types, OutputType out_type, ArrayKernelExec exec,
549
+ KernelInit init = NULLPTR)
550
+ : Kernel(std::move(in_types), std::move(out_type), std::move(init)), exec(exec) {}
551
+
552
+ /// \brief Perform a single invocation of this kernel. Depending on the
553
+ /// implementation, it may only write into preallocated memory, while in some
554
+ /// cases it will allocate its own memory. Any required state is managed
555
+ /// through the KernelContext.
556
+ ArrayKernelExec exec;
557
+
558
+ /// \brief Writing execution results into larger contiguous allocations
559
+ /// requires that the kernel be able to write into sliced output ArrayData*,
560
+ /// including sliced output validity bitmaps. Some kernel implementations may
561
+ /// not be able to do this, so setting this to false disables this
562
+ /// functionality.
563
+ bool can_write_into_slices = true;
564
+
565
+ // For scalar functions preallocated data and intersecting arg validity
566
+ // bitmaps is a reasonable default
567
+ NullHandling::type null_handling = NullHandling::INTERSECTION;
568
+ MemAllocation::type mem_allocation = MemAllocation::PREALLOCATE;
569
+ };
570
+
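A hedged wiring sketch for the struct above. The no-op body is illustrative
only; a real kernel would read the ExecSpan and write through the ExecResult.

arrow::Status NoopExec(arrow::compute::KernelContext*, const arrow::compute::ExecSpan&,
                       arrow::compute::ExecResult*) {
  return arrow::Status::OK();  // matches the ArrayKernelExec signature above
}

// One int32 input, int32 output, no init function.
arrow::compute::ScalarKernel noop_kernel({arrow::int32()}, arrow::int32(), NoopExec);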
571
+ // ----------------------------------------------------------------------
572
+ // VectorKernel (for VectorFunction)
573
+
574
+ /// \brief Kernel data structure for implementations of VectorFunction. It
575
+ /// contains an optional finalizer function, the null handling and memory
576
+ /// pre-allocation preferences (which have different defaults from
577
+ /// ScalarKernel), and some other execution-related options.
578
+ struct ARROW_EXPORT VectorKernel : public Kernel {
579
+ /// \brief See VectorKernel::finalize member for usage
580
+ using FinalizeFunc = std::function<Status(KernelContext*, std::vector<Datum>*)>;
581
+
582
+ /// \brief Function for executing a stateful VectorKernel against a
583
+ /// ChunkedArray input. Does not need to be defined for all VectorKernels.
584
+ using ChunkedExec = Status (*)(KernelContext*, const ExecBatch&, Datum* out);
585
+
586
+ VectorKernel() = default;
587
+
588
+ VectorKernel(std::vector<InputType> in_types, OutputType out_type, ArrayKernelExec exec,
589
+ KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR)
590
+ : Kernel(std::move(in_types), std::move(out_type), std::move(init)),
591
+ exec(exec),
592
+ finalize(std::move(finalize)) {}
593
+
594
+ VectorKernel(std::shared_ptr<KernelSignature> sig, ArrayKernelExec exec,
595
+ KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR)
596
+ : Kernel(std::move(sig), std::move(init)),
597
+ exec(exec),
598
+ finalize(std::move(finalize)) {}
599
+
600
+ /// \brief Perform a single invocation of this kernel. Any required state is
601
+ /// managed through the KernelContext.
602
+ ArrayKernelExec exec;
603
+
604
+ /// \brief Execute the kernel on a ChunkedArray. Does not need to be defined
605
+ ChunkedExec exec_chunked = NULLPTR;
606
+
607
+ /// \brief For VectorKernel, convert intermediate results into finalized
608
+ /// results. Mutates input argument. Some kernels may accumulate state
609
+ /// (example: hashing-related functions) through processing chunked inputs, and
610
+ /// then need to attach some accumulated state to each of the outputs of
611
+ /// processing each chunk of data.
612
+ FinalizeFunc finalize;
613
+
614
+ /// Since vector kernels generally are implemented rather differently from
615
+ /// scalar/elementwise kernels (and they may not even yield arrays of the same
616
+ /// size), we make the developer opt in to any memory preallocation rather
617
+ /// than having to turn it off.
618
+ NullHandling::type null_handling = NullHandling::COMPUTED_NO_PREALLOCATE;
619
+ MemAllocation::type mem_allocation = MemAllocation::NO_PREALLOCATE;
620
+
621
+ /// \brief Writing execution results into larger contiguous allocations
622
+ /// requires that the kernel be able to write into sliced output ArrayData*,
623
+ /// including sliced output validity bitmaps. Some kernel implementations may
624
+ /// not be able to do this, so setting this to false disables this
625
+ /// functionality.
626
+ bool can_write_into_slices = true;
627
+
628
+ /// Some vector kernels can do chunkwise execution using ExecSpanIterator,
629
+ /// in some cases accumulating some state. Other kernels (like Take) need to
630
+ /// be passed whole arrays and don't work on ChunkedArray inputs.
631
+ bool can_execute_chunkwise = true;
632
+
633
+ /// Some kernels (like unique and value_counts) yield non-chunked output from
634
+ /// chunked-array inputs. This option controls how the results are boxed when
635
+ /// returned from ExecVectorFunction
636
+ ///
637
+ /// true -> ChunkedArray
638
+ /// false -> Array
639
+ bool output_chunked = true;
640
+ };
641
+
642
+ // ----------------------------------------------------------------------
643
+ // ScalarAggregateKernel (for ScalarAggregateFunction)
644
+
645
+ using ScalarAggregateConsume = Status (*)(KernelContext*, const ExecSpan&);
646
+ using ScalarAggregateMerge = Status (*)(KernelContext*, KernelState&&, KernelState*);
647
+ // Finalize returns Datum to permit multiple return values
648
+ using ScalarAggregateFinalize = Status (*)(KernelContext*, Datum*);
649
+
650
+ /// \brief Kernel data structure for implementations of
651
+ /// ScalarAggregateFunction. The four necessary components of an aggregation
652
+ /// kernel are the init, consume, merge, and finalize functions.
653
+ ///
654
+ /// * init: creates a new KernelState for a kernel.
655
+ /// * consume: processes an ExecSpan and updates the KernelState found in the
656
+ /// KernelContext.
657
+ /// * merge: combines one KernelState with another.
658
+ /// * finalize: produces the end result of the aggregation using the
659
+ /// KernelState in the KernelContext.
660
+ struct ARROW_EXPORT ScalarAggregateKernel : public Kernel {
661
+ ScalarAggregateKernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
662
+ ScalarAggregateConsume consume, ScalarAggregateMerge merge,
663
+ ScalarAggregateFinalize finalize, const bool ordered)
664
+ : Kernel(std::move(sig), std::move(init)),
665
+ consume(consume),
666
+ merge(merge),
667
+ finalize(finalize),
668
+ ordered(ordered) {}
669
+
670
+ ScalarAggregateKernel(std::vector<InputType> in_types, OutputType out_type,
671
+ KernelInit init, ScalarAggregateConsume consume,
672
+ ScalarAggregateMerge merge, ScalarAggregateFinalize finalize,
673
+ const bool ordered)
674
+ : ScalarAggregateKernel(
675
+ KernelSignature::Make(std::move(in_types), std::move(out_type)),
676
+ std::move(init), consume, merge, finalize, ordered) {}
677
+
678
+ /// \brief Merge a vector of KernelStates into a single KernelState.
679
+ /// The merged state will be returned and will be set on the KernelContext.
680
+ static Result<std::unique_ptr<KernelState>> MergeAll(
681
+ const ScalarAggregateKernel* kernel, KernelContext* ctx,
682
+ std::vector<std::unique_ptr<KernelState>> states);
683
+
684
+ ScalarAggregateConsume consume;
685
+ ScalarAggregateMerge merge;
686
+ ScalarAggregateFinalize finalize;
687
+ /// \brief Whether this kernel requires ordering
688
+ /// Some aggregations, such as "first", require some kind of input order. The
689
+ /// order can be implicit, e.g., the order of the input data, or explicit, e.g.
690
+ /// the ordering specified with a window aggregation.
691
+ /// The caller of the aggregate kernel is responsible for passing data in some
692
+ /// defined order to the kernel. The flag here is a way for the kernel to tell
693
+ /// the caller that data passed to the kernel must arrive in a defined order.
694
+ bool ordered = false;
695
+ };
696
+
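A hedged sketch of the lifecycle these members imply; the names spans, in_types
and options are assumptions for illustration, and a parallel driver would also
MergeAll the per-thread states before finalizing.

arrow::Result<arrow::Datum> RunAggregate(
    const arrow::compute::ScalarAggregateKernel& kernel,
    arrow::compute::KernelContext* ctx,
    const std::vector<arrow::TypeHolder>& in_types,
    const arrow::compute::FunctionOptions* options,
    const std::vector<arrow::compute::ExecSpan>& spans) {
  ARROW_ASSIGN_OR_RAISE(auto state, kernel.init(ctx, {&kernel, in_types, options}));
  ctx->SetState(state.get());  // consume/finalize find the state via the context
  for (const auto& span : spans) {
    ARROW_RETURN_NOT_OK(kernel.consume(ctx, span));  // accumulate chunk by chunk
  }
  arrow::Datum out;
  ARROW_RETURN_NOT_OK(kernel.finalize(ctx, &out));  // produce the final Datum
  return out;
}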
697
+ // ----------------------------------------------------------------------
698
+ // HashAggregateKernel (for HashAggregateFunction)
699
+
700
+ using HashAggregateResize = Status (*)(KernelContext*, int64_t);
701
+ using HashAggregateConsume = Status (*)(KernelContext*, const ExecSpan&);
702
+ using HashAggregateMerge = Status (*)(KernelContext*, KernelState&&, const ArrayData&);
703
+
704
+ // Finalize returns Datum to permit multiple return values
705
+ using HashAggregateFinalize = Status (*)(KernelContext*, Datum*);
706
+
707
+ /// \brief Kernel data structure for implementations of
708
+ /// HashAggregateFunction. The five necessary components of an aggregation
709
+ /// kernel are the init, resize, consume, merge, and finalize functions.
710
+ ///
711
+ /// * init: creates a new KernelState for a kernel.
712
+ /// * resize: ensure that the KernelState can accommodate the specified number of groups.
713
+ /// * consume: processes an ExecSpan (which includes the argument as well
714
+ /// as an array of group identifiers) and updates the KernelState found in the
715
+ /// KernelContext.
716
+ /// * merge: combines one KernelState with another.
717
+ /// * finalize: produces the end result of the aggregation using the
718
+ /// KernelState in the KernelContext.
719
+ struct ARROW_EXPORT HashAggregateKernel : public Kernel {
720
+ HashAggregateKernel() = default;
721
+
722
+ HashAggregateKernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
723
+ HashAggregateResize resize, HashAggregateConsume consume,
724
+ HashAggregateMerge merge, HashAggregateFinalize finalize,
725
+ const bool ordered)
726
+ : Kernel(std::move(sig), std::move(init)),
727
+ resize(resize),
728
+ consume(consume),
729
+ merge(merge),
730
+ finalize(finalize),
731
+ ordered(ordered) {}
732
+
733
+ HashAggregateKernel(std::vector<InputType> in_types, OutputType out_type,
734
+ KernelInit init, HashAggregateConsume consume,
735
+ HashAggregateResize resize, HashAggregateMerge merge,
736
+ HashAggregateFinalize finalize, const bool ordered)
737
+ : HashAggregateKernel(
738
+ KernelSignature::Make(std::move(in_types), std::move(out_type)),
739
+ std::move(init), resize, consume, merge, finalize, ordered) {}
740
+
741
+ HashAggregateResize resize;
742
+ HashAggregateConsume consume;
743
+ HashAggregateMerge merge;
744
+ HashAggregateFinalize finalize;
745
+ /// \brief Whether this kernel requires ordering.
746
+ /// This is similar to ScalarAggregateKernel; see ScalarAggregateKernel
747
+ /// for a detailed description of this field.
748
+ bool ordered = false;
749
+ };
750
+
751
+ } // namespace compute
752
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h ADDED
@@ -0,0 +1,58 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/visibility.h"
21
+
22
+ namespace arrow {
23
+
24
+ struct Datum;
25
+ struct TypeHolder;
26
+
27
+ namespace compute {
28
+
29
+ class Function;
30
+ class ScalarAggregateFunction;
31
+ class FunctionExecutor;
32
+ class FunctionOptions;
33
+ class FunctionRegistry;
34
+
35
+ /// \brief Return the process-global function registry.
36
+ // Defined in registry.cc
37
+ ARROW_EXPORT FunctionRegistry* GetFunctionRegistry();
38
+
39
+ class CastOptions;
40
+
41
+ struct ExecBatch;
42
+ class ExecContext;
43
+ class KernelContext;
44
+
45
+ struct Kernel;
46
+ struct ScalarKernel;
47
+ struct ScalarAggregateKernel;
48
+ struct VectorKernel;
49
+
50
+ struct KernelState;
51
+
52
+ class Expression;
53
+
54
+ ARROW_EXPORT ExecContext* default_exec_context();
55
+ ARROW_EXPORT ExecContext* threaded_exec_context();
56
+
57
+ } // namespace compute
58
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h ADDED
@@ -0,0 +1,33 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/result.h"
21
+
22
+ namespace arrow {
23
+
24
+ template <typename InputIterator, typename OutputIterator, typename UnaryOperation>
25
+ Status MaybeTransform(InputIterator first, InputIterator last, OutputIterator out,
26
+ UnaryOperation unary_op) {
27
+ for (; first != last; ++first, (void)++out) {
28
+ ARROW_ASSIGN_OR_RAISE(*out, unary_op(*first));
29
+ }
30
+ return Status::OK();
31
+ }
32
+
33
+ } // namespace arrow
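A hedged usage sketch: MaybeTransform is std::transform for fallible operations,
stopping at the first element whose Result carries an error. ParseInt is a
hypothetical Result-returning parser, assumed only for illustration.

#include <string>
#include <vector>

arrow::Result<int> ParseInt(const std::string& s);  // hypothetical, defined elsewhere

arrow::Status ParseAll(const std::vector<std::string>& in, std::vector<int>* out) {
  out->resize(in.size());
  // Returns the first non-OK Status produced by ParseInt, else Status::OK().
  return arrow::MaybeTransform(in.begin(), in.end(), out->begin(), ParseInt);
}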
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h ADDED
@@ -0,0 +1,221 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+
22
+ #include "arrow/memory_pool.h"
23
+ #include "arrow/type_fwd.h"
24
+ #include "arrow/util/bit_util.h"
25
+
26
+ namespace arrow {
27
+ namespace internal {
28
+
29
+ struct BitmapWordAlignParams {
30
+ int64_t leading_bits;
31
+ int64_t trailing_bits;
32
+ int64_t trailing_bit_offset;
33
+ const uint8_t* aligned_start;
34
+ int64_t aligned_bits;
35
+ int64_t aligned_words;
36
+ };
37
+
38
+ // Compute parameters for accessing a bitmap using aligned word instructions.
39
+ // The returned parameters describe:
40
+ // - a leading area of size `leading_bits` before the aligned words
41
+ // - a word-aligned area of size `aligned_bits`
42
+ // - a trailing area of size `trailing_bits` after the aligned words
43
+ template <uint64_t ALIGN_IN_BYTES>
44
+ inline BitmapWordAlignParams BitmapWordAlign(const uint8_t* data, int64_t bit_offset,
45
+ int64_t length) {
46
+ static_assert(bit_util::IsPowerOf2(ALIGN_IN_BYTES),
47
+ "ALIGN_IN_BYTES should be a positive power of two");
48
+ constexpr uint64_t ALIGN_IN_BITS = ALIGN_IN_BYTES * 8;
49
+
50
+ BitmapWordAlignParams p;
51
+
52
+ // Compute a "bit address" that we can align up to ALIGN_IN_BITS.
53
+ // We don't care about losing the upper bits since we are only interested in the
54
+ // difference between both addresses.
55
+ const uint64_t bit_addr =
56
+ reinterpret_cast<size_t>(data) * 8 + static_cast<uint64_t>(bit_offset);
57
+ const uint64_t aligned_bit_addr = bit_util::RoundUpToPowerOf2(bit_addr, ALIGN_IN_BITS);
58
+
59
+ p.leading_bits = std::min<int64_t>(length, aligned_bit_addr - bit_addr);
60
+ p.aligned_words = (length - p.leading_bits) / ALIGN_IN_BITS;
61
+ p.aligned_bits = p.aligned_words * ALIGN_IN_BITS;
62
+ p.trailing_bits = length - p.leading_bits - p.aligned_bits;
63
+ p.trailing_bit_offset = bit_offset + p.leading_bits + p.aligned_bits;
64
+
65
+ p.aligned_start = data + (bit_offset + p.leading_bits) / 8;
66
+ return p;
67
+ }
68
+ } // namespace internal
69
+
70
+ namespace util {
71
+
72
+ // Functions to check if the provided Arrow object is aligned by the specified alignment
73
+
74
+ /// \brief Special alignment value to use data type-specific alignment
75
+ ///
76
+ /// If this is passed as the `alignment` in one of the CheckAlignment or EnsureAlignment
77
+ /// functions, then the function will ensure each buffer is suitably aligned
78
+ /// for the data type of the array. For example, given an int32 buffer the values
79
+ /// buffer's address must be a multiple of 4. Given a large_string buffer the offsets
80
+ /// buffer's address must be a multiple of 8.
81
+ constexpr int64_t kValueAlignment = -3;
82
+
83
+ /// \brief Calculate if the buffer's address is a multiple of `alignment`
84
+ ///
85
+ /// If `alignment` is less than or equal to 0 then this method will always return true
86
+ /// \param buffer the buffer to check
87
+ /// \param alignment the alignment (in bytes) to check for
88
+ ARROW_EXPORT bool CheckAlignment(const Buffer& buffer, int64_t alignment);
89
+ /// \brief Calculate if all buffers in the array data are aligned
90
+ ///
91
+ /// This will also check the buffers in the dictionary and any children
92
+ /// \param array the array data to check
93
+ /// \param alignment the alignment (in bytes) to check for
94
+ ARROW_EXPORT bool CheckAlignment(const ArrayData& array, int64_t alignment);
95
+ /// \brief Calculate if all buffers in the array are aligned
96
+ ///
97
+ /// This will also check the buffers in the dictionary and any children
98
+ /// \param array the array to check
99
+ /// \param alignment the alignment (in bytes) to check for
100
+ ARROW_EXPORT bool CheckAlignment(const Array& array, int64_t alignment);
101
+
102
+ // The following functions require an additional boolean vector which stores the
103
+ // alignment check bits of the constituent objects.
104
+ // For example, needs_alignment vector for a ChunkedArray will contain the
105
+ // check bits of the constituent Arrays.
106
+ // The boolean vector check was introduced to minimize the repetitive checks
107
+ // of the constituent objects during the EnsureAlignment function where certain
108
+ // objects can be ignored for further checking if we already know that they are
109
+ // completely aligned.
110
+
111
+ /// \brief Calculate which (if any) chunks in a chunked array are unaligned
112
+ /// \param array the array to check
113
+ /// \param alignment the alignment (in bytes) to check for
114
+ /// \param needs_alignment an output vector that will store the results of the check.
115
+ /// It must be set to a valid vector. Extra elements will be added to the end
116
+ /// of the vector for each chunk that is checked. `true` will be stored if
117
+ /// the chunk is unaligned.
118
+ /// \param offset the index of the chunk to start checking
119
+ /// \return true if all chunks (starting at `offset`) are aligned, false otherwise
120
+ ARROW_EXPORT bool CheckAlignment(const ChunkedArray& array, int64_t alignment,
121
+ std::vector<bool>* needs_alignment, int offset = 0);
122
+
123
+ /// \brief Calculate which (if any) columns in a record batch are unaligned
124
+ /// \param batch the batch to check
125
+ /// \param alignment the alignment (in bytes) to check for
126
+ /// \param needs_alignment an output vector that will store the results of the
127
+ /// check. It must be set to a valid vector. Extra elements will be added
128
+ /// to the end of the vector for each column that is checked. `true` will be
129
+ /// stored if the column is unaligned.
130
+ ARROW_EXPORT bool CheckAlignment(const RecordBatch& batch, int64_t alignment,
131
+ std::vector<bool>* needs_alignment);
132
+
133
+ /// \brief Calculate which (if any) columns in a table are unaligned
134
+ /// \param table the table to check
135
+ /// \param alignment the alignment (in bytes) to check for
136
+ /// \param needs_alignment an output vector that will store the results of the
137
+ /// check. It must be set to a valid vector. Extra elements will be added
138
+ /// to the end of the vector for each column that is checked. `true` will be
139
+ /// stored if the column is unaligned.
140
+ ARROW_EXPORT bool CheckAlignment(const Table& table, int64_t alignment,
141
+ std::vector<bool>* needs_alignment);
142
+
143
+ /// \brief return a buffer that has the given alignment and the same data as the input
144
+ /// buffer
145
+ ///
146
+ /// If the input buffer is already aligned then this method will return the input buffer.
147
+ /// If the input buffer is not already aligned then this method will allocate a new
148
+ /// buffer. The alignment of the new buffer will have at least
149
+ /// max(kDefaultBufferAlignment, alignment) bytes of alignment.
150
+ ///
151
+ /// \param buffer the buffer to check
152
+ /// \param alignment the alignment (in bytes) to check for
153
+ /// \param memory_pool a memory pool that will be used to allocate a new buffer if the
154
+ /// input buffer is not sufficiently aligned
155
+ ARROW_EXPORT Result<std::shared_ptr<Buffer>> EnsureAlignment(
156
+ std::shared_ptr<Buffer> buffer, int64_t alignment, MemoryPool* memory_pool);
157
+
158
+ /// \brief return an array data where all buffers are aligned by the given alignment
159
+ ///
160
+ /// If any input buffer is already aligned then this method will reuse that same input
161
+ /// buffer.
162
+ ///
163
+ /// \param array_data the array data to check
164
+ /// \param alignment the alignment (in bytes) to check for
165
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
166
+ /// input buffer is not sufficiently aligned
167
+ ARROW_EXPORT Result<std::shared_ptr<ArrayData>> EnsureAlignment(
168
+ std::shared_ptr<ArrayData> array_data, int64_t alignment, MemoryPool* memory_pool);
169
+
170
+ /// \brief return an array where all buffers are aligned by the given alignment
171
+ ///
172
+ /// If any input buffer is already aligned then this method will reuse that same input
173
+ /// buffer.
174
+ ///
175
+ /// \param array the array to check
176
+ /// \param alignment the alignment (in bytes) to check for
177
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
178
+ /// input buffer is not sufficiently aligned
179
+ ARROW_EXPORT Result<std::shared_ptr<Array>> EnsureAlignment(std::shared_ptr<Array> array,
180
+ int64_t alignment,
181
+ MemoryPool* memory_pool);
182
+
183
+ /// \brief return a chunked array where all buffers are aligned by the given alignment
184
+ ///
185
+ /// If any input buffer is already aligned then this method will reuse that same input
186
+ /// buffer.
187
+ ///
188
+ /// \param array the chunked array to check
189
+ /// \param alignment the alignment (in bytes) to check for
190
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
191
+ /// input buffer is not sufficiently aligned
192
+ ARROW_EXPORT Result<std::shared_ptr<ChunkedArray>> EnsureAlignment(
193
+ std::shared_ptr<ChunkedArray> array, int64_t alignment, MemoryPool* memory_pool);
194
+
195
+ /// \brief return a record batch where all buffers are aligned by the given alignment
196
+ ///
197
+ /// If any input buffer is already aligned then this method will reuse that same input
198
+ /// buffer.
199
+ ///
200
+ /// \param batch the batch to check
201
+ /// \param alignment the alignment (in bytes) to check for
202
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
203
+ /// input buffer is not sufficiently aligned
204
+ ARROW_EXPORT Result<std::shared_ptr<RecordBatch>> EnsureAlignment(
205
+ std::shared_ptr<RecordBatch> batch, int64_t alignment, MemoryPool* memory_pool);
206
+
207
+ /// \brief return a table where all buffers are aligned by the given alignment
208
+ ///
209
+ /// If any input buffer is already aligned then this method will reuse that same input
210
+ /// buffer.
211
+ ///
212
+ /// \param table the table to check
213
+ /// \param alignment the alignment (in bytes) to check for
214
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
215
+ /// input buffer is not sufficiently aligned
216
+ ARROW_EXPORT Result<std::shared_ptr<Table>> EnsureAlignment(std::shared_ptr<Table> table,
217
+ int64_t alignment,
218
+ MemoryPool* memory_pool);
219
+
220
+ } // namespace util
221
+ } // namespace arrow
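A hedged usage sketch: realigning a batch read from an external source (e.g. a
Flight or IPC payload) so every buffer meets type-specific alignment.

arrow::Result<std::shared_ptr<arrow::RecordBatch>> Realign(
    std::shared_ptr<arrow::RecordBatch> batch) {
  // kValueAlignment requests per-type alignment (4 bytes for int32 values,
  // 8 bytes for large_string offsets, ...); aligned buffers are reused as-is.
  return arrow::util::EnsureAlignment(std::move(batch), arrow::util::kValueAlignment,
                                      arrow::default_memory_pool());
}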
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h ADDED
@@ -0,0 +1,145 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstring>
21
+ #include <type_traits>
22
+ #include <utility>
23
+
24
+ #include "arrow/util/launder.h"
25
+ #include "arrow/util/macros.h"
26
+
27
+ namespace arrow {
28
+ namespace internal {
29
+
30
+ template <typename T>
31
+ class AlignedStorage {
32
+ public:
33
+ static constexpr bool can_memcpy = std::is_trivial<T>::value;
34
+
35
+ constexpr T* get() noexcept {
36
+ return arrow::internal::launder(reinterpret_cast<T*>(&data_));
37
+ }
38
+
39
+ constexpr const T* get() const noexcept {
40
+ // Use fully qualified name to avoid ambiguities with MSVC (ARROW-14800)
41
+ return arrow::internal::launder(reinterpret_cast<const T*>(&data_));
42
+ }
43
+
44
+ void destroy() noexcept {
45
+ if (!std::is_trivially_destructible<T>::value) {
46
+ get()->~T();
47
+ }
48
+ }
49
+
50
+ template <typename... A>
51
+ void construct(A&&... args) noexcept {
52
+ new (&data_) T(std::forward<A>(args)...);
53
+ }
54
+
55
+ template <typename V>
56
+ void assign(V&& v) noexcept {
57
+ *get() = std::forward<V>(v);
58
+ }
59
+
60
+ void move_construct(AlignedStorage* other) noexcept {
61
+ new (&data_) T(std::move(*other->get()));
62
+ }
63
+
64
+ void move_assign(AlignedStorage* other) noexcept { *get() = std::move(*other->get()); }
65
+
66
+ template <bool CanMemcpy = can_memcpy>
67
+ static typename std::enable_if<CanMemcpy>::type move_construct_several(
68
+ AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, size_t n,
69
+ size_t memcpy_length) noexcept {
70
+ memcpy(dest->get(), src->get(), memcpy_length * sizeof(T));
71
+ }
72
+
73
+ template <bool CanMemcpy = can_memcpy>
74
+ static typename std::enable_if<CanMemcpy>::type
75
+ move_construct_several_and_destroy_source(AlignedStorage* ARROW_RESTRICT src,
76
+ AlignedStorage* ARROW_RESTRICT dest, size_t n,
77
+ size_t memcpy_length) noexcept {
78
+ memcpy(dest->get(), src->get(), memcpy_length * sizeof(T));
79
+ }
80
+
81
+ template <bool CanMemcpy = can_memcpy>
82
+ static typename std::enable_if<!CanMemcpy>::type move_construct_several(
83
+ AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, size_t n,
84
+ size_t memcpy_length) noexcept {
85
+ for (size_t i = 0; i < n; ++i) {
86
+ new (dest[i].get()) T(std::move(*src[i].get()));
87
+ }
88
+ }
89
+
90
+ template <bool CanMemcpy = can_memcpy>
91
+ static typename std::enable_if<!CanMemcpy>::type
92
+ move_construct_several_and_destroy_source(AlignedStorage* ARROW_RESTRICT src,
93
+ AlignedStorage* ARROW_RESTRICT dest, size_t n,
94
+ size_t memcpy_length) noexcept {
95
+ for (size_t i = 0; i < n; ++i) {
96
+ new (dest[i].get()) T(std::move(*src[i].get()));
97
+ src[i].destroy();
98
+ }
99
+ }
100
+
101
+ static void move_construct_several(AlignedStorage* ARROW_RESTRICT src,
102
+ AlignedStorage* ARROW_RESTRICT dest,
103
+ size_t n) noexcept {
104
+ move_construct_several(src, dest, n, n);
105
+ }
106
+
107
+ static void move_construct_several_and_destroy_source(
108
+ AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest,
109
+ size_t n) noexcept {
110
+ move_construct_several_and_destroy_source(src, dest, n, n);
111
+ }
112
+
113
+ static void destroy_several(AlignedStorage* p, size_t n) noexcept {
114
+ if (!std::is_trivially_destructible<T>::value) {
115
+ for (size_t i = 0; i < n; ++i) {
116
+ p[i].destroy();
117
+ }
118
+ }
119
+ }
120
+
121
+ private:
122
+ #if !defined(__clang__) && defined(__GNUC__) && defined(__i386__)
123
+ // Workaround for GCC bug on i386:
124
+ // alignof(int64 | float64) can give different results depending on the
125
+ // compilation context, leading to internal ABI mismatch manifesting
126
+ // in incorrect propagation of Result<int64 | float64> between
127
+ // compilation units.
128
+ // (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88115)
129
+ static constexpr size_t alignment() {
130
+ if (std::is_integral_v<T> && sizeof(T) == 8) {
131
+ return 4;
132
+ } else if (std::is_floating_point_v<T> && sizeof(T) == 8) {
133
+ return 4;
134
+ }
135
+ return alignof(T);
136
+ }
137
+
138
+ typename std::aligned_storage<sizeof(T), alignment()>::type data_;
139
+ #else
140
+ typename std::aligned_storage<sizeof(T), alignof(T)>::type data_;
141
+ #endif
142
+ };
143
+
144
+ } // namespace internal
145
+ } // namespace arrow
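A hedged usage sketch of the manual lifetime protocol above, the pattern used by
containers that construct elements lazily inside raw storage.

#include <string>

void AlignedStorageDemo() {
  arrow::internal::AlignedStorage<std::string> slot;  // raw, suitably aligned bytes
  slot.construct("hello");   // placement-new into the storage
  *slot.get() += ", world";  // typed access through the laundered pointer
  slot.destroy();            // explicit destructor call; slot is reusable
}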
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h ADDED
@@ -0,0 +1,2058 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <atomic>
+ #include <cassert>
+ #include <cstring>
+ #include <deque>
+ #include <limits>
+ #include <optional>
+ #include <queue>
+
+ #include "arrow/util/async_generator_fwd.h"
+ #include "arrow/util/async_util.h"
+ #include "arrow/util/functional.h"
+ #include "arrow/util/future.h"
+ #include "arrow/util/io_util.h"
+ #include "arrow/util/iterator.h"
+ #include "arrow/util/mutex.h"
+ #include "arrow/util/queue.h"
+ #include "arrow/util/thread_pool.h"
+
+ namespace arrow {
+
+ // The methods in this file create, modify, and utilize AsyncGenerator, which is an
+ // iterator of futures. This allows an asynchronous source (like file input) to be run
+ // through a pipeline in the same way that iterators can be used to create pipelined
+ // workflows.
+ //
+ // In order to support pipeline parallelism we introduce the concept of asynchronous
+ // reentrancy. This is different from synchronous reentrancy. With synchronous code, a
+ // function is reentrant if the function can be called again while a previous call to
+ // that function is still running. Unless otherwise specified, none of these generators
+ // are synchronously reentrant. Care should be taken to avoid calling them in such a way
+ // (and the utilities Visit/Collect/Await take care to do this).
+ //
+ // Asynchronous reentrancy, on the other hand, means the function is called again before
+ // the future returned by the function is marked finished (but after the call to get the
+ // future returns). Some of these generators are async-reentrant while others (e.g.
+ // those that depend on ordered processing like decompression) are not. Read the MakeXYZ
+ // function comments to determine which generators support async reentrancy.
+ //
+ // Note: Generators that are not asynchronously reentrant can still support readahead
+ // (\see MakeSerialReadaheadGenerator).
+ //
+ // Readahead operators, and some other operators, may introduce queueing. Any operators
+ // that introduce buffering should detail the amount of buffering they introduce in
+ // their MakeXYZ function comments.
+ //
+ // A generator should always be fully consumed before it is destroyed.
+ // A generator should not mark a future complete with an error status or a terminal
+ // value until all outstanding futures have completed. Generators that spawn multiple
+ // concurrent futures may need to hold onto an error while other concurrent futures wrap
+ // up.
+ template <typename T>
+ struct IterationTraits<AsyncGenerator<T>> {
+   /// \brief By default, when iterating through a sequence of AsyncGenerator<T>,
+   /// an empty function indicates the end of iteration.
+   static AsyncGenerator<T> End() { return AsyncGenerator<T>(); }
+
+   static bool IsEnd(const AsyncGenerator<T>& val) { return !val; }
+ };
+
+ template <typename T>
+ Future<T> AsyncGeneratorEnd() {
+   return Future<T>::MakeFinished(IterationTraits<T>::End());
+ }
+
+ /// \brief Visit each item of an async generator with a visitor,
+ /// returning a future that completes when all have been visited
+ template <typename T, typename Visitor>
+ Future<> VisitAsyncGenerator(AsyncGenerator<T> generator, Visitor visitor) {
+   struct LoopBody {
+     struct Callback {
+       Result<ControlFlow<>> operator()(const T& next) {
+         if (IsIterationEnd(next)) {
+           return Break();
+         } else {
+           auto visited = visitor(next);
+           if (visited.ok()) {
+             return Continue();
+           } else {
+             return visited;
+           }
+         }
+       }
+
+       Visitor visitor;
+     };
+
+     Future<ControlFlow<>> operator()() {
+       Callback callback{visitor};
+       auto next = generator();
+       return next.Then(std::move(callback));
+     }
+
+     AsyncGenerator<T> generator;
+     Visitor visitor;
+   };
+
+   return Loop(LoopBody{std::move(generator), std::move(visitor)});
+ }
+
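Editorial usage sketch, not part of the commit: driving a generator to completion with VisitAsyncGenerator. It assumes the default IterationTraits<int>, which treats 0 as the end token (hence nonzero sample values); MakeVectorGenerator is defined later in this header, and SumAll is a hypothetical wrapper.

#include <memory>
#include "arrow/util/async_generator.h"

arrow::Future<> SumAll(std::shared_ptr<int64_t> total) {
  auto gen = arrow::MakeVectorGenerator<int>({1, 2, 3});
  // The visitor runs on each item; iteration stops at the end token (0 for int).
  return arrow::VisitAsyncGenerator<int>(std::move(gen), [total](int v) {
    *total += v;
    return arrow::Status::OK();
  });
}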
+ /// \brief Wait for an async generator to complete, discarding results.
+ template <typename T>
+ Future<> DiscardAllFromAsyncGenerator(AsyncGenerator<T> generator) {
+   std::function<Status(T)> visitor = [](const T&) { return Status::OK(); };
+   return VisitAsyncGenerator(generator, visitor);
+ }
+
+ /// \brief Collect the results of an async generator into a vector
+ template <typename T>
+ Future<std::vector<T>> CollectAsyncGenerator(AsyncGenerator<T> generator) {
+   auto vec = std::make_shared<std::vector<T>>();
+   auto loop_body = [generator = std::move(generator),
+                     vec = std::move(vec)]() -> Future<ControlFlow<std::vector<T>>> {
+     auto next = generator();
+     return next.Then([vec](const T& result) -> Result<ControlFlow<std::vector<T>>> {
+       if (IsIterationEnd(result)) {
+         return Break(*vec);
+       } else {
+         vec->push_back(result);
+         return Continue();
+       }
+     });
+   };
+   return Loop(std::move(loop_body));
+ }
+
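Editorial usage sketch, not part of the commit: gathering every item into a std::vector with CollectAsyncGenerator. As above, it assumes the default IterationTraits<int> (0 is the end token), and CollectExample is a hypothetical name.

#include "arrow/util/async_generator.h"

arrow::Future<std::vector<int>> CollectExample() {
  auto gen = arrow::MakeVectorGenerator<int>({10, 20, 30});
  // Resolves to {10, 20, 30} once the source reaches its end token.
  return arrow::CollectAsyncGenerator(std::move(gen));
}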
+ /// \see MakeMappedGenerator
+ template <typename T, typename V>
+ class MappingGenerator {
+  public:
+   MappingGenerator(AsyncGenerator<T> source, std::function<Future<V>(const T&)> map)
+       : state_(std::make_shared<State>(std::move(source), std::move(map))) {}
+
+   Future<V> operator()() {
+     auto future = Future<V>::Make();
+     bool should_trigger;
+     {
+       auto guard = state_->mutex.Lock();
+       if (state_->finished) {
+         return AsyncGeneratorEnd<V>();
+       }
+       should_trigger = state_->waiting_jobs.empty();
+       state_->waiting_jobs.push_back(future);
+     }
+     if (should_trigger) {
+       state_->source().AddCallback(Callback{state_});
+     }
+     return future;
+   }
+
+  private:
+   struct State {
+     State(AsyncGenerator<T> source, std::function<Future<V>(const T&)> map)
+         : source(std::move(source)),
+           map(std::move(map)),
+           waiting_jobs(),
+           mutex(),
+           finished(false) {}
+
+     void Purge() {
+       // This might be called by an original callback (if the source iterator fails or
+       // ends) or by a mapped callback (if the map function fails or ends prematurely).
+       // Either way it should only be called once and after finished is set so there is
+       // no need to guard access to `waiting_jobs`.
+       while (!waiting_jobs.empty()) {
+         waiting_jobs.front().MarkFinished(IterationTraits<V>::End());
+         waiting_jobs.pop_front();
+       }
+     }
+
+     AsyncGenerator<T> source;
+     std::function<Future<V>(const T&)> map;
+     std::deque<Future<V>> waiting_jobs;
+     util::Mutex mutex;
+     bool finished;
+   };
+
+   struct Callback;
+
+   struct MappedCallback {
+     void operator()(const Result<V>& maybe_next) {
+       bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next);
+       bool should_purge = false;
+       if (end) {
+         {
+           auto guard = state->mutex.Lock();
+           should_purge = !state->finished;
+           state->finished = true;
+         }
+       }
+       sink.MarkFinished(maybe_next);
+       if (should_purge) {
+         state->Purge();
+       }
+     }
+     std::shared_ptr<State> state;
+     Future<V> sink;
+   };
+
+   struct Callback {
+     void operator()(const Result<T>& maybe_next) {
+       Future<V> sink;
+       bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next);
+       bool should_purge = false;
+       bool should_trigger;
+       {
+         auto guard = state->mutex.Lock();
+         // A MappedCallback may have purged or be purging the queue;
+         // we shouldn't do anything here.
+         if (state->finished) return;
+         if (end) {
+           should_purge = !state->finished;
+           state->finished = true;
+         }
+         sink = state->waiting_jobs.front();
+         state->waiting_jobs.pop_front();
+         should_trigger = !end && !state->waiting_jobs.empty();
+       }
+       if (should_purge) {
+         state->Purge();
+       }
+       if (should_trigger) {
+         state->source().AddCallback(Callback{state});
+       }
+       if (maybe_next.ok()) {
+         const T& val = maybe_next.ValueUnsafe();
+         if (IsIterationEnd(val)) {
+           sink.MarkFinished(IterationTraits<V>::End());
+         } else {
+           Future<V> mapped_fut = state->map(val);
+           mapped_fut.AddCallback(MappedCallback{std::move(state), std::move(sink)});
+         }
+       } else {
+         sink.MarkFinished(maybe_next.status());
+       }
+     }
+
+     std::shared_ptr<State> state;
+   };
+
+   std::shared_ptr<State> state_;
+ };
+
+ /// \brief Create a generator that will apply the map function to each element of
+ /// source. The map function is not called on the end token.
+ ///
+ /// Note: This function makes a copy of `map` for each item
+ /// Note: Errors returned from the `map` function will be propagated
+ ///
+ /// If the source generator is async-reentrant then this generator will be as well
+ template <typename T, typename MapFn,
+           typename Mapped = detail::result_of_t<MapFn(const T&)>,
+           typename V = typename EnsureFuture<Mapped>::type::ValueType>
+ AsyncGenerator<V> MakeMappedGenerator(AsyncGenerator<T> source_generator, MapFn map) {
+   auto map_callback = [map = std::move(map)](const T& val) mutable -> Future<V> {
+     return ToFuture(map(val));
+   };
+   return MappingGenerator<T, V>(std::move(source_generator), std::move(map_callback));
+ }
+
+ /// \brief Create a generator that will apply the map function to
+ /// each element of source. The map function is not called on the end
+ /// token. The result of the map function should be another
+ /// generator; all these generators will then be flattened to produce
+ /// a single stream of items.
+ ///
+ /// Note: This function makes a copy of `map` for each item
+ /// Note: Errors returned from the `map` function will be propagated
+ ///
+ /// If the source generator is async-reentrant then this generator will be as well
+ template <typename T, typename MapFn,
+           typename Mapped = detail::result_of_t<MapFn(const T&)>,
+           typename V = typename EnsureFuture<Mapped>::type::ValueType>
+ AsyncGenerator<T> MakeFlatMappedGenerator(AsyncGenerator<T> source_generator, MapFn map) {
+   return MakeConcatenatedGenerator(
+       MakeMappedGenerator(std::move(source_generator), std::move(map)));
+ }
+
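Editorial usage sketch, not part of the commit: mapping each int to a string. The map function may return either V or Future<V>; both are normalized via ToFuture. Stringify is a hypothetical name, and the example assumes the default IterationTraits<std::string> (empty string as end token).

#include <string>
#include "arrow/util/async_generator.h"

arrow::AsyncGenerator<std::string> Stringify(arrow::AsyncGenerator<int> source) {
  // The lambda is never invoked on the end token (0 for int).
  return arrow::MakeMappedGenerator(
      std::move(source), [](const int& v) { return std::to_string(v); });
}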
+ /// \see MakeSequencingGenerator
+ template <typename T, typename ComesAfter, typename IsNext>
+ class SequencingGenerator {
+  public:
+   SequencingGenerator(AsyncGenerator<T> source, ComesAfter compare, IsNext is_next,
+                       T initial_value)
+       : state_(std::make_shared<State>(std::move(source), std::move(compare),
+                                        std::move(is_next), std::move(initial_value))) {}
+
+   Future<T> operator()() {
+     {
+       auto guard = state_->mutex.Lock();
+       // We can send a result immediately if the top of the queue is either an
+       // error or the next item
+       if (!state_->queue.empty() &&
+           (!state_->queue.top().ok() ||
+            state_->is_next(state_->previous_value, *state_->queue.top()))) {
+         auto result = std::move(state_->queue.top());
+         if (result.ok()) {
+           state_->previous_value = *result;
+         }
+         state_->queue.pop();
+         return Future<T>::MakeFinished(result);
+       }
+       if (state_->finished) {
+         return AsyncGeneratorEnd<T>();
+       }
+       // The next item is not in the queue so we will need to wait
+       auto new_waiting_fut = Future<T>::Make();
+       state_->waiting_future = new_waiting_fut;
+       guard.Unlock();
+       state_->source().AddCallback(Callback{state_});
+       return new_waiting_fut;
+     }
+   }
+
+  private:
+   struct WrappedComesAfter {
+     bool operator()(const Result<T>& left, const Result<T>& right) {
+       if (!left.ok() || !right.ok()) {
+         // Should never happen
+         return false;
+       }
+       return compare(*left, *right);
+     }
+     ComesAfter compare;
+   };
+
+   struct State {
+     State(AsyncGenerator<T> source, ComesAfter compare, IsNext is_next, T initial_value)
+         : source(std::move(source)),
+           is_next(std::move(is_next)),
+           previous_value(std::move(initial_value)),
+           waiting_future(),
+           queue(WrappedComesAfter{compare}),
+           finished(false),
+           mutex() {}
+
+     AsyncGenerator<T> source;
+     IsNext is_next;
+     T previous_value;
+     Future<T> waiting_future;
+     std::priority_queue<Result<T>, std::vector<Result<T>>, WrappedComesAfter> queue;
+     bool finished;
+     util::Mutex mutex;
+   };
+
+   class Callback {
+    public:
+     explicit Callback(std::shared_ptr<State> state) : state_(std::move(state)) {}
+
+     void operator()(const Result<T> result) {
+       Future<T> to_deliver;
+       bool finished;
+       {
+         auto guard = state_->mutex.Lock();
+         bool ready_to_deliver = false;
+         if (!result.ok()) {
+           // Clear any cached results
+           while (!state_->queue.empty()) {
+             state_->queue.pop();
+           }
+           ready_to_deliver = true;
+           state_->finished = true;
+         } else if (IsIterationEnd<T>(result.ValueUnsafe())) {
+           ready_to_deliver = state_->queue.empty();
+           state_->finished = true;
+         } else {
+           ready_to_deliver = state_->is_next(state_->previous_value, *result);
+         }
+
+         if (ready_to_deliver && state_->waiting_future.is_valid()) {
+           to_deliver = state_->waiting_future;
+           if (result.ok()) {
+             state_->previous_value = *result;
+           }
+         } else {
+           state_->queue.push(result);
+         }
+         // Capture state_->finished so we can access it outside the mutex
+         finished = state_->finished;
+       }
+       // Must deliver result outside of the mutex
+       if (to_deliver.is_valid()) {
+         to_deliver.MarkFinished(result);
+       } else {
+         // Otherwise, if we didn't get the next item (or a terminal item), we
+         // need to keep looking
+         if (!finished) {
+           state_->source().AddCallback(Callback{state_});
+         }
+       }
+     }
+
+    private:
+     const std::shared_ptr<State> state_;
+   };
+
+   const std::shared_ptr<State> state_;
+ };
+
+ /// \brief Buffer an AsyncGenerator to return values in sequence order. ComesAfter
+ /// and IsNext determine the sequence order.
+ ///
+ /// ComesAfter should be a BinaryPredicate that only returns true if `a` comes after `b`
+ ///
+ /// IsNext should be a BinaryPredicate that returns true, given `a` and `b`, only if
+ /// `b` follows immediately after `a`. It should return true given `initial_value` and
+ /// `b` if `b` is the first item in the sequence.
+ ///
+ /// This operator will queue unboundedly while waiting for the next item. It is intended
+ /// for jittery sources that might scatter an ordered sequence. It is NOT intended to
+ /// sort. Using it to try to sort could result in excessive RAM usage. This generator
+ /// will queue up to N blocks where N is the max "out of order"-ness of the source.
+ ///
+ /// For example, if the source is 1,6,2,5,4,3 it will queue 3 blocks because 3 is 3
+ /// blocks beyond where it belongs.
+ ///
+ /// This generator is not async-reentrant but it consists only of a simple log(n)
+ /// insertion into a priority queue.
+ template <typename T, typename ComesAfter, typename IsNext>
+ AsyncGenerator<T> MakeSequencingGenerator(AsyncGenerator<T> source_generator,
+                                           ComesAfter compare, IsNext is_next,
+                                           T initial_value) {
+   return SequencingGenerator<T, ComesAfter, IsNext>(
+       std::move(source_generator), std::move(compare), std::move(is_next),
+       std::move(initial_value));
+ }
+
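Editorial usage sketch, not part of the commit: restoring a slightly-scrambled stream of positive ints to 1, 2, 3, ... order. Resequence is a hypothetical name; values must be nonzero since 0 is int's default end token.

#include "arrow/util/async_generator.h"

arrow::AsyncGenerator<int> Resequence(arrow::AsyncGenerator<int> jittery) {
  auto comes_after = [](int a, int b) { return a > b; };  // a sorts after b
  auto is_next = [](int prev, int next) { return next == prev + 1; };
  // initial_value 0 means the first expected item is 1.
  return arrow::MakeSequencingGenerator(std::move(jittery), comes_after, is_next, 0);
}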
+ /// \see MakeTransformedGenerator
+ template <typename T, typename V>
+ class TransformingGenerator {
+   // The transforming generator state will be referenced as an async generator but will
+   // also be referenced via callback to various futures. If the async generator owner
+   // moves it around we need the state to be consistent for future callbacks.
+   struct TransformingGeneratorState
+       : std::enable_shared_from_this<TransformingGeneratorState> {
+     TransformingGeneratorState(AsyncGenerator<T> generator, Transformer<T, V> transformer)
+         : generator_(std::move(generator)),
+           transformer_(std::move(transformer)),
+           last_value_(),
+           finished_() {}
+
+     Future<V> operator()() {
+       while (true) {
+         auto maybe_next_result = Pump();
+         if (!maybe_next_result.ok()) {
+           return Future<V>::MakeFinished(maybe_next_result.status());
+         }
+         auto maybe_next = std::move(maybe_next_result).ValueUnsafe();
+         if (maybe_next.has_value()) {
+           return Future<V>::MakeFinished(*std::move(maybe_next));
+         }
+
+         auto next_fut = generator_();
+         // If finished already, process results immediately inside the loop to avoid
+         // stack overflow
+         if (next_fut.is_finished()) {
+           auto next_result = next_fut.result();
+           if (next_result.ok()) {
+             last_value_ = *next_result;
+           } else {
+             return Future<V>::MakeFinished(next_result.status());
+           }
+           // Otherwise, if not finished immediately, add callback to process results
+         } else {
+           auto self = this->shared_from_this();
+           return next_fut.Then([self](const T& next_result) {
+             self->last_value_ = next_result;
+             return (*self)();
+           });
+         }
+       }
+     }
+
+     // See comment on TransformingIterator::Pump
+     Result<std::optional<V>> Pump() {
+       if (!finished_ && last_value_.has_value()) {
+         ARROW_ASSIGN_OR_RAISE(TransformFlow<V> next, transformer_(*last_value_));
+         if (next.ReadyForNext()) {
+           if (IsIterationEnd(*last_value_)) {
+             finished_ = true;
+           }
+           last_value_.reset();
+         }
+         if (next.Finished()) {
+           finished_ = true;
+         }
+         if (next.HasValue()) {
+           return next.Value();
+         }
+       }
+       if (finished_) {
+         return IterationTraits<V>::End();
+       }
+       return std::nullopt;
+     }
+
+     AsyncGenerator<T> generator_;
+     Transformer<T, V> transformer_;
+     std::optional<T> last_value_;
+     bool finished_;
+   };
+
+  public:
+   explicit TransformingGenerator(AsyncGenerator<T> generator,
+                                  Transformer<T, V> transformer)
+       : state_(std::make_shared<TransformingGeneratorState>(std::move(generator),
+                                                             std::move(transformer))) {}
+
+   Future<V> operator()() { return (*state_)(); }
+
+  protected:
+   std::shared_ptr<TransformingGeneratorState> state_;
+ };
+
+ /// \brief Transform an async generator using a transformer function returning a new
+ /// AsyncGenerator
+ ///
+ /// The transform function here behaves exactly the same as the transform function in
+ /// MakeTransformedIterator and you can safely use the same transform function to
+ /// transform both synchronous and asynchronous streams.
+ ///
+ /// This generator is not async-reentrant
+ ///
+ /// This generator may queue up to 1 instance of T but will not delay
+ template <typename T, typename V>
+ AsyncGenerator<V> MakeTransformedGenerator(AsyncGenerator<T> generator,
+                                            Transformer<T, V> transformer) {
+   return TransformingGenerator<T, V>(generator, transformer);
+ }
+
+ /// \see MakeSerialReadaheadGenerator
+ template <typename T>
+ class SerialReadaheadGenerator {
+  public:
+   SerialReadaheadGenerator(AsyncGenerator<T> source_generator, int max_readahead)
+       : state_(std::make_shared<State>(std::move(source_generator), max_readahead)) {}
+
+   Future<T> operator()() {
+     if (state_->first_) {
+       // Lazy generator, need to wait for the first ask to prime the pump
+       state_->first_ = false;
+       auto next = state_->source_();
+       return next.Then(Callback{state_}, ErrCallback{state_});
+     }
+
+     // This generator is not async-reentrant. We won't be called until the last
+     // future has finished so we know there is something in the queue
+     auto finished = state_->finished_.load();
+     if (finished && state_->readahead_queue_.IsEmpty()) {
+       return AsyncGeneratorEnd<T>();
+     }
+
+     std::shared_ptr<Future<T>> next;
+     if (!state_->readahead_queue_.Read(next)) {
+       return Status::UnknownError("Could not read from readahead_queue");
+     }
+
+     auto last_available = state_->spaces_available_.fetch_add(1);
+     if (last_available == 0 && !finished) {
+       // Reader idled out, we need to restart it
+       ARROW_RETURN_NOT_OK(state_->Pump(state_));
+     }
+     return *next;
+   }
+
+  private:
+   struct State {
+     State(AsyncGenerator<T> source, int max_readahead)
+         : first_(true),
+           source_(std::move(source)),
+           finished_(false),
+           // There is one extra "space" for the in-flight request
+           spaces_available_(max_readahead + 1),
+           // The SPSC queue has size-1 "usable" slots so we need to overallocate 1
+           readahead_queue_(max_readahead + 1) {}
+
+     Status Pump(const std::shared_ptr<State>& self) {
+       // Can't do readahead_queue.write(source().Then(...)) because then the
+       // callback might run immediately and add itself to the queue before this gets
+       // added to the queue, messing up the order.
+       auto next_slot = std::make_shared<Future<T>>();
+       auto written = readahead_queue_.Write(next_slot);
+       if (!written) {
+         return Status::UnknownError("Could not write to readahead_queue");
+       }
+       // If this Pump is being called from a callback it is possible for the source to
+       // poll and read from the queue between the Write and this spot where we fill the
+       // value in. However, it is not possible for the future to read this value we are
+       // writing. That is because this callback (the callback for future X) must be
+       // finished before future X is marked complete and this source is not pulled
+       // reentrantly so it will not poll for future X+1 until this callback has
+       // completed.
+       *next_slot = source_().Then(Callback{self}, ErrCallback{self});
+       return Status::OK();
+     }
+
+     // Only accessed by the consumer end
+     bool first_;
+     // Accessed by both threads
+     AsyncGenerator<T> source_;
+     std::atomic<bool> finished_;
+     // The queue has a size but it is not atomic. We keep track of how many spaces are
+     // left in the queue here so we know if we've just written the last value and we
+     // need to stop reading ahead or if we've just read from a full queue and we need
+     // to restart reading ahead
+     std::atomic<uint32_t> spaces_available_;
+     // Needs to be a queue of shared_ptr and not Future because we set the value of the
+     // future after we add it to the queue
+     util::SpscQueue<std::shared_ptr<Future<T>>> readahead_queue_;
+   };
+
+   struct Callback {
+     Result<T> operator()(const T& next) {
+       if (IsIterationEnd(next)) {
+         state_->finished_.store(true);
+         return next;
+       }
+       auto last_available = state_->spaces_available_.fetch_sub(1);
+       if (last_available > 1) {
+         ARROW_RETURN_NOT_OK(state_->Pump(state_));
+       }
+       return next;
+     }
+
+     std::shared_ptr<State> state_;
+   };
+
+   struct ErrCallback {
+     Result<T> operator()(const Status& st) {
+       state_->finished_.store(true);
+       return st;
+     }
+
+     std::shared_ptr<State> state_;
+   };
+
+   std::shared_ptr<State> state_;
+ };
+
+ /// \see MakeFromFuture
+ template <typename T>
+ class FutureFirstGenerator {
+  public:
+   explicit FutureFirstGenerator(Future<AsyncGenerator<T>> future)
+       : state_(std::make_shared<State>(std::move(future))) {}
+
+   Future<T> operator()() {
+     if (state_->source_) {
+       return state_->source_();
+     } else {
+       auto state = state_;
+       return state_->future_.Then([state](const AsyncGenerator<T>& source) {
+         state->source_ = source;
+         return state->source_();
+       });
+     }
+   }
+
+  private:
+   struct State {
+     explicit State(Future<AsyncGenerator<T>> future) : future_(future), source_() {}
+
+     Future<AsyncGenerator<T>> future_;
+     AsyncGenerator<T> source_;
+   };
+
+   std::shared_ptr<State> state_;
+ };
+
+ /// \brief Transform a Future<AsyncGenerator<T>> into an AsyncGenerator<T>
+ /// that waits for the future to complete as part of the first item.
+ ///
+ /// This generator is not async-reentrant (even if the generator yielded by future is)
+ ///
+ /// This generator does not queue
+ template <typename T>
+ AsyncGenerator<T> MakeFromFuture(Future<AsyncGenerator<T>> future) {
+   return FutureFirstGenerator<T>(std::move(future));
+ }
+
+ /// \brief Create a generator that will pull from the source into a queue. Unlike
+ /// MakeReadaheadGenerator this will not pull reentrantly from the source.
+ ///
+ /// The source generator does not need to be async-reentrant
+ ///
+ /// This generator is not async-reentrant (even if the source is)
+ ///
+ /// This generator may queue up to max_readahead additional instances of T
+ template <typename T>
+ AsyncGenerator<T> MakeSerialReadaheadGenerator(AsyncGenerator<T> source_generator,
+                                                int max_readahead) {
+   return SerialReadaheadGenerator<T>(std::move(source_generator), max_readahead);
+ }
+
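Editorial usage sketch, not part of the commit: buffering up to 4 items ahead of the consumer without ever issuing overlapping calls to a non-reentrant source. Buffered is a hypothetical name.

#include "arrow/util/async_generator.h"

arrow::AsyncGenerator<int> Buffered(arrow::AsyncGenerator<int> source) {
  // One source request is in flight at a time; up to 4 results wait in the queue.
  return arrow::MakeSerialReadaheadGenerator(std::move(source), /*max_readahead=*/4);
}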
+ /// \brief Create a generator that immediately pulls from the source
+ ///
+ /// Typical generators do not pull from their source until they themselves
+ /// are pulled. This generator does not follow that convention and will call
+ /// generator() once before it returns. The returned generator will otherwise
+ /// mirror the source.
+ ///
+ /// This generator forwards async-reentrant pressure to the source
+ /// This generator buffers one item (the first result) until it is delivered.
+ template <typename T>
+ AsyncGenerator<T> MakeAutoStartingGenerator(AsyncGenerator<T> generator) {
+   struct AutostartGenerator {
+     Future<T> operator()() {
+       if (first_future->is_valid()) {
+         Future<T> result = *first_future;
+         *first_future = Future<T>();
+         return result;
+       }
+       return source();
+     }
+
+     std::shared_ptr<Future<T>> first_future;
+     AsyncGenerator<T> source;
+   };
+
+   std::shared_ptr<Future<T>> first_future = std::make_shared<Future<T>>(generator());
+   return AutostartGenerator{std::move(first_future), std::move(generator)};
+ }
+
+ /// \see MakeReadaheadGenerator
+ template <typename T>
+ class ReadaheadGenerator {
+  public:
+   ReadaheadGenerator(AsyncGenerator<T> source_generator, int max_readahead)
+       : state_(std::make_shared<State>(std::move(source_generator), max_readahead)) {}
+
+   Future<T> AddMarkFinishedContinuation(Future<T> fut) {
+     auto state = state_;
+     return fut.Then(
+         [state](const T& result) -> Future<T> {
+           state->MarkFinishedIfDone(result);
+           if (state->finished.load()) {
+             if (state->num_running.fetch_sub(1) == 1) {
+               state->final_future.MarkFinished();
+             }
+           } else {
+             state->num_running.fetch_sub(1);
+           }
+           return result;
+         },
+         [state](const Status& err) -> Future<T> {
+           // If there is an error we need to make sure all running
+           // tasks finish before we return the error.
+           state->finished.store(true);
+           if (state->num_running.fetch_sub(1) == 1) {
+             state->final_future.MarkFinished();
+           }
+           return state->final_future.Then([err]() -> Result<T> { return err; });
+         });
+   }
+
+   Future<T> operator()() {
+     if (state_->readahead_queue.empty()) {
+       // This is the first request, let's pump the underlying queue
+       state_->num_running.store(state_->max_readahead);
+       for (int i = 0; i < state_->max_readahead; i++) {
+         auto next = state_->source_generator();
+         auto next_after_check = AddMarkFinishedContinuation(std::move(next));
+         state_->readahead_queue.push(std::move(next_after_check));
+       }
+     }
+     // Pop one and add one
+     auto result = state_->readahead_queue.front();
+     state_->readahead_queue.pop();
+     if (state_->finished.load()) {
+       state_->readahead_queue.push(AsyncGeneratorEnd<T>());
+     } else {
+       state_->num_running.fetch_add(1);
+       auto back_of_queue = state_->source_generator();
+       auto back_of_queue_after_check =
+           AddMarkFinishedContinuation(std::move(back_of_queue));
+       state_->readahead_queue.push(std::move(back_of_queue_after_check));
+     }
+     return result;
+   }
+
+  private:
+   struct State {
+     State(AsyncGenerator<T> source_generator, int max_readahead)
+         : source_generator(std::move(source_generator)), max_readahead(max_readahead) {}
+
+     void MarkFinishedIfDone(const T& next_result) {
+       if (IsIterationEnd(next_result)) {
+         finished.store(true);
+       }
+     }
+
+     AsyncGenerator<T> source_generator;
+     int max_readahead;
+     Future<> final_future = Future<>::Make();
+     std::atomic<int> num_running{0};
+     std::atomic<bool> finished{false};
+     std::queue<Future<T>> readahead_queue;
+   };
+
+   std::shared_ptr<State> state_;
+ };
+
+ /// \brief A generator where the producer pushes items on a queue.
+ ///
+ /// No back-pressure is applied, so this generator is mostly useful when
+ /// producing the values is neither CPU- nor memory-expensive (e.g. fetching
+ /// filesystem metadata).
+ ///
+ /// This generator is not async-reentrant.
+ template <typename T>
+ class PushGenerator {
+   struct State {
+     State() {}
+
+     util::Mutex mutex;
+     std::deque<Result<T>> result_q;
+     std::optional<Future<T>> consumer_fut;
+     bool finished = false;
+   };
+
+  public:
+   /// Producer API for PushGenerator
+   class Producer {
+    public:
+     explicit Producer(const std::shared_ptr<State>& state) : weak_state_(state) {}
+
+     /// \brief Push a value on the queue
+     ///
+     /// True is returned if the value was pushed, false if the generator is
+     /// already closed or destroyed. If the latter, it is recommended to stop
+     /// producing any further values.
+     bool Push(Result<T> result) {
+       auto state = weak_state_.lock();
+       if (!state) {
+         // Generator was destroyed
+         return false;
+       }
+       auto lock = state->mutex.Lock();
+       if (state->finished) {
+         // Closed early
+         return false;
+       }
+       if (state->consumer_fut.has_value()) {
+         auto fut = std::move(state->consumer_fut.value());
+         state->consumer_fut.reset();
+         lock.Unlock();  // unlock before potentially invoking a callback
+         fut.MarkFinished(std::move(result));
+       } else {
+         state->result_q.push_back(std::move(result));
+       }
+       return true;
+     }
+
+     /// \brief Tell the consumer we have finished producing
+     ///
+     /// It is allowed to call this and later call Push() again ("early close").
+     /// In this case, calls to Push() after the queue is closed are silently
+     /// ignored. This can help implement non-trivial cancellation cases.
+     ///
+     /// True is returned on success, false if the generator is already closed
+     /// or destroyed.
+     bool Close() {
+       auto state = weak_state_.lock();
+       if (!state) {
+         // Generator was destroyed
+         return false;
+       }
+       auto lock = state->mutex.Lock();
+       if (state->finished) {
+         // Already closed
+         return false;
+       }
+       state->finished = true;
+       if (state->consumer_fut.has_value()) {
+         auto fut = std::move(state->consumer_fut.value());
+         state->consumer_fut.reset();
+         lock.Unlock();  // unlock before potentially invoking a callback
+         fut.MarkFinished(IterationTraits<T>::End());
+       }
+       return true;
+     }
+
+     /// Return whether the generator was closed or destroyed.
+     bool is_closed() const {
+       auto state = weak_state_.lock();
+       if (!state) {
+         // Generator was destroyed
+         return true;
+       }
+       auto lock = state->mutex.Lock();
+       return state->finished;
+     }
+
+    private:
+     const std::weak_ptr<State> weak_state_;
+   };
+
+   PushGenerator() : state_(std::make_shared<State>()) {}
+
+   /// Read an item from the queue
+   Future<T> operator()() const {
+     auto lock = state_->mutex.Lock();
+     assert(!state_->consumer_fut.has_value());  // Non-reentrant
+     if (!state_->result_q.empty()) {
+       auto fut = Future<T>::MakeFinished(std::move(state_->result_q.front()));
+       state_->result_q.pop_front();
+       return fut;
+     }
+     if (state_->finished) {
+       return AsyncGeneratorEnd<T>();
+     }
+     auto fut = Future<T>::Make();
+     state_->consumer_fut = fut;
+     return fut;
+   }
+
+   /// \brief Return producer-side interface
+   ///
+   /// The returned object must be used by the producer to push values on the queue.
+   /// Only a single Producer object should be instantiated.
+   Producer producer() { return Producer{state_}; }
+
+  private:
+   const std::shared_ptr<State> state_;
+ };
+
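Editorial usage sketch, not part of the commit: a producer thread pushes values while the consumer drains them through the generator interface. PushExample is a hypothetical name; copying the generator into CollectAsyncGenerator keeps its shared state alive.

#include <thread>
#include "arrow/util/async_generator.h"

arrow::Future<std::vector<int>> PushExample() {
  arrow::PushGenerator<int> gen;
  auto producer = gen.producer();
  std::thread([producer]() mutable {
    for (int i = 1; i <= 3; i++) {
      producer.Push(i);  // returns false if the consumer went away
    }
    producer.Close();  // delivers the end token to the consumer
  }).detach();
  return arrow::CollectAsyncGenerator<int>(gen);
}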
+ /// \brief Create a generator that pulls reentrantly from a source
+ /// This generator will pull reentrantly from a source, ensuring that max_readahead
+ /// requests are active at any given time.
+ ///
+ /// The source generator must be async-reentrant
+ ///
+ /// This generator itself is async-reentrant.
+ ///
+ /// This generator may queue up to max_readahead instances of T
+ template <typename T>
+ AsyncGenerator<T> MakeReadaheadGenerator(AsyncGenerator<T> source_generator,
+                                          int max_readahead) {
+   return ReadaheadGenerator<T>(std::move(source_generator), max_readahead);
+ }
+
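Editorial usage sketch, not part of the commit: keeping 8 overlapping requests in flight. Unlike the serial variant, the source here must be async-reentrant; a vector generator (below) qualifies. EagerStream is a hypothetical name.

#include "arrow/util/async_generator.h"

arrow::AsyncGenerator<int> EagerStream() {
  auto source = arrow::MakeVectorGenerator<int>({1, 2, 3, 4, 5});
  return arrow::MakeReadaheadGenerator(std::move(source), /*max_readahead=*/8);
}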
+ /// \brief Creates a generator that will yield finished futures from a vector
+ ///
+ /// This generator is async-reentrant
+ template <typename T>
+ AsyncGenerator<T> MakeVectorGenerator(std::vector<T> vec) {
+   struct State {
+     explicit State(std::vector<T> vec_) : vec(std::move(vec_)), vec_idx(0) {}
+
+     std::vector<T> vec;
+     std::atomic<std::size_t> vec_idx;
+   };
+
+   auto state = std::make_shared<State>(std::move(vec));
+   return [state]() {
+     auto idx = state->vec_idx.fetch_add(1);
+     if (idx >= state->vec.size()) {
+       // Eagerly return memory
+       state->vec.clear();
+       return AsyncGeneratorEnd<T>();
+     }
+     return Future<T>::MakeFinished(state->vec[idx]);
+   };
+ }
+
+ /// \see MakeMergedGenerator
+ template <typename T>
+ class MergedGenerator {
+   // Note, the implementation of this class is quite complex at the moment (PRs to
+   // simplify are always welcome)
+   //
+   // Terminology is borrowed from rxjs. This is a pull based implementation of the
+   // mergeAll operator. The "outer subscription" refers to the async
+   // generator that the caller provided when creating this. The outer subscription
+   // yields generators.
+   //
+   // Each of these generators is then subscribed to (up to max_subscriptions) and these
+   // are referred to as "inner subscriptions".
+   //
+   // As soon as we start we try to establish `max_subscriptions` inner subscriptions.
+   // For each inner subscription we will cache up to 1 value. This means we may have
+   // more values than we have been asked for. In our example, if a caller asks for one
+   // record batch we will start scanning `max_subscriptions` different files. For each
+   // file we will only queue up to 1 batch (so a separate readahead is needed on the
+   // file if batch readahead is desired).
+   //
+   // If the caller is slow we may accumulate ready-to-deliver items. These are stored
+   // in `delivered_jobs`.
+   //
+   // If the caller is very quick we may accumulate requests. These are stored in
+   // `waiting_jobs`.
+   //
+   // It may be helpful to consider an example: in the scanner the outer subscription
+   // is some kind of asynchronous directory listing. The inner subscription is
+   // then a scan on a file yielded by the directory listing.
+   //
+   // An "outstanding" request is when we have polled either the inner or outer
+   // subscription but that future hasn't completed yet.
+   //
+   // There are three possible "events" that can happen.
+   // * A caller could request the next future
+   // * An outer callback occurs when the next subscription is ready (e.g. the directory
+   //   listing has produced a new file)
+   // * An inner callback occurs when one of the inner subscriptions emits a value (e.g.
+   //   a file scan emits a record batch)
+   //
+   // Any time an event happens the logic is broken into two phases. First, we grab the
+   // lock and modify the shared state. While doing this we figure out what callbacks we
+   // will need to execute. Then, we give up the lock and execute these callbacks. It is
+   // important to execute these callbacks without the lock to avoid deadlock.
+  public:
+   explicit MergedGenerator(AsyncGenerator<AsyncGenerator<T>> source,
+                            int max_subscriptions)
+       : state_(std::make_shared<State>(std::move(source), max_subscriptions)) {}
+
+   Future<T> operator()() {
+     // A caller has requested a future
+     Future<T> waiting_future;
+     std::shared_ptr<DeliveredJob> delivered_job;
+     bool mark_generator_complete = false;
+     {
+       auto guard = state_->mutex.Lock();
+       if (!state_->delivered_jobs.empty()) {
+         // If we have a job sitting around we can deliver it
+         delivered_job = std::move(state_->delivered_jobs.front());
+         state_->delivered_jobs.pop_front();
+         if (state_->IsCompleteUnlocked(guard)) {
+           // It's possible this waiting job was the only thing left to handle and
+           // we have now completed the generator.
+           mark_generator_complete = true;
+         } else {
+           // Since we had a job sitting around we also had an inner subscription
+           // that had paused. We are going to restart this inner subscription and
+           // so there will be a new outstanding request.
+           state_->outstanding_requests++;
+         }
+       } else if (state_->broken ||
+                  (!state_->first && state_->num_running_subscriptions == 0)) {
+         // If we are broken or exhausted then prepare a terminal item but
+         // we won't complete it until we've finished.
+         Result<T> end_res = IterationEnd<T>();
+         if (!state_->final_error.ok()) {
+           end_res = state_->final_error;
+           state_->final_error = Status::OK();
+         }
+         return state_->all_finished.Then([end_res]() -> Result<T> { return end_res; });
+       } else {
+         // Otherwise we just queue the request and it will be completed when one of
+         // the ongoing inner subscriptions delivers a result
+         waiting_future = Future<T>::Make();
+         state_->waiting_jobs.push_back(std::make_shared<Future<T>>(waiting_future));
+       }
+       if (state_->first) {
+         // On the first request we are going to try and immediately fill our queue
+         // of subscriptions. We assume we are going to be able to start them all.
+         state_->outstanding_requests +=
+             static_cast<int>(state_->active_subscriptions.size());
+         state_->num_running_subscriptions +=
+             static_cast<int>(state_->active_subscriptions.size());
+       }
+     }
+     // If we grabbed a finished item from the delivered_jobs queue then we may need
+     // to mark the generator finished or issue a request for a new item to fill in
+     // the spot we just vacated. Notice that we issue that request to the same
+     // subscription that delivered it (deliverer).
+     if (delivered_job) {
+       if (mark_generator_complete) {
+         state_->all_finished.MarkFinished();
+       } else {
+         delivered_job->deliverer().AddCallback(
+             InnerCallback(state_, delivered_job->index));
+       }
+       return std::move(delivered_job->value);
+     }
+     // On the first call we try and fill up our subscriptions. It's possible the outer
+     // generator only has a few items and we can't fill up to what we were hoping. In
+     // that case we have to bail early.
+     if (state_->first) {
+       state_->first = false;
+       mark_generator_complete = false;
+       for (int i = 0; i < static_cast<int>(state_->active_subscriptions.size()); i++) {
+         state_->PullSource().AddCallback(
+             OuterCallback{state_, static_cast<std::size_t>(i)});
+         // If we have to bail early then we need to update the shared state again so
+         // we need to reacquire the lock.
+         auto guard = state_->mutex.Lock();
+         if (state_->source_exhausted) {
+           int excess_requests =
+               static_cast<int>(state_->active_subscriptions.size()) - i - 1;
+           state_->outstanding_requests -= excess_requests;
+           state_->num_running_subscriptions -= excess_requests;
+           if (excess_requests > 0) {
+             // It's possible that we are completing the generator by reducing the
+             // number of outstanding requests (e.g. this happens when the outer
+             // subscription and all inner subscriptions are synchronous)
+             mark_generator_complete = state_->IsCompleteUnlocked(guard);
+           }
+           break;
+         }
+       }
+       if (mark_generator_complete) {
+         state_->MarkFinishedAndPurge();
+       }
+     }
+     return waiting_future;
+   }
+
+  private:
+   struct DeliveredJob {
+     explicit DeliveredJob(AsyncGenerator<T> deliverer_, Result<T> value_,
+                           std::size_t index_)
+         : deliverer(deliverer_), value(std::move(value_)), index(index_) {}
+
+     // The generator that delivered this result, we will request another item
+     // from this generator once the result is delivered
+     AsyncGenerator<T> deliverer;
+     // The result we received from the generator
+     Result<T> value;
+     // The index of the generator (in active_subscriptions) that delivered this
+     // result. This is used if we need to replace a finished generator.
+     std::size_t index;
+   };
+
+   struct State {
+     State(AsyncGenerator<AsyncGenerator<T>> source, int max_subscriptions)
+         : source(std::move(source)),
+           active_subscriptions(max_subscriptions),
+           delivered_jobs(),
+           waiting_jobs(),
+           mutex(),
+           first(true),
+           broken(false),
+           source_exhausted(false),
+           outstanding_requests(0),
+           num_running_subscriptions(0),
+           final_error(Status::OK()) {}
+
+     Future<AsyncGenerator<T>> PullSource() {
+       // Need to guard access to source() so we don't pull sync-reentrantly which
+       // is never valid.
+       auto lock = mutex.Lock();
+       return source();
+     }
+
+     void SignalErrorUnlocked(const util::Mutex::Guard& guard) {
+       broken = true;
+       // Empty any results that have arrived but not been asked for.
+       while (!delivered_jobs.empty()) {
+         delivered_jobs.pop_front();
+       }
+     }
+
+     // This function is called outside the mutex but it will only ever be
+     // called once
+     void MarkFinishedAndPurge() {
+       all_finished.MarkFinished();
+       while (!waiting_jobs.empty()) {
+         waiting_jobs.front()->MarkFinished(IterationEnd<T>());
+         waiting_jobs.pop_front();
+       }
+     }
+
+     // This is called outside the mutex but it is only ever called
+     // once and Future<>::AddCallback is thread-safe
+     void MarkFinalError(const Status& err, Future<T> maybe_sink) {
+       if (maybe_sink.is_valid()) {
+         // Someone is waiting for this error so let's mark it complete when
+         // all the work is done
+         all_finished.AddCallback([maybe_sink, err](const Status& status) mutable {
+           maybe_sink.MarkFinished(err);
+         });
+       } else {
+         // No one is waiting for this error right now so it will be delivered
+         // next.
+         final_error = err;
+       }
+     }
+
+     bool IsCompleteUnlocked(const util::Mutex::Guard& guard) {
+       return outstanding_requests == 0 &&
+              (broken || (source_exhausted && num_running_subscriptions == 0 &&
+                          delivered_jobs.empty()));
+     }
+
+     bool MarkTaskFinishedUnlocked(const util::Mutex::Guard& guard) {
+       --outstanding_requests;
+       return IsCompleteUnlocked(guard);
+     }
+
+     // The outer generator. Each item we pull from this will be its own generator
+     // and become an inner subscription
+     AsyncGenerator<AsyncGenerator<T>> source;
+     // active_subscriptions and delivered_jobs will be bounded by max_subscriptions
+     std::vector<AsyncGenerator<T>> active_subscriptions;
+     // Results delivered by the inner subscriptions that weren't yet asked for by the
+     // caller
+     std::deque<std::shared_ptr<DeliveredJob>> delivered_jobs;
+     // waiting_jobs is unbounded, reentrant pulls (e.g. AddReadahead) will provide the
+     // backpressure
+     std::deque<std::shared_ptr<Future<T>>> waiting_jobs;
+     // A future that will be marked complete when the terminal item has arrived and
+     // all outstanding futures have completed. It is used to hold off emission of an
+     // error until all outstanding work is done.
+     Future<> all_finished = Future<>::Make();
+     util::Mutex mutex;
+     // A flag cleared when the caller first asks for a future. Used to start polling.
+     bool first;
+     // A flag set when an error arrives, prevents us from issuing new requests.
+     bool broken;
+     // A flag set when the outer subscription has been exhausted. Prevents us from
+     // pulling it further (even though it would be generally harmless) and lets us
+     // know we are finishing up.
+     bool source_exhausted;
+     // The number of futures that we have requested from either the outer or inner
+     // subscriptions that have not yet completed. We cannot mark all_finished until
+     // this reaches 0. This will never be greater than max_subscriptions
+     int outstanding_requests;
+     // The number of running subscriptions. We ramp this up to `max_subscriptions` as
+     // soon as the first item is requested and then it stays at that level (each
+     // exhausted inner subscription is replaced by a new inner subscription) until
+     // the outer subscription is exhausted, at which point this descends to 0 (and
+     // source_exhausted is then set to true).
+     int num_running_subscriptions;
+     // If an error arrives, and the caller hasn't asked for that item, we store the
+     // error here. It is analogous to delivered_jobs but for errors instead of
+     // finished results.
+     Status final_error;
+   };
+
+   struct InnerCallback {
+     InnerCallback(std::shared_ptr<State> state, std::size_t index, bool recursive = false)
+         : state(std::move(state)), index(index), recursive(recursive) {}
+
+     void operator()(const Result<T>& maybe_next_ref) {
+       // An item has been delivered by one of the inner subscriptions
+       Future<T> next_fut;
+       const Result<T>* maybe_next = &maybe_next_ref;
+
+       // When an item is delivered (and the caller has asked for it) we grab the
+       // next item from the inner subscription. To avoid this behavior leading to an
+       // infinite loop (this can happen if the caller's callback asks for the next
+       // item) we use a while loop.
+       while (true) {
+         Future<T> sink;
+         bool sub_finished = maybe_next->ok() && IsIterationEnd(**maybe_next);
+         bool pull_next_sub = false;
+         bool was_broken = false;
+         bool should_mark_gen_complete = false;
+         bool should_mark_final_error = false;
+         {
+           auto guard = state->mutex.Lock();
+           if (state->broken) {
+             // We've errored out previously so ignore the result. If anyone was
+             // waiting for this they will get IterationEnd when we purge
+             was_broken = true;
+           } else {
+             if (!sub_finished) {
+               // There is a result to deliver. Either we can deliver it now or we
+               // will queue it up
+               if (state->waiting_jobs.empty()) {
+                 state->delivered_jobs.push_back(std::make_shared<DeliveredJob>(
+                     state->active_subscriptions[index], *maybe_next, index));
+               } else {
+                 sink = std::move(*state->waiting_jobs.front());
+                 state->waiting_jobs.pop_front();
+               }
+             }
+
+             // If this is the first error then we transition the state to a broken
+             // state
+             if (!maybe_next->ok()) {
+               should_mark_final_error = true;
+               state->SignalErrorUnlocked(guard);
+             }
+           }
+
+           // If we finished this inner subscription then we need to grab a new inner
+           // subscription to take its spot. If we can't (because we're broken or
+           // exhausted) then we aren't going to be starting any new futures and so
+           // the number of running subscriptions drops.
+           pull_next_sub = sub_finished && !state->source_exhausted && !was_broken;
+           if (sub_finished && !pull_next_sub) {
+             state->num_running_subscriptions--;
+           }
+           // There are three situations we won't pull again. If an error occurred or
+           // we are already finished or if no one was waiting for our result and so
+           // we queued it up. We will decrement outstanding_requests and possibly
+           // mark the generator completed.
+           if (state->broken || (!sink.is_valid() && !sub_finished) ||
+               (sub_finished && state->source_exhausted)) {
+             if (state->MarkTaskFinishedUnlocked(guard)) {
+               should_mark_gen_complete = true;
+             }
+           }
+         }
+
+         // Now we have given up the lock and we can take all the actions we decided
+         // we need to take.
+         if (should_mark_final_error) {
+           state->MarkFinalError(maybe_next->status(), std::move(sink));
+         }
+
+         if (should_mark_gen_complete) {
+           state->MarkFinishedAndPurge();
+         }
+
+         // An error occurred elsewhere so there is no need to mark any future
+         // finished (will happen during the purge) or pull from anything
+         if (was_broken) {
+           return;
+         }
+
+         if (pull_next_sub) {
+           if (recursive) {
+             was_empty = true;
+             return;
+           }
+           // We pulled an end token so we need to start a new subscription
+           // in our spot
+           state->PullSource().AddCallback(OuterCallback{state, index});
+         } else if (sink.is_valid()) {
+           // We pulled a valid result and there was someone waiting for it
+           // so let's fetch the next result from our subscription
+           sink.MarkFinished(*maybe_next);
+           next_fut = state->active_subscriptions[index]();
+           if (next_fut.TryAddCallback([this]() { return InnerCallback(state, index); })) {
+             return;
+           }
+           // Already completed. Avoid very deep recursion by looping
+           // here instead of relying on the callback.
+           maybe_next = &next_fut.result();
+           continue;
+         }
+         // else: We pulled a valid result but no one was waiting for it so
+         // we can just stop.
+         return;
+       }
+     }
+     std::shared_ptr<State> state;
+     std::size_t index;
+     bool recursive;
+     bool was_empty = false;
+   };
+
+   struct OuterCallback {
+     void operator()(const Result<AsyncGenerator<T>>& initial_maybe_next) {
+       Result<AsyncGenerator<T>> maybe_next = initial_maybe_next;
+       while (true) {
+         // We have been given a new inner subscription
+         bool should_continue = false;
+         bool should_mark_gen_complete = false;
+         bool should_deliver_error = false;
+         bool source_exhausted = maybe_next.ok() && IsIterationEnd(*maybe_next);
+         Future<T> error_sink;
+         {
+           auto guard = state->mutex.Lock();
+           if (!maybe_next.ok() || source_exhausted || state->broken) {
+             // If here then we will not pull any more from the outer source
+             if (!state->broken && !maybe_next.ok()) {
+               state->SignalErrorUnlocked(guard);
+               // If here then we are the first error so we need to deliver it
+               should_deliver_error = true;
+               if (!state->waiting_jobs.empty()) {
+                 error_sink = std::move(*state->waiting_jobs.front());
+                 state->waiting_jobs.pop_front();
+               }
+             }
+             if (source_exhausted) {
+               state->source_exhausted = true;
+               state->num_running_subscriptions--;
+             }
+             if (state->MarkTaskFinishedUnlocked(guard)) {
+               should_mark_gen_complete = true;
+             }
+           } else {
+             state->active_subscriptions[index] = *maybe_next;
+             should_continue = true;
+           }
+         }
+         if (should_deliver_error) {
+           state->MarkFinalError(maybe_next.status(), std::move(error_sink));
+         }
+         if (should_mark_gen_complete) {
+           state->MarkFinishedAndPurge();
+         }
+         if (should_continue) {
+           // There is a possibility that a large sequence of immediately available
+           // inner callbacks could lead to a stack overflow. To avoid this we need to
+           // synchronously loop through inner/outer callbacks until we either find an
+           // unfinished future or we find an actual item to deliver.
+           Future<T> next_item = (*maybe_next)();
+           if (!next_item.TryAddCallback([this] { return InnerCallback(state, index); })) {
+             // By setting recursive to true we signal to the inner callback that, if
+             // it is empty, instead of adding a new outer callback, it should just
+             // immediately return, flagging was_empty so that we know we need to
+             // check the next subscription.
+             InnerCallback immediate_inner(state, index, /*recursive=*/true);
+             immediate_inner(next_item.result());
+             if (immediate_inner.was_empty) {
+               Future<AsyncGenerator<T>> next_source = state->PullSource();
+               if (next_source.TryAddCallback([this] {
+                     return OuterCallback{state, index};
+                   })) {
+                 // We hit an unfinished future so we can stop looping
+                 return;
+               }
+               // The current subscription was immediately and synchronously empty
+               // and we were able to synchronously pull the next subscription so we
+               // can keep looping.
+               maybe_next = next_source.result();
+               continue;
+             }
+           }
+         }
+         return;
+       }
+     }
+     std::shared_ptr<State> state;
+     std::size_t index;
+   };
+
+   std::shared_ptr<State> state_;
+ };
+
1440
+ /// \brief Create a generator that takes in a stream of generators and pulls from up to
1441
+ /// max_subscriptions at a time
1442
+ ///
1443
+ /// Note: This may deliver items out of sequence. For example, items from the third
1444
+ /// AsyncGenerator generated by the source may be emitted before some items from the first
1445
+ /// AsyncGenerator generated by the source.
1446
+ ///
1447
+ /// This generator will pull from source async-reentrantly unless max_subscriptions is 1
1448
+ /// This generator will not pull from the individual subscriptions reentrantly. Add
1449
+ /// readahead to the individual subscriptions if that is desired.
1450
+ /// This generator is async-reentrant
1451
+ ///
1452
+ /// This generator may queue up to max_subscriptions instances of T
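+ ///
+ /// A minimal usage sketch (illustrative; the values and subscription count are
+ /// arbitrary):
+ ///
+ ///   std::vector<AsyncGenerator<int>> subs = {MakeVectorGenerator<int>({1, 2}),
+ ///                                            MakeVectorGenerator<int>({3, 4})};
+ ///   AsyncGenerator<int> merged = MakeMergedGenerator(
+ ///       MakeVectorGenerator(std::move(subs)), /*max_subscriptions=*/2);
+ ///   Future<std::vector<int>> all = CollectAsyncGenerator(std::move(merged));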
1453
+ template <typename T>
1454
+ AsyncGenerator<T> MakeMergedGenerator(AsyncGenerator<AsyncGenerator<T>> source,
1455
+ int max_subscriptions) {
1456
+ return MergedGenerator<T>(std::move(source), max_subscriptions);
1457
+ }
1458
+
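+ /// \brief Create a generator that takes in a stream of generators and pulls from
+ /// each one in sequence while eagerly starting up to max_subscriptions - 1 of the
+ /// following generators in the background
+ ///
+ /// Unlike MakeMergedGenerator this delivers items in their original order.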
1459
+ template <typename T>
1460
+ Result<AsyncGenerator<T>> MakeSequencedMergedGenerator(
1461
+ AsyncGenerator<AsyncGenerator<T>> source, int max_subscriptions) {
1462
+ if (max_subscriptions <= 0) {
1463
+ return Status::Invalid("max_subscriptions must be a positive integer");
1464
+ }
1465
+ if (max_subscriptions == 1) {
1466
+ return Status::Invalid("Use MakeConcatenatedGenerator if max_subscriptions is 1");
1467
+ }
1468
+ AsyncGenerator<AsyncGenerator<T>> autostarting_source = MakeMappedGenerator(
1469
+ std::move(source),
1470
+ [](const AsyncGenerator<T>& sub) { return MakeAutoStartingGenerator(sub); });
1471
+ AsyncGenerator<AsyncGenerator<T>> sub_readahead =
1472
+ MakeSerialReadaheadGenerator(std::move(autostarting_source), max_subscriptions - 1);
1473
+ return MakeConcatenatedGenerator(std::move(sub_readahead));
1474
+ }
1475
+
1476
+ /// \brief Create a generator that takes in a stream of generators and pulls from each
1477
+ /// one in sequence.
1478
+ ///
1479
+ /// This generator is async-reentrant but will never pull from source reentrantly and
1480
+ /// will never pull from any subscription reentrantly.
1481
+ ///
1482
+ /// This generator may queue 1 instance of T
1483
+ ///
1484
+ /// TODO: Could potentially make a bespoke implementation instead of MergedGenerator that
1485
+ /// forwards async-reentrant requests instead of buffering them (which is what
1486
+ /// MergedGenerator does)
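+ ///
+ /// A minimal usage sketch (illustrative):
+ ///
+ ///   AsyncGenerator<int> concat = MakeConcatenatedGenerator(
+ ///       MakeVectorGenerator<AsyncGenerator<int>>(
+ ///           {MakeVectorGenerator<int>({1, 2}), MakeVectorGenerator<int>({3, 4})}));
+ ///   // yields 1, 2, 3, 4 in that order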
1487
+ template <typename T>
1488
+ AsyncGenerator<T> MakeConcatenatedGenerator(AsyncGenerator<AsyncGenerator<T>> source) {
1489
+ return MergedGenerator<T>(std::move(source), 1);
1490
+ }
1491
+
1492
+ template <typename T>
1493
+ struct Enumerated {
1494
+ T value;
1495
+ int index;
1496
+ bool last;
1497
+ };
1498
+
1499
+ template <typename T>
1500
+ struct IterationTraits<Enumerated<T>> {
1501
+ static Enumerated<T> End() { return Enumerated<T>{IterationEnd<T>(), -1, false}; }
1502
+ static bool IsEnd(const Enumerated<T>& val) { return val.index < 0; }
1503
+ };
1504
+
1505
+ /// \see MakeEnumeratedGenerator
1506
+ template <typename T>
1507
+ class EnumeratingGenerator {
1508
+ public:
1509
+ EnumeratingGenerator(AsyncGenerator<T> source, T initial_value)
1510
+ : state_(std::make_shared<State>(std::move(source), std::move(initial_value))) {}
1511
+
1512
+ Future<Enumerated<T>> operator()() {
1513
+ if (state_->finished) {
1514
+ return AsyncGeneratorEnd<Enumerated<T>>();
1515
+ } else {
1516
+ auto state = state_;
1517
+ return state->source().Then([state](const T& next) {
1518
+ auto finished = IsIterationEnd<T>(next);
1519
+ auto prev = Enumerated<T>{state->prev_value, state->prev_index, finished};
1520
+ state->prev_value = next;
1521
+ state->prev_index++;
1522
+ state->finished = finished;
1523
+ return prev;
1524
+ });
1525
+ }
1526
+ }
1527
+
1528
+ private:
1529
+ struct State {
1530
+ State(AsyncGenerator<T> source, T initial_value)
1531
+ : source(std::move(source)), prev_value(std::move(initial_value)), prev_index(0) {
1532
+ finished = IsIterationEnd<T>(prev_value);
1533
+ }
1534
+
1535
+ AsyncGenerator<T> source;
1536
+ T prev_value;
1537
+ int prev_index;
1538
+ bool finished;
1539
+ };
1540
+
1541
+ std::shared_ptr<State> state_;
1542
+ };
1543
+
1544
+ /// Wrap items from a source generator with positional information
1545
+ ///
1546
+ /// When used with MakeMergedGenerator and MakeSequencingGenerator this allows items to be
1547
+ /// processed in a "first-available" fashion and later resequenced which can reduce the
1548
+ /// impact of sources with erratic performance (e.g. a filesystem where some items may
1549
+ /// take longer to read than others).
1550
+ ///
1551
+ /// TODO(ARROW-12371) Would require this generator be async-reentrant
1552
+ ///
1553
+ /// \see MakeSequencingGenerator for an example of putting items back in order
1554
+ ///
1555
+ /// This generator is not async-reentrant
1556
+ ///
1557
+ /// This generator buffers one item (so it knows which item is the last item)
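+ ///
+ /// A minimal usage sketch (illustrative):
+ ///
+ ///   AsyncGenerator<Enumerated<int>> tagged =
+ ///       MakeEnumeratedGenerator(MakeVectorGenerator<int>({7, 9}));
+ ///   // tagged() yields {7, 0, /*last=*/false}, then {9, 1, /*last=*/true}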
1558
+ template <typename T>
1559
+ AsyncGenerator<Enumerated<T>> MakeEnumeratedGenerator(AsyncGenerator<T> source) {
1560
+ return FutureFirstGenerator<Enumerated<T>>(
1561
+ source().Then([source](const T& initial_value) -> AsyncGenerator<Enumerated<T>> {
1562
+ return EnumeratingGenerator<T>(std::move(source), initial_value);
1563
+ }));
1564
+ }
1565
+
1566
+ /// \see MakeTransferredGenerator
1567
+ template <typename T>
1568
+ class TransferringGenerator {
1569
+ public:
1570
+ explicit TransferringGenerator(AsyncGenerator<T> source, internal::Executor* executor)
1571
+ : source_(std::move(source)), executor_(executor) {}
1572
+
1573
+ Future<T> operator()() { return executor_->Transfer(source_()); }
1574
+
1575
+ private:
1576
+ AsyncGenerator<T> source_;
1577
+ internal::Executor* executor_;
1578
+ };
1579
+
1580
+ /// \brief Transfer a future to an underlying executor.
1581
+ ///
1582
+ /// Continuations run on the returned future will be run on the given executor
1583
+ /// if they cannot be run synchronously.
1584
+ ///
1585
+ /// This is often needed to move computation off I/O threads or other external
1586
+ /// completion sources and back on to the CPU executor so the I/O thread can
1587
+ /// stay busy and focused on I/O
1588
+ ///
1589
+ /// Keep in mind that continuations called on an already completed future will
1590
+ /// always be run synchronously and so no transfer will happen in that case.
1591
+ ///
1592
+ /// This generator is async reentrant if the source is
1593
+ ///
1594
+ /// This generator will not queue
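+ ///
+ /// A minimal usage sketch (illustrative; io_source is assumed to complete its
+ /// futures on an I/O thread):
+ ///
+ ///   AsyncGenerator<int> on_cpu = MakeTransferredGenerator(
+ ///       std::move(io_source), ::arrow::internal::GetCpuThreadPool());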
1595
+ template <typename T>
1596
+ AsyncGenerator<T> MakeTransferredGenerator(AsyncGenerator<T> source,
1597
+ internal::Executor* executor) {
1598
+ return TransferringGenerator<T>(std::move(source), executor);
1599
+ }
1600
+
1601
+ /// \see MakeBackgroundGenerator
1602
+ template <typename T>
1603
+ class BackgroundGenerator {
1604
+ public:
1605
+ explicit BackgroundGenerator(Iterator<T> it, internal::Executor* io_executor, int max_q,
1606
+ int q_restart)
1607
+ : state_(std::make_shared<State>(io_executor, std::move(it), max_q, q_restart)),
1608
+ cleanup_(std::make_shared<Cleanup>(state_.get())) {}
1609
+
1610
+ Future<T> operator()() {
1611
+ auto guard = state_->mutex.Lock();
1612
+ Future<T> waiting_future;
1613
+ if (state_->queue.empty()) {
1614
+ if (state_->finished) {
1615
+ return AsyncGeneratorEnd<T>();
1616
+ } else {
1617
+ waiting_future = Future<T>::Make();
1618
+ state_->waiting_future = waiting_future;
1619
+ }
1620
+ } else {
1621
+ auto next = Future<T>::MakeFinished(std::move(state_->queue.front()));
1622
+ state_->queue.pop();
1623
+ if (state_->NeedsRestart()) {
1624
+ return state_->RestartTask(state_, std::move(guard), std::move(next));
1625
+ }
1626
+ return next;
1627
+ }
1628
+ // This should only trigger the very first time this method is called
1629
+ if (state_->NeedsRestart()) {
1630
+ return state_->RestartTask(state_, std::move(guard), std::move(waiting_future));
1631
+ }
1632
+ return waiting_future;
1633
+ }
1634
+
1635
+ protected:
1636
+ static constexpr uint64_t kUnlikelyThreadId{std::numeric_limits<uint64_t>::max()};
1637
+
1638
+ struct State {
1639
+ State(internal::Executor* io_executor, Iterator<T> it, int max_q, int q_restart)
1640
+ : io_executor(io_executor),
1641
+ max_q(max_q),
1642
+ q_restart(q_restart),
1643
+ it(std::move(it)),
1644
+ reading(false),
1645
+ finished(false),
1646
+ should_shutdown(false) {}
1647
+
1648
+ void ClearQueue() {
1649
+ while (!queue.empty()) {
1650
+ queue.pop();
1651
+ }
1652
+ }
1653
+
1654
+ bool TaskIsRunning() const { return task_finished.is_valid(); }
1655
+
1656
+ bool NeedsRestart() const {
1657
+ return !finished && !reading && static_cast<int>(queue.size()) <= q_restart;
1658
+ }
1659
+
1660
+ void DoRestartTask(std::shared_ptr<State> state, util::Mutex::Guard guard) {
1661
+ // If we get here we are actually going to start a new task so let's create a
1662
+ // task_finished future for it
1663
+ state->task_finished = Future<>::Make();
1664
+ state->reading = true;
1665
+ auto spawn_status = io_executor->Spawn(
1666
+ [state]() { BackgroundGenerator::WorkerTask(std::move(state)); });
1667
+ if (!spawn_status.ok()) {
1668
+ // If we can't spawn a new task then send an error to the consumer (either via a
1669
+ // waiting future or the queue) and mark ourselves finished
1670
+ state->finished = true;
1671
+ state->task_finished = Future<>();
1672
+ if (waiting_future.has_value()) {
1673
+ auto to_deliver = std::move(waiting_future.value());
1674
+ waiting_future.reset();
1675
+ guard.Unlock();
1676
+ to_deliver.MarkFinished(spawn_status);
1677
+ } else {
1678
+ ClearQueue();
1679
+ queue.push(spawn_status);
1680
+ }
1681
+ }
1682
+ }
1683
+
1684
+ Future<T> RestartTask(std::shared_ptr<State> state, util::Mutex::Guard guard,
1685
+ Future<T> next) {
1686
+ if (TaskIsRunning()) {
1687
+ // If the task is still cleaning up we need to wait for it to finish before
1688
+ // restarting. We also want to block the consumer until we've restarted the
1689
+ // reader to avoid multiple restarts
1690
+ return task_finished.Then([state, next]() {
1691
+ // This may appear dangerous (recursive mutex) but we should be guaranteed the
1692
+ // outer guard has been released by this point. We know...
1693
+ // * task_finished is not already finished (it would be invalid in that case)
1694
+ // * task_finished will not be marked complete until we've given up the mutex
1695
+ auto guard_ = state->mutex.Lock();
1696
+ state->DoRestartTask(state, std::move(guard_));
1697
+ return next;
1698
+ });
1699
+ }
1700
+ // Otherwise we can restart immediately
1701
+ DoRestartTask(std::move(state), std::move(guard));
1702
+ return next;
1703
+ }
1704
+
1705
+ internal::Executor* io_executor;
1706
+ const int max_q;
1707
+ const int q_restart;
1708
+ Iterator<T> it;
1709
+ std::atomic<uint64_t> worker_thread_id{kUnlikelyThreadId};
1710
+
1711
+ // If true, the task is actively pumping items from the queue and does not need a
1712
+ // restart
1713
+ bool reading;
1714
+ // Set to true when a terminal item arrives
1715
+ bool finished;
1716
+ // Signal to the background task to end early because consumers have given up on it
1717
+ bool should_shutdown;
1718
+ // If the queue is empty, the consumer will create a waiting future and wait for it
1719
+ std::queue<Result<T>> queue;
1720
+ std::optional<Future<T>> waiting_future;
1721
+ // Every background task is given a future to complete when it is entirely finished
1722
+ // processing and ready for the next task to start or for State to be destroyed
1723
+ Future<> task_finished;
1724
+ util::Mutex mutex;
1725
+ };
1726
+
1727
+ // Cleanup task that will be run when all consumer references to the generator are lost
1728
+ struct Cleanup {
1729
+ explicit Cleanup(State* state) : state(state) {}
1730
+ ~Cleanup() {
1731
+ /// TODO: Once ARROW-13109 is available we can force consumers to spawn and
1732
+ /// there will be no need to perform this check.
1733
+ ///
1734
+ /// It's a deadlock if we enter cleanup from the worker thread, but that can
1735
+ /// happen if the consumer doesn't transfer away
1736
+ assert(state->worker_thread_id.load() != ::arrow::internal::GetThreadId());
1737
+ Future<> finish_fut;
1738
+ {
1739
+ auto lock = state->mutex.Lock();
1740
+ if (!state->TaskIsRunning()) {
1741
+ return;
1742
+ }
1743
+ // Signal the current task to stop and wait for it to finish
1744
+ state->should_shutdown = true;
1745
+ finish_fut = state->task_finished;
1746
+ }
1747
+ // Using future as a condition variable here
1748
+ Status st = finish_fut.status();
1749
+ ARROW_UNUSED(st);
1750
+ }
1751
+ State* state;
1752
+ };
1753
+
1754
+ static void WorkerTask(std::shared_ptr<State> state) {
1755
+ state->worker_thread_id.store(::arrow::internal::GetThreadId());
1756
+ // We need to capture the state to read while outside the mutex
1757
+ bool reading = true;
1758
+ while (reading) {
1759
+ auto next = state->it.Next();
1760
+ // Need to capture state->waiting_future inside the mutex to mark finished outside
1761
+ Future<T> waiting_future;
1762
+ {
1763
+ auto guard = state->mutex.Lock();
1764
+
1765
+ if (state->should_shutdown) {
1766
+ state->finished = true;
1767
+ break;
1768
+ }
1769
+
1770
+ if (!next.ok() || IsIterationEnd<T>(*next)) {
1771
+ // Terminal item. Set finished to true, send this last item, and quit
1772
+ state->finished = true;
1773
+ if (!next.ok()) {
1774
+ state->ClearQueue();
1775
+ }
1776
+ }
1777
+ // At this point we are going to send an item. Either we will add it to the
1778
+ // queue or deliver it to a waiting future.
1779
+ if (state->waiting_future.has_value()) {
1780
+ waiting_future = std::move(state->waiting_future.value());
1781
+ state->waiting_future.reset();
1782
+ } else {
1783
+ state->queue.push(std::move(next));
1784
+ // We just filled up the queue so it is time to quit reading. We may need to
1786
+ // notify a cleanup task, so stop the reading loop here
1786
+ if (static_cast<int>(state->queue.size()) >= state->max_q) {
1787
+ state->reading = false;
1788
+ }
1789
+ }
1790
+ reading = state->reading && !state->finished;
1791
+ }
1792
+ // This should happen outside the mutex. Presumably there is a
1793
+ // transferring generator on the other end that will quickly transfer any
1794
+ // callbacks off of this thread so we can continue looping. Still, best not to
1795
+ // rely on that
1796
+ if (waiting_future.is_valid()) {
1797
+ waiting_future.MarkFinished(next);
1798
+ }
1799
+ }
1800
+ // Once we've sent our last item we can notify any waiters that we are done and so
1801
+ // either state can be cleaned up or a new background task can be started
1802
+ Future<> task_finished;
1803
+ {
1804
+ auto guard = state->mutex.Lock();
1805
+ // After we give up the mutex state can be safely deleted. We will no longer
1806
+ // reference it. We can safely transition to idle now.
1807
+ task_finished = state->task_finished;
1808
+ state->task_finished = Future<>();
1809
+ state->worker_thread_id.store(kUnlikelyThreadId);
1810
+ }
1811
+ task_finished.MarkFinished();
1812
+ }
1813
+
1814
+ std::shared_ptr<State> state_;
1815
+ // state_ is held by both the generator and the background thread so it won't be cleaned
1816
+ // up when all consumer references are relinquished. cleanup_ is only held by the
1817
+ // generator so it will be destructed when the last consumer reference is gone. We use
1818
+ // this to cleanup / stop the background generator in case the consuming end stops
1819
+ // listening (e.g. due to a downstream error)
1820
+ std::shared_ptr<Cleanup> cleanup_;
1821
+ };
1822
+
1823
+ constexpr int kDefaultBackgroundMaxQ = 32;
1824
+ constexpr int kDefaultBackgroundQRestart = 16;
1825
+
1826
+ /// \brief Create an AsyncGenerator<T> by iterating over an Iterator<T> on a background
1827
+ /// thread
1828
+ ///
1829
+ /// The parameter max_q and q_restart control queue size and background thread task
1830
+ /// management. If the background task is fast you typically don't want it creating a
1831
+ /// thread task for every item. Instead the background thread will run until it fills
1832
+ /// up a readahead queue.
1833
+ ///
1834
+ /// Once the queue has filled up, the background thread task will terminate (allowing
1835
+ /// other I/O tasks to use the thread). Once the queue has been drained enough (specified
1836
+ /// by q_restart) the background thread task will be restarted. If q_restart is too low
1837
+ /// then you may exhaust the queue while waiting for the background thread task to start
1838
+ /// running again. If it is too high then the background task will be constantly stopping
1839
+ /// and restarting.
1840
+ ///
1841
+ /// The "background thread" is a logical thread and will run as tasks on the io_executor.
1842
+ /// This thread may stop and start when the queue fills up but there will only be one
1843
+ /// active background thread task at any given time. You MUST transfer away from this
1844
+ /// background generator. Otherwise there could be a race condition if a callback on the
1845
+ /// background thread deletes the last consumer reference to the background generator. You
1846
+ /// can transfer onto the same executor as the background thread, it is only necessary to
1847
+ /// create a new thread task, not to switch executors.
1848
+ ///
1849
+ /// This generator is not async-reentrant
1850
+ ///
1851
+ /// This generator will queue up to max_q blocks
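+ ///
+ /// A minimal usage sketch (illustrative; io_executor is assumed to be an I/O
+ /// thread pool owned by the caller):
+ ///
+ ///   ARROW_ASSIGN_OR_RAISE(AsyncGenerator<int> bg,
+ ///                         MakeBackgroundGenerator(std::move(it), io_executor));
+ ///   AsyncGenerator<int> gen = MakeTransferredGenerator(
+ ///       std::move(bg), ::arrow::internal::GetCpuThreadPool());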
1852
+ template <typename T>
1853
+ static Result<AsyncGenerator<T>> MakeBackgroundGenerator(
1854
+ Iterator<T> iterator, internal::Executor* io_executor,
1855
+ int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart) {
1856
+ if (max_q < q_restart) {
1857
+ return Status::Invalid("max_q must be >= q_restart");
1858
+ }
1859
+ return BackgroundGenerator<T>(std::move(iterator), io_executor, max_q, q_restart);
1860
+ }
1861
+
1862
+ /// \brief Create an AsyncGenerator<T> by iterating over an Iterator<T> synchronously
1863
+ ///
1864
+ /// This should only be used if you know the source iterator does not involve any
1865
+ /// I/O (or other blocking calls). Otherwise a CPU thread will be blocked and, depending
1866
+ /// on the complexity of the iterator, it may lead to deadlock.
1867
+ ///
1868
+ /// If you are not certain if there will be I/O then it is better to use
1869
+ /// MakeBackgroundGenerator. If helpful you can think of this as the AsyncGenerator
1870
+ /// equivalent of Future::MakeFinished
1871
+ ///
1872
+ /// It is impossible to call this in an async-reentrant manner since the returned
1873
+ /// future will be completed by the time it is polled.
1874
+ ///
1875
+ /// This generator does not queue
1876
+ template <typename T>
1877
+ static Result<AsyncGenerator<T>> MakeBlockingGenerator(
1878
+ std::shared_ptr<Iterator<T>> iterator) {
1879
+ return [it = std::move(iterator)]() mutable -> Future<T> {
1880
+ return Future<T>::MakeFinished(it->Next());
1881
+ };
1882
+ }
1883
+
1884
+ template <typename T>
1885
+ static Result<AsyncGenerator<T>> MakeBlockingGenerator(Iterator<T> iterator) {
1886
+ return MakeBlockingGenerator(std::make_shared<Iterator<T>>(std::move(iterator)));
1887
+ }
1888
+
1889
+ /// \see MakeGeneratorIterator
1890
+ template <typename T>
1891
+ class GeneratorIterator {
1892
+ public:
1893
+ explicit GeneratorIterator(AsyncGenerator<T> source) : source_(std::move(source)) {}
1894
+
1895
+ Result<T> Next() { return source_().result(); }
1896
+
1897
+ private:
1898
+ AsyncGenerator<T> source_;
1899
+ };
1900
+
1901
+ /// \brief Convert an AsyncGenerator<T> to an Iterator<T> which blocks until each future
1902
+ /// is finished
1903
+ template <typename T>
1904
+ Iterator<T> MakeGeneratorIterator(AsyncGenerator<T> source) {
1905
+ return Iterator<T>(GeneratorIterator<T>(std::move(source)));
1906
+ }
1907
+
1908
+ /// \brief Add readahead to an iterator using a background thread.
1909
+ ///
1910
+ /// Under the hood this is converting the iterator to a generator using
1911
+ /// MakeBackgroundGenerator, adding readahead to the converted generator with
1912
+ /// MakeReadaheadGenerator, and then converting back to an iterator using
1913
+ /// MakeGeneratorIterator.
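+ ///
+ /// A minimal usage sketch (illustrative):
+ ///
+ ///   ARROW_ASSIGN_OR_RAISE(
+ ///       Iterator<int> buffered,
+ ///       MakeReadaheadIterator(std::move(slow_it), /*readahead_queue_size=*/8));
+ ///   // Next() now typically returns an element already buffered in the background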
1914
+ template <typename T>
1915
+ Result<Iterator<T>> MakeReadaheadIterator(Iterator<T> it, int readahead_queue_size) {
1916
+ ARROW_ASSIGN_OR_RAISE(auto io_executor, internal::ThreadPool::Make(1));
1917
+ auto max_q = readahead_queue_size;
1918
+ auto q_restart = std::max(1, max_q / 2);
1919
+ ARROW_ASSIGN_OR_RAISE(
1920
+ auto background_generator,
1921
+ MakeBackgroundGenerator(std::move(it), io_executor.get(), max_q, q_restart));
1922
+ // Capture io_executor to keep it alive as long as owned_bg_generator is still
1923
+ // referenced
1924
+ AsyncGenerator<T> owned_bg_generator = [io_executor, background_generator]() {
1925
+ return background_generator();
1926
+ };
1927
+ return MakeGeneratorIterator(std::move(owned_bg_generator));
1928
+ }
1929
+
1930
+ /// \brief Make a generator that returns a single pre-generated future
1931
+ ///
1932
+ /// This generator is async-reentrant.
1933
+ template <typename T>
1934
+ std::function<Future<T>()> MakeSingleFutureGenerator(Future<T> future) {
1935
+ assert(future.is_valid());
1936
+ auto state = std::make_shared<Future<T>>(std::move(future));
1937
+ return [state]() -> Future<T> {
1938
+ auto fut = std::move(*state);
1939
+ if (fut.is_valid()) {
1940
+ return fut;
1941
+ } else {
1942
+ return AsyncGeneratorEnd<T>();
1943
+ }
1944
+ };
1945
+ }
1946
+
1947
+ /// \brief Make a generator that immediately ends.
1948
+ ///
1949
+ /// This generator is async-reentrant.
1950
+ template <typename T>
1951
+ std::function<Future<T>()> MakeEmptyGenerator() {
1952
+ return []() -> Future<T> { return AsyncGeneratorEnd<T>(); };
1953
+ }
1954
+
1955
+ /// \brief Make a generator that always fails with a given error
1956
+ ///
1957
+ /// This generator is async-reentrant.
1958
+ template <typename T>
1959
+ AsyncGenerator<T> MakeFailingGenerator(Status st) {
1960
+ assert(!st.ok());
1961
+ auto state = std::make_shared<Status>(std::move(st));
1962
+ return [state]() -> Future<T> {
1963
+ auto st = std::move(*state);
1964
+ if (!st.ok()) {
1965
+ return std::move(st);
1966
+ } else {
1967
+ return AsyncGeneratorEnd<T>();
1968
+ }
1969
+ };
1970
+ }
1971
+
1972
+ /// \brief Make a generator that always fails with a given error
1973
+ ///
1974
+ /// This overload allows inferring the return type from the argument.
1975
+ template <typename T>
1976
+ AsyncGenerator<T> MakeFailingGenerator(const Result<T>& result) {
1977
+ return MakeFailingGenerator<T>(result.status());
1978
+ }
1979
+
1980
+ /// \brief Prepend initial_values onto a generator
1981
+ ///
1982
+ /// This generator is async-reentrant but will buffer requests and will not
1983
+ /// pull from following_values async-reentrantly.
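+ ///
+ /// A minimal usage sketch (illustrative):
+ ///
+ ///   AsyncGenerator<int> replayed = MakeGeneratorStartsWith(
+ ///       std::vector<int>{1, 2}, std::move(following));
+ ///   // yields 1, 2, then whatever `following` produces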
1984
+ template <typename T>
1985
+ AsyncGenerator<T> MakeGeneratorStartsWith(std::vector<T> initial_values,
1986
+ AsyncGenerator<T> following_values) {
1987
+ auto initial_values_vec_gen = MakeVectorGenerator(std::move(initial_values));
1988
+ auto gen_gen = MakeVectorGenerator<AsyncGenerator<T>>(
1989
+ {std::move(initial_values_vec_gen), std::move(following_values)});
1990
+ return MakeConcatenatedGenerator(std::move(gen_gen));
1991
+ }
1992
+
1993
+ template <typename T>
1994
+ struct CancellableGenerator {
1995
+ Future<T> operator()() {
1996
+ if (stop_token.IsStopRequested()) {
1997
+ return stop_token.Poll();
1998
+ }
1999
+ return source();
2000
+ }
2001
+
2002
+ AsyncGenerator<T> source;
2003
+ StopToken stop_token;
2004
+ };
2005
+
2006
+ /// \brief Allow an async generator to be cancelled
2007
+ ///
2008
+ /// This generator is async-reentrant
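+ ///
+ /// A minimal usage sketch (illustrative):
+ ///
+ ///   StopSource stop_source;
+ ///   AsyncGenerator<int> cancellable =
+ ///       MakeCancellable(std::move(source), stop_source.token());
+ ///   stop_source.RequestStop();
+ ///   // subsequent polls now complete with a Cancelled status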
2009
+ template <typename T>
2010
+ AsyncGenerator<T> MakeCancellable(AsyncGenerator<T> source, StopToken stop_token) {
2011
+ return CancellableGenerator<T>{std::move(source), std::move(stop_token)};
2012
+ }
2013
+
2014
+ template <typename T>
2015
+ class DefaultIfEmptyGenerator {
2016
+ public:
2017
+ DefaultIfEmptyGenerator(AsyncGenerator<T> source, T or_value)
2018
+ : state_(std::make_shared<State>(std::move(source), std::move(or_value))) {}
2019
+
2020
+ Future<T> operator()() {
2021
+ if (state_->first) {
2022
+ state_->first = false;
2023
+ struct {
2024
+ T or_value;
2025
+
2026
+ Result<T> operator()(const T& value) {
2027
+ if (IterationTraits<T>::IsEnd(value)) {
2028
+ return std::move(or_value);
2029
+ }
2030
+ return value;
2031
+ }
2032
+ } Continuation;
2033
+ Continuation.or_value = std::move(state_->or_value);
2034
+ return state_->source().Then(std::move(Continuation));
2035
+ }
2036
+ return state_->source();
2037
+ }
2038
+
2039
+ private:
2040
+ struct State {
2041
+ AsyncGenerator<T> source;
2042
+ T or_value;
2043
+ bool first;
2044
+ State(AsyncGenerator<T> source_, T or_value_)
2045
+ : source(std::move(source_)), or_value(std::move(or_value_)), first(true) {}
2046
+ };
2047
+ std::shared_ptr<State> state_;
2048
+ };
2049
+
2050
+ /// \brief If the generator is empty, return the given value, else
2051
+ /// forward the values from the generator.
2052
+ ///
2053
+ /// This generator is async-reentrant.
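+ ///
+ /// A minimal usage sketch (illustrative):
+ ///
+ ///   AsyncGenerator<int> gen =
+ ///       MakeDefaultIfEmptyGenerator(MakeEmptyGenerator<int>(), /*or_value=*/42);
+ ///   // gen() yields 42, then the end token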
2054
+ template <typename T>
2055
+ AsyncGenerator<T> MakeDefaultIfEmptyGenerator(AsyncGenerator<T> source, T or_value) {
2056
+ return DefaultIfEmptyGenerator<T>(std::move(source), std::move(or_value));
2057
+ }
2058
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_util.h ADDED
@@ -0,0 +1,460 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <functional>
22
+ #include <list>
23
+ #include <memory>
24
+
25
+ #include "arrow/result.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/util/cancel.h"
28
+ #include "arrow/util/functional.h"
29
+ #include "arrow/util/future.h"
30
+ #include "arrow/util/iterator.h"
31
+ #include "arrow/util/mutex.h"
32
+ #include "arrow/util/thread_pool.h"
33
+ #include "arrow/util/tracing.h"
34
+
35
+ namespace arrow {
36
+
37
+ using internal::FnOnce;
38
+
39
+ namespace util {
40
+
41
+ /// A utility which keeps tracks of, and schedules, asynchronous tasks
42
+ ///
43
+ /// An asynchronous task has a synchronous component and an asynchronous component.
44
+ /// The synchronous component typically schedules some kind of work on an external
45
+ /// resource (e.g. the I/O thread pool or some kind of kernel-based asynchronous
46
+ /// resource like io_uring). The asynchronous part represents the work
47
+ /// done on that external resource. Executing the synchronous part will be referred
48
+ /// to as "submitting the task" since this usually includes submitting the asynchronous
49
+ /// portion to the external thread pool.
50
+ ///
51
+ /// By default the scheduler will submit the task (execute the synchronous part) as
52
+ /// soon as it is added, assuming the underlying thread pool hasn't terminated or the
53
+ /// scheduler hasn't aborted. In this mode, the scheduler is simply acting as
54
+ /// a simple task group.
55
+ ///
56
+ /// A task scheduler starts with an initial task. That task, and all subsequent tasks
57
+ /// are free to add subtasks. Once all submitted tasks finish the scheduler will
58
+ /// finish. Note, it is not an error to add additional tasks after a scheduler has
59
+ /// aborted. These tasks will be ignored and never submitted. The scheduler returns a
60
+ /// future which will complete when all submitted tasks have finished executing. Once all
61
+ /// tasks have been finished the scheduler is invalid and should no longer be used.
62
+ ///
63
+ /// Task failure (either the synchronous portion or the asynchronous portion) will cause
64
+ /// the scheduler to enter an aborted state. The first such failure will be reported in
65
+ /// the final task future.
66
+ class ARROW_EXPORT AsyncTaskScheduler {
67
+ public:
68
+ /// Destructor for AsyncTaskScheduler
69
+ ///
70
+ /// The lifetime of the task scheduler is managed automatically. The scheduler
71
+ /// will remain valid while any tasks are running (and can always be safely accessed
72
+ /// within tasks) and will be destroyed as soon as all tasks have finished.
73
+ virtual ~AsyncTaskScheduler() = default;
74
+ /// An interface for a task
75
+ ///
76
+ /// Users may want to override this, for example, to add priority
77
+ /// information for use by a queue.
78
+ class Task {
79
+ public:
80
+ virtual ~Task() = default;
81
+ /// Submit the task
82
+ ///
83
+ /// This will be called by the scheduler at most once when there
84
+ /// is space to run the task. This is expected to be a fairly quick
85
+ /// function that simply submits the actual task work to an external
86
+ /// resource (e.g. I/O thread pool).
87
+ ///
88
+ /// If this call fails then the scheduler will enter an aborted state.
89
+ virtual Result<Future<>> operator()() = 0;
90
+ /// The cost of the task
91
+ ///
92
+ /// A ThrottledAsyncTaskScheduler can be used to limit the number of concurrent tasks.
93
+ /// A custom cost may be used, for example, if you would like to limit the number of
94
+ /// tasks based on the total expected RAM usage of the tasks (this is done in the
95
+ /// scanner)
96
+ virtual int cost() const { return 1; }
97
+ /// The name of the task
98
+ ///
99
+ /// This is used for debugging and traceability. The returned view must remain
100
+ /// valid for the lifetime of the task.
101
+ virtual std::string_view name() const = 0;
102
+
103
+ /// a span tied to the lifetime of the task, for internal use only
104
+ tracing::Span span;
105
+ };
106
+
107
+ /// Add a task to the scheduler
108
+ ///
109
+ /// If the scheduler is in an aborted state this call will return false and the task
110
+ /// will never be run. This is harmless and does not need to be guarded against.
111
+ ///
112
+ /// The return value for this call can usually be ignored. There is little harm in
113
+ /// attempting to add tasks to an aborted scheduler. It is only included for callers
114
+ /// that want to avoid future task generation to save effort.
115
+ ///
116
+ /// \param task the task to submit
117
+ ///
118
+ /// A task's name must remain valid for the duration of the task. It is used for
119
+ /// debugging (e.g. when debugging a deadlock to see which tasks still remain) and for
120
+ /// traceability (the name will be used for spans assigned to the task)
121
+ ///
122
+ /// \return true if the task was submitted or queued, false if the task was ignored
123
+ virtual bool AddTask(std::unique_ptr<Task> task) = 0;
124
+
125
+ /// Adds an async generator to the scheduler
126
+ ///
127
+ /// The async generator will be visited, one item at a time. Submitting a task
128
+ /// will consist of polling the generator for the next future. The generator's future
129
+ /// will then represent the task itself.
130
+ ///
131
+ /// This visits the task serially without readahead. If readahead or parallelism
132
+ /// is desired then it should be added in the generator itself.
133
+ ///
134
+ /// The generator itself will be kept alive until all tasks have been completed.
135
+ /// However, if the scheduler is aborted, the generator will be destroyed as soon as the
136
+ /// next item would be requested.
137
+ ///
138
+ /// \param generator the generator to submit to the scheduler
139
+ /// \param visitor a function which visits each generator future as it completes
140
+ /// \param name a name which will be used for each submitted task
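+ ///
+ /// A minimal usage sketch (illustrative; gen is assumed to be an
+ /// AsyncGenerator<int>, i.e. std::function<Future<int>()>):
+ ///
+ ///   scheduler->AddAsyncGenerator<int>(
+ ///       std::move(gen), [](const int& item) { return Status::OK(); },
+ ///       std::string_view("visit-ints"));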
141
+ template <typename T>
142
+ bool AddAsyncGenerator(std::function<Future<T>()> generator,
143
+ std::function<Status(const T&)> visitor, std::string_view name);
144
+
145
+ template <typename Callable>
146
+ struct SimpleTask : public Task {
147
+ SimpleTask(Callable callable, std::string_view name)
148
+ : callable(std::move(callable)), name_(name) {}
149
+ SimpleTask(Callable callable, std::string name)
150
+ : callable(std::move(callable)), owned_name_(std::move(name)) {
151
+ name_ = *owned_name_;
152
+ }
153
+ Result<Future<>> operator()() override { return callable(); }
154
+ std::string_view name() const override { return name_; }
155
+ Callable callable;
156
+ std::string_view name_;
157
+ std::optional<std::string> owned_name_;
158
+ };
159
+
160
+ /// Add a task with cost 1 to the scheduler
161
+ ///
162
+ /// \param callable a "submit" function that should return a future
163
+ /// \param name a name for the task
164
+ ///
165
+ /// `name` must remain valid until the task has been submitted AND the returned
166
+ /// future completes. It is used for debugging and tracing.
167
+ ///
168
+ /// \see AddTask for more details
169
+ template <typename Callable>
170
+ bool AddSimpleTask(Callable callable, std::string_view name) {
171
+ return AddTask(std::make_unique<SimpleTask<Callable>>(std::move(callable), name));
172
+ }
173
+
174
+ /// Add a task with cost 1 to the scheduler
175
+ ///
176
+ /// This is an overload of \see AddSimpleTask that keeps `name` alive
177
+ /// in the task.
178
+ template <typename Callable>
179
+ bool AddSimpleTask(Callable callable, std::string name) {
180
+ return AddTask(
181
+ std::make_unique<SimpleTask<Callable>>(std::move(callable), std::move(name)));
182
+ }
183
+
184
+ /// Construct a scheduler
185
+ ///
186
+ /// \param initial_task The initial task which is responsible for adding
187
+ /// the first subtasks to the scheduler.
188
+ /// \param abort_callback A callback that will be triggered immediately after a task
189
+ /// fails while other tasks may still be running. Nothing needs to be done here,
190
+ /// when a task fails the scheduler will stop accepting new tasks and eventually
191
+ /// return the error. However, this callback can be used to more quickly end
192
+ /// long running tasks that have already been submitted. Defaults to doing
193
+ /// nothing.
194
+ /// \param stop_token An optional stop token that will allow cancellation of the
195
+ /// scheduler. This will be checked before each task is submitted and, in the
196
+ /// event of a cancellation, the scheduler will enter an aborted state. This is
197
+ /// a graceful cancellation and submitted tasks will still complete.
198
+ /// \return A future that will be completed when the initial task and all subtasks have
199
+ /// finished.
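+ ///
+ /// A minimal usage sketch (illustrative; StartAsyncWork is a placeholder
+ /// returning Future<>):
+ ///
+ ///   Future<> finished = AsyncTaskScheduler::Make([](AsyncTaskScheduler* scheduler) {
+ ///     scheduler->AddSimpleTask([] { return StartAsyncWork(); },
+ ///                              std::string_view("initial-subtask"));
+ ///     return Status::OK();
+ ///   });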
200
+ static Future<> Make(
201
+ FnOnce<Status(AsyncTaskScheduler*)> initial_task,
202
+ FnOnce<void(const Status&)> abort_callback = [](const Status&) {},
203
+ StopToken stop_token = StopToken::Unstoppable());
204
+
205
+ /// A span tracking execution of the scheduler's tasks, for internal use only
206
+ virtual const tracing::Span& span() const = 0;
207
+ };
208
+
209
+ class ARROW_EXPORT ThrottledAsyncTaskScheduler : public AsyncTaskScheduler {
210
+ public:
211
+ /// An interface for a task queue
212
+ ///
213
+ /// A queue's methods will not be called concurrently
214
+ class Queue {
215
+ public:
216
+ virtual ~Queue() = default;
217
+ /// Push a task to the queue
218
+ ///
219
+ /// \param task the task to enqueue
220
+ virtual void Push(std::unique_ptr<Task> task) = 0;
221
+ /// Pop the next task from the queue
222
+ virtual std::unique_ptr<Task> Pop() = 0;
223
+ /// Peek the next task in the queue
224
+ virtual const Task& Peek() = 0;
225
+ /// Check if the queue is empty
226
+ virtual bool Empty() = 0;
227
+ /// Purge the queue of all items
228
+ virtual void Purge() = 0;
229
+ virtual std::size_t Size() const = 0;
230
+ };
231
+
232
+ class Throttle {
233
+ public:
234
+ virtual ~Throttle() = default;
235
+ /// Acquire amt permits
236
+ ///
237
+ /// If nullopt is returned then the permits were immediately
238
+ /// acquired and the caller can proceed. If a future is returned then the caller
239
+ /// should wait for the future to complete first. When the returned future completes
240
+ /// the permits have NOT been acquired and the caller must call Acquire again
241
+ ///
242
+ /// \param amt the number of permits to acquire
243
+ virtual std::optional<Future<>> TryAcquire(int amt) = 0;
244
+ /// Release amt permits
245
+ ///
246
+ /// This will possibly complete waiting futures and should probably not be
247
+ /// called while holding locks.
248
+ ///
249
+ /// \param amt the number of permits to release
250
+ virtual void Release(int amt) = 0;
251
+
252
+ /// The size of the largest task that can run
253
+ ///
254
+ /// Incoming tasks will have their cost latched to this value to ensure
255
+ /// they can still run (although they will be the only thing allowed to
256
+ /// run at that time).
257
+ virtual int Capacity() = 0;
258
+
259
+ /// Pause the throttle
260
+ ///
261
+ /// Any tasks that have been submitted already will continue. However, no new tasks
262
+ /// will be run until the throttle is resumed.
263
+ virtual void Pause() = 0;
264
+ /// Resume the throttle
265
+ ///
266
+ /// Allows task to be submitted again. If there is a max_concurrent_cost limit then
267
+ /// it will still apply.
268
+ virtual void Resume() = 0;
269
+ };
270
+
271
+ /// Pause the throttle
272
+ ///
273
+ /// Any tasks that have been submitted already will continue. However, no new tasks
274
+ /// will be run until the throttle is resumed.
275
+ virtual void Pause() = 0;
276
+ /// Resume the throttle
277
+ ///
278
+ /// Allows task to be submitted again. If there is a max_concurrent_cost limit then
279
+ /// it will still apply.
280
+ virtual void Resume() = 0;
281
+ /// Return the number of tasks queued but not yet submitted
282
+ virtual std::size_t QueueSize() = 0;
283
+
284
+ /// Create a throttled view of a scheduler
285
+ ///
286
+ /// Tasks added via this view will be subjected to the throttle and, if the tasks cannot
287
+ /// run immediately, will be placed into a queue.
288
+ ///
289
+ /// Although a shared_ptr is returned it should generally be assumed that the caller
290
+ /// is being given exclusive ownership. The shared_ptr is used to share the view with
291
+ /// queued and submitted tasks and the lifetime of those is unpredictable. It is
292
+ /// important the caller keep the returned pointer alive for as long as they plan to add
293
+ /// tasks to the view.
294
+ ///
295
+ /// \param scheduler a scheduler to submit tasks to after throttling
296
+ ///
297
+ /// This can be the root scheduler, another throttled scheduler, or a task group. These
298
+ /// are all composable.
299
+ ///
300
+ /// \param max_concurrent_cost the maximum amount of cost allowed to run at any one time
301
+ ///
302
+ /// If a task is added that has a cost greater than max_concurrent_cost then its cost
303
+ /// will be reduced to max_concurrent_cost so that it is still possible for the task to
304
+ /// run.
305
+ ///
306
+ /// \param queue the queue to use when tasks cannot be submitted
307
+ ///
308
+ /// By default a FIFO queue will be used. However, a custom queue can be provided if
309
+ /// some tasks have higher priority than other tasks.
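+ ///
+ /// A minimal usage sketch (illustrative; submit_fn is a placeholder callable
+ /// returning a Future<>):
+ ///
+ ///   std::shared_ptr<ThrottledAsyncTaskScheduler> throttled =
+ ///       ThrottledAsyncTaskScheduler::Make(scheduler, /*max_concurrent_cost=*/4);
+ ///   throttled->AddSimpleTask(submit_fn, std::string_view("throttled-task"));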
310
+ static std::shared_ptr<ThrottledAsyncTaskScheduler> Make(
311
+ AsyncTaskScheduler* scheduler, int max_concurrent_cost,
312
+ std::unique_ptr<Queue> queue = NULLPTR);
313
+
314
+ /// @brief Create a ThrottledAsyncTaskScheduler using a custom throttle
315
+ ///
316
+ /// \see Make
317
+ static std::shared_ptr<ThrottledAsyncTaskScheduler> MakeWithCustomThrottle(
318
+ AsyncTaskScheduler* scheduler, std::unique_ptr<Throttle> throttle,
319
+ std::unique_ptr<Queue> queue = NULLPTR);
320
+ };
321
+
322
+ /// A utility to keep track of a collection of tasks
323
+ ///
324
+ /// Often it is useful to keep track of some state that only needs to stay alive
325
+ /// for some small collection of tasks, or to perform some kind of final cleanup
326
+ /// when a collection of tasks is finished.
327
+ ///
328
+ /// For example, when scanning, we need to keep the file reader alive while all scan
329
+ /// tasks run for a given file, and then we can gracefully close it when we finish the
330
+ /// file.
331
+ class ARROW_EXPORT AsyncTaskGroup : public AsyncTaskScheduler {
332
+ public:
333
+ /// Destructor for the task group
334
+ ///
335
+ /// The destructor might trigger the finish callback. If the finish callback fails
336
+ /// then the error will be reported as a task on the scheduler.
337
+ ///
338
+ /// Failure to destroy the async task group will not prevent the scheduler from
339
+ /// finishing. If the scheduler finishes before the async task group is done then
340
+ /// the finish callback will be run immediately when the async task group finishes.
341
+ ///
342
+ /// If the scheduler has aborted then the finish callback will not run.
343
+ ~AsyncTaskGroup() = default;
344
+ /// Create an async task group
345
+ ///
346
+ /// The finish callback will not run until the task group is destroyed and all
347
+ /// tasks are finished so you will generally want to reset / destroy the returned
348
+ /// unique_ptr at some point.
349
+ ///
350
+ /// \param scheduler The underlying scheduler to submit tasks to
351
+ /// \param finish_callback A callback that will be run only after the task group has
352
+ /// been destroyed and all tasks added by the group have
353
+ /// finished.
354
+ ///
355
+ /// Note: in error scenarios the finish callback may not run. However, it will still,
356
+ /// of course, be destroyed.
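+ ///
+ /// A minimal usage sketch (illustrative; reader is a hypothetical shared_ptr
+ /// whose Close() returns Status):
+ ///
+ ///   auto group = AsyncTaskGroup::Make(
+ ///       scheduler, [reader] { return reader->Close(); });
+ ///   group->AddSimpleTask(read_task, std::string_view("read"));
+ ///   group.reset();  // finish callback runs once all group tasks have finished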
357
+ static std::unique_ptr<AsyncTaskGroup> Make(AsyncTaskScheduler* scheduler,
358
+ FnOnce<Status()> finish_callback);
359
+ };
360
+
361
+ /// Create a task group that is also throttled
362
+ ///
363
+ /// This is a utility factory that creates a throttled view of a scheduler and then
364
+ /// wraps that throttled view with a task group that destroys the throttle when finished.
365
+ ///
366
+ /// \see ThrottledAsyncTaskScheduler
367
+ /// \see AsyncTaskGroup
368
+ /// \param target the underlying scheduler to submit tasks to
369
+ /// \param max_concurrent_cost the maximum amount of cost allowed to run at any one time
370
+ /// \param queue the queue to use when tasks cannot be submitted
371
+ /// \param finish_callback A callback that will be run only after the task group has
372
+ /// been destroyed and all tasks added by the group have finished
373
+ ARROW_EXPORT std::unique_ptr<ThrottledAsyncTaskScheduler> MakeThrottledAsyncTaskGroup(
374
+ AsyncTaskScheduler* target, int max_concurrent_cost,
375
+ std::unique_ptr<ThrottledAsyncTaskScheduler::Queue> queue,
376
+ FnOnce<Status()> finish_callback);
377
+
378
+ // Defined down here to avoid circular dependency between AsyncTaskScheduler and
379
+ // AsyncTaskGroup
380
+ template <typename T>
381
+ bool AsyncTaskScheduler::AddAsyncGenerator(std::function<Future<T>()> generator,
382
+ std::function<Status(const T&)> visitor,
383
+ std::string_view name) {
384
+ struct State {
385
+ State(std::function<Future<T>()> generator, std::function<Status(const T&)> visitor,
386
+ std::unique_ptr<AsyncTaskGroup> task_group, std::string_view name)
387
+ : generator(std::move(generator)),
388
+ visitor(std::move(visitor)),
389
+ task_group(std::move(task_group)),
390
+ name(name) {}
391
+ std::function<Future<T>()> generator;
392
+ std::function<Status(const T&)> visitor;
393
+ std::unique_ptr<AsyncTaskGroup> task_group;
394
+ std::string_view name;
395
+ };
396
+ struct SubmitTask : public Task {
397
+ explicit SubmitTask(std::unique_ptr<State> state_holder)
398
+ : state_holder(std::move(state_holder)) {}
399
+
400
+ struct SubmitTaskCallback {
401
+ SubmitTaskCallback(std::unique_ptr<State> state_holder, Future<> task_completion)
402
+ : state_holder(std::move(state_holder)),
403
+ task_completion(std::move(task_completion)) {}
404
+ void operator()(const Result<T>& maybe_item) {
405
+ if (!maybe_item.ok()) {
406
+ task_completion.MarkFinished(maybe_item.status());
407
+ return;
408
+ }
409
+ const auto& item = *maybe_item;
410
+ if (IsIterationEnd(item)) {
411
+ task_completion.MarkFinished();
412
+ return;
413
+ }
414
+ Status visit_st = state_holder->visitor(item);
415
+ if (!visit_st.ok()) {
416
+ task_completion.MarkFinished(std::move(visit_st));
417
+ return;
418
+ }
419
+ state_holder->task_group->AddTask(
420
+ std::make_unique<SubmitTask>(std::move(state_holder)));
421
+ task_completion.MarkFinished();
422
+ }
423
+ std::unique_ptr<State> state_holder;
424
+ Future<> task_completion;
425
+ };
426
+
427
+ Result<Future<>> operator()() {
428
+ Future<> task = Future<>::Make();
429
+ // Consume as many items as we can (those that are already finished)
430
+ // synchronously to avoid recursion / stack overflow.
431
+ while (true) {
432
+ Future<T> next = state_holder->generator();
433
+ if (next.TryAddCallback(
434
+ [&] { return SubmitTaskCallback(std::move(state_holder), task); })) {
435
+ return task;
436
+ }
437
+ ARROW_ASSIGN_OR_RAISE(T item, next.result());
438
+ if (IsIterationEnd(item)) {
439
+ task.MarkFinished();
440
+ return task;
441
+ }
442
+ ARROW_RETURN_NOT_OK(state_holder->visitor(item));
443
+ }
444
+ }
445
+
446
+ std::string_view name() const { return state_holder->name; }
447
+
448
+ std::unique_ptr<State> state_holder;
449
+ };
450
+ std::unique_ptr<AsyncTaskGroup> task_group =
451
+ AsyncTaskGroup::Make(this, [] { return Status::OK(); });
452
+ AsyncTaskGroup* task_group_view = task_group.get();
453
+ std::unique_ptr<State> state_holder = std::make_unique<State>(
454
+ std::move(generator), std::move(visitor), std::move(task_group), name);
455
+ task_group_view->AddTask(std::make_unique<SubmitTask>(std::move(state_holder)));
456
+ return true;
457
+ }
458
+
459
+ } // namespace util
460
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/basic_decimal.h ADDED
@@ -0,0 +1,492 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <array>
21
+ #include <cstdint>
22
+ #include <cstring>
23
+ #include <limits>
24
+ #include <string>
25
+ #include <type_traits>
26
+
27
+ #include "arrow/util/endian.h"
28
+ #include "arrow/util/macros.h"
29
+ #include "arrow/util/type_traits.h"
30
+ #include "arrow/util/visibility.h"
31
+
32
+ namespace arrow {
33
+
34
+ enum class DecimalStatus {
35
+ kSuccess,
36
+ kDivideByZero,
37
+ kOverflow,
38
+ kRescaleDataLoss,
39
+ };
40
+
41
+ template <typename Derived, int BIT_WIDTH, int NWORDS = BIT_WIDTH / 64>
42
+ class ARROW_EXPORT GenericBasicDecimal {
43
+ protected:
44
+ struct LittleEndianArrayTag {};
45
+
46
+ #if ARROW_LITTLE_ENDIAN
47
+ static constexpr int kHighWordIndex = NWORDS - 1;
48
+ static constexpr int kLowWordIndex = 0;
49
+ #else
50
+ static constexpr int kHighWordIndex = 0;
51
+ static constexpr int kLowWordIndex = NWORDS - 1;
52
+ #endif
53
+
54
+ public:
55
+ static constexpr int kBitWidth = BIT_WIDTH;
56
+ static constexpr int kByteWidth = kBitWidth / 8;
57
+ static constexpr int kNumWords = NWORDS;
58
+
59
+ // A constructor tag to introduce a little-endian encoded array
60
+ static constexpr LittleEndianArrayTag LittleEndianArray{};
61
+
62
+ using WordArray = std::array<uint64_t, NWORDS>;
63
+
64
+ /// \brief Empty constructor creates a decimal with a value of 0.
65
+ constexpr GenericBasicDecimal() noexcept : array_({0}) {}
66
+
67
+ /// \brief Create a decimal from the two's complement representation.
68
+ ///
69
+ /// Input array is assumed to be in native endianness.
70
+ explicit constexpr GenericBasicDecimal(const WordArray& array) noexcept
71
+ : array_(array) {}
72
+
73
+ /// \brief Create a decimal from the two's complement representation.
74
+ ///
75
+ /// Input array is assumed to be in little endianness, with native endian elements.
76
+ GenericBasicDecimal(LittleEndianArrayTag, const WordArray& array) noexcept
77
+ : GenericBasicDecimal(bit_util::little_endian::ToNative(array)) {}
78
+
79
+ /// \brief Create a decimal from any integer not wider than 64 bits.
80
+ template <typename T,
81
+ typename = typename std::enable_if<
82
+ std::is_integral<T>::value && (sizeof(T) <= sizeof(uint64_t)), T>::type>
83
+ constexpr GenericBasicDecimal(T value) noexcept // NOLINT(runtime/explicit)
84
+ : array_(WordsFromLowBits(value)) {}
85
+
86
+ /// \brief Create a decimal from an array of bytes.
87
+ ///
88
+ /// Bytes are assumed to be in native-endian byte order.
89
+ explicit GenericBasicDecimal(const uint8_t* bytes) {
90
+ memcpy(array_.data(), bytes, sizeof(array_));
91
+ }
92
+
93
+ /// \brief Get the bits of the two's complement representation of the number.
94
+ ///
95
+ /// The elements are in native endian order. The bits within each uint64_t element
96
+ /// are in native endian order. For example, on a little endian machine,
97
+ /// BasicDecimal128(123).native_endian_array() = {123, 0};
98
+ /// but on a big endian machine,
99
+ /// BasicDecimal128(123).native_endian_array() = {0, 123};
100
+ constexpr const WordArray& native_endian_array() const { return array_; }
101
+
102
+ /// \brief Get the bits of the two's complement representation of the number.
103
+ ///
104
+ /// The elements are in little endian order. However, the bits within each
105
+ /// uint64_t element are in native endian order.
106
+ /// For example, BasicDecimal128(123).little_endian_array() = {123, 0};
107
+ WordArray little_endian_array() const {
108
+ return bit_util::little_endian::FromNative(array_);
109
+ }
110
+
111
+ const uint8_t* native_endian_bytes() const {
112
+ return reinterpret_cast<const uint8_t*>(array_.data());
113
+ }
114
+
115
+ uint8_t* mutable_native_endian_bytes() {
116
+ return reinterpret_cast<uint8_t*>(array_.data());
117
+ }
118
+
119
+ /// \brief Return the raw bytes of the value in native-endian byte order.
120
+ std::array<uint8_t, kByteWidth> ToBytes() const {
121
+ std::array<uint8_t, kByteWidth> out{{0}};
122
+ memcpy(out.data(), array_.data(), kByteWidth);
123
+ return out;
124
+ }
125
+
126
+ /// \brief Copy the raw bytes of the value in native-endian byte order.
127
+ void ToBytes(uint8_t* out) const { memcpy(out, array_.data(), kByteWidth); }
128
+
129
+ /// Return 1 if positive or zero, -1 if strictly negative.
130
+ int64_t Sign() const {
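+ // Arithmetic right shift smears the sign bit across the whole word (all zeros
+ // for non-negative, all ones i.e. -1 for negative); OR-ing with 1 then maps
+ // those two cases to +1 and -1 respectively.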
131
+ return 1 | (static_cast<int64_t>(array_[kHighWordIndex]) >> 63);
132
+ }
133
+
134
+ bool IsNegative() const { return static_cast<int64_t>(array_[kHighWordIndex]) < 0; }
135
+
136
+ explicit operator bool() const { return array_ != WordArray{}; }
137
+
138
+ friend bool operator==(const GenericBasicDecimal& left,
139
+ const GenericBasicDecimal& right) {
140
+ return left.array_ == right.array_;
141
+ }
142
+
143
+ friend bool operator!=(const GenericBasicDecimal& left,
144
+ const GenericBasicDecimal& right) {
145
+ return left.array_ != right.array_;
146
+ }
147
+
148
+ protected:
149
+ WordArray array_;
150
+
151
+ template <typename T>
152
+ static constexpr uint64_t SignExtend(T low_bits) noexcept {
153
+ return low_bits >= T{} ? uint64_t{0} : ~uint64_t{0};
154
+ }
155
+
156
+ template <typename T>
157
+ static constexpr WordArray WordsFromLowBits(T low_bits) {
158
+ WordArray words{};
159
+ if (low_bits < T{}) {
160
+ for (auto& word : words) {
161
+ word = ~uint64_t{0};
162
+ }
163
+ }
164
+ words[kLowWordIndex] = static_cast<uint64_t>(low_bits);
165
+ return words;
166
+ }
167
+ };
168
+
169
+ /// Represents a signed 128-bit integer in two's complement.
170
+ ///
171
+ /// This class is also compiled into LLVM IR - so, it should not have cpp references like
172
+ /// streams and boost.
173
+ class ARROW_EXPORT BasicDecimal128 : public GenericBasicDecimal<BasicDecimal128, 128> {
174
+ public:
175
+ static constexpr int kMaxPrecision = 38;
176
+ static constexpr int kMaxScale = 38;
177
+
178
+ using GenericBasicDecimal::GenericBasicDecimal;
179
+
180
+ constexpr BasicDecimal128() noexcept : GenericBasicDecimal() {}
181
+
182
+ /// \brief Create a BasicDecimal128 from the two's complement representation.
183
+ #if ARROW_LITTLE_ENDIAN
184
+ constexpr BasicDecimal128(int64_t high, uint64_t low) noexcept
185
+ : BasicDecimal128(WordArray{low, static_cast<uint64_t>(high)}) {}
186
+ #else
187
+ constexpr BasicDecimal128(int64_t high, uint64_t low) noexcept
188
+ : BasicDecimal128(WordArray{static_cast<uint64_t>(high), low}) {}
189
+ #endif
190
+
191
+ /// \brief Negate the current value (in-place)
192
+ BasicDecimal128& Negate();
193
+
194
+ /// \brief Absolute value (in-place)
195
+ BasicDecimal128& Abs();
196
+
197
+ /// \brief Absolute value
198
+ static BasicDecimal128 Abs(const BasicDecimal128& left);
199
+
200
+ /// \brief Add a number to this one. The result is truncated to 128 bits.
201
+ BasicDecimal128& operator+=(const BasicDecimal128& right);
202
+
203
+ /// \brief Subtract a number from this one. The result is truncated to 128 bits.
204
+ BasicDecimal128& operator-=(const BasicDecimal128& right);
205
+
206
+ /// \brief Multiply this number by another number. The result is truncated to 128 bits.
207
+ BasicDecimal128& operator*=(const BasicDecimal128& right);
208
+
209
+ /// Divide this number by right and return the result.
210
+ ///
211
+ /// This operation is not destructive.
212
+ /// The answer rounds to zero. Signs work like:
213
+ /// 21 / 5 -> 4, 1
214
+ /// -21 / 5 -> -4, -1
215
+ /// 21 / -5 -> -4, 1
216
+ /// -21 / -5 -> 4, -1
217
+ /// \param[in] divisor the number to divide by
218
+ /// \param[out] result the quotient
219
+ /// \param[out] remainder the remainder after the division
220
+ DecimalStatus Divide(const BasicDecimal128& divisor, BasicDecimal128* result,
221
+ BasicDecimal128* remainder) const;
222
+
223
+ /// \brief In-place division.
224
+ BasicDecimal128& operator/=(const BasicDecimal128& right);
225
+
226
+ /// \brief Bitwise "or" between two BasicDecimal128.
227
+ BasicDecimal128& operator|=(const BasicDecimal128& right);
228
+
229
+ /// \brief Bitwise "and" between two BasicDecimal128.
230
+ BasicDecimal128& operator&=(const BasicDecimal128& right);
231
+
232
+ /// \brief Shift left by the given number of bits.
233
+ BasicDecimal128& operator<<=(uint32_t bits);
234
+
235
+ BasicDecimal128 operator<<(uint32_t bits) const {
236
+ auto res = *this;
237
+ res <<= bits;
238
+ return res;
239
+ }
240
+
241
+ /// \brief Shift right by the given number of bits.
242
+ ///
243
+ /// Negative values will sign-extend.
244
+ BasicDecimal128& operator>>=(uint32_t bits);
245
+
246
+ BasicDecimal128 operator>>(uint32_t bits) const {
247
+ auto res = *this;
248
+ res >>= bits;
249
+ return res;
250
+ }
251
+
252
+ /// \brief Get the high bits of the two's complement representation of the number.
253
+ constexpr int64_t high_bits() const {
254
+ #if ARROW_LITTLE_ENDIAN
255
+ return static_cast<int64_t>(array_[1]);
256
+ #else
257
+ return static_cast<int64_t>(array_[0]);
258
+ #endif
259
+ }
260
+
261
+ /// \brief Get the low bits of the two's complement representation of the number.
262
+ constexpr uint64_t low_bits() const {
263
+ #if ARROW_LITTLE_ENDIAN
264
+ return array_[0];
265
+ #else
266
+ return array_[1];
267
+ #endif
268
+ }
269
+
270
+ /// \brief separate the integer and fractional parts for the given scale.
271
+ void GetWholeAndFraction(int32_t scale, BasicDecimal128* whole,
272
+ BasicDecimal128* fraction) const;
273
+
274
+ /// \brief Scale multiplier for given scale value.
275
+ static const BasicDecimal128& GetScaleMultiplier(int32_t scale);
276
+ /// \brief Half-scale multiplier for given scale value.
277
+ static const BasicDecimal128& GetHalfScaleMultiplier(int32_t scale);
278
+
279
+ /// \brief Convert BasicDecimal128 from one scale to another
280
+ DecimalStatus Rescale(int32_t original_scale, int32_t new_scale,
281
+ BasicDecimal128* out) const;
282
+
283
+ /// \brief Scale up.
284
+ BasicDecimal128 IncreaseScaleBy(int32_t increase_by) const;
285
+
286
+ /// \brief Scale down.
287
+ /// - If 'round' is true, the right-most digits are dropped and the result value is
288
+ /// rounded up (+1 for +ve, -1 for -ve) based on the value of the dropped digits
289
+ /// (>= 10^reduce_by / 2).
290
+ /// - If 'round' is false, the right-most digits are simply dropped.
291
+ BasicDecimal128 ReduceScaleBy(int32_t reduce_by, bool round = true) const;
292
+
293
+ /// \brief Whether this number fits in the given precision
294
+ ///
295
+ /// Return true if the number of significant digits is less or equal to `precision`.
296
+ bool FitsInPrecision(int32_t precision) const;
297
+
298
+ /// \brief count the number of leading binary zeroes.
299
+ int32_t CountLeadingBinaryZeros() const;
300
+
301
+ /// \brief Get the maximum valid unscaled decimal value.
302
+ static const BasicDecimal128& GetMaxValue();
303
+
304
+ /// \brief Get the maximum valid unscaled decimal value for the given precision.
305
+ static BasicDecimal128 GetMaxValue(int32_t precision);
306
+
307
+ /// \brief Get the maximum decimal value (is not a valid value).
308
+ static constexpr BasicDecimal128 GetMaxSentinel() {
309
+ return BasicDecimal128(/*high=*/std::numeric_limits<int64_t>::max(),
310
+ /*low=*/std::numeric_limits<uint64_t>::max());
311
+ }
312
+ /// \brief Get the minimum decimal value (is not a valid value).
313
+ static constexpr BasicDecimal128 GetMinSentinel() {
314
+ return BasicDecimal128(/*high=*/std::numeric_limits<int64_t>::min(),
315
+ /*low=*/std::numeric_limits<uint64_t>::min());
316
+ }
317
+ };
318
+
319
+ ARROW_EXPORT bool operator<(const BasicDecimal128& left, const BasicDecimal128& right);
320
+ ARROW_EXPORT bool operator<=(const BasicDecimal128& left, const BasicDecimal128& right);
321
+ ARROW_EXPORT bool operator>(const BasicDecimal128& left, const BasicDecimal128& right);
322
+ ARROW_EXPORT bool operator>=(const BasicDecimal128& left, const BasicDecimal128& right);
323
+
324
+ ARROW_EXPORT BasicDecimal128 operator-(const BasicDecimal128& operand);
325
+ ARROW_EXPORT BasicDecimal128 operator~(const BasicDecimal128& operand);
326
+ ARROW_EXPORT BasicDecimal128 operator+(const BasicDecimal128& left,
327
+ const BasicDecimal128& right);
328
+ ARROW_EXPORT BasicDecimal128 operator-(const BasicDecimal128& left,
329
+ const BasicDecimal128& right);
330
+ ARROW_EXPORT BasicDecimal128 operator*(const BasicDecimal128& left,
331
+ const BasicDecimal128& right);
332
+ ARROW_EXPORT BasicDecimal128 operator/(const BasicDecimal128& left,
333
+ const BasicDecimal128& right);
334
+ ARROW_EXPORT BasicDecimal128 operator%(const BasicDecimal128& left,
335
+ const BasicDecimal128& right);
336
+
337
+ class ARROW_EXPORT BasicDecimal256 : public GenericBasicDecimal<BasicDecimal256, 256> {
338
+ public:
339
+ using GenericBasicDecimal::GenericBasicDecimal;
340
+
341
+ static constexpr int kMaxPrecision = 76;
342
+ static constexpr int kMaxScale = 76;
343
+
344
+ constexpr BasicDecimal256() noexcept : GenericBasicDecimal() {}
345
+
346
+ explicit BasicDecimal256(const BasicDecimal128& value) noexcept
347
+ : BasicDecimal256(bit_util::little_endian::ToNative<uint64_t, 4>(
348
+ {value.low_bits(), static_cast<uint64_t>(value.high_bits()),
349
+ SignExtend(value.high_bits()), SignExtend(value.high_bits())})) {}
350
+
351
+ /// \brief Negate the current value (in-place)
352
+ BasicDecimal256& Negate();
353
+
354
+ /// \brief Absolute value (in-place)
355
+ BasicDecimal256& Abs();
356
+
357
+ /// \brief Absolute value
358
+ static BasicDecimal256 Abs(const BasicDecimal256& left);
359
+
360
+ /// \brief Add a number to this one. The result is truncated to 256 bits.
361
+ BasicDecimal256& operator+=(const BasicDecimal256& right);
362
+
363
+ /// \brief Subtract a number from this one. The result is truncated to 256 bits.
364
+ BasicDecimal256& operator-=(const BasicDecimal256& right);
365
+
366
+ /// \brief Get the lowest bits of the two's complement representation of the number.
367
+ uint64_t low_bits() const { return bit_util::little_endian::Make(array_)[0]; }
368
+
369
+ /// \brief separate the integer and fractional parts for the given scale.
370
+ void GetWholeAndFraction(int32_t scale, BasicDecimal256* whole,
371
+ BasicDecimal256* fraction) const;
372
+
373
+ /// \brief Scale multiplier for given scale value.
374
+ static const BasicDecimal256& GetScaleMultiplier(int32_t scale);
375
+ /// \brief Half-scale multiplier for given scale value.
376
+ static const BasicDecimal256& GetHalfScaleMultiplier(int32_t scale);
377
+
378
+ /// \brief Convert BasicDecimal256 from one scale to another
379
+ DecimalStatus Rescale(int32_t original_scale, int32_t new_scale,
380
+ BasicDecimal256* out) const;
381
+
382
+ /// \brief Scale up.
383
+ BasicDecimal256 IncreaseScaleBy(int32_t increase_by) const;
384
+
385
+ /// \brief Scale down.
386
+ /// - If 'round' is true, the right-most digits are dropped and the result value is
387
+ /// rounded up (+1 for positive, -1 for negative) based on the value of the
388
+ /// dropped digits (>= 10^reduce_by / 2).
389
+ /// - If 'round' is false, the right-most digits are simply dropped.
390
+ BasicDecimal256 ReduceScaleBy(int32_t reduce_by, bool round = true) const;
391
+
392
+ /// \brief Whether this number fits in the given precision
393
+ ///
394
+ /// Return true if the number of significant digits is less or equal to `precision`.
395
+ bool FitsInPrecision(int32_t precision) const;
396
+
397
+ /// \brief Multiply this number by another number. The result is truncated to 256 bits.
398
+ BasicDecimal256& operator*=(const BasicDecimal256& right);
399
+
400
+ /// Divide this number by right and return the result.
401
+ ///
402
+ /// This operation is not destructive.
403
+ /// The answer rounds to zero. Signs work like:
404
+ /// 21 / 5 -> 4, 1
405
+ /// -21 / 5 -> -4, -1
406
+ /// 21 / -5 -> -4, 1
407
+ /// -21 / -5 -> 4, -1
408
+ /// \param[in] divisor the number to divide by
409
+ /// \param[out] result the quotient
410
+ /// \param[out] remainder the remainder after the division
411
+ DecimalStatus Divide(const BasicDecimal256& divisor, BasicDecimal256* result,
412
+ BasicDecimal256* remainder) const;
413
+
414
+ /// \brief Shift left by the given number of bits.
415
+ BasicDecimal256& operator<<=(uint32_t bits);
416
+
417
+ BasicDecimal256 operator<<(uint32_t bits) const {
418
+ auto res = *this;
419
+ res <<= bits;
420
+ return res;
421
+ }
422
+
423
+ /// \brief Shift right by the given number of bits.
424
+ ///
425
+ /// Negative values will sign-extend.
426
+ BasicDecimal256& operator>>=(uint32_t bits);
427
+
428
+ BasicDecimal256 operator>>(uint32_t bits) const {
429
+ auto res = *this;
430
+ res >>= bits;
431
+ return res;
432
+ }
433
+
434
+ /// \brief In-place division.
435
+ BasicDecimal256& operator/=(const BasicDecimal256& right);
436
+
437
+ /// \brief Get the maximum valid unscaled decimal value for the given precision.
438
+ static BasicDecimal256 GetMaxValue(int32_t precision);
439
+
440
+ /// \brief Get the maximum decimal value (is not a valid value).
441
+ static constexpr BasicDecimal256 GetMaxSentinel() {
442
+ #if ARROW_LITTLE_ENDIAN
443
+ return BasicDecimal256({std::numeric_limits<uint64_t>::max(),
444
+ std::numeric_limits<uint64_t>::max(),
445
+ std::numeric_limits<uint64_t>::max(),
446
+ static_cast<uint64_t>(std::numeric_limits<int64_t>::max())});
447
+ #else
448
+ return BasicDecimal256({static_cast<uint64_t>(std::numeric_limits<int64_t>::max()),
449
+ std::numeric_limits<uint64_t>::max(),
450
+ std::numeric_limits<uint64_t>::max(),
451
+ std::numeric_limits<uint64_t>::max()});
452
+ #endif
453
+ }
454
+ /// \brief Get the minimum decimal value (is not a valid value).
455
+ static constexpr BasicDecimal256 GetMinSentinel() {
456
+ #if ARROW_LITTLE_ENDIAN
457
+ return BasicDecimal256(
458
+ {0, 0, 0, static_cast<uint64_t>(std::numeric_limits<int64_t>::min())});
459
+ #else
460
+ return BasicDecimal256(
461
+ {static_cast<uint64_t>(std::numeric_limits<int64_t>::min()), 0, 0, 0});
462
+ #endif
463
+ }
464
+ };
465
+
466
+ ARROW_EXPORT bool operator<(const BasicDecimal256& left, const BasicDecimal256& right);
467
+
468
+ ARROW_EXPORT inline bool operator<=(const BasicDecimal256& left,
469
+ const BasicDecimal256& right) {
470
+ return !operator<(right, left);
471
+ }
472
+
473
+ ARROW_EXPORT inline bool operator>(const BasicDecimal256& left,
474
+ const BasicDecimal256& right) {
475
+ return operator<(right, left);
476
+ }
477
+
478
+ ARROW_EXPORT inline bool operator>=(const BasicDecimal256& left,
479
+ const BasicDecimal256& right) {
480
+ return !operator<(left, right);
481
+ }
482
+
483
+ ARROW_EXPORT BasicDecimal256 operator-(const BasicDecimal256& operand);
484
+ ARROW_EXPORT BasicDecimal256 operator~(const BasicDecimal256& operand);
485
+ ARROW_EXPORT BasicDecimal256 operator+(const BasicDecimal256& left,
486
+ const BasicDecimal256& right);
487
+ ARROW_EXPORT BasicDecimal256 operator*(const BasicDecimal256& left,
488
+ const BasicDecimal256& right);
489
+ ARROW_EXPORT BasicDecimal256 operator/(const BasicDecimal256& left,
490
+ const BasicDecimal256& right);
491
+
492
+ } // namespace arrow
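
To make the Divide() contract documented above concrete, here is a minimal sketch (not part of the diff) of the truncating division and sign behavior of BasicDecimal128. It assumes an ordinary Arrow build with this header on the include path, and that DecimalStatus::kSuccess is the success value declared earlier in this header.

// Sketch only: exercises BasicDecimal128::Divide() as documented above.
#include <cassert>

#include "arrow/util/basic_decimal.h"

int main() {
  arrow::BasicDecimal128 quotient, remainder;
  const arrow::BasicDecimal128 n(-21), d(5);
  // The quotient truncates toward zero; the remainder takes the dividend's sign.
  assert(n.Divide(d, &quotient, &remainder) == arrow::DecimalStatus::kSuccess);
  assert(quotient == arrow::BasicDecimal128(-4));
  assert(remainder == arrow::BasicDecimal128(-1));
  return 0;
}
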
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h ADDED
@@ -0,0 +1,211 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements.  See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership.  The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License.  You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied.  See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #include <algorithm>
+ #include <cstdint>
+ #include <string>
+
+ #include "benchmark/benchmark.h"
+
+ #include "arrow/memory_pool.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/cpu_info.h"
+ #include "arrow/util/logging.h"  // IWYU pragma: keep
+
+ namespace arrow {
+
+ // Benchmark changed its parameter type between releases from
+ // int to int64_t. As it doesn't have version macros, we need
+ // to apply C++ template magic.
+
+ template <typename Func>
+ struct BenchmarkArgsType;
+
+ // Pattern matching that extracts the vector element type of Benchmark::Args()
+ template <typename Values>
+ struct BenchmarkArgsType<benchmark::internal::Benchmark* (
+     benchmark::internal::Benchmark::*)(const std::vector<Values>&)> {
+   using type = Values;
+ };
+
+ using ArgsType =
+     typename BenchmarkArgsType<decltype(&benchmark::internal::Benchmark::Args)>::type;
+
+ using internal::CpuInfo;
+
+ static const CpuInfo* cpu_info = CpuInfo::GetInstance();
+
+ static const int64_t kL1Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L1);
+ static const int64_t kL2Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L2);
+ static const int64_t kL3Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L3);
+ static const int64_t kCantFitInL3Size = kL3Size * 4;
+ static const std::vector<int64_t> kMemorySizes = {kL1Size, kL2Size, kL3Size,
+                                                   kCantFitInL3Size};
+ // 0 is treated as "no nulls"
+ static const std::vector<ArgsType> kInverseNullProportions = {10000, 100, 10, 2, 1, 0};
+
+ struct GenericItemsArgs {
+   // number of items processed per iteration
+   const int64_t size;
+
+   // proportion of nulls in generated arrays
+   double null_proportion;
+
+   explicit GenericItemsArgs(benchmark::State& state)
+       : size(state.range(0)), state_(state) {
+     if (state.range(1) == 0) {
+       this->null_proportion = 0.0;
+     } else {
+       this->null_proportion = std::min(1., 1. / static_cast<double>(state.range(1)));
+     }
+   }
+
+   ~GenericItemsArgs() {
+     state_.counters["size"] = static_cast<double>(size);
+     state_.counters["null_percent"] = null_proportion * 100;
+     state_.SetItemsProcessed(state_.iterations() * size);
+   }
+
+  private:
+   benchmark::State& state_;
+ };
+
+ void BenchmarkSetArgsWithSizes(benchmark::internal::Benchmark* bench,
+                                const std::vector<int64_t>& sizes = kMemorySizes) {
+   bench->Unit(benchmark::kMicrosecond);
+
+   for (const auto size : sizes) {
+     for (const auto inverse_null_proportion : kInverseNullProportions) {
+       bench->Args({static_cast<ArgsType>(size), inverse_null_proportion});
+     }
+   }
+ }
+
+ void BenchmarkSetArgs(benchmark::internal::Benchmark* bench) {
+   BenchmarkSetArgsWithSizes(bench, kMemorySizes);
+ }
+
+ void RegressionSetArgs(benchmark::internal::Benchmark* bench) {
+   // Regressions do not need to account for cache hierarchy, thus optimize for
+   // the best case.
+   BenchmarkSetArgsWithSizes(bench, {kL1Size});
+ }
+
+ // RAII struct to handle some of the boilerplate in regression benchmarks
+ struct RegressionArgs {
+   // size of memory tested (per iteration) in bytes
+   int64_t size;
+
+   // proportion of nulls in generated arrays
+   double null_proportion;
+
+   // If size_is_bytes is true, then it's a number of bytes, otherwise it's the
+   // number of items processed (for reporting)
+   explicit RegressionArgs(benchmark::State& state, bool size_is_bytes = true)
+       : size(state.range(0)), state_(state), size_is_bytes_(size_is_bytes) {
+     if (state.range(1) == 0) {
+       this->null_proportion = 0.0;
+     } else {
+       this->null_proportion = std::min(1., 1. / static_cast<double>(state.range(1)));
+     }
+   }
+
+   ~RegressionArgs() {
+     state_.counters["size"] = static_cast<double>(size);
+     state_.counters["null_percent"] = null_proportion * 100;
+     if (size_is_bytes_) {
+       state_.SetBytesProcessed(state_.iterations() * size);
+     } else {
+       state_.SetItemsProcessed(state_.iterations() * size);
+     }
+   }
+
+  private:
+   benchmark::State& state_;
+   bool size_is_bytes_;
+ };
+
+ class MemoryPoolMemoryManager : public benchmark::MemoryManager {
+   void Start() override {
+     memory_pool = std::make_shared<ProxyMemoryPool>(default_memory_pool());
+
+     MemoryPool* default_pool = default_memory_pool();
+     global_allocations_start = default_pool->num_allocations();
+   }
+
+   // BENCHMARK_DONT_OPTIMIZE is used here to detect Google Benchmark
+   // 1.8.0. We can remove this Stop(Result*) when we require Google
+   // Benchmark 1.8.0 or later.
+ #ifndef BENCHMARK_DONT_OPTIMIZE
+   void Stop(Result* result) override { Stop(*result); }
+ #endif
+
+   void Stop(benchmark::MemoryManager::Result& result) override {
+     // If num_allocations is still zero, we assume that the memory pool wasn't
+     // passed down, so we should record them.
+     MemoryPool* default_pool = default_memory_pool();
+     int64_t new_default_allocations =
+         default_pool->num_allocations() - global_allocations_start;
+
+     // Only record metrics if (1) there were allocations and (2) we
+     // recorded at least one.
+     if (new_default_allocations > 0 && memory_pool->num_allocations() > 0) {
+       if (new_default_allocations > memory_pool->num_allocations()) {
+         // If we missed some, let's report that.
+         int64_t missed_allocations =
+             new_default_allocations - memory_pool->num_allocations();
+         ARROW_LOG(WARNING) << "BenchmarkMemoryTracker recorded some allocations "
+                            << "for a benchmark, but missed " << missed_allocations
+                            << " allocations.\n";
+       }
+
+       result.max_bytes_used = memory_pool->max_memory();
+       result.total_allocated_bytes = memory_pool->total_bytes_allocated();
+       result.num_allocs = memory_pool->num_allocations();
+     }
+   }
+
+  public:
+   std::shared_ptr<::arrow::ProxyMemoryPool> memory_pool;
+
+  protected:
+   int64_t global_allocations_start;
+ };
+
+ /// \brief Track memory pool allocations in benchmarks.
+ ///
+ /// Instantiate as a global variable to register the hooks into Google Benchmark
+ /// to collect memory metrics. Before each benchmark, a new ProxyMemoryPool is
+ /// created. It can then be accessed with memory_pool(). Once the benchmark is
+ /// complete, the hook will record the maximum memory used, the total bytes
+ /// allocated, and the total number of allocations. If no allocations were seen
+ /// (for example, if you forgot to pass down the memory pool), then these metrics
+ /// will not be saved.
+ ///
+ /// Since this is used as one global variable, this will not work if multiple
+ /// benchmarks are run concurrently or for multi-threaded benchmarks (ones
+ /// that use `->ThreadRange(...)`).
+ class BenchmarkMemoryTracker {
+  public:
+   BenchmarkMemoryTracker() : manager_() { ::benchmark::RegisterMemoryManager(&manager_); }
+   ::arrow::MemoryPool* memory_pool() const { return manager_.memory_pool.get(); }
+
+  protected:
+   ::arrow::MemoryPoolMemoryManager manager_;
+ };
+
+ }  // namespace arrow
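
As a usage note, the helpers above are meant to be wired into Google Benchmark roughly as follows. This is only a sketch; BenchmarkIntegers and its body are hypothetical, and only RegressionArgs/RegressionSetArgs come from the header.

// Sketch only: BenchmarkIntegers is a hypothetical benchmark.
#include "benchmark/benchmark.h"

#include "arrow/util/benchmark_util.h"

static void BenchmarkIntegers(benchmark::State& state) {
  // Records "size" and "null_percent" counters and bytes processed on scope exit.
  arrow::RegressionArgs args(state, /*size_is_bytes=*/true);
  for (auto _ : state) {
    benchmark::DoNotOptimize(args.size);  // real per-iteration work goes here
  }
}
// RegressionSetArgs pins the size to L1 and sweeps the null proportions.
BENCHMARK(BenchmarkIntegers)->Apply(arrow::RegressionSetArgs);
BENCHMARK_MAIN();
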
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h ADDED
@@ -0,0 +1,95 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements.  See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership.  The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License.  You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied.  See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <string_view>
+ #include <utility>
+
+ #include "arrow/type.h"
+ #include "arrow/util/span.h"
+
+ namespace arrow::util {
+
+ inline BinaryViewType::c_type ToInlineBinaryView(const void* data, int32_t size) {
+   // Small string: inlined. Bytes beyond size are zeroed
+   BinaryViewType::c_type out;
+   out.inlined = {size, {}};
+   memcpy(&out.inlined.data, data, size);
+   return out;
+ }
+
+ inline BinaryViewType::c_type ToInlineBinaryView(std::string_view v) {
+   return ToInlineBinaryView(v.data(), static_cast<int32_t>(v.size()));
+ }
+
+ inline BinaryViewType::c_type ToBinaryView(const void* data, int32_t size,
+                                            int32_t buffer_index, int32_t offset) {
+   if (size <= BinaryViewType::kInlineSize) {
+     return ToInlineBinaryView(data, size);
+   }
+
+   // Large string: store index/offset.
+   BinaryViewType::c_type out;
+   out.ref = {size, {}, buffer_index, offset};
+   memcpy(&out.ref.prefix, data, sizeof(out.ref.prefix));
+   return out;
+ }
+
+ inline BinaryViewType::c_type ToBinaryView(std::string_view v, int32_t buffer_index,
+                                            int32_t offset) {
+   return ToBinaryView(v.data(), static_cast<int32_t>(v.size()), buffer_index, offset);
+ }
+
+ template <typename BufferPtr>
+ std::string_view FromBinaryView(const BinaryViewType::c_type& v,
+                                 const BufferPtr* data_buffers) {
+   auto* data = v.is_inline() ? v.inlined.data.data()
+                              : data_buffers[v.ref.buffer_index]->data() + v.ref.offset;
+   return {reinterpret_cast<const char*>(data), static_cast<size_t>(v.size())};
+ }
+ template <typename BufferPtr>
+ std::string_view FromBinaryView(BinaryViewType::c_type&&, const BufferPtr*) = delete;
+
+ template <typename BufferPtr>
+ bool EqualBinaryView(BinaryViewType::c_type l, BinaryViewType::c_type r,
+                      const BufferPtr* l_buffers, const BufferPtr* r_buffers) {
+   int64_t l_size_and_prefix, r_size_and_prefix;
+   memcpy(&l_size_and_prefix, &l, sizeof(l_size_and_prefix));
+   memcpy(&r_size_and_prefix, &r, sizeof(r_size_and_prefix));
+
+   if (l_size_and_prefix != r_size_and_prefix) return false;
+
+   if (l.is_inline()) {
+     // The columnar spec mandates that the inlined part be zero-padded, so we can compare
+     // a word at a time regardless of the exact size.
+     int64_t l_inlined, r_inlined;
+     memcpy(&l_inlined, l.inline_data() + BinaryViewType::kPrefixSize, sizeof(l_inlined));
+     memcpy(&r_inlined, r.inline_data() + BinaryViewType::kPrefixSize, sizeof(r_inlined));
+     return l_inlined == r_inlined;
+   }
+
+   // Sizes are equal and this is not inline, therefore both are out
+   // of line and have kPrefixSize first in common.
+   const uint8_t* l_data = l_buffers[l.ref.buffer_index]->data() + l.ref.offset;
+   const uint8_t* r_data = r_buffers[r.ref.buffer_index]->data() + r.ref.offset;
+   return memcmp(l_data + BinaryViewType::kPrefixSize,
+                 r_data + BinaryViewType::kPrefixSize,
+                 l.size() - BinaryViewType::kPrefixSize) == 0;
+ }
+
+ }  // namespace arrow::util
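
A minimal sketch (not part of the diff) of the inline round trip described above: values of at most BinaryViewType::kInlineSize bytes are stored directly in the view, so no data buffers are needed to read them back.

// Sketch only: inline views never dereference the buffer array.
#include <cassert>
#include <memory>
#include <string_view>

#include "arrow/buffer.h"
#include "arrow/util/binary_view_util.h"

int main() {
  const auto view = arrow::util::ToInlineBinaryView(std::string_view("hello"));
  assert(view.is_inline());
  // The inline branch of FromBinaryView() ignores the buffer array entirely.
  const std::shared_ptr<arrow::Buffer>* no_buffers = nullptr;
  assert(arrow::util::FromBinaryView(view, no_buffers) == "hello");
  return 0;
}
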
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_block_counter.h ADDED
@@ -0,0 +1,570 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements.  See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership.  The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License.  You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied.  See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <algorithm>
+ #include <cstdint>
+ #include <limits>
+ #include <memory>
+
+ #include "arrow/buffer.h"
+ #include "arrow/status.h"
+ #include "arrow/util/bit_util.h"
+ #include "arrow/util/endian.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/ubsan.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+ namespace internal {
+ namespace detail {
+
+ inline uint64_t LoadWord(const uint8_t* bytes) {
+   return bit_util::ToLittleEndian(util::SafeLoadAs<uint64_t>(bytes));
+ }
+
+ inline uint64_t ShiftWord(uint64_t current, uint64_t next, int64_t shift) {
+   if (shift == 0) {
+     return current;
+   }
+   return (current >> shift) | (next << (64 - shift));
+ }
+
+ // These templates are here to help with unit tests
+
+ template <typename T>
+ constexpr T BitNot(T x) {
+   return ~x;
+ }
+
+ template <>
+ constexpr bool BitNot(bool x) {
+   return !x;
+ }
+
+ struct BitBlockAnd {
+   template <typename T>
+   static constexpr T Call(T left, T right) {
+     return left & right;
+   }
+ };
+
+ struct BitBlockAndNot {
+   template <typename T>
+   static constexpr T Call(T left, T right) {
+     return left & BitNot(right);
+   }
+ };
+
+ struct BitBlockOr {
+   template <typename T>
+   static constexpr T Call(T left, T right) {
+     return left | right;
+   }
+ };
+
+ struct BitBlockOrNot {
+   template <typename T>
+   static constexpr T Call(T left, T right) {
+     return left | BitNot(right);
+   }
+ };
+
+ }  // namespace detail
+
+ /// \brief Return value from bit block counters: the total number of bits and
+ /// the number of set bits.
+ struct BitBlockCount {
+   int16_t length;
+   int16_t popcount;
+
+   bool NoneSet() const { return this->popcount == 0; }
+   bool AllSet() const { return this->length == this->popcount; }
+ };
+
+ /// \brief A class that scans through a true/false bitmap to compute popcounts
+ /// 64 or 256 bits at a time. This is used to accelerate processing of
+ /// mostly-not-null array data.
+ class ARROW_EXPORT BitBlockCounter {
+  public:
+   BitBlockCounter(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+       : bitmap_(util::MakeNonNull(bitmap) + start_offset / 8),
+         bits_remaining_(length),
+         offset_(start_offset % 8) {}
+
+   /// \brief The bit size of each word run
+   static constexpr int64_t kWordBits = 64;
+
+   /// \brief The bit size of four words run
+   static constexpr int64_t kFourWordsBits = kWordBits * 4;
+
+   /// \brief Return the next run of available bits, usually 256. The returned
+   /// pair contains the size of the run and the number of true values. The last
+   /// block will have a length less than 256 if the bitmap length is not a
+   /// multiple of 256, and will return 0-length blocks in subsequent
+   /// invocations.
+   BitBlockCount NextFourWords() {
+     using detail::LoadWord;
+     using detail::ShiftWord;
+
+     if (!bits_remaining_) {
+       return {0, 0};
+     }
+     int64_t total_popcount = 0;
+     if (offset_ == 0) {
+       if (bits_remaining_ < kFourWordsBits) {
+         return GetBlockSlow(kFourWordsBits);
+       }
+       total_popcount += bit_util::PopCount(LoadWord(bitmap_));
+       total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 8));
+       total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 16));
+       total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 24));
+     } else {
+       // When the offset is > 0, we need there to be a word beyond the last
+       // aligned word in the bitmap for the bit shifting logic.
+       if (bits_remaining_ < 5 * kFourWordsBits - offset_) {
+         return GetBlockSlow(kFourWordsBits);
+       }
+       auto current = LoadWord(bitmap_);
+       auto next = LoadWord(bitmap_ + 8);
+       total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
+       current = next;
+       next = LoadWord(bitmap_ + 16);
+       total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
+       current = next;
+       next = LoadWord(bitmap_ + 24);
+       total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
+       current = next;
+       next = LoadWord(bitmap_ + 32);
+       total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
+     }
+     bitmap_ += bit_util::BytesForBits(kFourWordsBits);
+     bits_remaining_ -= kFourWordsBits;
+     return {256, static_cast<int16_t>(total_popcount)};
+   }
+
+   /// \brief Return the next run of available bits, usually 64. The returned
+   /// pair contains the size of the run and the number of true values. The last
+   /// block will have a length less than 64 if the bitmap length is not a
+   /// multiple of 64, and will return 0-length blocks in subsequent
+   /// invocations.
+   BitBlockCount NextWord() {
+     using detail::LoadWord;
+     using detail::ShiftWord;
+
+     if (!bits_remaining_) {
+       return {0, 0};
+     }
+     int64_t popcount = 0;
+     if (offset_ == 0) {
+       if (bits_remaining_ < kWordBits) {
+         return GetBlockSlow(kWordBits);
+       }
+       popcount = bit_util::PopCount(LoadWord(bitmap_));
+     } else {
+       // When the offset is > 0, we need there to be a word beyond the last
+       // aligned word in the bitmap for the bit shifting logic.
+       if (bits_remaining_ < 2 * kWordBits - offset_) {
+         return GetBlockSlow(kWordBits);
+       }
+       popcount = bit_util::PopCount(
+           ShiftWord(LoadWord(bitmap_), LoadWord(bitmap_ + 8), offset_));
+     }
+     bitmap_ += kWordBits / 8;
+     bits_remaining_ -= kWordBits;
+     return {64, static_cast<int16_t>(popcount)};
+   }
+
+  private:
+   /// \brief Return block with the requested size when doing word-wise
+   /// computation is not possible due to inadequate bits remaining.
+   BitBlockCount GetBlockSlow(int64_t block_size) noexcept;
+
+   const uint8_t* bitmap_;
+   int64_t bits_remaining_;
+   int64_t offset_;
+ };
+
+ /// \brief A tool to iterate through a possibly nonexistent validity bitmap,
+ /// to allow us to write one code path for both the with-nulls and no-nulls
+ /// cases without giving up a lot of performance.
+ class ARROW_EXPORT OptionalBitBlockCounter {
+  public:
+   // validity_bitmap may be NULLPTR
+   OptionalBitBlockCounter(const uint8_t* validity_bitmap, int64_t offset, int64_t length);
+
+   // validity_bitmap may be null
+   OptionalBitBlockCounter(const std::shared_ptr<Buffer>& validity_bitmap, int64_t offset,
+                           int64_t length);
+
+   /// Return block count for next word when the bitmap is available, otherwise
+   /// return a block with length up to INT16_MAX when there is no validity
+   /// bitmap (so all the referenced values are not null).
+   BitBlockCount NextBlock() {
+     static constexpr int64_t kMaxBlockSize = std::numeric_limits<int16_t>::max();
+     if (has_bitmap_) {
+       BitBlockCount block = counter_.NextWord();
+       position_ += block.length;
+       return block;
+     } else {
+       int16_t block_size =
+           static_cast<int16_t>(std::min(kMaxBlockSize, length_ - position_));
+       position_ += block_size;
+       // All values are non-null
+       return {block_size, block_size};
+     }
+   }
+
+   // Like NextBlock, but returns a word-sized block even when there is no
+   // validity bitmap
+   BitBlockCount NextWord() {
+     static constexpr int64_t kWordSize = 64;
+     if (has_bitmap_) {
+       BitBlockCount block = counter_.NextWord();
+       position_ += block.length;
+       return block;
+     } else {
+       int16_t block_size = static_cast<int16_t>(std::min(kWordSize, length_ - position_));
+       position_ += block_size;
+       // All values are non-null
+       return {block_size, block_size};
+     }
+   }
+
+  private:
+   const bool has_bitmap_;
+   int64_t position_;
+   int64_t length_;
+   BitBlockCounter counter_;
+ };
+
+ /// \brief A class that computes popcounts on the result of bitwise operations
+ /// between two bitmaps, 64 bits at a time. A 64-bit word is loaded from each
+ /// bitmap, then the popcount is computed on e.g. the bitwise-and of the two
+ /// words.
+ class ARROW_EXPORT BinaryBitBlockCounter {
+  public:
+   BinaryBitBlockCounter(const uint8_t* left_bitmap, int64_t left_offset,
+                         const uint8_t* right_bitmap, int64_t right_offset, int64_t length)
+       : left_bitmap_(util::MakeNonNull(left_bitmap) + left_offset / 8),
+         left_offset_(left_offset % 8),
+         right_bitmap_(util::MakeNonNull(right_bitmap) + right_offset / 8),
+         right_offset_(right_offset % 8),
+         bits_remaining_(length) {}
+
+   /// \brief Return the popcount of the bitwise-and of the next run of
+   /// available bits, up to 64. The returned pair contains the size of the run
+   /// and the number of true values. The last block will have a length less
+   /// than 64 if the bitmap length is not a multiple of 64, and will return
+   /// 0-length blocks in subsequent invocations.
+   BitBlockCount NextAndWord() { return NextWord<detail::BitBlockAnd>(); }
+
+   /// \brief Computes "x & ~y" block for each available run of bits.
+   BitBlockCount NextAndNotWord() { return NextWord<detail::BitBlockAndNot>(); }
+
+   /// \brief Computes "x | y" block for each available run of bits.
+   BitBlockCount NextOrWord() { return NextWord<detail::BitBlockOr>(); }
+
+   /// \brief Computes "x | ~y" block for each available run of bits.
+   BitBlockCount NextOrNotWord() { return NextWord<detail::BitBlockOrNot>(); }
+
+  private:
+   template <class Op>
+   BitBlockCount NextWord() {
+     using detail::LoadWord;
+     using detail::ShiftWord;
+
+     if (!bits_remaining_) {
+       return {0, 0};
+     }
+     // When the offset is > 0, we need there to be a word beyond the last aligned
+     // word in the bitmap for the bit shifting logic.
+     constexpr int64_t kWordBits = BitBlockCounter::kWordBits;
+     const int64_t bits_required_to_use_words =
+         std::max(left_offset_ == 0 ? 64 : 64 + (64 - left_offset_),
+                  right_offset_ == 0 ? 64 : 64 + (64 - right_offset_));
+     if (bits_remaining_ < bits_required_to_use_words) {
+       const int16_t run_length =
+           static_cast<int16_t>(std::min(bits_remaining_, kWordBits));
+       int16_t popcount = 0;
+       for (int64_t i = 0; i < run_length; ++i) {
+         if (Op::Call(bit_util::GetBit(left_bitmap_, left_offset_ + i),
+                      bit_util::GetBit(right_bitmap_, right_offset_ + i))) {
+           ++popcount;
+         }
+       }
+       // This code path should trigger _at most_ 2 times. In the "two times"
+       // case, the first time the run length will be a multiple of 8.
+       left_bitmap_ += run_length / 8;
+       right_bitmap_ += run_length / 8;
+       bits_remaining_ -= run_length;
+       return {run_length, popcount};
+     }
+
+     int64_t popcount = 0;
+     if (left_offset_ == 0 && right_offset_ == 0) {
+       popcount =
+           bit_util::PopCount(Op::Call(LoadWord(left_bitmap_), LoadWord(right_bitmap_)));
+     } else {
+       auto left_word =
+           ShiftWord(LoadWord(left_bitmap_), LoadWord(left_bitmap_ + 8), left_offset_);
+       auto right_word =
+           ShiftWord(LoadWord(right_bitmap_), LoadWord(right_bitmap_ + 8), right_offset_);
+       popcount = bit_util::PopCount(Op::Call(left_word, right_word));
+     }
+     left_bitmap_ += kWordBits / 8;
+     right_bitmap_ += kWordBits / 8;
+     bits_remaining_ -= kWordBits;
+     return {64, static_cast<int16_t>(popcount)};
+   }
+
+   const uint8_t* left_bitmap_;
+   int64_t left_offset_;
+   const uint8_t* right_bitmap_;
+   int64_t right_offset_;
+   int64_t bits_remaining_;
+ };
+
+ class ARROW_EXPORT OptionalBinaryBitBlockCounter {
+  public:
+   // Any bitmap may be NULLPTR
+   OptionalBinaryBitBlockCounter(const uint8_t* left_bitmap, int64_t left_offset,
+                                 const uint8_t* right_bitmap, int64_t right_offset,
+                                 int64_t length);
+
+   // Any bitmap may be null
+   OptionalBinaryBitBlockCounter(const std::shared_ptr<Buffer>& left_bitmap,
+                                 int64_t left_offset,
+                                 const std::shared_ptr<Buffer>& right_bitmap,
+                                 int64_t right_offset, int64_t length);
+
+   BitBlockCount NextAndBlock() {
+     static constexpr int64_t kMaxBlockSize = std::numeric_limits<int16_t>::max();
+     switch (has_bitmap_) {
+       case HasBitmap::BOTH: {
+         BitBlockCount block = binary_counter_.NextAndWord();
+         position_ += block.length;
+         return block;
+       }
+       case HasBitmap::ONE: {
+         BitBlockCount block = unary_counter_.NextWord();
+         position_ += block.length;
+         return block;
+       }
+       case HasBitmap::NONE:
+       default: {
+         const int16_t block_size =
+             static_cast<int16_t>(std::min(kMaxBlockSize, length_ - position_));
+         position_ += block_size;
+         // All values are non-null
+         return {block_size, block_size};
+       }
+     }
+   }
+
+   BitBlockCount NextOrNotBlock() {
+     static constexpr int64_t kMaxBlockSize = std::numeric_limits<int16_t>::max();
+     switch (has_bitmap_) {
+       case HasBitmap::BOTH: {
+         BitBlockCount block = binary_counter_.NextOrNotWord();
+         position_ += block.length;
+         return block;
+       }
+       case HasBitmap::ONE: {
+         BitBlockCount block = unary_counter_.NextWord();
+         position_ += block.length;
+         return block;
+       }
+       case HasBitmap::NONE:
+       default: {
+         const int16_t block_size =
+             static_cast<int16_t>(std::min(kMaxBlockSize, length_ - position_));
+         position_ += block_size;
+         // All values are non-null
+         return {block_size, block_size};
+       }
+     }
+   }
+
+  private:
+   enum class HasBitmap : int { BOTH, ONE, NONE };
+
+   const HasBitmap has_bitmap_;
+   int64_t position_;
+   int64_t length_;
+   BitBlockCounter unary_counter_;
+   BinaryBitBlockCounter binary_counter_;
+
+   static HasBitmap HasBitmapFromBitmaps(bool has_left, bool has_right) {
+     switch (static_cast<int>(has_left) + static_cast<int>(has_right)) {
+       case 0:
+         return HasBitmap::NONE;
+       case 1:
+         return HasBitmap::ONE;
+       default:  // 2
+         return HasBitmap::BOTH;
+     }
+   }
+ };
+
+ // Functional-style bit block visitors.
+
+ template <typename VisitNotNull, typename VisitNull>
+ static Status VisitBitBlocks(const uint8_t* bitmap, int64_t offset, int64_t length,
+                              VisitNotNull&& visit_not_null, VisitNull&& visit_null) {
+   internal::OptionalBitBlockCounter bit_counter(bitmap, offset, length);
+   int64_t position = 0;
+   while (position < length) {
+     internal::BitBlockCount block = bit_counter.NextBlock();
+     if (block.AllSet()) {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         ARROW_RETURN_NOT_OK(visit_not_null(position));
+       }
+     } else if (block.NoneSet()) {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         ARROW_RETURN_NOT_OK(visit_null());
+       }
+     } else {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         if (bit_util::GetBit(bitmap, offset + position)) {
+           ARROW_RETURN_NOT_OK(visit_not_null(position));
+         } else {
+           ARROW_RETURN_NOT_OK(visit_null());
+         }
+       }
+     }
+   }
+   return Status::OK();
+ }
+
+ template <typename VisitNotNull, typename VisitNull>
+ static void VisitBitBlocksVoid(const uint8_t* bitmap, int64_t offset, int64_t length,
+                                VisitNotNull&& visit_not_null, VisitNull&& visit_null) {
+   internal::OptionalBitBlockCounter bit_counter(bitmap, offset, length);
+   int64_t position = 0;
+   while (position < length) {
+     internal::BitBlockCount block = bit_counter.NextBlock();
+     if (block.AllSet()) {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         visit_not_null(position);
+       }
+     } else if (block.NoneSet()) {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         visit_null();
+       }
+     } else {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         if (bit_util::GetBit(bitmap, offset + position)) {
+           visit_not_null(position);
+         } else {
+           visit_null();
+         }
+       }
+     }
+   }
+ }
+
+ template <typename VisitNotNull, typename VisitNull>
+ static Status VisitTwoBitBlocks(const uint8_t* left_bitmap, int64_t left_offset,
+                                 const uint8_t* right_bitmap, int64_t right_offset,
+                                 int64_t length, VisitNotNull&& visit_not_null,
+                                 VisitNull&& visit_null) {
+   if (left_bitmap == NULLPTR || right_bitmap == NULLPTR) {
+     // At most one bitmap is present
+     if (left_bitmap == NULLPTR) {
+       return VisitBitBlocks(right_bitmap, right_offset, length,
+                             std::forward<VisitNotNull>(visit_not_null),
+                             std::forward<VisitNull>(visit_null));
+     } else {
+       return VisitBitBlocks(left_bitmap, left_offset, length,
+                             std::forward<VisitNotNull>(visit_not_null),
+                             std::forward<VisitNull>(visit_null));
+     }
+   }
+   BinaryBitBlockCounter bit_counter(left_bitmap, left_offset, right_bitmap, right_offset,
+                                     length);
+   int64_t position = 0;
+   while (position < length) {
+     BitBlockCount block = bit_counter.NextAndWord();
+     if (block.AllSet()) {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         ARROW_RETURN_NOT_OK(visit_not_null(position));
+       }
+     } else if (block.NoneSet()) {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         ARROW_RETURN_NOT_OK(visit_null());
+       }
+     } else {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         if (bit_util::GetBit(left_bitmap, left_offset + position) &&
+             bit_util::GetBit(right_bitmap, right_offset + position)) {
+           ARROW_RETURN_NOT_OK(visit_not_null(position));
+         } else {
+           ARROW_RETURN_NOT_OK(visit_null());
+         }
+       }
+     }
+   }
+   return Status::OK();
+ }
+
+ template <typename VisitNotNull, typename VisitNull>
+ static void VisitTwoBitBlocksVoid(const uint8_t* left_bitmap, int64_t left_offset,
+                                   const uint8_t* right_bitmap, int64_t right_offset,
+                                   int64_t length, VisitNotNull&& visit_not_null,
+                                   VisitNull&& visit_null) {
+   if (left_bitmap == NULLPTR || right_bitmap == NULLPTR) {
+     // At most one bitmap is present
+     if (left_bitmap == NULLPTR) {
+       return VisitBitBlocksVoid(right_bitmap, right_offset, length,
+                                 std::forward<VisitNotNull>(visit_not_null),
+                                 std::forward<VisitNull>(visit_null));
+     } else {
+       return VisitBitBlocksVoid(left_bitmap, left_offset, length,
+                                 std::forward<VisitNotNull>(visit_not_null),
+                                 std::forward<VisitNull>(visit_null));
+     }
+   }
+   BinaryBitBlockCounter bit_counter(left_bitmap, left_offset, right_bitmap, right_offset,
+                                     length);
+   int64_t position = 0;
+   while (position < length) {
+     BitBlockCount block = bit_counter.NextAndWord();
+     if (block.AllSet()) {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         visit_not_null(position);
+       }
+     } else if (block.NoneSet()) {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         visit_null();
+       }
+     } else {
+       for (int64_t i = 0; i < block.length; ++i, ++position) {
+         if (bit_util::GetBit(left_bitmap, left_offset + position) &&
+             bit_util::GetBit(right_bitmap, right_offset + position)) {
+           visit_not_null(position);
+         } else {
+           visit_null();
+         }
+       }
+     }
+   }
+ }
+
+ }  // namespace internal
+ }  // namespace arrow
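
A minimal sketch (not part of the diff) of the intended iteration pattern: NextWord() hands back at most 64 bits per call and a zero-length block once the bitmap is exhausted, so the loop below sums the popcount of the whole bitmap.

// Sketch only: popcount a 16-bit bitmap word by word.
#include <cstdint>
#include <iostream>

#include "arrow/util/bit_block_counter.h"

int main() {
  const uint8_t bitmap[] = {0xFF, 0x0F};  // 12 of 16 bits set
  arrow::internal::BitBlockCounter counter(bitmap, /*start_offset=*/0, /*length=*/16);
  int64_t set_bits = 0;
  for (auto block = counter.NextWord(); block.length > 0; block = counter.NextWord()) {
    set_bits += block.popcount;
  }
  std::cout << set_bits << std::endl;  // prints 12
  return 0;
}
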
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h ADDED
@@ -0,0 +1,515 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements.  See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership.  The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License.  You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied.  See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cassert>
+ #include <cstdint>
+ #include <cstring>
+ #include <string>
+
+ #include "arrow/util/bit_util.h"
+ #include "arrow/util/bitmap_reader.h"
+ #include "arrow/util/endian.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+ namespace internal {
+
+ struct BitRun {
+   int64_t length;
+   // Whether bits are set at this point.
+   bool set;
+
+   std::string ToString() const {
+     return std::string("{Length: ") + std::to_string(length) +
+            ", set=" + std::to_string(set) + "}";
+   }
+ };
+
+ inline bool operator==(const BitRun& lhs, const BitRun& rhs) {
+   return lhs.length == rhs.length && lhs.set == rhs.set;
+ }
+
+ inline bool operator!=(const BitRun& lhs, const BitRun& rhs) {
+   return lhs.length != rhs.length || lhs.set != rhs.set;
+ }
+
+ class BitRunReaderLinear {
+  public:
+   BitRunReaderLinear(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+       : reader_(bitmap, start_offset, length) {}
+
+   BitRun NextRun() {
+     BitRun rl = {/*length=*/0, reader_.IsSet()};
+     // Advance while the values are equal and not at the end of the list.
+     while (reader_.position() < reader_.length() && reader_.IsSet() == rl.set) {
+       rl.length++;
+       reader_.Next();
+     }
+     return rl;
+   }
+
+  private:
+   BitmapReader reader_;
+ };
+
+ #if ARROW_LITTLE_ENDIAN
+ /// A convenience class for counting the number of contiguous set/unset bits
+ /// in a bitmap.
+ class ARROW_EXPORT BitRunReader {
+  public:
+   /// \brief Constructs new BitRunReader.
+   ///
+   /// \param[in] bitmap source data
+   /// \param[in] start_offset bit offset into the source data
+   /// \param[in] length number of bits to copy
+   BitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length);
+
+   /// Returns a new BitRun containing the number of contiguous
+   /// bits with the same value. length == 0 indicates the
+   /// end of the bitmap.
+   BitRun NextRun() {
+     if (ARROW_PREDICT_FALSE(position_ >= length_)) {
+       return {/*length=*/0, false};
+     }
+     // This implementation relies on an efficient implementation of
+     // CountTrailingZeros and assumes that runs are more common than
+     // not. The logic is to incrementally find the next bit change
+     // from the current position. This is done by zeroing all
+     // bits in word_ up to position_ and using the TrailingZeroCount
+     // to find the index of the next set bit.
+
+     // The runs alternate on each call, so flip the bit.
+     current_run_bit_set_ = !current_run_bit_set_;
+
+     int64_t start_position = position_;
+     int64_t start_bit_offset = start_position & 63;
+     // Invert the word for proper use of CountTrailingZeros and
+     // clear bits so CountTrailingZeros can do its magic.
+     word_ = ~word_ & ~bit_util::LeastSignificantBitMask(start_bit_offset);
+
+     // Go forward until the next change from unset to set.
+     int64_t new_bits = bit_util::CountTrailingZeros(word_) - start_bit_offset;
+     position_ += new_bits;
+
+     if (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
+         ARROW_PREDICT_TRUE(position_ < length_)) {
+       // Continue extending position while we can advance an entire word.
+       // (updates position_ accordingly).
+       AdvanceUntilChange();
+     }
+
+     return {/*length=*/position_ - start_position, current_run_bit_set_};
+   }
+
+  private:
+   void AdvanceUntilChange() {
+     int64_t new_bits = 0;
+     do {
+       // Advance the position of the bitmap for loading.
+       bitmap_ += sizeof(uint64_t);
+       LoadNextWord();
+       new_bits = bit_util::CountTrailingZeros(word_);
+       // Continue calculating run length.
+       position_ += new_bits;
+     } while (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
+              ARROW_PREDICT_TRUE(position_ < length_) && new_bits > 0);
+   }
+
+   void LoadNextWord() { return LoadWord(length_ - position_); }
+
+   // Helper method for loading the next word.
+   void LoadWord(int64_t bits_remaining) {
+     word_ = 0;
+     // we need at least an extra byte in this case.
+     if (ARROW_PREDICT_TRUE(bits_remaining >= 64)) {
+       std::memcpy(&word_, bitmap_, 8);
+     } else {
+       int64_t bytes_to_load = bit_util::BytesForBits(bits_remaining);
+       auto word_ptr = reinterpret_cast<uint8_t*>(&word_);
+       std::memcpy(word_ptr, bitmap_, bytes_to_load);
+       // Ensure stoppage at the last bit in the bitmap by inverting the next
+       // higher order bit.
+       bit_util::SetBitTo(word_ptr, bits_remaining,
+                          !bit_util::GetBit(word_ptr, bits_remaining - 1));
+     }
+
+     // Two cases:
+     //   1. For unset, CountTrailingZeros works naturally so we don't
+     //      invert the word.
+     //   2. Otherwise invert so we can use CountTrailingZeros.
+     if (current_run_bit_set_) {
+       word_ = ~word_;
+     }
+   }
+   const uint8_t* bitmap_;
+   int64_t position_;
+   int64_t length_;
+   uint64_t word_;
+   bool current_run_bit_set_;
+ };
+ #else
+ using BitRunReader = BitRunReaderLinear;
+ #endif
+
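
A minimal sketch (not part of the diff) of how the BitRunReader defined above is consumed: NextRun() alternates between unset and set runs and signals exhaustion with a zero-length run.

// Sketch only: print the alternating runs of one byte.
#include <cstdint>
#include <iostream>

#include "arrow/util/bit_run_reader.h"

int main() {
  const uint8_t bitmap[] = {0x3A};  // LSB-first bits: 0 1 0 1 1 1 0 0
  arrow::internal::BitRunReader reader(bitmap, /*start_offset=*/0, /*length=*/8);
  for (auto run = reader.NextRun(); run.length != 0; run = reader.NextRun()) {
    std::cout << run.ToString() << std::endl;  // e.g. {Length: 1, set=0}
  }
  return 0;
}
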
171
+ struct SetBitRun {
172
+ int64_t position;
173
+ int64_t length;
174
+
175
+ bool AtEnd() const { return length == 0; }
176
+
177
+ std::string ToString() const {
178
+ return std::string("{pos=") + std::to_string(position) +
179
+ ", len=" + std::to_string(length) + "}";
180
+ }
181
+
182
+ bool operator==(const SetBitRun& other) const {
183
+ return position == other.position && length == other.length;
184
+ }
185
+ bool operator!=(const SetBitRun& other) const {
186
+ return position != other.position || length != other.length;
187
+ }
188
+ };
189
+
190
+ template <bool Reverse>
+ class BaseSetBitRunReader {
+ public:
+ /// \brief Constructs a new SetBitRunReader.
+ ///
+ /// \param[in] bitmap source data
+ /// \param[in] start_offset bit offset into the source data
+ /// \param[in] length number of bits to read
+ ARROW_NOINLINE
+ BaseSetBitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+ : bitmap_(util::MakeNonNull(bitmap)),
+ length_(length),
+ remaining_(length_),
+ current_word_(0),
+ current_num_bits_(0) {
+ if (Reverse) {
+ bitmap_ += (start_offset + length) / 8;
+ const int8_t end_bit_offset = static_cast<int8_t>((start_offset + length) % 8);
+ if (length > 0 && end_bit_offset) {
+ // Get LSBs from last byte
+ ++bitmap_;
+ current_num_bits_ =
+ std::min(static_cast<int32_t>(length), static_cast<int32_t>(end_bit_offset));
+ current_word_ = LoadPartialWord(8 - end_bit_offset, current_num_bits_);
+ }
+ } else {
+ bitmap_ += start_offset / 8;
+ const int8_t bit_offset = static_cast<int8_t>(start_offset % 8);
+ if (length > 0 && bit_offset) {
+ // Get MSBs from first byte
+ current_num_bits_ =
+ std::min(static_cast<int32_t>(length), static_cast<int32_t>(8 - bit_offset));
+ current_word_ = LoadPartialWord(bit_offset, current_num_bits_);
+ }
+ }
+ }
+
+ ARROW_NOINLINE
+ SetBitRun NextRun() {
+ int64_t pos = 0;
+ int64_t len = 0;
+ if (current_num_bits_) {
+ const auto run = FindCurrentRun();
+ assert(remaining_ >= 0);
+ if (run.length && current_num_bits_) {
+ // The run ends in current_word_
+ return AdjustRun(run);
+ }
+ pos = run.position;
+ len = run.length;
+ }
+ if (!len) {
+ // We didn't get any ones in current_word_, so we can skip any zeros
+ // in the following words
+ SkipNextZeros();
+ if (remaining_ == 0) {
+ return {0, 0};
+ }
+ assert(current_num_bits_);
+ pos = position();
+ } else if (!current_num_bits_) {
+ if (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
+ current_word_ = LoadFullWord();
+ current_num_bits_ = 64;
+ } else if (remaining_ > 0) {
+ current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
+ current_num_bits_ = static_cast<int32_t>(remaining_);
+ } else {
+ // No bits remaining, perhaps we found a run?
+ return AdjustRun({pos, len});
+ }
+ // If current word starts with a zero, we got a full run
+ if (!(current_word_ & kFirstBit)) {
+ return AdjustRun({pos, len});
+ }
+ }
+ // Current word should now start with a set bit
+ len += CountNextOnes();
+ return AdjustRun({pos, len});
+ }
+
+ protected:
+ int64_t position() const {
+ if (Reverse) {
+ return remaining_;
+ } else {
+ return length_ - remaining_;
+ }
+ }
+
+ SetBitRun AdjustRun(SetBitRun run) {
+ if (Reverse) {
+ assert(run.position >= run.length);
+ run.position -= run.length;
+ }
+ return run;
+ }
+
+ uint64_t LoadFullWord() {
+ uint64_t word;
+ if (Reverse) {
+ bitmap_ -= 8;
+ }
+ memcpy(&word, bitmap_, 8);
+ if (!Reverse) {
+ bitmap_ += 8;
+ }
+ return bit_util::ToLittleEndian(word);
+ }
+
+ uint64_t LoadPartialWord(int8_t bit_offset, int64_t num_bits) {
+ assert(num_bits > 0);
+ uint64_t word = 0;
+ const int64_t num_bytes = bit_util::BytesForBits(num_bits);
+ if (Reverse) {
+ // Read in the most significant bytes of the word
+ bitmap_ -= num_bytes;
+ memcpy(reinterpret_cast<char*>(&word) + 8 - num_bytes, bitmap_, num_bytes);
+ // XXX MostSignificantBitmask
+ return (bit_util::ToLittleEndian(word) << bit_offset) &
+ ~bit_util::LeastSignificantBitMask(64 - num_bits);
+ } else {
+ memcpy(&word, bitmap_, num_bytes);
+ bitmap_ += num_bytes;
+ return (bit_util::ToLittleEndian(word) >> bit_offset) &
+ bit_util::LeastSignificantBitMask(num_bits);
+ }
+ }
+
+ void SkipNextZeros() {
+ assert(current_num_bits_ == 0);
+ while (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
+ current_word_ = LoadFullWord();
+ const auto num_zeros = CountFirstZeros(current_word_);
+ if (num_zeros < 64) {
+ // Run of zeros ends here
+ current_word_ = ConsumeBits(current_word_, num_zeros);
+ current_num_bits_ = 64 - num_zeros;
+ remaining_ -= num_zeros;
+ assert(remaining_ >= 0);
+ assert(current_num_bits_ >= 0);
+ return;
+ }
+ remaining_ -= 64;
+ }
+ // Run of zeros continues in last bitmap word
+ if (remaining_ > 0) {
+ current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
+ current_num_bits_ = static_cast<int32_t>(remaining_);
+ const auto num_zeros =
+ std::min<int32_t>(current_num_bits_, CountFirstZeros(current_word_));
+ current_word_ = ConsumeBits(current_word_, num_zeros);
+ current_num_bits_ -= num_zeros;
+ remaining_ -= num_zeros;
+ assert(remaining_ >= 0);
+ assert(current_num_bits_ >= 0);
+ }
+ }
+
+ int64_t CountNextOnes() {
+ assert(current_word_ & kFirstBit);
+
+ int64_t len;
+ if (~current_word_) {
+ const auto num_ones = CountFirstZeros(~current_word_);
+ assert(num_ones <= current_num_bits_);
+ assert(num_ones <= remaining_);
+ remaining_ -= num_ones;
+ current_word_ = ConsumeBits(current_word_, num_ones);
+ current_num_bits_ -= num_ones;
+ if (current_num_bits_) {
+ // Run of ones ends here
+ return num_ones;
+ }
+ len = num_ones;
+ } else {
+ // current_word_ is all ones
+ remaining_ -= 64;
+ current_num_bits_ = 0;
+ len = 64;
+ }
+
+ while (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
+ current_word_ = LoadFullWord();
+ const auto num_ones = CountFirstZeros(~current_word_);
+ len += num_ones;
+ remaining_ -= num_ones;
+ if (num_ones < 64) {
+ // Run of ones ends here
+ current_word_ = ConsumeBits(current_word_, num_ones);
+ current_num_bits_ = 64 - num_ones;
+ return len;
+ }
+ }
+ // Run of ones continues in last bitmap word
+ if (remaining_ > 0) {
+ current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
+ current_num_bits_ = static_cast<int32_t>(remaining_);
+ const auto num_ones = CountFirstZeros(~current_word_);
+ assert(num_ones <= current_num_bits_);
+ assert(num_ones <= remaining_);
+ current_word_ = ConsumeBits(current_word_, num_ones);
+ current_num_bits_ -= num_ones;
+ remaining_ -= num_ones;
+ len += num_ones;
+ }
+ return len;
+ }
+
+ SetBitRun FindCurrentRun() {
+ // Skip any pending zeros
+ const auto num_zeros = CountFirstZeros(current_word_);
+ if (num_zeros >= current_num_bits_) {
+ remaining_ -= current_num_bits_;
+ current_word_ = 0;
+ current_num_bits_ = 0;
+ return {0, 0};
+ }
+ assert(num_zeros <= remaining_);
+ current_word_ = ConsumeBits(current_word_, num_zeros);
+ current_num_bits_ -= num_zeros;
+ remaining_ -= num_zeros;
+ const int64_t pos = position();
+ // Count any ones
+ const auto num_ones = CountFirstZeros(~current_word_);
+ assert(num_ones <= current_num_bits_);
+ assert(num_ones <= remaining_);
+ current_word_ = ConsumeBits(current_word_, num_ones);
+ current_num_bits_ -= num_ones;
+ remaining_ -= num_ones;
+ return {pos, num_ones};
+ }
+
+ inline int CountFirstZeros(uint64_t word);
+ inline uint64_t ConsumeBits(uint64_t word, int32_t num_bits);
+
+ const uint8_t* bitmap_;
+ const int64_t length_;
+ int64_t remaining_;
+ uint64_t current_word_;
+ int32_t current_num_bits_;
+
+ static constexpr uint64_t kFirstBit = Reverse ? 0x8000000000000000ULL : 1;
+ };
+
+ template <>
+ inline int BaseSetBitRunReader<false>::CountFirstZeros(uint64_t word) {
+ return bit_util::CountTrailingZeros(word);
+ }
+
+ template <>
+ inline int BaseSetBitRunReader<true>::CountFirstZeros(uint64_t word) {
+ return bit_util::CountLeadingZeros(word);
+ }
+
+ template <>
+ inline uint64_t BaseSetBitRunReader<false>::ConsumeBits(uint64_t word, int32_t num_bits) {
+ return word >> num_bits;
+ }
+
+ template <>
+ inline uint64_t BaseSetBitRunReader<true>::ConsumeBits(uint64_t word, int32_t num_bits) {
+ return word << num_bits;
+ }
+
+ using SetBitRunReader = BaseSetBitRunReader</*Reverse=*/false>;
+ using ReverseSetBitRunReader = BaseSetBitRunReader</*Reverse=*/true>;
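Editor's note: the following is an illustrative sketch, not part of the upstream header. It walks a validity bitmap with the SetBitRunReader alias declared above and tallies set bits; the function name and inputs are hypothetical.

// Sketch: count set bits by iterating runs; assumes <bitmap, length> are valid.
inline int64_t CountSetBitsViaRuns(const uint8_t* bitmap, int64_t offset,
                                   int64_t length) {
  arrow::internal::SetBitRunReader reader(bitmap, offset, length);
  int64_t total = 0;
  while (true) {
    const arrow::internal::SetBitRun run = reader.NextRun();
    if (run.AtEnd()) break;  // a zero-length run signals exhaustion
    total += run.length;     // run.position is the index of the run's first set bit
  }
  return total;
}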
+
+ // Functional-style bit run visitors.
+
+ // XXX: Try to make this function small so the compiler can inline and optimize
+ // the `visit` function, which is normally a hot loop with vectorizable code.
+ // - don't inline SetBitRunReader constructor, it doesn't hurt performance
+ // - un-inline NextRun hurts 'many null' cases a bit, but improves normal cases
+ template <typename Visit>
+ inline Status VisitSetBitRuns(const uint8_t* bitmap, int64_t offset, int64_t length,
+ Visit&& visit) {
+ if (bitmap == NULLPTR) {
+ // Assuming all set (as in a null bitmap)
+ return visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
+ }
+ SetBitRunReader reader(bitmap, offset, length);
+ while (true) {
+ const auto run = reader.NextRun();
+ if (run.length == 0) {
+ break;
+ }
+ ARROW_RETURN_NOT_OK(visit(run.position, run.length));
+ }
+ return Status::OK();
+ }
+
+ template <typename Visit>
+ inline void VisitSetBitRunsVoid(const uint8_t* bitmap, int64_t offset, int64_t length,
+ Visit&& visit) {
+ if (bitmap == NULLPTR) {
+ // Assuming all set (as in a null bitmap)
+ visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
+ return;
+ }
+ SetBitRunReader reader(bitmap, offset, length);
+ while (true) {
+ const auto run = reader.NextRun();
+ if (run.length == 0) {
+ break;
+ }
+ visit(run.position, run.length);
+ }
+ }
+
+ template <typename Visit>
+ inline Status VisitSetBitRuns(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
+ int64_t length, Visit&& visit) {
+ return VisitSetBitRuns(bitmap ? bitmap->data() : NULLPTR, offset, length,
+ std::forward<Visit>(visit));
+ }
+
+ template <typename Visit>
+ inline void VisitSetBitRunsVoid(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
+ int64_t length, Visit&& visit) {
+ VisitSetBitRunsVoid(bitmap ? bitmap->data() : NULLPTR, offset, length,
+ std::forward<Visit>(visit));
+ }
+
+ } // namespace internal
+ } // namespace arrow
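Editor's note: a hedged sketch of the visitor entry points above. It sums the values at non-null positions of a primitive array; `values` and `validity` are assumed caller-provided and the helper name is hypothetical.

// Sketch: sum values at positions whose validity bit is set.
inline int64_t SumNonNull(const int32_t* values, const uint8_t* validity,
                          int64_t offset, int64_t length) {
  int64_t sum = 0;
  arrow::internal::VisitSetBitRunsVoid(
      validity, offset, length, [&](int64_t pos, int64_t len) {
        for (int64_t i = 0; i < len; ++i) sum += values[pos + i];
      });
  return sum;
}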
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_stream_utils.h ADDED
@@ -0,0 +1,529 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // From Apache Impala (incubating) as of 2016-01-29
+
+ #pragma once
+
+ #include <algorithm>
+ #include <cstdint>
+ #include <cstring>
+
+ #include "arrow/util/bit_util.h"
+ #include "arrow/util/bpacking.h"
+ #include "arrow/util/logging.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/ubsan.h"
+
+ namespace arrow {
+ namespace bit_util {
+
+ /// Utility class to write bit/byte streams. This class can write data to either be
+ /// bit packed or byte aligned (and a single stream that has a mix of both).
+ /// This class does not allocate memory.
+ class BitWriter {
+ public:
+ /// buffer: buffer to write bits to. Buffer should be preallocated with
+ /// 'buffer_len' bytes.
+ BitWriter(uint8_t* buffer, int buffer_len) : buffer_(buffer), max_bytes_(buffer_len) {
+ Clear();
+ }
+
+ void Clear() {
+ buffered_values_ = 0;
+ byte_offset_ = 0;
+ bit_offset_ = 0;
+ }
+
+ /// The number of bytes written so far, including the current byte (i.e. may include a
+ /// fraction of a byte). Includes buffered values.
+ int bytes_written() const {
+ return byte_offset_ + static_cast<int>(bit_util::BytesForBits(bit_offset_));
+ }
+ uint8_t* buffer() const { return buffer_; }
+ int buffer_len() const { return max_bytes_; }
+
+ /// Writes a value to buffered_values_, flushing to buffer_ if necessary. This is bit
+ /// packed. Returns false if there was not enough space. num_bits must be <= 64.
+ bool PutValue(uint64_t v, int num_bits);
+
+ /// Writes v to the next aligned byte using num_bytes. If T is larger than
+ /// num_bytes, the extra high-order bytes will be ignored. Returns false if
+ /// there was not enough space.
+ /// v is written to buffer_ in little-endian format.
+ template <typename T>
+ bool PutAligned(T v, int num_bytes);
+
+ /// Write a Vlq encoded int to the buffer. Returns false if there was not enough
+ /// room. The value is written byte aligned.
+ /// For more details on vlq:
+ /// en.wikipedia.org/wiki/Variable-length_quantity
+ bool PutVlqInt(uint32_t v);
+
+ // Writes an int zigzag encoded.
+ bool PutZigZagVlqInt(int32_t v);
+
+ /// Write a Vlq encoded int64 to the buffer. Returns false if there was not enough
+ /// room. The value is written byte aligned.
+ /// For more details on vlq:
+ /// en.wikipedia.org/wiki/Variable-length_quantity
+ bool PutVlqInt(uint64_t v);
+
+ // Writes an int64 zigzag encoded.
+ bool PutZigZagVlqInt(int64_t v);
+
+ /// Get a pointer to the next aligned byte and advance the underlying buffer
+ /// by num_bytes.
+ /// Returns NULL if there was not enough space.
+ uint8_t* GetNextBytePtr(int num_bytes = 1);
+
+ /// Flushes all buffered values to the buffer. Call this when done writing to
+ /// the buffer. If 'align' is true, buffered_values_ is reset and any future
+ /// writes will be written to the next byte boundary.
+ void Flush(bool align = false);
+
+ private:
+ uint8_t* buffer_;
+ int max_bytes_;
+
+ /// Bit-packed values are initially written to this variable before being memcpy'd to
+ /// buffer_. This is faster than writing values byte by byte directly to buffer_.
+ uint64_t buffered_values_;
+
+ int byte_offset_; // Offset in buffer_
+ int bit_offset_; // Offset in buffered_values_
+ };
+
+ namespace detail {
+
+ inline uint64_t ReadLittleEndianWord(const uint8_t* buffer, int bytes_remaining) {
+ uint64_t le_value = 0;
+ if (ARROW_PREDICT_TRUE(bytes_remaining >= 8)) {
+ memcpy(&le_value, buffer, 8);
+ } else {
+ memcpy(&le_value, buffer, bytes_remaining);
+ }
+ return arrow::bit_util::FromLittleEndian(le_value);
+ }
+
+ } // namespace detail
+
+ /// Utility class to read bit/byte stream. This class can read bits or bytes
+ /// that are either byte aligned or not. It also has utilities to read multiple
+ /// bytes in one read (e.g. encoded int).
+ class BitReader {
+ public:
+ BitReader() = default;
+
+ /// 'buffer' is the buffer to read from. The buffer's length is 'buffer_len'.
+ BitReader(const uint8_t* buffer, int buffer_len) : BitReader() {
+ Reset(buffer, buffer_len);
+ }
+
+ void Reset(const uint8_t* buffer, int buffer_len) {
+ buffer_ = buffer;
+ max_bytes_ = buffer_len;
+ byte_offset_ = 0;
+ bit_offset_ = 0;
+ buffered_values_ =
+ detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_);
+ }
+
+ /// Gets the next value from the buffer. Returns true if 'v' could be read or false if
+ /// there are not enough bytes left.
+ template <typename T>
+ bool GetValue(int num_bits, T* v);
+
+ /// Get a number of values from the buffer. Return the number of values actually read.
+ template <typename T>
+ int GetBatch(int num_bits, T* v, int batch_size);
+
+ /// Reads a 'num_bytes'-sized value from the buffer and stores it in 'v'. T
+ /// needs to be a little-endian native type and big enough to store
+ /// 'num_bytes'. The value is assumed to be byte-aligned so the stream will
+ /// be advanced to the start of the next byte before 'v' is read. Returns
+ /// false if there are not enough bytes left.
+ /// v is assumed to be stored in buffer_ in little-endian format.
+ template <typename T>
+ bool GetAligned(int num_bytes, T* v);
+
+ /// Advances the stream by a number of bits. Returns true on success or false if there
+ /// are not enough bits left.
+ bool Advance(int64_t num_bits);
+
+ /// Reads a vlq encoded int from the stream. The encoded int must start at
+ /// the beginning of a byte. Return false if there were not enough bytes in
+ /// the buffer.
+ bool GetVlqInt(uint32_t* v);
+
+ // Reads a zigzag encoded int into v.
+ bool GetZigZagVlqInt(int32_t* v);
+
+ /// Reads a vlq encoded int64 from the stream. The encoded int must start at
+ /// the beginning of a byte. Return false if there were not enough bytes in
+ /// the buffer.
+ bool GetVlqInt(uint64_t* v);
+
+ // Reads a zigzag encoded int64 into v.
+ bool GetZigZagVlqInt(int64_t* v);
+
+ /// Returns the number of bytes left in the stream, not including the current
+ /// byte (i.e., there may be an additional fraction of a byte).
+ int bytes_left() const {
+ return max_bytes_ -
+ (byte_offset_ + static_cast<int>(bit_util::BytesForBits(bit_offset_)));
+ }
+
+ /// Maximum byte length of a vlq encoded int
+ static constexpr int kMaxVlqByteLength = 5;
+
+ /// Maximum byte length of a vlq encoded int64
+ static constexpr int kMaxVlqByteLengthForInt64 = 10;
+
+ private:
+ const uint8_t* buffer_;
+ int max_bytes_;
+
+ /// Bytes are memcpy'd from buffer_ and values are read from this variable. This is
+ /// faster than reading values byte by byte directly from buffer_.
+ uint64_t buffered_values_;
+
+ int byte_offset_; // Offset in buffer_
+ int bit_offset_; // Offset in buffered_values_
+ };
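Editor's note: a small roundtrip sketch of the BitWriter and BitReader classes declared above (their inline definitions follow below). The buffer size, values, and helper name are arbitrary.

// Sketch: bit-pack five 3-bit values, then read them back as uint32_t.
inline bool RoundTrip3BitValues() {
  uint8_t buffer[16] = {0};
  arrow::bit_util::BitWriter writer(buffer, 16);
  for (uint64_t v : {1, 5, 7, 0, 3}) {
    if (!writer.PutValue(v, /*num_bits=*/3)) return false;
  }
  writer.Flush();  // push any buffered bits out to `buffer`

  arrow::bit_util::BitReader reader(buffer, 16);
  uint32_t out[5];
  return reader.GetBatch(/*num_bits=*/3, out, /*batch_size=*/5) == 5;
}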
+
+ inline bool BitWriter::PutValue(uint64_t v, int num_bits) {
+ DCHECK_LE(num_bits, 64);
+ if (num_bits < 64) {
+ DCHECK_EQ(v >> num_bits, 0) << "v = " << v << ", num_bits = " << num_bits;
+ }
+
+ if (ARROW_PREDICT_FALSE(byte_offset_ * 8 + bit_offset_ + num_bits > max_bytes_ * 8))
+ return false;
+
+ buffered_values_ |= v << bit_offset_;
+ bit_offset_ += num_bits;
+
+ if (ARROW_PREDICT_FALSE(bit_offset_ >= 64)) {
+ // Flush buffered_values_ and write out bits of v that did not fit
+ buffered_values_ = arrow::bit_util::ToLittleEndian(buffered_values_);
+ memcpy(buffer_ + byte_offset_, &buffered_values_, 8);
+ buffered_values_ = 0;
+ byte_offset_ += 8;
+ bit_offset_ -= 64;
+ buffered_values_ =
+ (num_bits - bit_offset_ == 64) ? 0 : (v >> (num_bits - bit_offset_));
+ }
+ DCHECK_LT(bit_offset_, 64);
+ return true;
+ }
+
+ inline void BitWriter::Flush(bool align) {
+ int num_bytes = static_cast<int>(bit_util::BytesForBits(bit_offset_));
+ DCHECK_LE(byte_offset_ + num_bytes, max_bytes_);
+ auto buffered_values = arrow::bit_util::ToLittleEndian(buffered_values_);
+ memcpy(buffer_ + byte_offset_, &buffered_values, num_bytes);
+
+ if (align) {
+ buffered_values_ = 0;
+ byte_offset_ += num_bytes;
+ bit_offset_ = 0;
+ }
+ }
+
+ inline uint8_t* BitWriter::GetNextBytePtr(int num_bytes) {
+ Flush(/* align */ true);
+ DCHECK_LE(byte_offset_, max_bytes_);
+ if (byte_offset_ + num_bytes > max_bytes_) return NULL;
+ uint8_t* ptr = buffer_ + byte_offset_;
+ byte_offset_ += num_bytes;
+ return ptr;
+ }
+
+ template <typename T>
+ inline bool BitWriter::PutAligned(T val, int num_bytes) {
+ uint8_t* ptr = GetNextBytePtr(num_bytes);
+ if (ptr == NULL) return false;
+ val = arrow::bit_util::ToLittleEndian(val);
+ memcpy(ptr, &val, num_bytes);
+ return true;
+ }
+
+ namespace detail {
+
+ template <typename T>
+ inline void GetValue_(int num_bits, T* v, int max_bytes, const uint8_t* buffer,
+ int* bit_offset, int* byte_offset, uint64_t* buffered_values) {
+ #ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable : 4800)
+ #endif
+ *v = static_cast<T>(bit_util::TrailingBits(*buffered_values, *bit_offset + num_bits) >>
+ *bit_offset);
+ #ifdef _MSC_VER
+ #pragma warning(pop)
+ #endif
+ *bit_offset += num_bits;
+ if (*bit_offset >= 64) {
+ *byte_offset += 8;
+ *bit_offset -= 64;
+
+ *buffered_values =
+ detail::ReadLittleEndianWord(buffer + *byte_offset, max_bytes - *byte_offset);
+ #ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable : 4800 4805)
+ #endif
+ // Read bits of v that crossed into the new buffered_values_
+ if (ARROW_PREDICT_TRUE(num_bits - *bit_offset < static_cast<int>(8 * sizeof(T)))) {
+ // If the shift exponent (num_bits - *bit_offset) were >= the bit width of T,
+ // *v would be unchanged anyway and the oversized shift would be undefined
+ // behavior, hence the guard above.
+ *v = *v | static_cast<T>(bit_util::TrailingBits(*buffered_values, *bit_offset)
+ << (num_bits - *bit_offset));
+ }
+ #ifdef _MSC_VER
+ #pragma warning(pop)
+ #endif
+ DCHECK_LE(*bit_offset, 64);
+ }
+ }
+
+ } // namespace detail
+
+ template <typename T>
+ inline bool BitReader::GetValue(int num_bits, T* v) {
+ return GetBatch(num_bits, v, 1) == 1;
+ }
+
+ template <typename T>
+ inline int BitReader::GetBatch(int num_bits, T* v, int batch_size) {
+ DCHECK(buffer_ != NULL);
+ DCHECK_LE(num_bits, static_cast<int>(sizeof(T) * 8)) << "num_bits: " << num_bits;
+
+ int bit_offset = bit_offset_;
+ int byte_offset = byte_offset_;
+ uint64_t buffered_values = buffered_values_;
+ int max_bytes = max_bytes_;
+ const uint8_t* buffer = buffer_;
+
+ const int64_t needed_bits = num_bits * static_cast<int64_t>(batch_size);
+ constexpr uint64_t kBitsPerByte = 8;
+ const int64_t remaining_bits =
+ static_cast<int64_t>(max_bytes - byte_offset) * kBitsPerByte - bit_offset;
+ if (remaining_bits < needed_bits) {
+ batch_size = static_cast<int>(remaining_bits / num_bits);
+ }
+
+ int i = 0;
+ if (ARROW_PREDICT_FALSE(bit_offset != 0)) {
+ for (; i < batch_size && bit_offset != 0; ++i) {
+ detail::GetValue_(num_bits, &v[i], max_bytes, buffer, &bit_offset, &byte_offset,
+ &buffered_values);
+ }
+ }
+
+ if (sizeof(T) == 4) {
+ int num_unpacked =
+ internal::unpack32(reinterpret_cast<const uint32_t*>(buffer + byte_offset),
+ reinterpret_cast<uint32_t*>(v + i), batch_size - i, num_bits);
+ i += num_unpacked;
+ byte_offset += num_unpacked * num_bits / 8;
+ } else if (sizeof(T) == 8 && num_bits > 32) {
+ // Use unpack64 only if num_bits is larger than 32
+ // TODO (ARROW-13677): improve the performance of internal::unpack64
+ // and remove the restriction on num_bits
+ int num_unpacked =
+ internal::unpack64(buffer + byte_offset, reinterpret_cast<uint64_t*>(v + i),
+ batch_size - i, num_bits);
+ i += num_unpacked;
+ byte_offset += num_unpacked * num_bits / 8;
+ } else {
+ // TODO: revisit this limit if necessary
+ DCHECK_LE(num_bits, 32);
+ const int buffer_size = 1024;
+ uint32_t unpack_buffer[buffer_size];
+ while (i < batch_size) {
+ int unpack_size = std::min(buffer_size, batch_size - i);
+ int num_unpacked =
+ internal::unpack32(reinterpret_cast<const uint32_t*>(buffer + byte_offset),
+ unpack_buffer, unpack_size, num_bits);
+ if (num_unpacked == 0) {
+ break;
+ }
+ for (int k = 0; k < num_unpacked; ++k) {
+ #ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable : 4800)
+ #endif
+ v[i + k] = static_cast<T>(unpack_buffer[k]);
+ #ifdef _MSC_VER
+ #pragma warning(pop)
+ #endif
+ }
+ i += num_unpacked;
+ byte_offset += num_unpacked * num_bits / 8;
+ }
+ }
+
+ buffered_values =
+ detail::ReadLittleEndianWord(buffer + byte_offset, max_bytes - byte_offset);
+
+ for (; i < batch_size; ++i) {
+ detail::GetValue_(num_bits, &v[i], max_bytes, buffer, &bit_offset, &byte_offset,
+ &buffered_values);
+ }
+
+ bit_offset_ = bit_offset;
+ byte_offset_ = byte_offset;
+ buffered_values_ = buffered_values;
+
+ return batch_size;
+ }
+
+ template <typename T>
+ inline bool BitReader::GetAligned(int num_bytes, T* v) {
+ if (ARROW_PREDICT_FALSE(num_bytes > static_cast<int>(sizeof(T)))) {
+ return false;
+ }
+
+ int bytes_read = static_cast<int>(bit_util::BytesForBits(bit_offset_));
+ if (ARROW_PREDICT_FALSE(byte_offset_ + bytes_read + num_bytes > max_bytes_)) {
+ return false;
+ }
+
+ // Advance byte_offset to next unread byte and read num_bytes
+ byte_offset_ += bytes_read;
+ if constexpr (std::is_same_v<T, bool>) {
+ // ARROW-18031: if we're trying to get an aligned bool, just check
+ // the LSB of the next byte and move on. If we memcpy + FromLittleEndian
+ // as usual, we have potential undefined behavior for bools if the value
+ // isn't 0 or 1
+ *v = *(buffer_ + byte_offset_) & 1;
+ } else {
+ memcpy(v, buffer_ + byte_offset_, num_bytes);
+ *v = arrow::bit_util::FromLittleEndian(*v);
+ }
+ byte_offset_ += num_bytes;
+
+ bit_offset_ = 0;
+ buffered_values_ =
+ detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_);
+ return true;
+ }
+
+ inline bool BitReader::Advance(int64_t num_bits) {
+ int64_t bits_required = bit_offset_ + num_bits;
+ int64_t bytes_required = bit_util::BytesForBits(bits_required);
+ if (ARROW_PREDICT_FALSE(bytes_required > max_bytes_ - byte_offset_)) {
+ return false;
+ }
+ byte_offset_ += static_cast<int>(bits_required >> 3);
+ bit_offset_ = static_cast<int>(bits_required & 7);
+ buffered_values_ =
+ detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_);
+ return true;
+ }
+
+ inline bool BitWriter::PutVlqInt(uint32_t v) {
+ bool result = true;
+ while ((v & 0xFFFFFF80UL) != 0UL) {
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>((v & 0x7F) | 0x80), 1);
+ v >>= 7;
+ }
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>(v & 0x7F), 1);
+ return result;
+ }
+
+ inline bool BitReader::GetVlqInt(uint32_t* v) {
+ uint32_t tmp = 0;
+
+ for (int i = 0; i < kMaxVlqByteLength; i++) {
+ uint8_t byte = 0;
+ if (ARROW_PREDICT_FALSE(!GetAligned<uint8_t>(1, &byte))) {
+ return false;
+ }
+ tmp |= static_cast<uint32_t>(byte & 0x7F) << (7 * i);
+
+ if ((byte & 0x80) == 0) {
+ *v = tmp;
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ inline bool BitWriter::PutZigZagVlqInt(int32_t v) {
+ uint32_t u_v = ::arrow::util::SafeCopy<uint32_t>(v);
+ u_v = (u_v << 1) ^ static_cast<uint32_t>(v >> 31);
+ return PutVlqInt(u_v);
+ }
+
+ inline bool BitReader::GetZigZagVlqInt(int32_t* v) {
+ uint32_t u;
+ if (!GetVlqInt(&u)) return false;
+ u = (u >> 1) ^ (~(u & 1) + 1);
+ *v = ::arrow::util::SafeCopy<int32_t>(u);
+ return true;
+ }
+
+ inline bool BitWriter::PutVlqInt(uint64_t v) {
+ bool result = true;
+ while ((v & 0xFFFFFFFFFFFFFF80ULL) != 0ULL) {
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>((v & 0x7F) | 0x80), 1);
+ v >>= 7;
+ }
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>(v & 0x7F), 1);
+ return result;
+ }
+
+ inline bool BitReader::GetVlqInt(uint64_t* v) {
+ uint64_t tmp = 0;
+
+ for (int i = 0; i < kMaxVlqByteLengthForInt64; i++) {
+ uint8_t byte = 0;
+ if (ARROW_PREDICT_FALSE(!GetAligned<uint8_t>(1, &byte))) {
+ return false;
+ }
+ tmp |= static_cast<uint64_t>(byte & 0x7F) << (7 * i);
+
+ if ((byte & 0x80) == 0) {
+ *v = tmp;
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ inline bool BitWriter::PutZigZagVlqInt(int64_t v) {
+ uint64_t u_v = ::arrow::util::SafeCopy<uint64_t>(v);
+ u_v = (u_v << 1) ^ static_cast<uint64_t>(v >> 63);
+ return PutVlqInt(u_v);
+ }
+
+ inline bool BitReader::GetZigZagVlqInt(int64_t* v) {
+ uint64_t u;
+ if (!GetVlqInt(&u)) return false;
+ u = (u >> 1) ^ (~(u & 1) + 1);
+ *v = ::arrow::util::SafeCopy<int64_t>(u);
+ return true;
+ }
+
+ } // namespace bit_util
+ } // namespace arrow
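Editor's note: a brief sketch exercising the VLQ/zigzag helpers defined above; it encodes one negative int32 and decodes it back. The helper name is hypothetical.

// Sketch: zigzag-VLQ roundtrip of a negative int32.
inline bool ZigZagRoundTrip() {
  uint8_t buf[8] = {0};
  arrow::bit_util::BitWriter writer(buf, 8);
  if (!writer.PutZigZagVlqInt(static_cast<int32_t>(-3))) return false;  // zigzags to 5
  writer.Flush(/*align=*/true);

  arrow::bit_util::BitReader reader(buf, 8);
  int32_t decoded = 0;
  return reader.GetZigZagVlqInt(&decoded) && decoded == -3;
}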
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h ADDED
@@ -0,0 +1,370 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #if defined(_MSC_VER)
+ #if defined(_M_AMD64) || defined(_M_X64)
+ #include <intrin.h> // IWYU pragma: keep
+ #include <nmmintrin.h>
+ #endif
+
+ #pragma intrinsic(_BitScanReverse)
+ #pragma intrinsic(_BitScanForward)
+ #define ARROW_POPCOUNT64 __popcnt64
+ #define ARROW_POPCOUNT32 __popcnt
+ #else
+ #define ARROW_POPCOUNT64 __builtin_popcountll
+ #define ARROW_POPCOUNT32 __builtin_popcount
+ #endif
+
+ #include <cstdint>
+ #include <type_traits>
+
+ #include "arrow/util/macros.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+ namespace detail {
+
+ template <typename Integer>
+ typename std::make_unsigned<Integer>::type as_unsigned(Integer x) {
+ return static_cast<typename std::make_unsigned<Integer>::type>(x);
+ }
+
+ } // namespace detail
+
+ namespace bit_util {
+
+ // The number of set bits in a given unsigned byte value, pre-computed
+ //
+ // Generated with the following Python code
+ // output = 'static constexpr uint8_t kBytePopcount[] = {{{0}}};'
+ // popcounts = [str(bin(i).count('1')) for i in range(0, 256)]
+ // print(output.format(', '.join(popcounts)))
+ static constexpr uint8_t kBytePopcount[] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3,
+ 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4,
+ 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4,
+ 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5,
+ 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2,
+ 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5,
+ 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4,
+ 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6,
+ 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8};
+
+ static inline uint64_t PopCount(uint64_t bitmap) { return ARROW_POPCOUNT64(bitmap); }
+ static inline uint32_t PopCount(uint32_t bitmap) { return ARROW_POPCOUNT32(bitmap); }
+
+ //
+ // Bit-related computations on integer values
+ //
+
+ // Returns the ceil of value/divisor
+ constexpr int64_t CeilDiv(int64_t value, int64_t divisor) {
+ return (value == 0) ? 0 : 1 + (value - 1) / divisor;
+ }
+
+ // Return the number of bytes needed to fit the given number of bits
+ constexpr int64_t BytesForBits(int64_t bits) {
+ // This formula avoids integer overflow on very large `bits`
+ return (bits >> 3) + ((bits & 7) != 0);
+ }
+
+ constexpr bool IsPowerOf2(int64_t value) {
+ return value > 0 && (value & (value - 1)) == 0;
+ }
+
+ constexpr bool IsPowerOf2(uint64_t value) {
+ return value > 0 && (value & (value - 1)) == 0;
+ }
+
+ // Returns the smallest power of two that contains n. If n is already a
+ // power of two, it is returned as is.
+ static inline int64_t NextPower2(int64_t n) {
+ // Taken from
+ // http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+ n--;
+ n |= n >> 1;
+ n |= n >> 2;
+ n |= n >> 4;
+ n |= n >> 8;
+ n |= n >> 16;
+ n |= n >> 32;
+ n++;
+ return n;
+ }
+
+ constexpr bool IsMultipleOf64(int64_t n) { return (n & 63) == 0; }
+
+ constexpr bool IsMultipleOf8(int64_t n) { return (n & 7) == 0; }
+
+ // Returns a mask for the bit_index lower order bits.
+ // Only valid for bit_index in the range [0, 64).
+ constexpr uint64_t LeastSignificantBitMask(int64_t bit_index) {
+ return (static_cast<uint64_t>(1) << bit_index) - 1;
+ }
+
+ // Returns 'value' rounded up to the nearest multiple of 'factor'
+ constexpr int64_t RoundUp(int64_t value, int64_t factor) {
+ return CeilDiv(value, factor) * factor;
+ }
+
+ // Returns 'value' rounded down to the nearest multiple of 'factor'
+ constexpr int64_t RoundDown(int64_t value, int64_t factor) {
+ return (value / factor) * factor;
+ }
+
+ // Returns 'value' rounded up to the nearest multiple of 'factor' when factor
+ // is a power of two.
+ // The result is undefined on overflow, i.e. if `value > 2**64 - factor`,
+ // since we cannot return the correct result which would be 2**64.
+ constexpr int64_t RoundUpToPowerOf2(int64_t value, int64_t factor) {
+ // DCHECK(value >= 0);
+ // DCHECK(IsPowerOf2(factor));
+ return (value + (factor - 1)) & ~(factor - 1);
+ }
+
+ constexpr uint64_t RoundUpToPowerOf2(uint64_t value, uint64_t factor) {
+ // DCHECK(IsPowerOf2(factor));
+ return (value + (factor - 1)) & ~(factor - 1);
+ }
+
+ constexpr int64_t RoundUpToMultipleOf8(int64_t num) { return RoundUpToPowerOf2(num, 8); }
+
+ constexpr int64_t RoundUpToMultipleOf64(int64_t num) {
+ return RoundUpToPowerOf2(num, 64);
+ }
+
+ // Returns the number of bytes covering a sliced bitmap. Find the length
+ // rounded to cover full bytes on both extremities.
+ //
+ // The following example represents a slice (offset=10, length=9)
+ //
+ // 0 8 16 24
+ // |-------|-------|------|
+ // [ ] (slice)
+ // [ ] (same slice aligned to bytes bounds, length=16)
+ //
+ // The covering bytes is the length (in bytes) of this new aligned slice.
+ constexpr int64_t CoveringBytes(int64_t offset, int64_t length) {
+ return (bit_util::RoundUp(length + offset, 8) - bit_util::RoundDown(offset, 8)) / 8;
+ }
167
+
168
+ // Returns the 'num_bits' least-significant bits of 'v'.
169
+ static inline uint64_t TrailingBits(uint64_t v, int num_bits) {
170
+ if (ARROW_PREDICT_FALSE(num_bits == 0)) return 0;
171
+ if (ARROW_PREDICT_FALSE(num_bits >= 64)) return v;
172
+ int n = 64 - num_bits;
173
+ return (v << n) >> n;
174
+ }
175
+
176
+ /// \brief Count the number of leading zeros in an unsigned integer.
177
+ static inline int CountLeadingZeros(uint32_t value) {
178
+ #if defined(__clang__) || defined(__GNUC__)
179
+ if (value == 0) return 32;
180
+ return static_cast<int>(__builtin_clz(value));
181
+ #elif defined(_MSC_VER)
182
+ unsigned long index; // NOLINT
183
+ if (_BitScanReverse(&index, static_cast<unsigned long>(value))) { // NOLINT
184
+ return 31 - static_cast<int>(index);
185
+ } else {
186
+ return 32;
187
+ }
188
+ #else
189
+ int bitpos = 0;
190
+ while (value != 0) {
191
+ value >>= 1;
192
+ ++bitpos;
193
+ }
194
+ return 32 - bitpos;
195
+ #endif
196
+ }
197
+
198
+ static inline int CountLeadingZeros(uint64_t value) {
199
+ #if defined(__clang__) || defined(__GNUC__)
200
+ if (value == 0) return 64;
201
+ return static_cast<int>(__builtin_clzll(value));
202
+ #elif defined(_MSC_VER)
203
+ unsigned long index; // NOLINT
204
+ if (_BitScanReverse64(&index, value)) { // NOLINT
205
+ return 63 - static_cast<int>(index);
206
+ } else {
207
+ return 64;
208
+ }
209
+ #else
210
+ int bitpos = 0;
211
+ while (value != 0) {
212
+ value >>= 1;
213
+ ++bitpos;
214
+ }
215
+ return 64 - bitpos;
216
+ #endif
217
+ }
218
+
219
+ static inline int CountTrailingZeros(uint32_t value) {
220
+ #if defined(__clang__) || defined(__GNUC__)
221
+ if (value == 0) return 32;
222
+ return static_cast<int>(__builtin_ctzl(value));
223
+ #elif defined(_MSC_VER)
224
+ unsigned long index; // NOLINT
225
+ if (_BitScanForward(&index, value)) {
226
+ return static_cast<int>(index);
227
+ } else {
228
+ return 32;
229
+ }
230
+ #else
231
+ int bitpos = 0;
232
+ if (value) {
233
+ while (value & 1 == 0) {
234
+ value >>= 1;
235
+ ++bitpos;
236
+ }
237
+ } else {
238
+ bitpos = 32;
239
+ }
240
+ return bitpos;
241
+ #endif
242
+ }
243
+
244
+ static inline int CountTrailingZeros(uint64_t value) {
245
+ #if defined(__clang__) || defined(__GNUC__)
246
+ if (value == 0) return 64;
247
+ return static_cast<int>(__builtin_ctzll(value));
248
+ #elif defined(_MSC_VER)
249
+ unsigned long index; // NOLINT
250
+ if (_BitScanForward64(&index, value)) {
251
+ return static_cast<int>(index);
252
+ } else {
253
+ return 64;
254
+ }
255
+ #else
256
+ int bitpos = 0;
257
+ if (value) {
258
+ while (value & 1 == 0) {
259
+ value >>= 1;
260
+ ++bitpos;
261
+ }
262
+ } else {
263
+ bitpos = 64;
264
+ }
265
+ return bitpos;
266
+ #endif
267
+ }
268
+
269
+ // Returns the minimum number of bits needed to represent an unsigned value
270
+ static inline int NumRequiredBits(uint64_t x) { return 64 - CountLeadingZeros(x); }
271
+
272
+ // Returns ceil(log2(x)).
273
+ static inline int Log2(uint64_t x) {
274
+ // DCHECK_GT(x, 0);
275
+ return NumRequiredBits(x - 1);
276
+ }
277
+
278
+ //
279
+ // Utilities for reading and writing individual bits by their index
280
+ // in a memory area.
281
+ //
282
+
283
+ // Bitmask selecting the k-th bit in a byte
284
+ static constexpr uint8_t kBitmask[] = {1, 2, 4, 8, 16, 32, 64, 128};
285
+
286
+ // the bitwise complement version of kBitmask
287
+ static constexpr uint8_t kFlippedBitmask[] = {254, 253, 251, 247, 239, 223, 191, 127};
288
+
289
+ // Bitmask selecting the (k - 1) preceding bits in a byte
290
+ static constexpr uint8_t kPrecedingBitmask[] = {0, 1, 3, 7, 15, 31, 63, 127};
291
+ static constexpr uint8_t kPrecedingWrappingBitmask[] = {255, 1, 3, 7, 15, 31, 63, 127};
292
+
293
+ // the bitwise complement version of kPrecedingBitmask
294
+ static constexpr uint8_t kTrailingBitmask[] = {255, 254, 252, 248, 240, 224, 192, 128};
295
+
296
+ static constexpr bool GetBit(const uint8_t* bits, uint64_t i) {
297
+ return (bits[i >> 3] >> (i & 0x07)) & 1;
298
+ }
299
+
300
+ // Gets the i-th bit from a byte. Should only be used with i <= 7.
301
+ static constexpr bool GetBitFromByte(uint8_t byte, uint8_t i) {
302
+ return byte & kBitmask[i];
303
+ }
304
+
305
+ static inline void ClearBit(uint8_t* bits, int64_t i) {
306
+ bits[i / 8] &= kFlippedBitmask[i % 8];
307
+ }
308
+
309
+ static inline void SetBit(uint8_t* bits, int64_t i) { bits[i / 8] |= kBitmask[i % 8]; }
310
+
311
+ static inline void SetBitTo(uint8_t* bits, int64_t i, bool bit_is_set) {
312
+ // https://graphics.stanford.edu/~seander/bithacks.html
313
+ // "Conditionally set or clear bits without branching"
314
+ // NOTE: this seems to confuse Valgrind as it reads from potentially
315
+ // uninitialized memory
316
+ bits[i / 8] ^= static_cast<uint8_t>(-static_cast<uint8_t>(bit_is_set) ^ bits[i / 8]) &
317
+ kBitmask[i % 8];
318
+ }
319
+
320
+ /// \brief set or clear a range of bits quickly
321
+ ARROW_EXPORT
322
+ void SetBitsTo(uint8_t* bits, int64_t start_offset, int64_t length, bool bits_are_set);
323
+
324
+ /// \brief Sets all bits in the bitmap to true
325
+ ARROW_EXPORT
326
+ void SetBitmap(uint8_t* data, int64_t offset, int64_t length);
327
+
328
+ /// \brief Clears all bits in the bitmap (set to false)
329
+ ARROW_EXPORT
330
+ void ClearBitmap(uint8_t* data, int64_t offset, int64_t length);
331
+
332
+ /// Returns a mask with lower i bits set to 1. If i >= sizeof(Word)*8, all-ones will be
333
+ /// returned
334
+ /// ex:
335
+ /// ref: https://stackoverflow.com/a/59523400
336
+ template <typename Word>
337
+ constexpr Word PrecedingWordBitmask(unsigned int const i) {
338
+ return static_cast<Word>(static_cast<Word>(i < sizeof(Word) * 8)
339
+ << (i & (sizeof(Word) * 8 - 1))) -
340
+ 1;
341
+ }
342
+ static_assert(PrecedingWordBitmask<uint8_t>(0) == 0x00, "");
343
+ static_assert(PrecedingWordBitmask<uint8_t>(4) == 0x0f, "");
344
+ static_assert(PrecedingWordBitmask<uint8_t>(8) == 0xff, "");
345
+ static_assert(PrecedingWordBitmask<uint16_t>(8) == 0x00ff, "");
346
+
347
+ /// \brief Create a word with low `n` bits from `low` and high `sizeof(Word)-n` bits
348
+ /// from `high`.
349
+ /// Word ret
350
+ /// for (i = 0; i < sizeof(Word)*8; i++){
351
+ /// ret[i]= i < n ? low[i]: high[i];
352
+ /// }
353
+ template <typename Word>
354
+ constexpr Word SpliceWord(int n, Word low, Word high) {
355
+ return (high & ~PrecedingWordBitmask<Word>(n)) | (low & PrecedingWordBitmask<Word>(n));
356
+ }
357
+
358
+ /// \brief Pack integers into a bitmap in batches of 8
359
+ template <int batch_size>
360
+ void PackBits(const uint32_t* values, uint8_t* out) {
361
+ for (int i = 0; i < batch_size / 8; ++i) {
362
+ *out++ = static_cast<uint8_t>(values[0] | values[1] << 1 | values[2] << 2 |
363
+ values[3] << 3 | values[4] << 4 | values[5] << 5 |
364
+ values[6] << 6 | values[7] << 7);
365
+ values += 8;
366
+ }
367
+ }
368
+
369
+ } // namespace bit_util
370
+ } // namespace arrow
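Editor's note: a quick sketch of the per-bit accessors defined above, operating on a two-byte bitmap; indices and the helper name are arbitrary.

// Sketch: set, flip, and test individual bits in a 16-bit bitmap.
inline bool BitAccessDemo() {
  uint8_t bits[2] = {0, 0};
  arrow::bit_util::SetBit(bits, 3);          // bits[0] becomes 0b00001000
  arrow::bit_util::SetBitTo(bits, 9, true);  // bits[1] becomes 0b00000010
  arrow::bit_util::ClearBit(bits, 3);        // bits[0] back to 0
  return !arrow::bit_util::GetBit(bits, 3) && arrow::bit_util::GetBit(bits, 9);
}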
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h ADDED
@@ -0,0 +1,43 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+ #include <memory>
+ #include <vector>
+
+ #include "arrow/result.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+ namespace internal {
+
+ /// \brief Generate a bitmap with every position set to `value`, except the
+ /// one at `straggler_pos`, which is set to the opposite.
+ ARROW_EXPORT
+ Result<std::shared_ptr<Buffer>> BitmapAllButOne(MemoryPool* pool, int64_t length,
+ int64_t straggler_pos, bool value = true);
+
+ /// \brief Convert a vector of bytes to a bitmap buffer
+ ARROW_EXPORT
+ Result<std::shared_ptr<Buffer>> BytesToBits(const std::vector<uint8_t>&,
+ MemoryPool* pool = default_memory_pool());
+
+ } // namespace internal
+ } // namespace arrow
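Editor's note: a hedged usage sketch of BytesToBits above. It assumes arrow/buffer.h is included and that a nonzero input byte maps to a set bit (an assumption about the implementation, not stated in this header); the helper name is hypothetical.

// Sketch: pack one byte per bit into a 5-bit validity bitmap.
inline arrow::Result<std::shared_ptr<arrow::Buffer>> MakeSmallBitmap() {
  const std::vector<uint8_t> bytes = {0, 1, 1, 0, 1};  // assumed: nonzero => set bit
  return arrow::internal::BytesToBits(bytes);          // uses the default memory pool
}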
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h ADDED
@@ -0,0 +1,112 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+ #include <memory>
+
+ #include "arrow/buffer.h"
+ #include "arrow/memory_pool.h"
+ #include "arrow/result.h"
+ #include "arrow/util/bit_util.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+ namespace internal {
+
+ // A std::generate() like function to write sequential bits into a bitmap area.
+ // Bits preceding the bitmap area are preserved, bits following the bitmap
+ // area may be clobbered.
+
+ template <class Generator>
+ void GenerateBits(uint8_t* bitmap, int64_t start_offset, int64_t length, Generator&& g) {
+ if (length == 0) {
+ return;
+ }
+ uint8_t* cur = bitmap + start_offset / 8;
+ uint8_t bit_mask = bit_util::kBitmask[start_offset % 8];
+ uint8_t current_byte = *cur & bit_util::kPrecedingBitmask[start_offset % 8];
+
+ for (int64_t index = 0; index < length; ++index) {
+ const bool bit = g();
+ current_byte = bit ? (current_byte | bit_mask) : current_byte;
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
+ if (bit_mask == 0) {
+ bit_mask = 1;
+ *cur++ = current_byte;
+ current_byte = 0;
+ }
+ }
+ if (bit_mask != 1) {
+ *cur++ = current_byte;
+ }
+ }
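Editor's note: a minimal sketch of driving GenerateBits with a stateful lambda, writing an alternating pattern into the first ten bits of a caller-provided bitmap; the helper name is hypothetical. GenerateBitsUnrolled below takes the same arguments.

// Sketch: fill bits [0, 10) with 1,0,1,0,... via a generator lambda.
inline void FillAlternating(uint8_t* bitmap) {
  bool next = true;
  arrow::internal::GenerateBits(bitmap, /*start_offset=*/0, /*length=*/10,
                                [&]() -> bool {
                                  const bool bit = next;
                                  next = !next;
                                  return bit;
                                });
}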
+
+ // Like GenerateBits(), but unrolls its main loop for higher performance.
+
+ template <class Generator>
+ void GenerateBitsUnrolled(uint8_t* bitmap, int64_t start_offset, int64_t length,
+ Generator&& g) {
+ static_assert(std::is_same<decltype(std::declval<Generator>()()), bool>::value,
+ "Functor passed to GenerateBitsUnrolled must return bool");
+
+ if (length == 0) {
+ return;
+ }
+ uint8_t current_byte;
+ uint8_t* cur = bitmap + start_offset / 8;
+ const uint64_t start_bit_offset = start_offset % 8;
+ uint8_t bit_mask = bit_util::kBitmask[start_bit_offset];
+ int64_t remaining = length;
+
+ if (bit_mask != 0x01) {
+ current_byte = *cur & bit_util::kPrecedingBitmask[start_bit_offset];
+ while (bit_mask != 0 && remaining > 0) {
+ current_byte |= g() * bit_mask;
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
+ --remaining;
+ }
+ *cur++ = current_byte;
+ }
+
+ int64_t remaining_bytes = remaining / 8;
+ uint8_t out_results[8];
+ while (remaining_bytes-- > 0) {
+ for (int i = 0; i < 8; ++i) {
+ out_results[i] = g();
+ }
+ *cur++ = static_cast<uint8_t>(out_results[0] | out_results[1] << 1 |
+ out_results[2] << 2 | out_results[3] << 3 |
+ out_results[4] << 4 | out_results[5] << 5 |
+ out_results[6] << 6 | out_results[7] << 7);
+ }
+
+ int64_t remaining_bits = remaining % 8;
+ if (remaining_bits) {
+ current_byte = 0;
+ bit_mask = 0x01;
+ while (remaining_bits-- > 0) {
+ current_byte |= g() * bit_mask;
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
+ }
+ *cur++ = current_byte;
+ }
+ }
+
+ } // namespace internal
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_reader.h ADDED
@@ -0,0 +1,273 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cassert>
+ #include <cstdint>
+ #include <cstring>
+
+ #include "arrow/buffer.h"
+ #include "arrow/util/bit_util.h"
+ #include "arrow/util/endian.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace internal {
+
+ class BitmapReader {
+ public:
+ BitmapReader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+ : bitmap_(bitmap), position_(0), length_(length) {
+ current_byte_ = 0;
+ byte_offset_ = start_offset / 8;
+ bit_offset_ = start_offset % 8;
+ if (length > 0) {
+ current_byte_ = bitmap[byte_offset_];
+ }
+ }
+
+ bool IsSet() const { return (current_byte_ & (1 << bit_offset_)) != 0; }
+
+ bool IsNotSet() const { return (current_byte_ & (1 << bit_offset_)) == 0; }
+
+ void Next() {
+ ++bit_offset_;
+ ++position_;
+ if (ARROW_PREDICT_FALSE(bit_offset_ == 8)) {
+ bit_offset_ = 0;
+ ++byte_offset_;
+ if (ARROW_PREDICT_TRUE(position_ < length_)) {
+ current_byte_ = bitmap_[byte_offset_];
+ }
+ }
+ }
+
+ int64_t position() const { return position_; }
+
+ int64_t length() const { return length_; }
+
+ private:
+ const uint8_t* bitmap_;
+ int64_t position_;
+ int64_t length_;
+
+ uint8_t current_byte_;
+ int64_t byte_offset_;
+ int64_t bit_offset_;
+ };
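Editor's note: a short sketch of the sequential BitmapReader above, counting set bits one position at a time; the helper name is hypothetical.

// Sketch: count set bits by advancing BitmapReader bit by bit.
inline int64_t CountSetBits(const uint8_t* bitmap, int64_t offset, int64_t length) {
  arrow::internal::BitmapReader reader(bitmap, offset, length);
  int64_t count = 0;
  for (int64_t i = 0; i < length; ++i) {
    if (reader.IsSet()) ++count;
    reader.Next();
  }
  return count;
}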
+
+ // XXX Cannot name it BitmapWordReader because the name is already used
+ // in bitmap_ops.cc
+
+ class BitmapUInt64Reader {
+ public:
+ BitmapUInt64Reader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+ : bitmap_(util::MakeNonNull(bitmap) + start_offset / 8),
+ num_carry_bits_(8 - start_offset % 8),
+ length_(length),
+ remaining_length_(length_),
+ carry_bits_(0) {
+ if (length_ > 0) {
+ // Load carry bits from the first byte's MSBs
+ if (length_ >= num_carry_bits_) {
+ carry_bits_ =
+ LoadPartialWord(static_cast<int8_t>(8 - num_carry_bits_), num_carry_bits_);
+ } else {
+ carry_bits_ = LoadPartialWord(static_cast<int8_t>(8 - num_carry_bits_), length_);
+ }
+ }
+ }
+
+ uint64_t NextWord() {
+ if (ARROW_PREDICT_TRUE(remaining_length_ >= 64 + num_carry_bits_)) {
+ // We can load a full word
+ uint64_t next_word = LoadFullWord();
+ // Carry bits come first, then the (64 - num_carry_bits_) LSBs from next_word
+ uint64_t word = carry_bits_ | (next_word << num_carry_bits_);
+ carry_bits_ = next_word >> (64 - num_carry_bits_);
+ remaining_length_ -= 64;
+ return word;
+ } else if (remaining_length_ > num_carry_bits_) {
+ // We can load a partial word
+ uint64_t next_word =
+ LoadPartialWord(/*bit_offset=*/0, remaining_length_ - num_carry_bits_);
+ uint64_t word = carry_bits_ | (next_word << num_carry_bits_);
+ carry_bits_ = next_word >> (64 - num_carry_bits_);
+ remaining_length_ = std::max<int64_t>(remaining_length_ - 64, 0);
+ return word;
+ } else {
+ remaining_length_ = 0;
+ return carry_bits_;
+ }
+ }
+
+ int64_t position() const { return length_ - remaining_length_; }
+
+ int64_t length() const { return length_; }
+
+ private:
+ uint64_t LoadFullWord() {
+ uint64_t word;
+ memcpy(&word, bitmap_, 8);
+ bitmap_ += 8;
+ return bit_util::ToLittleEndian(word);
+ }
+
+ uint64_t LoadPartialWord(int8_t bit_offset, int64_t num_bits) {
+ uint64_t word = 0;
+ const int64_t num_bytes = bit_util::BytesForBits(num_bits);
+ memcpy(&word, bitmap_, num_bytes);
+ bitmap_ += num_bytes;
+ return (bit_util::ToLittleEndian(word) >> bit_offset) &
+ bit_util::LeastSignificantBitMask(num_bits);
+ }
+
+ const uint8_t* bitmap_;
+ const int64_t num_carry_bits_; // in [1, 8]
+ const int64_t length_;
+ int64_t remaining_length_;
+ uint64_t carry_bits_;
+ };
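Editor's note: a word-at-a-time sketch of BitmapUInt64Reader above; the final partial word comes back zero-padded, so PopCount from bit_util.h stays correct. The helper name is hypothetical.

// Sketch: popcount a bitmap region in 64-bit chunks.
inline int64_t PopCountBitmap(const uint8_t* bitmap, int64_t offset, int64_t length) {
  arrow::internal::BitmapUInt64Reader reader(bitmap, offset, length);
  int64_t total = 0;
  for (int64_t i = 0; i < length; i += 64) {
    total += arrow::bit_util::PopCount(reader.NextWord());
  }
  return total;
}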
+
147
+ // BitmapWordReader here is faster than BitmapUInt64Reader (in bitmap_reader.h)
148
+ // on sufficiently large inputs. However, it has a larger prolog / epilog overhead
149
+ // and should probably not be used for small bitmaps.
150
+
151
+ template <typename Word, bool may_have_byte_offset = true>
152
+ class BitmapWordReader {
153
+ public:
154
+ BitmapWordReader() = default;
155
+ BitmapWordReader(const uint8_t* bitmap, int64_t offset, int64_t length)
156
+ : offset_(static_cast<int64_t>(may_have_byte_offset) * (offset % 8)),
157
+ bitmap_(bitmap + offset / 8),
158
+ bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)) {
159
+ // decrement word count by one as we may touch two adjacent words in one iteration
160
+ nwords_ = length / (sizeof(Word) * 8) - 1;
161
+ if (nwords_ < 0) {
162
+ nwords_ = 0;
163
+ }
164
+ trailing_bits_ = static_cast<int>(length - nwords_ * sizeof(Word) * 8);
165
+ trailing_bytes_ = static_cast<int>(bit_util::BytesForBits(trailing_bits_));
166
+
167
+ if (nwords_ > 0) {
168
+ current_data.word_ = load<Word>(bitmap_);
169
+ } else if (length > 0) {
170
+ current_data.epi.byte_ = load<uint8_t>(bitmap_);
171
+ }
172
+ }
173
+
174
+ Word NextWord() {
175
+ bitmap_ += sizeof(Word);
176
+ const Word next_word = load<Word>(bitmap_);
177
+ Word word = current_data.word_;
178
+ if (may_have_byte_offset && offset_) {
179
+ // combine two adjacent words into one word
180
+ // |<------ next ----->|<---- current ---->|
181
+ // +-------------+-----+-------------+-----+
182
+ // | --- | A | B | --- |
183
+ // +-------------+-----+-------------+-----+
184
+ // | | offset
185
+ // v v
186
+ // +-----+-------------+
187
+ // | A | B |
188
+ // +-----+-------------+
189
+ // |<------ word ----->|
190
+ word >>= offset_;
191
+ word |= next_word << (sizeof(Word) * 8 - offset_);
192
+ }
193
+ current_data.word_ = next_word;
194
+ return word;
195
+ }
196
+
197
+ uint8_t NextTrailingByte(int& valid_bits) {
198
+ uint8_t byte;
199
+ assert(trailing_bits_ > 0);
200
+
201
+ if (trailing_bits_ <= 8) {
202
+ // last byte
203
+ valid_bits = trailing_bits_;
204
+ trailing_bits_ = 0;
205
+ byte = 0;
206
+ internal::BitmapReader reader(bitmap_, offset_, valid_bits);
207
+ for (int i = 0; i < valid_bits; ++i) {
208
+ byte >>= 1;
209
+ if (reader.IsSet()) {
210
+ byte |= 0x80;
211
+ }
212
+ reader.Next();
213
+ }
214
+ byte >>= (8 - valid_bits);
215
+ } else {
216
+ ++bitmap_;
217
+ const uint8_t next_byte = load<uint8_t>(bitmap_);
218
+ byte = current_data.epi.byte_;
219
+ if (may_have_byte_offset && offset_) {
220
+ byte >>= offset_;
221
+ byte |= next_byte << (8 - offset_);
222
+ }
223
+ current_data.epi.byte_ = next_byte;
224
+ trailing_bits_ -= 8;
225
+ trailing_bytes_--;
226
+ valid_bits = 8;
227
+ }
228
+ return byte;
229
+ }
230
+
231
+ int64_t words() const { return nwords_; }
232
+ int trailing_bytes() const { return trailing_bytes_; }
233
+
234
+ private:
235
+ int64_t offset_;
236
+ const uint8_t* bitmap_;
237
+
238
+ const uint8_t* bitmap_end_;
239
+ int64_t nwords_;
240
+ int trailing_bits_;
241
+ int trailing_bytes_;
242
+ union {
243
+ Word word_;
244
+ struct {
245
+ #if ARROW_LITTLE_ENDIAN == 0
246
+ uint8_t padding_bytes_[sizeof(Word) - 1];
247
+ #endif
248
+ uint8_t byte_;
249
+ } epi;
250
+ } current_data;
251
+
252
+ template <typename DType>
253
+ DType load(const uint8_t* bitmap) {
254
+ assert(bitmap + sizeof(DType) <= bitmap_end_);
255
+ return bit_util::ToLittleEndian(util::SafeLoadAs<DType>(bitmap));
256
+ }
257
+ };
258
+
259
+ /// \brief Index into a possibly nonexistent bitmap
260
+ struct OptionalBitIndexer {
261
+ const uint8_t* bitmap;
262
+ const int64_t offset;
263
+
264
+ explicit OptionalBitIndexer(const uint8_t* buffer = NULLPTR, int64_t offset = 0)
265
+ : bitmap(buffer), offset(offset) {}
266
+
267
+ bool operator[](int64_t i) const {
268
+ return bitmap == NULLPTR || bit_util::GetBit(bitmap, offset + i);
269
+ }
270
+ };
271
+
272
+ } // namespace internal
273
+ } // namespace arrow
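For orientation, here is a minimal usage sketch of the reader above (not part of the header; the helper name CountSetBits64 is made up for illustration):

#include <cstdint>

#include "arrow/util/bit_util.h"       // bit_util::PopCount
#include "arrow/util/bitmap_reader.h"  // BitmapUInt64Reader

// Count the set bits of a bitmap slice, consuming it 64 bits at a time.
int64_t CountSetBits64(const uint8_t* bitmap, int64_t offset, int64_t length) {
  arrow::internal::BitmapUInt64Reader reader(bitmap, offset, length);
  int64_t count = 0;
  while (reader.position() < reader.length()) {
    // Bits past `length` in the final word come back as zero, so a plain
    // popcount over every word stays correct.
    count += arrow::bit_util::PopCount(reader.NextWord());
  }
  return count;
}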
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h ADDED
@@ -0,0 +1,286 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cassert>
+ #include <cstdint>
+ #include <cstring>
+
+ #include "arrow/util/bit_util.h"
+ #include "arrow/util/endian.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace internal {
+
+ class BitmapWriter {
+   // A sequential bitwise writer that preserves surrounding bit values.
+
+  public:
+   BitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
+       : bitmap_(bitmap), position_(0), length_(length) {
+     byte_offset_ = start_offset / 8;
+     bit_mask_ = bit_util::kBitmask[start_offset % 8];
+     if (length > 0) {
+       current_byte_ = bitmap[byte_offset_];
+     } else {
+       current_byte_ = 0;
+     }
+   }
+
+   void Set() { current_byte_ |= bit_mask_; }
+
+   void Clear() { current_byte_ &= bit_mask_ ^ 0xFF; }
+
+   void Next() {
+     bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
+     ++position_;
+     if (bit_mask_ == 0) {
+       // Finished this byte, need advancing
+       bit_mask_ = 0x01;
+       bitmap_[byte_offset_++] = current_byte_;
+       if (ARROW_PREDICT_TRUE(position_ < length_)) {
+         current_byte_ = bitmap_[byte_offset_];
+       }
+     }
+   }
+
+   void Finish() {
+     // Store the current byte if we didn't go past the end of the bitmap storage
+     if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) {
+       bitmap_[byte_offset_] = current_byte_;
+     }
+   }
+
+   int64_t position() const { return position_; }
+
+  private:
+   uint8_t* bitmap_;
+   int64_t position_;
+   int64_t length_;
+
+   uint8_t current_byte_;
+   uint8_t bit_mask_;
+   int64_t byte_offset_;
+ };
+
+ class FirstTimeBitmapWriter {
+   // Like BitmapWriter, but any bit values *following* the bits written
+   // might be clobbered. It is hence faster than BitmapWriter, and can
+   // also avoid false positives with Valgrind.
+
+  public:
+   FirstTimeBitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
+       : bitmap_(bitmap), position_(0), length_(length) {
+     current_byte_ = 0;
+     byte_offset_ = start_offset / 8;
+     bit_mask_ = bit_util::kBitmask[start_offset % 8];
+     if (length > 0) {
+       current_byte_ =
+           bitmap[byte_offset_] & bit_util::kPrecedingBitmask[start_offset % 8];
+     } else {
+       current_byte_ = 0;
+     }
+   }
+
+   /// Appends number_of_bits from word to valid_bits and valid_bits_offset.
+   ///
+   /// \param[in] word The LSB bitmap to append. Any bits past number_of_bits are assumed
+   /// to be unset (i.e. 0).
+   /// \param[in] number_of_bits The number of bits to append from word.
+   void AppendWord(uint64_t word, int64_t number_of_bits) {
+     if (ARROW_PREDICT_FALSE(number_of_bits == 0)) {
+       return;
+     }
+
+     // Location that the first byte needs to be written to.
+     uint8_t* append_position = bitmap_ + byte_offset_;
+
+     // Update state variables except for current_byte_ here.
+     position_ += number_of_bits;
+     int64_t bit_offset = bit_util::CountTrailingZeros(static_cast<uint32_t>(bit_mask_));
+     bit_mask_ = bit_util::kBitmask[(bit_offset + number_of_bits) % 8];
+     byte_offset_ += (bit_offset + number_of_bits) / 8;
+
+     if (bit_offset != 0) {
+       // We are in the middle of the byte. This code updates the byte and shifts
+       // bits appropriately within word so it can be memcpy'd below.
+       int64_t bits_to_carry = 8 - bit_offset;
+       // Carry over bits from word to current_byte_. We assume any extra bits in word
+       // are unset, so no additional accounting is needed for when number_of_bits <
+       // bits_to_carry.
+       current_byte_ |= (word & bit_util::kPrecedingBitmask[bits_to_carry]) << bit_offset;
+       // Check if everything is transferred into current_byte_.
+       if (ARROW_PREDICT_FALSE(number_of_bits < bits_to_carry)) {
+         return;
+       }
+       *append_position = current_byte_;
+       append_position++;
+       // Move the carry bits off of word.
+       word = word >> bits_to_carry;
+       number_of_bits -= bits_to_carry;
+     }
+     word = bit_util::ToLittleEndian(word);
+     int64_t bytes_for_word = ::arrow::bit_util::BytesForBits(number_of_bits);
+     std::memcpy(append_position, &word, bytes_for_word);
+     // At this point, the previous current_byte_ has been written to bitmap_.
+     // The new current_byte_ is either the last relevant byte in 'word'
+     // or cleared if the new position is byte aligned (i.e. a fresh byte).
+     if (bit_mask_ == 0x1) {
+       current_byte_ = 0;
+     } else {
+       current_byte_ = *(append_position + bytes_for_word - 1);
+     }
+   }
+
+   void Set() { current_byte_ |= bit_mask_; }
+
+   void Clear() {}
+
+   void Next() {
+     bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
+     ++position_;
+     if (bit_mask_ == 0) {
+       // Finished this byte, need advancing
+       bit_mask_ = 0x01;
+       bitmap_[byte_offset_++] = current_byte_;
+       current_byte_ = 0;
+     }
+   }
+
+   void Finish() {
+     // Store the current byte if we didn't go past the end of the bitmap storage
+     if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) {
+       bitmap_[byte_offset_] = current_byte_;
+     }
+   }
+
+   int64_t position() const { return position_; }
+
+  private:
+   uint8_t* bitmap_;
+   int64_t position_;
+   int64_t length_;
+
+   uint8_t current_byte_;
+   uint8_t bit_mask_;
+   int64_t byte_offset_;
+ };
+
+ template <typename Word, bool may_have_byte_offset = true>
+ class BitmapWordWriter {
+  public:
+   BitmapWordWriter() = default;
+   BitmapWordWriter(uint8_t* bitmap, int64_t offset, int64_t length)
+       : offset_(static_cast<int64_t>(may_have_byte_offset) * (offset % 8)),
+         bitmap_(bitmap + offset / 8),
+         bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)),
+         mask_((1U << offset_) - 1) {
+     if (offset_) {
+       if (length >= static_cast<int>(sizeof(Word) * 8)) {
+         current_data.word_ = load<Word>(bitmap_);
+       } else if (length > 0) {
+         current_data.epi.byte_ = load<uint8_t>(bitmap_);
+       }
+     }
+   }
+
+   void PutNextWord(Word word) {
+     if (may_have_byte_offset && offset_) {
+       // split one word into two adjacent words, don't touch unused bits
+       //               |<------ word ----->|
+       //               +-----+-------------+
+       //               |  A  |      B      |
+       //               +-----+-------------+
+       //                  |         |
+       //                  v         v offset
+       // +-------------+-----+-------------+-----+
+       // |     ---     |  A  |      B      | --- |
+       // +-------------+-----+-------------+-----+
+       // |<------ next ----->|<---- current ---->|
+       word = (word << offset_) | (word >> (sizeof(Word) * 8 - offset_));
+       Word next_word = load<Word>(bitmap_ + sizeof(Word));
+       current_data.word_ = (current_data.word_ & mask_) | (word & ~mask_);
+       next_word = (next_word & ~mask_) | (word & mask_);
+       store<Word>(bitmap_, current_data.word_);
+       store<Word>(bitmap_ + sizeof(Word), next_word);
+       current_data.word_ = next_word;
+     } else {
+       store<Word>(bitmap_, word);
+     }
+     bitmap_ += sizeof(Word);
+   }
+
+   void PutNextTrailingByte(uint8_t byte, int valid_bits) {
+     if (valid_bits == 8) {
+       if (may_have_byte_offset && offset_) {
+         byte = (byte << offset_) | (byte >> (8 - offset_));
+         uint8_t next_byte = load<uint8_t>(bitmap_ + 1);
+         current_data.epi.byte_ = (current_data.epi.byte_ & mask_) | (byte & ~mask_);
+         next_byte = (next_byte & ~mask_) | (byte & mask_);
+         store<uint8_t>(bitmap_, current_data.epi.byte_);
+         store<uint8_t>(bitmap_ + 1, next_byte);
+         current_data.epi.byte_ = next_byte;
+       } else {
+         store<uint8_t>(bitmap_, byte);
+       }
+       ++bitmap_;
+     } else {
+       assert(valid_bits > 0);
+       assert(valid_bits < 8);
+       assert(bitmap_ + bit_util::BytesForBits(offset_ + valid_bits) <= bitmap_end_);
+       internal::BitmapWriter writer(bitmap_, offset_, valid_bits);
+       for (int i = 0; i < valid_bits; ++i) {
+         (byte & 0x01) ? writer.Set() : writer.Clear();
+         writer.Next();
+         byte >>= 1;
+       }
+       writer.Finish();
+     }
+   }
+
+  private:
+   int64_t offset_;
+   uint8_t* bitmap_;
+
+   const uint8_t* bitmap_end_;
+   uint64_t mask_;
+   union {
+     Word word_;
+     struct {
+ #if ARROW_LITTLE_ENDIAN == 0
+       uint8_t padding_bytes_[sizeof(Word) - 1];
+ #endif
+       uint8_t byte_;
+     } epi;
+   } current_data;
+
+   template <typename DType>
+   DType load(const uint8_t* bitmap) {
+     assert(bitmap + sizeof(DType) <= bitmap_end_);
+     return bit_util::ToLittleEndian(util::SafeLoadAs<DType>(bitmap));
+   }
+
+   template <typename DType>
+   void store(uint8_t* bitmap, DType data) {
+     assert(bitmap + sizeof(DType) <= bitmap_end_);
+     util::SafeStore(bitmap, bit_util::FromLittleEndian(data));
+   }
+ };
+
+ }  // namespace internal
+ }  // namespace arrow
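A minimal sketch of the bit-by-bit writer API above (not part of the header; the helper name WriteValidityBits is made up). It assumes the caller owns a bitmap buffer large enough for offset + valid.size() bits:

#include <cstdint>
#include <vector>

#include "arrow/util/bitmap_writer.h"

// Write `valid` into `bitmap` starting at bit `offset`. Bits preceding the
// range are preserved; bits following it may be clobbered (see class comment).
void WriteValidityBits(const std::vector<bool>& valid, uint8_t* bitmap,
                       int64_t offset) {
  arrow::internal::FirstTimeBitmapWriter writer(
      bitmap, offset, static_cast<int64_t>(valid.size()));
  for (bool is_valid : valid) {
    if (is_valid) writer.Set();  // Clear() is a no-op: fresh bytes start at zero
    writer.Next();
  }
  writer.Finish();  // flush the trailing partial byte
}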
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h ADDED
@@ -0,0 +1,89 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <algorithm>
+ #include <array>
+ #include <bitset>
+ #include <cassert>
+ #include <cstdint>
+ #include <cstring>
+ #include <memory>
+ #include <string>
+ #include <string_view>
+ #include <type_traits>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/buffer.h"
+ #include "arrow/memory_pool.h"
+ #include "arrow/result.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/bit_util.h"
+ #include "arrow/util/compare.h"
+ #include "arrow/util/functional.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/string_builder.h"
+ #include "arrow/util/type_traits.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+ namespace internal {
+
+ /// \brief Store a stack of bitsets efficiently. The top bitset may be
+ /// accessed and its bits may be modified, but it may not be resized.
+ class BitsetStack {
+  public:
+   using reference = typename std::vector<bool>::reference;
+
+   /// \brief push a bitset onto the stack
+   /// \param size number of bits in the next bitset
+   /// \param value initial value for bits in the pushed bitset
+   void Push(int size, bool value) {
+     offsets_.push_back(bit_count());
+     bits_.resize(bit_count() + size, value);
+   }
+
+   /// \brief number of bits in the bitset at the top of the stack
+   int TopSize() const {
+     if (offsets_.size() == 0) return 0;
+     return bit_count() - offsets_.back();
+   }
+
+   /// \brief pop a bitset off the stack
+   void Pop() {
+     bits_.resize(offsets_.back());
+     offsets_.pop_back();
+   }
+
+   /// \brief get the value of a bit in the top bitset
+   /// \param i index of the bit to access
+   bool operator[](int i) const { return bits_[offsets_.back() + i]; }
+
+   /// \brief get a mutable reference to a bit in the top bitset
+   /// \param i index of the bit to access
+   reference operator[](int i) { return bits_[offsets_.back() + i]; }
+
+  private:
+   int bit_count() const { return static_cast<int>(bits_.size()); }
+   std::vector<bool> bits_;
+   std::vector<int> offsets_;
+ };
+
+ }  // namespace internal
+ }  // namespace arrow
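A small illustrative sketch of the stack discipline above (not part of the header):

#include "arrow/util/bitset_stack.h"

void BitsetStackExample() {
  arrow::internal::BitsetStack stack;
  stack.Push(/*size=*/3, /*value=*/false);  // top bitset: {0, 0, 0}
  stack[1] = true;                          // top bitset: {0, 1, 0}
  stack.Push(/*size=*/2, /*value=*/true);   // new top:    {1, 1}
  stack.Pop();                              // back to:    {0, 1, 0}
  bool middle = stack[1];                   // true
  (void)middle;
  stack.Pop();
}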
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h ADDED
@@ -0,0 +1,34 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include "arrow/util/endian.h"
+ #include "arrow/util/visibility.h"
+
+ #include <stdint.h>
+
+ namespace arrow {
+ namespace internal {
+
+ ARROW_EXPORT
+ int unpack32(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
+ ARROW_EXPORT
+ int unpack64(const uint8_t* in, uint64_t* out, int batch_size, int num_bits);
+
+ }  // namespace internal
+ }  // namespace arrow
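A hedged usage sketch of unpack32 (not part of the header). It assumes little-endian bit-packed input and that the return value is the number of values unpacked; neither is documented by the declarations above:

#include <cstdint>

#include "arrow/util/bpacking.h"

// Unpack 32 values that were bit-packed at 3 bits each. `packed` is assumed
// to hold at least 32 * 3 = 96 bits (three 32-bit words) of packed data.
void UnpackThreeBitValues(const uint32_t* packed) {
  uint32_t values[32];
  // Assumption: the return value is the number of values unpacked.
  int n = arrow::internal::unpack32(packed, values, /*batch_size=*/32,
                                    /*num_bits=*/3);
  (void)n;
  (void)values;
}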
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx2.h ADDED
@@ -0,0 +1,28 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <stdint.h>
+
+ namespace arrow {
+ namespace internal {
+
+ int unpack32_avx2(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
+
+ }  // namespace internal
+ }  // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h ADDED
@@ -0,0 +1,28 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <stdint.h>
+
+ namespace arrow {
+ namespace internal {
+
+ int unpack32_avx512(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
+
+ }  // namespace internal
+ }  // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h ADDED
@@ -0,0 +1,88 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+
+ #include "arrow/type_fwd.h"
+
+ namespace arrow {
+
+ namespace util {
+
+ /// \brief The sum of bytes in each buffer referenced by the array
+ ///
+ /// Note: An array may only reference a portion of a buffer.
+ /// This method will overestimate in this case and return the
+ /// byte size of the entire buffer.
+ /// Note: If a buffer is referenced multiple times then it will
+ /// only be counted once.
+ ARROW_EXPORT int64_t TotalBufferSize(const ArrayData& array_data);
+ /// \brief The sum of bytes in each buffer referenced by the array
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT int64_t TotalBufferSize(const Array& array);
+ /// \brief The sum of bytes in each buffer referenced by the array
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT int64_t TotalBufferSize(const ChunkedArray& chunked_array);
+ /// \brief The sum of bytes in each buffer referenced by the batch
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT int64_t TotalBufferSize(const RecordBatch& record_batch);
+ /// \brief The sum of bytes in each buffer referenced by the table
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT int64_t TotalBufferSize(const Table& table);
+
+ /// \brief Calculate the buffer ranges referenced by the array
+ ///
+ /// These ranges will take into account array offsets
+ ///
+ /// The ranges may contain duplicates
+ ///
+ /// Dictionary arrays will ignore the offset of their containing array
+ ///
+ /// The return value will be a struct array corresponding to the schema:
+ /// schema({field("start", uint64()), field("offset", uint64()), field("length",
+ /// uint64())})
+ ARROW_EXPORT Result<std::shared_ptr<Array>> ReferencedRanges(const ArrayData& array_data);
+
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
+ ///
+ /// Unlike TotalBufferSize this method will account for array
+ /// offsets.
+ ///
+ /// If buffers are shared between arrays then the shared
+ /// portion will be counted multiple times.
+ ///
+ /// Dictionary arrays will always be counted in their entirety
+ /// even if the array only references a portion of the dictionary.
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const ArrayData& array_data);
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const Array& array_data);
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const ChunkedArray& array_data);
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const RecordBatch& array_data);
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const Table& array_data);
+
+ }  // namespace util
+
+ }  // namespace arrow
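A sketch contrasting the two accounting modes above on a sliced array (illustrative only; CompareSizes is a made-up helper):

#include <memory>

#include "arrow/array.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/util/byte_size.h"

// Compare whole-buffer accounting with offset-aware accounting on a slice.
arrow::Status CompareSizes(const std::shared_ptr<arrow::Array>& arr) {
  std::shared_ptr<arrow::Array> slice = arr->Slice(0, arr->length() / 2);
  // Counts each referenced buffer in full, even though the slice uses only part.
  int64_t total = arrow::util::TotalBufferSize(*slice);
  // Accounts for the slice's offset and length (usually smaller than `total`,
  // though shared buffers can be counted more than once here).
  ARROW_ASSIGN_OR_RAISE(int64_t referenced,
                        arrow::util::ReferencedBufferSize(*slice));
  (void)total;
  (void)referenced;
  return arrow::Status::OK();
}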
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h ADDED
@@ -0,0 +1,241 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+ #include <limits>
+ #include <memory>
+ #include <optional>
+ #include <string>
+
+ #include "arrow/result.h"
+ #include "arrow/status.h"
+ #include "arrow/util/type_fwd.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+ namespace util {
+
+ constexpr int kUseDefaultCompressionLevel = std::numeric_limits<int>::min();
+
+ /// \brief Streaming compressor interface
+ ///
+ class ARROW_EXPORT Compressor {
+  public:
+   virtual ~Compressor() = default;
+
+   struct CompressResult {
+     int64_t bytes_read;
+     int64_t bytes_written;
+   };
+   struct FlushResult {
+     int64_t bytes_written;
+     bool should_retry;
+   };
+   struct EndResult {
+     int64_t bytes_written;
+     bool should_retry;
+   };
+
+   /// \brief Compress some input.
+   ///
+   /// If bytes_read is 0 on return, then a larger output buffer should be supplied.
+   virtual Result<CompressResult> Compress(int64_t input_len, const uint8_t* input,
+                                           int64_t output_len, uint8_t* output) = 0;
+
+   /// \brief Flush part of the compressed output.
+   ///
+   /// If should_retry is true on return, Flush() should be called again
+   /// with a larger buffer.
+   virtual Result<FlushResult> Flush(int64_t output_len, uint8_t* output) = 0;
+
+   /// \brief End compressing, doing whatever is necessary to end the stream.
+   ///
+   /// If should_retry is true on return, End() should be called again
+   /// with a larger buffer. Otherwise, the Compressor should not be used anymore.
+   ///
+   /// End() implies Flush().
+   virtual Result<EndResult> End(int64_t output_len, uint8_t* output) = 0;
+
+   // XXX add methods for buffer size heuristics?
+ };
+
+ /// \brief Streaming decompressor interface
+ ///
+ class ARROW_EXPORT Decompressor {
+  public:
+   virtual ~Decompressor() = default;
+
+   struct DecompressResult {
+     // XXX is need_more_output necessary? (Brotli?)
+     int64_t bytes_read;
+     int64_t bytes_written;
+     bool need_more_output;
+   };
+
+   /// \brief Decompress some input.
+   ///
+   /// If need_more_output is true on return, a larger output buffer needs
+   /// to be supplied.
+   virtual Result<DecompressResult> Decompress(int64_t input_len, const uint8_t* input,
+                                               int64_t output_len, uint8_t* output) = 0;
+
+   /// \brief Return whether the compressed stream is finished.
+   ///
+   /// This is a heuristic. If true is returned, then it is guaranteed
+   /// that the stream is finished. If false is returned, however, it may
+   /// simply be that the underlying library isn't able to provide the information.
+   virtual bool IsFinished() = 0;
+
+   /// \brief Reinitialize decompressor, making it ready for a new compressed stream.
+   virtual Status Reset() = 0;
+
+   // XXX add methods for buffer size heuristics?
+ };
+
+ /// \brief Compression codec options
+ class ARROW_EXPORT CodecOptions {
+  public:
+   explicit CodecOptions(int compression_level = kUseDefaultCompressionLevel)
+       : compression_level(compression_level) {}
+
+   virtual ~CodecOptions() = default;
+
+   int compression_level;
+ };
+
+ // ----------------------------------------------------------------------
+ // GZip codec options implementation
+
+ enum class GZipFormat {
+   ZLIB,
+   DEFLATE,
+   GZIP,
+ };
+
+ class ARROW_EXPORT GZipCodecOptions : public CodecOptions {
+  public:
+   GZipFormat gzip_format = GZipFormat::GZIP;
+   std::optional<int> window_bits;
+ };
+
+ // ----------------------------------------------------------------------
+ // Brotli codec options implementation
+
+ class ARROW_EXPORT BrotliCodecOptions : public CodecOptions {
+  public:
+   std::optional<int> window_bits;
+ };
+
+ /// \brief Compression codec
+ class ARROW_EXPORT Codec {
+  public:
+   virtual ~Codec() = default;
+
+   /// \brief Return special value to indicate that a codec implementation
+   /// should use its default compression level
+   static int UseDefaultCompressionLevel();
+
+   /// \brief Return a string name for compression type
+   static const std::string& GetCodecAsString(Compression::type t);
+
+   /// \brief Return compression type for name (all lower case)
+   static Result<Compression::type> GetCompressionType(const std::string& name);
+
+   /// \brief Create a codec for the given compression algorithm with CodecOptions
+   static Result<std::unique_ptr<Codec>> Create(
+       Compression::type codec, const CodecOptions& codec_options = CodecOptions{});
+
+   /// \brief Create a codec for the given compression algorithm
+   static Result<std::unique_ptr<Codec>> Create(Compression::type codec,
+                                                int compression_level);
+
+   /// \brief Return true if support for indicated codec has been enabled
+   static bool IsAvailable(Compression::type codec);
+
+   /// \brief Return true if indicated codec supports setting a compression level
+   static bool SupportsCompressionLevel(Compression::type codec);
+
+   /// \brief Return the smallest supported compression level for the codec
+   /// Note: This function creates a temporary Codec instance
+   static Result<int> MinimumCompressionLevel(Compression::type codec);
+
+   /// \brief Return the largest supported compression level for the codec
+   /// Note: This function creates a temporary Codec instance
+   static Result<int> MaximumCompressionLevel(Compression::type codec);
+
+   /// \brief Return the default compression level
+   /// Note: This function creates a temporary Codec instance
+   static Result<int> DefaultCompressionLevel(Compression::type codec);
+
+   /// \brief Return the smallest supported compression level
+   virtual int minimum_compression_level() const = 0;
+
+   /// \brief Return the largest supported compression level
+   virtual int maximum_compression_level() const = 0;
+
+   /// \brief Return the default compression level
+   virtual int default_compression_level() const = 0;
+
+   /// \brief One-shot decompression function
+   ///
+   /// output_buffer_len must be correct and therefore be obtained in advance.
+   /// The actual decompressed length is returned.
+   ///
+   /// \note One-shot decompression is not always compatible with streaming
+   /// compression. Depending on the codec (e.g. LZ4), different formats may
+   /// be used.
+   virtual Result<int64_t> Decompress(int64_t input_len, const uint8_t* input,
+                                      int64_t output_buffer_len,
+                                      uint8_t* output_buffer) = 0;
+
+   /// \brief One-shot compression function
+   ///
+   /// output_buffer_len must first have been computed using MaxCompressedLen().
+   /// The actual compressed length is returned.
+   ///
+   /// \note One-shot compression is not always compatible with streaming
+   /// decompression. Depending on the codec (e.g. LZ4), different formats may
+   /// be used.
+   virtual Result<int64_t> Compress(int64_t input_len, const uint8_t* input,
+                                    int64_t output_buffer_len, uint8_t* output_buffer) = 0;
+
+   virtual int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) = 0;
+
+   /// \brief Create a streaming compressor instance
+   virtual Result<std::shared_ptr<Compressor>> MakeCompressor() = 0;
+
+   /// \brief Create a streaming decompressor instance
+   virtual Result<std::shared_ptr<Decompressor>> MakeDecompressor() = 0;
+
+   /// \brief This Codec's compression type
+   virtual Compression::type compression_type() const = 0;
+
+   /// \brief The name of this Codec's compression type
+   const std::string& name() const { return GetCodecAsString(compression_type()); }
+
+   /// \brief This Codec's compression level, if applicable
+   virtual int compression_level() const { return UseDefaultCompressionLevel(); }
+
+  private:
+   /// \brief Initializes the codec's resources.
+   virtual Status Init();
+ };
+
+ }  // namespace util
+ }  // namespace arrow
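A sketch of the one-shot Codec API above (not part of the header), assuming ZSTD support was compiled into the build:

#include <cstdint>
#include <memory>
#include <vector>

#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/util/compression.h"

// One-shot round trip: compress into a MaxCompressedLen()-sized buffer, then
// decompress into a buffer of the exact original size.
arrow::Status RoundTrip(const uint8_t* data, int64_t length) {
  ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::util::Codec> codec,
                        arrow::util::Codec::Create(arrow::Compression::ZSTD));
  std::vector<uint8_t> compressed(codec->MaxCompressedLen(length, data));
  ARROW_ASSIGN_OR_RAISE(
      int64_t compressed_len,
      codec->Compress(length, data, compressed.size(), compressed.data()));
  std::vector<uint8_t> roundtrip(length);  // exact size must be known in advance
  ARROW_ASSIGN_OR_RAISE(int64_t decompressed_len,
                        codec->Decompress(compressed_len, compressed.data(),
                                          roundtrip.size(), roundtrip.data()));
  return decompressed_len == length
             ? arrow::Status::OK()
             : arrow::Status::Invalid("round trip length mismatch");
}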
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h ADDED
@@ -0,0 +1,411 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/array.h"
+ #include "arrow/chunked_array.h"
+ #include "arrow/status.h"
+ #include "arrow/type.h"
+ #include "arrow/type_traits.h"
+ #include "arrow/util/checked_cast.h"
+ #include "arrow/visit_type_inline.h"
+
+ namespace arrow {
+ namespace internal {
+
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
+ static Result<std::unique_ptr<BaseConverter>> MakeConverter(
+     std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
+     MemoryPool* pool);
+
+ template <typename Input, typename Options>
+ class Converter {
+  public:
+   using Self = Converter<Input, Options>;
+   using InputType = Input;
+   using OptionsType = Options;
+
+   virtual ~Converter() = default;
+
+   Status Construct(std::shared_ptr<DataType> type, OptionsType options,
+                    MemoryPool* pool) {
+     type_ = std::move(type);
+     options_ = std::move(options);
+     return Init(pool);
+   }
+
+   virtual Status Append(InputType value) { return Status::NotImplemented("Append"); }
+
+   virtual Status Extend(InputType values, int64_t size, int64_t offset = 0) {
+     return Status::NotImplemented("Extend");
+   }
+
+   virtual Status ExtendMasked(InputType values, InputType mask, int64_t size,
+                               int64_t offset = 0) {
+     return Status::NotImplemented("ExtendMasked");
+   }
+
+   const std::shared_ptr<ArrayBuilder>& builder() const { return builder_; }
+
+   const std::shared_ptr<DataType>& type() const { return type_; }
+
+   OptionsType options() const { return options_; }
+
+   bool may_overflow() const { return may_overflow_; }
+
+   bool rewind_on_overflow() const { return rewind_on_overflow_; }
+
+   virtual Status Reserve(int64_t additional_capacity) {
+     return builder_->Reserve(additional_capacity);
+   }
+
+   Status AppendNull() { return builder_->AppendNull(); }
+
+   virtual Result<std::shared_ptr<Array>> ToArray() { return builder_->Finish(); }
+
+   virtual Result<std::shared_ptr<Array>> ToArray(int64_t length) {
+     ARROW_ASSIGN_OR_RAISE(auto arr, this->ToArray());
+     return arr->Slice(0, length);
+   }
+
+   virtual Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
+     ARROW_ASSIGN_OR_RAISE(auto array, ToArray());
+     std::vector<std::shared_ptr<Array>> chunks = {std::move(array)};
+     return std::make_shared<ChunkedArray>(chunks);
+   }
+
+  protected:
+   virtual Status Init(MemoryPool* pool) { return Status::OK(); }
+
+   std::shared_ptr<DataType> type_;
+   std::shared_ptr<ArrayBuilder> builder_;
+   OptionsType options_;
+   bool may_overflow_ = false;
+   bool rewind_on_overflow_ = false;
+ };
+
+ template <typename ArrowType, typename BaseConverter>
+ class PrimitiveConverter : public BaseConverter {
+  public:
+   using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
+
+  protected:
+   Status Init(MemoryPool* pool) override {
+     this->builder_ = std::make_shared<BuilderType>(this->type_, pool);
+     // Narrow variable-sized binary types may overflow
+     this->may_overflow_ = is_binary_like(this->type_->id());
+     primitive_type_ = checked_cast<const ArrowType*>(this->type_.get());
+     primitive_builder_ = checked_cast<BuilderType*>(this->builder_.get());
+     return Status::OK();
+   }
+
+   const ArrowType* primitive_type_;
+   BuilderType* primitive_builder_;
+ };
+
+ template <typename ArrowType, typename BaseConverter,
+           template <typename...> class ConverterTrait>
+ class ListConverter : public BaseConverter {
+  public:
+   using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
+   using ConverterType = typename ConverterTrait<ArrowType>::type;
+
+  protected:
+   Status Init(MemoryPool* pool) override {
+     list_type_ = checked_cast<const ArrowType*>(this->type_.get());
+     ARROW_ASSIGN_OR_RAISE(value_converter_,
+                           (MakeConverter<BaseConverter, ConverterTrait>(
+                               list_type_->value_type(), this->options_, pool)));
+     this->builder_ =
+         std::make_shared<BuilderType>(pool, value_converter_->builder(), this->type_);
+     list_builder_ = checked_cast<BuilderType*>(this->builder_.get());
+     // Narrow list types may overflow
+     this->may_overflow_ = this->rewind_on_overflow_ =
+         sizeof(typename ArrowType::offset_type) < sizeof(int64_t);
+     return Status::OK();
+   }
+
+   const ArrowType* list_type_;
+   BuilderType* list_builder_;
+   std::unique_ptr<BaseConverter> value_converter_;
+ };
+
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
+ class StructConverter : public BaseConverter {
+  public:
+   using ConverterType = typename ConverterTrait<StructType>::type;
+
+   Status Reserve(int64_t additional_capacity) override {
+     ARROW_RETURN_NOT_OK(this->builder_->Reserve(additional_capacity));
+     for (const auto& child : children_) {
+       ARROW_RETURN_NOT_OK(child->Reserve(additional_capacity));
+     }
+     return Status::OK();
+   }
+
+  protected:
+   Status Init(MemoryPool* pool) override {
+     std::unique_ptr<BaseConverter> child_converter;
+     std::vector<std::shared_ptr<ArrayBuilder>> child_builders;
+
+     struct_type_ = checked_cast<const StructType*>(this->type_.get());
+     for (const auto& field : struct_type_->fields()) {
+       ARROW_ASSIGN_OR_RAISE(child_converter,
+                             (MakeConverter<BaseConverter, ConverterTrait>(
+                                 field->type(), this->options_, pool)));
+       this->may_overflow_ |= child_converter->may_overflow();
+       this->rewind_on_overflow_ = this->may_overflow_;
+       child_builders.push_back(child_converter->builder());
+       children_.push_back(std::move(child_converter));
+     }
+
+     this->builder_ =
+         std::make_shared<StructBuilder>(this->type_, pool, std::move(child_builders));
+     struct_builder_ = checked_cast<StructBuilder*>(this->builder_.get());
+
+     return Status::OK();
+   }
+
+   const StructType* struct_type_;
+   StructBuilder* struct_builder_;
+   std::vector<std::unique_ptr<BaseConverter>> children_;
+ };
+
+ template <typename ValueType, typename BaseConverter>
+ class DictionaryConverter : public BaseConverter {
+  public:
+   using BuilderType = DictionaryBuilder<ValueType>;
+
+  protected:
+   Status Init(MemoryPool* pool) override {
+     std::unique_ptr<ArrayBuilder> builder;
+     ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, this->type_, NULLPTR, &builder));
+     this->builder_ = std::move(builder);
+     this->may_overflow_ = false;
+     dict_type_ = checked_cast<const DictionaryType*>(this->type_.get());
+     value_type_ = checked_cast<const ValueType*>(dict_type_->value_type().get());
+     value_builder_ = checked_cast<BuilderType*>(this->builder_.get());
+     return Status::OK();
+   }
+
+   const DictionaryType* dict_type_;
+   const ValueType* value_type_;
+   BuilderType* value_builder_;
+ };
+
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
+ struct MakeConverterImpl {
+   template <typename T, typename ConverterType = typename ConverterTrait<T>::type>
+   Status Visit(const T&) {
+     out.reset(new ConverterType());
+     return out->Construct(std::move(type), std::move(options), pool);
+   }
+
+   Status Visit(const DictionaryType& t) {
+     switch (t.value_type()->id()) {
+ #define DICTIONARY_CASE(TYPE)                                                       \
+   case TYPE::type_id:                                                               \
+     out = std::make_unique<                                                         \
+         typename ConverterTrait<DictionaryType>::template dictionary_type<TYPE>>(); \
+     break;
+       DICTIONARY_CASE(BooleanType);
+       DICTIONARY_CASE(Int8Type);
+       DICTIONARY_CASE(Int16Type);
+       DICTIONARY_CASE(Int32Type);
+       DICTIONARY_CASE(Int64Type);
+       DICTIONARY_CASE(UInt8Type);
+       DICTIONARY_CASE(UInt16Type);
+       DICTIONARY_CASE(UInt32Type);
+       DICTIONARY_CASE(UInt64Type);
+       DICTIONARY_CASE(FloatType);
+       DICTIONARY_CASE(DoubleType);
+       DICTIONARY_CASE(BinaryType);
+       DICTIONARY_CASE(StringType);
+       DICTIONARY_CASE(FixedSizeBinaryType);
+ #undef DICTIONARY_CASE
+       default:
+         return Status::NotImplemented("DictionaryArray converter for type ",
+                                       t.ToString(), " not implemented");
+     }
+     return out->Construct(std::move(type), std::move(options), pool);
+   }
+
+   Status Visit(const DataType& t) { return Status::NotImplemented(t.name()); }
+
+   std::shared_ptr<DataType> type;
+   typename BaseConverter::OptionsType options;
+   MemoryPool* pool;
+   std::unique_ptr<BaseConverter> out;
+ };
+
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
+ static Result<std::unique_ptr<BaseConverter>> MakeConverter(
+     std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
+     MemoryPool* pool) {
+   MakeConverterImpl<BaseConverter, ConverterTrait> visitor{
+       std::move(type), std::move(options), pool, NULLPTR};
+   ARROW_RETURN_NOT_OK(VisitTypeInline(*visitor.type, &visitor));
+   return std::move(visitor.out);
+ }
+
+ template <typename Converter>
+ class Chunker {
+  public:
+   using InputType = typename Converter::InputType;
+
+   explicit Chunker(std::unique_ptr<Converter> converter)
+       : converter_(std::move(converter)) {}
+
+   Status Reserve(int64_t additional_capacity) {
+     ARROW_RETURN_NOT_OK(converter_->Reserve(additional_capacity));
+     reserved_ += additional_capacity;
+     return Status::OK();
+   }
+
+   Status AppendNull() {
+     auto status = converter_->AppendNull();
+     if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
+       if (converter_->builder()->length() == 0) {
+         // Builder length == 0 means the individual element is too large to append.
+         // In this case, no need to try again.
+         return status;
+       }
+       ARROW_RETURN_NOT_OK(FinishChunk());
+       return converter_->AppendNull();
+     }
+     ++length_;
+     return status;
+   }
+
+   Status Append(InputType value) {
+     auto status = converter_->Append(value);
+     if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
+       if (converter_->builder()->length() == 0) {
+         return status;
+       }
+       ARROW_RETURN_NOT_OK(FinishChunk());
+       return Append(value);
+     }
+     ++length_;
+     return status;
+   }
+
+   Status Extend(InputType values, int64_t size, int64_t offset = 0) {
+     while (offset < size) {
+       auto length_before = converter_->builder()->length();
+       auto status = converter_->Extend(values, size, offset);
+       auto length_after = converter_->builder()->length();
+       auto num_converted = length_after - length_before;
+
+       offset += num_converted;
+       length_ += num_converted;
+
+       if (status.IsCapacityError()) {
+         if (converter_->builder()->length() == 0) {
+           // Builder length == 0 means the individual element is too large to append.
+           // In this case, no need to try again.
+           return status;
+         } else if (converter_->rewind_on_overflow()) {
+           // The list-like and binary-like conversion paths may raise a capacity error,
+           // we need to handle them differently. While the binary-like converters check
+           // the capacity before append/extend the list-like converters just check after
+           // append/extend. Thus depending on the implementation semantics we may need
+           // to rewind (slice) the output chunk by one.
+           length_ -= 1;
+           offset -= 1;
+         }
+         ARROW_RETURN_NOT_OK(FinishChunk());
+       } else if (!status.ok()) {
+         return status;
+       }
+     }
+     return Status::OK();
+   }
+
+   Status ExtendMasked(InputType values, InputType mask, int64_t size,
+                       int64_t offset = 0) {
+     while (offset < size) {
+       auto length_before = converter_->builder()->length();
+       auto status = converter_->ExtendMasked(values, mask, size, offset);
+       auto length_after = converter_->builder()->length();
+       auto num_converted = length_after - length_before;
+
+       offset += num_converted;
+       length_ += num_converted;
+
+       if (status.IsCapacityError()) {
+         if (converter_->builder()->length() == 0) {
+           // Builder length == 0 means the individual element is too large to append.
+           // In this case, no need to try again.
+           return status;
+         } else if (converter_->rewind_on_overflow()) {
+           // The list-like and binary-like conversion paths may raise a capacity error,
+           // we need to handle them differently. While the binary-like converters check
+           // the capacity before append/extend the list-like converters just check after
+           // append/extend. Thus depending on the implementation semantics we may need
+           // to rewind (slice) the output chunk by one.
+           length_ -= 1;
+           offset -= 1;
+         }
+         ARROW_RETURN_NOT_OK(FinishChunk());
+       } else if (!status.ok()) {
+         return status;
+       }
+     }
+     return Status::OK();
+   }
+
+   Status FinishChunk() {
+     ARROW_ASSIGN_OR_RAISE(auto chunk, converter_->ToArray(length_));
+     chunks_.push_back(chunk);
+     // Reserve space for the remaining items.
+     // Besides being an optimization, it is also required if the converter's
+     // implementation relies on unsafe builder methods in converter->Append().
+     auto remaining = reserved_ - length_;
+     Reset();
+     return Reserve(remaining);
+   }
+
+   Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
+     ARROW_RETURN_NOT_OK(FinishChunk());
+     return std::make_shared<ChunkedArray>(chunks_);
+   }
+
+  protected:
+   void Reset() {
+     converter_->builder()->Reset();
+     length_ = 0;
+     reserved_ = 0;
+   }
+
+   int64_t length_ = 0;
+   int64_t reserved_ = 0;
+   std::unique_ptr<Converter> converter_;
+   std::vector<std::shared_ptr<Array>> chunks_;
+ };
+
+ template <typename T>
+ static Result<std::unique_ptr<Chunker<T>>> MakeChunker(std::unique_ptr<T> converter) {
+   return std::make_unique<Chunker<T>>(std::move(converter));
+ }
+
+ }  // namespace internal
+ }  // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h ADDED
@@ -0,0 +1,114 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // From Apache Impala (incubating) as of 2016-01-29. Pared down to a minimal
+ // set of functions needed for Apache Arrow / Apache parquet-cpp
+
+ #pragma once
+
+ #include <cstdint>
+ #include <memory>
+ #include <string>
+
+ #include "arrow/util/macros.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+ namespace internal {
+
+ /// CpuInfo is an interface to query for cpu information at runtime. The caller can
+ /// ask for the sizes of the caches and what hardware features are supported.
+ /// On Linux, this information is pulled from a couple of sys files (/proc/cpuinfo and
+ /// /sys/devices)
+ class ARROW_EXPORT CpuInfo {
+  public:
+   ~CpuInfo();
+
+   /// x86 features
+   static constexpr int64_t SSSE3 = (1LL << 0);
+   static constexpr int64_t SSE4_1 = (1LL << 1);
+   static constexpr int64_t SSE4_2 = (1LL << 2);
+   static constexpr int64_t POPCNT = (1LL << 3);
+   static constexpr int64_t AVX = (1LL << 4);
+   static constexpr int64_t AVX2 = (1LL << 5);
+   static constexpr int64_t AVX512F = (1LL << 6);
+   static constexpr int64_t AVX512CD = (1LL << 7);
+   static constexpr int64_t AVX512VL = (1LL << 8);
+   static constexpr int64_t AVX512DQ = (1LL << 9);
+   static constexpr int64_t AVX512BW = (1LL << 10);
+   static constexpr int64_t AVX512 = AVX512F | AVX512CD | AVX512VL | AVX512DQ | AVX512BW;
+   static constexpr int64_t BMI1 = (1LL << 11);
+   static constexpr int64_t BMI2 = (1LL << 12);
+
+   /// Arm features
+   static constexpr int64_t ASIMD = (1LL << 32);
+
+   /// Cache enums for L1 (data), L2 and L3
+   enum class CacheLevel { L1 = 0, L2, L3, Last = L3 };
+
+   /// CPU vendors
+   enum class Vendor { Unknown, Intel, AMD };
+
+   static const CpuInfo* GetInstance();
+
+   /// Returns all the flags for this cpu
+   int64_t hardware_flags() const;
+
+   /// Returns the number of cores (including hyper-threaded) on this machine.
+   int num_cores() const;
+
+   /// Returns the vendor of the cpu.
+   Vendor vendor() const;
+
+   /// Returns the model name of the cpu (e.g. Intel i7-2600)
+   const std::string& model_name() const;
+
+   /// Returns the size of the cache in KB at this cache level
+   int64_t CacheSize(CacheLevel level) const;
+
+   /// \brief Returns whether or not the given feature is enabled.
+   ///
+   /// IsSupported() is true iff IsDetected() is also true and the feature
+   /// wasn't disabled by the user (for example by setting the ARROW_USER_SIMD_LEVEL
+   /// environment variable).
+   bool IsSupported(int64_t flags) const;
+
+   /// Returns whether or not the given feature is available on the CPU.
+   bool IsDetected(int64_t flags) const;
+
+   /// Determine if the CPU meets the minimum CPU requirements and if not, issue an error
+   /// and terminate.
+   void VerifyCpuRequirements() const;
+
+   /// Toggle a hardware feature on and off. It is not valid to turn on a feature
+   /// that the underlying hardware cannot support. This is useful for testing.
+   void EnableFeature(int64_t flag, bool enable);
+
+   bool HasEfficientBmi2() const {
+     // BMI2 (pext, pdep) is only efficient on Intel X86 processors.
+     return vendor() == Vendor::Intel && IsSupported(BMI2);
+   }
+
+  private:
+   CpuInfo();
+
+   struct Impl;
+   std::unique_ptr<Impl> impl_;
+ };
+
+ }  // namespace internal
+ }  // namespace arrow
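A small sketch of a typical runtime dispatch check using the interface above (not part of the header):

#include "arrow/util/cpu_info.h"

// Pick a kernel based on runtime-detected (and user-overridable) CPU features.
bool CanUseAvx2() {
  const arrow::internal::CpuInfo* info = arrow::internal::CpuInfo::GetInstance();
  // IsSupported() honors user overrides such as ARROW_USER_SIMD_LEVEL,
  // while IsDetected() reports the raw hardware capability.
  return info->IsSupported(arrow::internal::CpuInfo::AVX2);
}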
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h ADDED
@@ -0,0 +1,36 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
+ #pragma once
+
18
+ #include <cstddef>
19
+ #include <cstdint>
20
+
21
+ #include "arrow/util/visibility.h"
22
+
23
+ namespace arrow {
24
+ namespace internal {
25
+
26
+ /// \brief Compute the CRC32 checksum of the given data
27
+ ///
28
+ /// This function computes CRC32 with the polynomial 0x04C11DB7,
29
+ /// as used in zlib and others (note this is different from CRC32C).
30
+ /// To compute a running CRC32, pass the previous value in `prev`,
31
+ /// otherwise `prev` should be 0.
32
+ ARROW_EXPORT
33
+ uint32_t crc32(uint32_t prev, const void* data, size_t length);
34
+
35
+ } // namespace internal
36
+ } // namespace arrow
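The running-checksum pattern described above, as a short sketch (the two-chunk split is illustrative):

    #include <cstddef>
    #include <cstdint>

    #include "arrow/util/crc32.h"

    // Feeding the data in two chunks, passing the intermediate value as
    // `prev`, yields the same result as a single crc32(0, data, n) call.
    uint32_t ChecksumInTwoChunks(const uint8_t* data, size_t n) {
      uint32_t crc = arrow::internal::crc32(0, data, n / 2);
      return arrow::internal::crc32(crc, data + n / 2, n - n / 2);
    }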
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h ADDED
@@ -0,0 +1,29 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/visibility.h"
21
+
22
+ namespace arrow {
23
+ namespace internal {
24
+
25
+ ARROW_EXPORT
26
+ void DebugTrap();
27
+
28
+ } // namespace internal
29
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h ADDED
@@ -0,0 +1,115 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <utility>
21
+ #include <vector>
22
+
23
+ #include "arrow/status.h"
24
+ #include "arrow/util/cpu_info.h"
25
+
26
+ namespace arrow {
27
+ namespace internal {
28
+
29
+ enum class DispatchLevel : int {
30
+ // These dispatch levels, corresponding to instruction set features,
31
+ // are sorted in increasing order of preference.
32
+ NONE = 0,
33
+ SSE4_2,
34
+ AVX2,
35
+ AVX512,
36
+ NEON,
37
+ MAX
38
+ };
39
+
40
+ /*
41
+ A facility for dynamic dispatch according to available DispatchLevel.
42
+
43
+ Typical use:
44
+
45
+ static void my_function_default(...);
46
+ static void my_function_avx2(...);
47
+
48
+ struct MyDynamicFunction {
49
+ using FunctionType = decltype(&my_function_default);
50
+
51
+ static std::vector<std::pair<DispatchLevel, FunctionType>> implementations() {
52
+ return {
53
+ { DispatchLevel::NONE, my_function_default }
54
+ #if defined(ARROW_HAVE_RUNTIME_AVX2)
55
+ , { DispatchLevel::AVX2, my_function_avx2 }
56
+ #endif
57
+ };
58
+ }
59
+ };
60
+
61
+ void my_function(...) {
62
+ static DynamicDispatch<MyDynamicFunction> dispatch;
63
+ return dispatch.func(...);
64
+ }
65
+ */
66
+ template <typename DynamicFunction>
67
+ class DynamicDispatch {
68
+ protected:
69
+ using FunctionType = typename DynamicFunction::FunctionType;
70
+ using Implementation = std::pair<DispatchLevel, FunctionType>;
71
+
72
+ public:
73
+ DynamicDispatch() { Resolve(DynamicFunction::implementations()); }
74
+
75
+ FunctionType func = {};
76
+
77
+ protected:
78
+ // Use the Implementation with the highest supported DispatchLevel
79
+ void Resolve(const std::vector<Implementation>& implementations) {
80
+ Implementation cur{DispatchLevel::NONE, {}};
81
+
82
+ for (const auto& impl : implementations) {
83
+ if (impl.first >= cur.first && IsSupported(impl.first)) {
84
+ // Higher (or same) level than current
85
+ cur = impl;
86
+ }
87
+ }
88
+
89
+ if (!cur.second) {
90
+ Status::Invalid("No appropriate implementation found").Abort();
91
+ }
92
+ func = cur.second;
93
+ }
94
+
95
+ private:
96
+ bool IsSupported(DispatchLevel level) const {
97
+ static const auto cpu_info = arrow::internal::CpuInfo::GetInstance();
98
+
99
+ switch (level) {
100
+ case DispatchLevel::NONE:
101
+ return true;
102
+ case DispatchLevel::SSE4_2:
103
+ return cpu_info->IsSupported(CpuInfo::SSE4_2);
104
+ case DispatchLevel::AVX2:
105
+ return cpu_info->IsSupported(CpuInfo::AVX2);
106
+ case DispatchLevel::AVX512:
107
+ return cpu_info->IsSupported(CpuInfo::AVX512);
108
+ default:
109
+ return false;
110
+ }
111
+ }
112
+ };
113
+
114
+ } // namespace internal
115
+ } // namespace arrow
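Filling in the "Typical use" skeleton documented above, a hypothetical summation kernel could be dispatched like this (SumDefault/SumAvx2 and their signature are illustrative; the AVX2 variant would be defined in a translation unit compiled with the appropriate flags):

    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    #include "arrow/util/dispatch.h"

    namespace {

    int64_t SumDefault(const int32_t* values, size_t n) {
      int64_t sum = 0;
      for (size_t i = 0; i < n; ++i) sum += values[i];
      return sum;
    }

    #if defined(ARROW_HAVE_RUNTIME_AVX2)
    int64_t SumAvx2(const int32_t* values, size_t n);  // defined in an AVX2-compiled TU
    #endif

    struct SumFunction {
      using FunctionType = decltype(&SumDefault);

      static std::vector<std::pair<arrow::internal::DispatchLevel, FunctionType>>
      implementations() {
        return {
            {arrow::internal::DispatchLevel::NONE, SumDefault}
    #if defined(ARROW_HAVE_RUNTIME_AVX2)
            ,
            {arrow::internal::DispatchLevel::AVX2, SumAvx2}
    #endif
        };
      }
    };

    }  // namespace

    int64_t Sum(const int32_t* values, size_t n) {
      // Resolved once, to the best implementation this CPU supports.
      static arrow::internal::DynamicDispatch<SumFunction> dispatch;
      return dispatch.func(values, n);
    }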
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/double_conversion.h ADDED
@@ -0,0 +1,32 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/vendored/double-conversion/double-conversion.h" // IWYU pragma: export
21
+
22
+ namespace arrow {
23
+ namespace util {
24
+ namespace double_conversion {
25
+
26
+ using ::arrow_vendored::double_conversion::DoubleToStringConverter;
27
+ using ::arrow_vendored::double_conversion::StringBuilder;
28
+ using ::arrow_vendored::double_conversion::StringToDoubleConverter;
29
+
30
+ } // namespace double_conversion
31
+ } // namespace util
32
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/endian.h ADDED
@@ -0,0 +1,245 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #ifdef _WIN32
21
+ #define ARROW_LITTLE_ENDIAN 1
22
+ #else
23
+ #if defined(__APPLE__) || defined(__FreeBSD__)
24
+ #include <machine/endian.h> // IWYU pragma: keep
25
+ #elif defined(sun) || defined(__sun)
26
+ #include <sys/byteorder.h> // IWYU pragma: keep
27
+ #else
28
+ #include <endian.h> // IWYU pragma: keep
29
+ #endif
30
+ #
31
+ #ifndef __BYTE_ORDER__
32
+ #error "__BYTE_ORDER__ not defined"
33
+ #endif
34
+ #
35
+ #ifndef __ORDER_LITTLE_ENDIAN__
36
+ #error "__ORDER_LITTLE_ENDIAN__ not defined"
37
+ #endif
38
+ #
39
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
40
+ #define ARROW_LITTLE_ENDIAN 1
41
+ #else
42
+ #define ARROW_LITTLE_ENDIAN 0
43
+ #endif
44
+ #endif
45
+
46
+ #if defined(_MSC_VER)
47
+ #include <intrin.h> // IWYU pragma: keep
48
+ #define ARROW_BYTE_SWAP64 _byteswap_uint64
49
+ #define ARROW_BYTE_SWAP32 _byteswap_ulong
50
+ #else
51
+ #define ARROW_BYTE_SWAP64 __builtin_bswap64
52
+ #define ARROW_BYTE_SWAP32 __builtin_bswap32
53
+ #endif
54
+
55
+ #include <algorithm>
56
+ #include <array>
57
+
58
+ #include "arrow/util/type_traits.h"
59
+ #include "arrow/util/ubsan.h"
60
+
61
+ namespace arrow {
62
+ namespace bit_util {
63
+
64
+ //
65
+ // Byte-swap 16-bit, 32-bit and 64-bit values
66
+ //
67
+
68
+ // Swap the byte order (i.e. endianness)
69
+ static inline int64_t ByteSwap(int64_t value) { return ARROW_BYTE_SWAP64(value); }
70
+ static inline uint64_t ByteSwap(uint64_t value) {
71
+ return static_cast<uint64_t>(ARROW_BYTE_SWAP64(value));
72
+ }
73
+ static inline int32_t ByteSwap(int32_t value) { return ARROW_BYTE_SWAP32(value); }
74
+ static inline uint32_t ByteSwap(uint32_t value) {
75
+ return static_cast<uint32_t>(ARROW_BYTE_SWAP32(value));
76
+ }
77
+ static inline int16_t ByteSwap(int16_t value) {
78
+ constexpr auto m = static_cast<int16_t>(0xff);
79
+ return static_cast<int16_t>(((value >> 8) & m) | ((value & m) << 8));
80
+ }
81
+ static inline uint16_t ByteSwap(uint16_t value) {
82
+ return static_cast<uint16_t>(ByteSwap(static_cast<int16_t>(value)));
83
+ }
84
+ static inline uint8_t ByteSwap(uint8_t value) { return value; }
85
+ static inline int8_t ByteSwap(int8_t value) { return value; }
86
+ static inline double ByteSwap(double value) {
87
+ const uint64_t swapped = ARROW_BYTE_SWAP64(util::SafeCopy<uint64_t>(value));
88
+ return util::SafeCopy<double>(swapped);
89
+ }
90
+ static inline float ByteSwap(float value) {
91
+ const uint32_t swapped = ARROW_BYTE_SWAP32(util::SafeCopy<uint32_t>(value));
92
+ return util::SafeCopy<float>(swapped);
93
+ }
94
+
95
+ // Write the swapped bytes into dst. Src and dst cannot overlap.
96
+ static inline void ByteSwap(void* dst, const void* src, int len) {
97
+ switch (len) {
98
+ case 1:
99
+ *reinterpret_cast<int8_t*>(dst) = *reinterpret_cast<const int8_t*>(src);
100
+ return;
101
+ case 2:
102
+ *reinterpret_cast<int16_t*>(dst) = ByteSwap(*reinterpret_cast<const int16_t*>(src));
103
+ return;
104
+ case 4:
105
+ *reinterpret_cast<int32_t*>(dst) = ByteSwap(*reinterpret_cast<const int32_t*>(src));
106
+ return;
107
+ case 8:
108
+ *reinterpret_cast<int64_t*>(dst) = ByteSwap(*reinterpret_cast<const int64_t*>(src));
109
+ return;
110
+ default:
111
+ break;
112
+ }
113
+
114
+ auto d = reinterpret_cast<uint8_t*>(dst);
115
+ auto s = reinterpret_cast<const uint8_t*>(src);
116
+ for (int i = 0; i < len; ++i) {
117
+ d[i] = s[len - i - 1];
118
+ }
119
+ }
120
+
121
+ // Convert to little/big endian format from the machine's native endian format.
122
+ #if ARROW_LITTLE_ENDIAN
123
+ template <typename T, typename = internal::EnableIfIsOneOf<
124
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
125
+ uint8_t, int8_t, float, double, bool>>
126
+ static inline T ToBigEndian(T value) {
127
+ return ByteSwap(value);
128
+ }
129
+
130
+ template <typename T, typename = internal::EnableIfIsOneOf<
131
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
132
+ uint8_t, int8_t, float, double, bool>>
133
+ static inline T ToLittleEndian(T value) {
134
+ return value;
135
+ }
136
+ #else
137
+ template <typename T, typename = internal::EnableIfIsOneOf<
138
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
139
+ uint8_t, int8_t, float, double, bool>>
140
+ static inline T ToBigEndian(T value) {
141
+ return value;
142
+ }
143
+
144
+ template <typename T, typename = internal::EnableIfIsOneOf<
145
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
146
+ uint8_t, int8_t, float, double, bool>>
147
+ static inline T ToLittleEndian(T value) {
148
+ return ByteSwap(value);
149
+ }
150
+ #endif
151
+
152
+ // Convert from big/little endian format to the machine's native endian format.
153
+ #if ARROW_LITTLE_ENDIAN
154
+ template <typename T, typename = internal::EnableIfIsOneOf<
155
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
156
+ uint8_t, int8_t, float, double, bool>>
157
+ static inline T FromBigEndian(T value) {
158
+ return ByteSwap(value);
159
+ }
160
+
161
+ template <typename T, typename = internal::EnableIfIsOneOf<
162
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
163
+ uint8_t, int8_t, float, double, bool>>
164
+ static inline T FromLittleEndian(T value) {
165
+ return value;
166
+ }
167
+ #else
168
+ template <typename T, typename = internal::EnableIfIsOneOf<
169
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
170
+ uint8_t, int8_t, float, double, bool>>
171
+ static inline T FromBigEndian(T value) {
172
+ return value;
173
+ }
174
+
175
+ template <typename T, typename = internal::EnableIfIsOneOf<
176
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
177
+ uint8_t, int8_t, float, double, bool>>
178
+ static inline T FromLittleEndian(T value) {
179
+ return ByteSwap(value);
180
+ }
181
+ #endif
182
+
183
+ // Handle endianness at *word* granularity (keep individual array elements untouched)
184
+ namespace little_endian {
185
+
186
+ namespace detail {
187
+
188
+ // Read a native endian array as little endian
189
+ template <typename T, size_t N>
190
+ struct Reader {
191
+ const std::array<T, N>& native_array;
192
+
193
+ explicit Reader(const std::array<T, N>& native_array) : native_array(native_array) {}
194
+
195
+ const T& operator[](size_t i) const {
196
+ return native_array[ARROW_LITTLE_ENDIAN ? i : N - 1 - i];
197
+ }
198
+ };
199
+
200
+ // Read/write a native endian array as little endian
201
+ template <typename T, size_t N>
202
+ struct Writer {
203
+ std::array<T, N>* native_array;
204
+
205
+ explicit Writer(std::array<T, N>* native_array) : native_array(native_array) {}
206
+
207
+ const T& operator[](size_t i) const {
208
+ return (*native_array)[ARROW_LITTLE_ENDIAN ? i : N - 1 - i];
209
+ }
210
+ T& operator[](size_t i) { return (*native_array)[ARROW_LITTLE_ENDIAN ? i : N - 1 - i]; }
211
+ };
212
+
213
+ } // namespace detail
214
+
215
+ // Construct array reader and try to deduce template arguments
216
+ template <typename T, size_t N>
217
+ static inline detail::Reader<T, N> Make(const std::array<T, N>& native_array) {
218
+ return detail::Reader<T, N>(native_array);
219
+ }
220
+
221
+ // Construct array writer and try to deduce template arguments
222
+ template <typename T, size_t N>
223
+ static inline detail::Writer<T, N> Make(std::array<T, N>* native_array) {
224
+ return detail::Writer<T, N>(native_array);
225
+ }
226
+
227
+ // Convert little endian array to native endian
228
+ template <typename T, size_t N>
229
+ static inline std::array<T, N> ToNative(std::array<T, N> array) {
230
+ if (!ARROW_LITTLE_ENDIAN) {
231
+ std::reverse(array.begin(), array.end());
232
+ }
233
+ return array;
234
+ }
235
+
236
+ // Convert native endian array to little endian
237
+ template <typename T, size_t N>
238
+ static inline std::array<T, N> FromNative(std::array<T, N> array) {
239
+ return ToNative(array);
240
+ }
241
+
242
+ } // namespace little_endian
243
+
244
+ } // namespace bit_util
245
+ } // namespace arrow
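A small sketch of the conversion helpers above; the first assertion is endianness-independent, while ToLittleEndian()/ToBigEndian() resolve to either the identity or a swap depending on the host:

    #include <cassert>
    #include <cstdint>

    #include "arrow/util/endian.h"

    void EndianDemo() {
      // ByteSwap reverses the byte order of a fixed-width value.
      assert(arrow::bit_util::ByteSwap(uint32_t{0x11223344}) == uint32_t{0x44332211});

      // Exactly one of the two conversions is a no-op on any given host.
      uint16_t v = 0x0102;
      uint16_t le = arrow::bit_util::ToLittleEndian(v);
      uint16_t be = arrow::bit_util::ToBigEndian(v);
      assert(le == v || be == v);
      (void)le;
      (void)be;
    }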
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/formatting.h ADDED
@@ -0,0 +1,656 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This is a private header for number-to-string formatting utilities
19
+
20
+ #pragma once
21
+
22
+ #include <array>
23
+ #include <cassert>
24
+ #include <chrono>
25
+ #include <limits>
26
+ #include <memory>
27
+ #include <string>
28
+ #include <string_view>
29
+ #include <type_traits>
30
+ #include <utility>
31
+
32
+ #include "arrow/status.h"
33
+ #include "arrow/type.h"
34
+ #include "arrow/type_traits.h"
35
+ #include "arrow/util/double_conversion.h"
36
+ #include "arrow/util/macros.h"
37
+ #include "arrow/util/string.h"
38
+ #include "arrow/util/time.h"
39
+ #include "arrow/util/visibility.h"
40
+ #include "arrow/vendored/datetime.h"
41
+
42
+ namespace arrow {
43
+ namespace internal {
44
+
45
+ /// \brief The entry point for conversion to strings.
46
+ template <typename ARROW_TYPE, typename Enable = void>
47
+ class StringFormatter;
48
+
49
+ template <typename T>
50
+ struct is_formattable {
51
+ template <typename U, typename = typename StringFormatter<U>::value_type>
52
+ static std::true_type Test(U*);
53
+
54
+ template <typename U>
55
+ static std::false_type Test(...);
56
+
57
+ static constexpr bool value = decltype(Test<T>(NULLPTR))::value;
58
+ };
59
+
60
+ template <typename T, typename R = void>
61
+ using enable_if_formattable = enable_if_t<is_formattable<T>::value, R>;
62
+
63
+ template <typename Appender>
64
+ using Return = decltype(std::declval<Appender>()(std::string_view{}));
65
+
66
+ /////////////////////////////////////////////////////////////////////////
67
+ // Boolean formatting
68
+
69
+ template <>
70
+ class StringFormatter<BooleanType> {
71
+ public:
72
+ explicit StringFormatter(const DataType* = NULLPTR) {}
73
+
74
+ using value_type = bool;
75
+
76
+ template <typename Appender>
77
+ Return<Appender> operator()(bool value, Appender&& append) {
78
+ if (value) {
79
+ const char string[] = "true";
80
+ return append(std::string_view(string));
81
+ } else {
82
+ const char string[] = "false";
83
+ return append(std::string_view(string));
84
+ }
85
+ }
86
+ };
87
+
88
+ /////////////////////////////////////////////////////////////////////////
89
+ // Decimals formatting
90
+
91
+ template <typename ARROW_TYPE>
92
+ class DecimalToStringFormatterMixin {
93
+ public:
94
+ explicit DecimalToStringFormatterMixin(const DataType* type)
95
+ : scale_(static_cast<const ARROW_TYPE*>(type)->scale()) {}
96
+
97
+ using value_type = typename TypeTraits<ARROW_TYPE>::CType;
98
+
99
+ template <typename Appender>
100
+ Return<Appender> operator()(const value_type& value, Appender&& append) {
101
+ return append(value.ToString(scale_));
102
+ }
103
+
104
+ private:
105
+ int32_t scale_;
106
+ };
107
+
108
+ template <>
109
+ class StringFormatter<Decimal128Type>
110
+ : public DecimalToStringFormatterMixin<Decimal128Type> {
111
+ using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin;
112
+ };
113
+
114
+ template <>
115
+ class StringFormatter<Decimal256Type>
116
+ : public DecimalToStringFormatterMixin<Decimal256Type> {
117
+ using DecimalToStringFormatterMixin::DecimalToStringFormatterMixin;
118
+ };
119
+
120
+ /////////////////////////////////////////////////////////////////////////
121
+ // Integer formatting
122
+
123
+ namespace detail {
124
+
125
+ // A 2x100 direct table mapping integers in [0..99] to their decimal representations.
126
+ ARROW_EXPORT extern const char digit_pairs[];
127
+
128
+ // Based on fmtlib's format_int class:
129
+ // Write digits from right to left into a stack allocated buffer.
130
+ // \pre *cursor points to the byte after the one that will be written.
131
+ // \post *cursor points to the byte that was written.
132
+ inline void FormatOneChar(char c, char** cursor) { *(--(*cursor)) = c; }
133
+
134
+ template <typename Int>
135
+ void FormatOneDigit(Int value, char** cursor) {
136
+ assert(value >= 0 && value <= 9);
137
+ FormatOneChar(static_cast<char>('0' + value), cursor);
138
+ }
139
+
140
+ // GH-35662: for reasons that are not understood, the following combination causes a SEGV:
141
+ // * template implementation without inline
142
+ // * MinGW
143
+ // * Release build
144
+ template <typename Int>
145
+ inline void FormatTwoDigits(Int value, char** cursor) {
146
+ assert(value >= 0 && value <= 99);
147
+ auto digit_pair = &digit_pairs[value * 2];
148
+ FormatOneChar(digit_pair[1], cursor);
149
+ FormatOneChar(digit_pair[0], cursor);
150
+ }
151
+
152
+ template <typename Int>
153
+ void FormatAllDigits(Int value, char** cursor) {
154
+ assert(value >= 0);
155
+ while (value >= 100) {
156
+ FormatTwoDigits(value % 100, cursor);
157
+ value /= 100;
158
+ }
159
+
160
+ if (value >= 10) {
161
+ FormatTwoDigits(value, cursor);
162
+ } else {
163
+ FormatOneDigit(value, cursor);
164
+ }
165
+ }
166
+
167
+ template <typename Int>
168
+ void FormatAllDigitsLeftPadded(Int value, size_t pad, char pad_char, char** cursor) {
169
+ auto end = *cursor - pad;
170
+ FormatAllDigits(value, cursor);
171
+ while (*cursor > end) {
172
+ FormatOneChar(pad_char, cursor);
173
+ }
174
+ }
175
+
176
+ template <size_t BUFFER_SIZE>
177
+ std::string_view ViewDigitBuffer(const std::array<char, BUFFER_SIZE>& buffer,
178
+ char* cursor) {
179
+ auto buffer_end = buffer.data() + BUFFER_SIZE;
180
+ return {cursor, static_cast<size_t>(buffer_end - cursor)};
181
+ }
182
+
183
+ template <typename Int, typename UInt = typename std::make_unsigned<Int>::type>
184
+ constexpr UInt Abs(Int value) {
185
+ return value < 0 ? ~static_cast<UInt>(value) + 1 : static_cast<UInt>(value);
186
+ }
187
+
188
+ template <typename Int>
189
+ constexpr size_t Digits10(Int value) {
190
+ return value <= 9 ? 1 : Digits10(value / 10) + 1;
191
+ }
192
+
193
+ } // namespace detail
194
+
195
+ template <typename ARROW_TYPE>
196
+ class IntToStringFormatterMixin {
197
+ public:
198
+ explicit IntToStringFormatterMixin(const DataType* = NULLPTR) {}
199
+
200
+ using value_type = typename ARROW_TYPE::c_type;
201
+
202
+ template <typename Appender>
203
+ Return<Appender> operator()(value_type value, Appender&& append) {
204
+ constexpr size_t buffer_size =
205
+ detail::Digits10(std::numeric_limits<value_type>::max()) + 1;
206
+
207
+ std::array<char, buffer_size> buffer;
208
+ char* cursor = buffer.data() + buffer_size;
209
+ detail::FormatAllDigits(detail::Abs(value), &cursor);
210
+ if (value < 0) {
211
+ detail::FormatOneChar('-', &cursor);
212
+ }
213
+ return append(detail::ViewDigitBuffer(buffer, cursor));
214
+ }
215
+ };
216
+
217
+ template <>
218
+ class StringFormatter<Int8Type> : public IntToStringFormatterMixin<Int8Type> {
219
+ using IntToStringFormatterMixin::IntToStringFormatterMixin;
220
+ };
221
+
222
+ template <>
223
+ class StringFormatter<Int16Type> : public IntToStringFormatterMixin<Int16Type> {
224
+ using IntToStringFormatterMixin::IntToStringFormatterMixin;
225
+ };
226
+
227
+ template <>
228
+ class StringFormatter<Int32Type> : public IntToStringFormatterMixin<Int32Type> {
229
+ using IntToStringFormatterMixin::IntToStringFormatterMixin;
230
+ };
231
+
232
+ template <>
233
+ class StringFormatter<Int64Type> : public IntToStringFormatterMixin<Int64Type> {
234
+ using IntToStringFormatterMixin::IntToStringFormatterMixin;
235
+ };
236
+
237
+ template <>
238
+ class StringFormatter<UInt8Type> : public IntToStringFormatterMixin<UInt8Type> {
239
+ using IntToStringFormatterMixin::IntToStringFormatterMixin;
240
+ };
241
+
242
+ template <>
243
+ class StringFormatter<UInt16Type> : public IntToStringFormatterMixin<UInt16Type> {
244
+ using IntToStringFormatterMixin::IntToStringFormatterMixin;
245
+ };
246
+
247
+ template <>
248
+ class StringFormatter<UInt32Type> : public IntToStringFormatterMixin<UInt32Type> {
249
+ using IntToStringFormatterMixin::IntToStringFormatterMixin;
250
+ };
251
+
252
+ template <>
253
+ class StringFormatter<UInt64Type> : public IntToStringFormatterMixin<UInt64Type> {
254
+ using IntToStringFormatterMixin::IntToStringFormatterMixin;
255
+ };
256
+
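To illustrate the Appender protocol these formatters use, a hedged sketch: the formatter writes digits into a stack buffer and hands the caller a std::string_view that is only valid for the duration of the call, so the appender must copy whatever it wants to keep.

    #include <iostream>
    #include <string>
    #include <string_view>

    #include "arrow/util/formatting.h"

    void IntFormatDemo() {
      arrow::internal::StringFormatter<arrow::Int32Type> format;
      std::string out;
      format(-1234, [&](std::string_view v) { out.append(v.data(), v.size()); });
      std::cout << out << "\n";  // prints -1234
    }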
257
+ /////////////////////////////////////////////////////////////////////////
258
+ // Floating-point formatting
259
+
260
+ class ARROW_EXPORT FloatToStringFormatter {
261
+ public:
262
+ FloatToStringFormatter();
263
+ FloatToStringFormatter(int flags, const char* inf_symbol, const char* nan_symbol,
264
+ char exp_character, int decimal_in_shortest_low,
265
+ int decimal_in_shortest_high,
266
+ int max_leading_padding_zeroes_in_precision_mode,
267
+ int max_trailing_padding_zeroes_in_precision_mode);
268
+ ~FloatToStringFormatter();
269
+
270
+ // Returns the number of characters written
271
+ int FormatFloat(float v, char* out_buffer, int out_size);
272
+ int FormatFloat(double v, char* out_buffer, int out_size);
273
+ int FormatFloat(uint16_t v, char* out_buffer, int out_size);
274
+
275
+ protected:
276
+ struct Impl;
277
+ std::unique_ptr<Impl> impl_;
278
+ };
279
+
280
+ template <typename ARROW_TYPE>
281
+ class FloatToStringFormatterMixin : public FloatToStringFormatter {
282
+ public:
283
+ using value_type = typename ARROW_TYPE::c_type;
284
+
285
+ static constexpr int buffer_size = 50;
286
+
287
+ explicit FloatToStringFormatterMixin(const DataType* = NULLPTR) {}
288
+
289
+ FloatToStringFormatterMixin(int flags, const char* inf_symbol, const char* nan_symbol,
290
+ char exp_character, int decimal_in_shortest_low,
291
+ int decimal_in_shortest_high,
292
+ int max_leading_padding_zeroes_in_precision_mode,
293
+ int max_trailing_padding_zeroes_in_precision_mode)
294
+ : FloatToStringFormatter(flags, inf_symbol, nan_symbol, exp_character,
295
+ decimal_in_shortest_low, decimal_in_shortest_high,
296
+ max_leading_padding_zeroes_in_precision_mode,
297
+ max_trailing_padding_zeroes_in_precision_mode) {}
298
+
299
+ template <typename Appender>
300
+ Return<Appender> operator()(value_type value, Appender&& append) {
301
+ char buffer[buffer_size];
302
+ int size = FormatFloat(value, buffer, buffer_size);
303
+ return append(std::string_view(buffer, size));
304
+ }
305
+ };
306
+
307
+ template <>
308
+ class StringFormatter<HalfFloatType> : public FloatToStringFormatterMixin<HalfFloatType> {
309
+ public:
310
+ using FloatToStringFormatterMixin::FloatToStringFormatterMixin;
311
+ };
312
+
313
+ template <>
314
+ class StringFormatter<FloatType> : public FloatToStringFormatterMixin<FloatType> {
315
+ public:
316
+ using FloatToStringFormatterMixin::FloatToStringFormatterMixin;
317
+ };
318
+
319
+ template <>
320
+ class StringFormatter<DoubleType> : public FloatToStringFormatterMixin<DoubleType> {
321
+ public:
322
+ using FloatToStringFormatterMixin::FloatToStringFormatterMixin;
323
+ };
324
+
325
+ /////////////////////////////////////////////////////////////////////////
326
+ // Temporal formatting
327
+
328
+ namespace detail {
329
+
330
+ constexpr size_t BufferSizeYYYY_MM_DD() {
331
+ // "-"? "99999-12-31"
332
+ return 1 + detail::Digits10(99999) + 1 + detail::Digits10(12) + 1 +
333
+ detail::Digits10(31);
334
+ }
335
+
336
+ inline void FormatYYYY_MM_DD(arrow_vendored::date::year_month_day ymd, char** cursor) {
337
+ FormatTwoDigits(static_cast<unsigned>(ymd.day()), cursor);
338
+ FormatOneChar('-', cursor);
339
+ FormatTwoDigits(static_cast<unsigned>(ymd.month()), cursor);
340
+ FormatOneChar('-', cursor);
341
+ auto year = static_cast<int>(ymd.year());
342
+ const auto is_neg_year = year < 0;
343
+ year = std::abs(year);
344
+ assert(year <= 99999);
345
+ FormatTwoDigits(year % 100, cursor);
346
+ year /= 100;
347
+ FormatTwoDigits(year % 100, cursor);
348
+ if (year >= 100) {
349
+ FormatOneDigit(year / 100, cursor);
350
+ }
351
+ if (is_neg_year) {
352
+ FormatOneChar('-', cursor);
353
+ }
354
+ }
355
+
356
+ template <typename Duration>
357
+ constexpr size_t BufferSizeHH_MM_SS() {
358
+ // "23:59:59" ("." "9"+)?
359
+ return detail::Digits10(23) + 1 + detail::Digits10(59) + 1 + detail::Digits10(59) + 1 +
360
+ detail::Digits10(Duration::period::den) - 1;
361
+ }
362
+
363
+ template <typename Duration>
364
+ void FormatHH_MM_SS(arrow_vendored::date::hh_mm_ss<Duration> hms, char** cursor) {
365
+ constexpr size_t subsecond_digits = Digits10(Duration::period::den) - 1;
366
+ if (subsecond_digits != 0) {
367
+ FormatAllDigitsLeftPadded(hms.subseconds().count(), subsecond_digits, '0', cursor);
368
+ FormatOneChar('.', cursor);
369
+ }
370
+ FormatTwoDigits(hms.seconds().count(), cursor);
371
+ FormatOneChar(':', cursor);
372
+ FormatTwoDigits(hms.minutes().count(), cursor);
373
+ FormatOneChar(':', cursor);
374
+ FormatTwoDigits(hms.hours().count(), cursor);
375
+ }
376
+
377
+ // Some out-of-bound datetime values would result in erroneous printing
378
+ // because of silent integer wraparound in the `arrow_vendored::date` library.
379
+ //
380
+ // To avoid such misprinting, we must therefore check the bounds explicitly.
381
+ // The bounds correspond to start of year -32767 and end of year 32767,
382
+ // respectively (-32768 is an invalid year value in `arrow_vendored::date`).
383
+ //
384
+ // Note these values are the same as documented for C++20:
385
+ // https://en.cppreference.com/w/cpp/chrono/year_month_day/operator_days
386
+ template <typename Unit>
387
+ bool IsDateTimeInRange(Unit duration) {
388
+ constexpr Unit kMinIncl =
389
+ std::chrono::duration_cast<Unit>(arrow_vendored::date::days{-12687428});
390
+ constexpr Unit kMaxExcl =
391
+ std::chrono::duration_cast<Unit>(arrow_vendored::date::days{11248738});
392
+ return duration >= kMinIncl && duration < kMaxExcl;
393
+ }
394
+
395
+ // IsDateTimeInRange() specialization for nanoseconds: a 64-bit number of
396
+ // nanoseconds cannot represent years outside of the [-32767, 32767]
397
+ // range, and the {kMinIncl, kMaxExcl} constants above would overflow.
398
+ constexpr bool IsDateTimeInRange(std::chrono::nanoseconds duration) { return true; }
399
+
400
+ template <typename Unit>
401
+ bool IsTimeInRange(Unit duration) {
402
+ constexpr Unit kMinIncl = std::chrono::duration_cast<Unit>(std::chrono::seconds{0});
403
+ constexpr Unit kMaxExcl = std::chrono::duration_cast<Unit>(std::chrono::seconds{86400});
404
+ return duration >= kMinIncl && duration < kMaxExcl;
405
+ }
406
+
407
+ template <typename RawValue, typename Appender>
408
+ Return<Appender> FormatOutOfRange(RawValue&& raw_value, Appender&& append) {
409
+ // XXX locale-sensitive but good enough for now
410
+ std::string formatted = "<value out of range: " + ToChars(raw_value) + ">";
411
+ return append(std::move(formatted));
412
+ }
413
+
414
+ const auto kEpoch = arrow_vendored::date::sys_days{arrow_vendored::date::jan / 1 / 1970};
415
+
416
+ } // namespace detail
417
+
418
+ template <>
419
+ class StringFormatter<DurationType> : public IntToStringFormatterMixin<DurationType> {
420
+ using IntToStringFormatterMixin::IntToStringFormatterMixin;
421
+ };
422
+
423
+ class DateToStringFormatterMixin {
424
+ public:
425
+ explicit DateToStringFormatterMixin(const DataType* = NULLPTR) {}
426
+
427
+ protected:
428
+ template <typename Appender>
429
+ Return<Appender> FormatDays(arrow_vendored::date::days since_epoch, Appender&& append) {
430
+ arrow_vendored::date::sys_days timepoint_days{since_epoch};
431
+
432
+ constexpr size_t buffer_size = detail::BufferSizeYYYY_MM_DD();
433
+
434
+ std::array<char, buffer_size> buffer;
435
+ char* cursor = buffer.data() + buffer_size;
436
+
437
+ detail::FormatYYYY_MM_DD(arrow_vendored::date::year_month_day{timepoint_days},
438
+ &cursor);
439
+ return append(detail::ViewDigitBuffer(buffer, cursor));
440
+ }
441
+ };
442
+
443
+ template <>
444
+ class StringFormatter<Date32Type> : public DateToStringFormatterMixin {
445
+ public:
446
+ using value_type = typename Date32Type::c_type;
447
+
448
+ using DateToStringFormatterMixin::DateToStringFormatterMixin;
449
+
450
+ template <typename Appender>
451
+ Return<Appender> operator()(value_type value, Appender&& append) {
452
+ const auto since_epoch = arrow_vendored::date::days{value};
453
+ if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) {
454
+ return detail::FormatOutOfRange(value, append);
455
+ }
456
+ return FormatDays(since_epoch, std::forward<Appender>(append));
457
+ }
458
+ };
459
+
460
+ template <>
461
+ class StringFormatter<Date64Type> : public DateToStringFormatterMixin {
462
+ public:
463
+ using value_type = typename Date64Type::c_type;
464
+
465
+ using DateToStringFormatterMixin::DateToStringFormatterMixin;
466
+
467
+ template <typename Appender>
468
+ Return<Appender> operator()(value_type value, Appender&& append) {
469
+ const auto since_epoch = std::chrono::milliseconds{value};
470
+ if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) {
471
+ return detail::FormatOutOfRange(value, append);
472
+ }
473
+ return FormatDays(std::chrono::duration_cast<arrow_vendored::date::days>(since_epoch),
474
+ std::forward<Appender>(append));
475
+ }
476
+ };
477
+
478
+ template <>
479
+ class StringFormatter<TimestampType> {
480
+ public:
481
+ using value_type = int64_t;
482
+
483
+ explicit StringFormatter(const DataType* type)
484
+ : unit_(checked_cast<const TimestampType&>(*type).unit()),
485
+ timezone_(checked_cast<const TimestampType&>(*type).timezone()) {}
486
+
487
+ template <typename Duration, typename Appender>
488
+ Return<Appender> operator()(Duration, value_type value, Appender&& append) {
489
+ using arrow_vendored::date::days;
490
+
491
+ const Duration since_epoch{value};
492
+ if (!ARROW_PREDICT_TRUE(detail::IsDateTimeInRange(since_epoch))) {
493
+ return detail::FormatOutOfRange(value, append);
494
+ }
495
+
496
+ const auto timepoint = detail::kEpoch + since_epoch;
497
+ // Round days towards zero
498
+ // (the naive approach of using arrow_vendored::date::floor() would
499
+ // result in UB for very large negative timestamps, similarly as
500
+ // https://github.com/HowardHinnant/date/issues/696)
501
+ auto timepoint_days = std::chrono::time_point_cast<days>(timepoint);
502
+ Duration since_midnight;
503
+ if (timepoint_days <= timepoint) {
504
+ // Year >= 1970
505
+ since_midnight = timepoint - timepoint_days;
506
+ } else {
507
+ // Year < 1970
508
+ since_midnight = days(1) - (timepoint_days - timepoint);
509
+ timepoint_days -= days(1);
510
+ }
511
+
512
+ // YYYY_MM_DD " " HH_MM_SS "Z"?
513
+ constexpr size_t buffer_size =
514
+ detail::BufferSizeYYYY_MM_DD() + 1 + detail::BufferSizeHH_MM_SS<Duration>() + 1;
515
+
516
+ std::array<char, buffer_size> buffer;
517
+ char* cursor = buffer.data() + buffer_size;
518
+
519
+ if (timezone_.size() > 0) {
520
+ detail::FormatOneChar('Z', &cursor);
521
+ }
522
+ detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor);
523
+ detail::FormatOneChar(' ', &cursor);
524
+ detail::FormatYYYY_MM_DD(timepoint_days, &cursor);
525
+ return append(detail::ViewDigitBuffer(buffer, cursor));
526
+ }
527
+
528
+ template <typename Appender>
529
+ Return<Appender> operator()(value_type value, Appender&& append) {
530
+ return util::VisitDuration(unit_, *this, value, std::forward<Appender>(append));
531
+ }
532
+
533
+ private:
534
+ TimeUnit::type unit_;
535
+ std::string timezone_;
536
+ };
537
+
538
+ template <typename T>
539
+ class StringFormatter<T, enable_if_time<T>> {
540
+ public:
541
+ using value_type = typename T::c_type;
542
+
543
+ explicit StringFormatter(const DataType* type)
544
+ : unit_(checked_cast<const T&>(*type).unit()) {}
545
+
546
+ template <typename Duration, typename Appender>
547
+ Return<Appender> operator()(Duration, value_type count, Appender&& append) {
548
+ const Duration since_midnight{count};
549
+ if (!ARROW_PREDICT_TRUE(detail::IsTimeInRange(since_midnight))) {
550
+ return detail::FormatOutOfRange(count, append);
551
+ }
552
+
553
+ constexpr size_t buffer_size = detail::BufferSizeHH_MM_SS<Duration>();
554
+
555
+ std::array<char, buffer_size> buffer;
556
+ char* cursor = buffer.data() + buffer_size;
557
+
558
+ detail::FormatHH_MM_SS(arrow_vendored::date::make_time(since_midnight), &cursor);
559
+ return append(detail::ViewDigitBuffer(buffer, cursor));
560
+ }
561
+
562
+ template <typename Appender>
563
+ Return<Appender> operator()(value_type value, Appender&& append) {
564
+ return util::VisitDuration(unit_, *this, value, std::forward<Appender>(append));
565
+ }
566
+
567
+ private:
568
+ TimeUnit::type unit_;
569
+ };
570
+
571
+ template <>
572
+ class StringFormatter<MonthIntervalType> {
573
+ public:
574
+ using value_type = MonthIntervalType::c_type;
575
+
576
+ explicit StringFormatter(const DataType*) {}
577
+
578
+ template <typename Appender>
579
+ Return<Appender> operator()(value_type interval, Appender&& append) {
580
+ constexpr size_t buffer_size =
581
+ /*'m'*/ 3 + /*negative signs*/ 1 +
582
+ /*months*/ detail::Digits10(std::numeric_limits<value_type>::max());
583
+ std::array<char, buffer_size> buffer;
584
+ char* cursor = buffer.data() + buffer_size;
585
+
586
+ detail::FormatOneChar('M', &cursor);
587
+ detail::FormatAllDigits(detail::Abs(interval), &cursor);
588
+ if (interval < 0) detail::FormatOneChar('-', &cursor);
589
+
590
+ return append(detail::ViewDigitBuffer(buffer, cursor));
591
+ }
592
+ };
593
+
594
+ template <>
595
+ class StringFormatter<DayTimeIntervalType> {
596
+ public:
597
+ using value_type = DayTimeIntervalType::DayMilliseconds;
598
+
599
+ explicit StringFormatter(const DataType*) {}
600
+
601
+ template <typename Appender>
602
+ Return<Appender> operator()(value_type interval, Appender&& append) {
603
+ constexpr size_t buffer_size =
604
+ /*d, ms*/ 3 + /*negative signs*/ 2 +
605
+ /*days/milliseconds*/ 2 * detail::Digits10(std::numeric_limits<int32_t>::max());
606
+ std::array<char, buffer_size> buffer;
607
+ char* cursor = buffer.data() + buffer_size;
608
+
609
+ detail::FormatOneChar('s', &cursor);
610
+ detail::FormatOneChar('m', &cursor);
611
+ detail::FormatAllDigits(detail::Abs(interval.milliseconds), &cursor);
612
+ if (interval.milliseconds < 0) detail::FormatOneChar('-', &cursor);
613
+
614
+ detail::FormatOneChar('d', &cursor);
615
+ detail::FormatAllDigits(detail::Abs(interval.days), &cursor);
616
+ if (interval.days < 0) detail::FormatOneChar('-', &cursor);
617
+
618
+ return append(detail::ViewDigitBuffer(buffer, cursor));
619
+ }
620
+ };
621
+
622
+ template <>
623
+ class StringFormatter<MonthDayNanoIntervalType> {
624
+ public:
625
+ using value_type = MonthDayNanoIntervalType::MonthDayNanos;
626
+
627
+ explicit StringFormatter(const DataType*) {}
628
+
629
+ template <typename Appender>
630
+ Return<Appender> operator()(value_type interval, Appender&& append) {
631
+ constexpr size_t buffer_size =
632
+ /*m, d, ns*/ 4 + /*negative signs*/ 3 +
633
+ /*months/days*/ 2 * detail::Digits10(std::numeric_limits<int32_t>::max()) +
634
+ /*nanoseconds*/ detail::Digits10(std::numeric_limits<int64_t>::max());
635
+ std::array<char, buffer_size> buffer;
636
+ char* cursor = buffer.data() + buffer_size;
637
+
638
+ detail::FormatOneChar('s', &cursor);
639
+ detail::FormatOneChar('n', &cursor);
640
+ detail::FormatAllDigits(detail::Abs(interval.nanoseconds), &cursor);
641
+ if (interval.nanoseconds < 0) detail::FormatOneChar('-', &cursor);
642
+
643
+ detail::FormatOneChar('d', &cursor);
644
+ detail::FormatAllDigits(detail::Abs(interval.days), &cursor);
645
+ if (interval.days < 0) detail::FormatOneChar('-', &cursor);
646
+
647
+ detail::FormatOneChar('M', &cursor);
648
+ detail::FormatAllDigits(detail::Abs(interval.months), &cursor);
649
+ if (interval.months < 0) detail::FormatOneChar('-', &cursor);
650
+
651
+ return append(detail::ViewDigitBuffer(buffer, cursor));
652
+ }
653
+ };
654
+
655
+ } // namespace internal
656
+ } // namespace arrow
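As a usage sketch for the temporal formatters: the constructor takes the DataType so it can pick up the unit, scale, or timezone (timestamp() is the public Arrow type factory):

    #include <iostream>
    #include <string>
    #include <string_view>

    #include "arrow/type.h"
    #include "arrow/util/formatting.h"

    void TimestampFormatDemo() {
      auto type = arrow::timestamp(arrow::TimeUnit::MILLI, "UTC");
      arrow::internal::StringFormatter<arrow::TimestampType> format(type.get());
      std::string out;
      format(int64_t{0}, [&](std::string_view v) { out.assign(v.data(), v.size()); });
      // A non-empty timezone appends 'Z': "1970-01-01 00:00:00.000Z"
      std::cout << out << "\n";
    }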
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/functional.h ADDED
@@ -0,0 +1,160 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <tuple>
22
+ #include <type_traits>
23
+
24
+ #include "arrow/result.h"
25
+ #include "arrow/util/macros.h"
26
+
27
+ namespace arrow {
28
+ namespace internal {
29
+
30
+ struct Empty {
31
+ static Result<Empty> ToResult(Status s) {
32
+ if (ARROW_PREDICT_TRUE(s.ok())) {
33
+ return Empty{};
34
+ }
35
+ return s;
36
+ }
37
+ };
38
+
39
+ /// Helper struct for examining lambdas and other callables.
40
+ /// TODO(ARROW-12655) support function pointers
41
+ struct call_traits {
42
+ public:
43
+ template <typename R, typename... A>
44
+ static std::false_type is_overloaded_impl(R(A...));
45
+
46
+ template <typename F>
47
+ static std::false_type is_overloaded_impl(decltype(&F::operator())*);
48
+
49
+ template <typename F>
50
+ static std::true_type is_overloaded_impl(...);
51
+
52
+ template <typename F, typename R, typename... A>
53
+ static R return_type_impl(R (F::*)(A...));
54
+
55
+ template <typename F, typename R, typename... A>
56
+ static R return_type_impl(R (F::*)(A...) const);
57
+
58
+ template <std::size_t I, typename F, typename R, typename... A>
59
+ static typename std::tuple_element<I, std::tuple<A...>>::type argument_type_impl(
60
+ R (F::*)(A...));
61
+
62
+ template <std::size_t I, typename F, typename R, typename... A>
63
+ static typename std::tuple_element<I, std::tuple<A...>>::type argument_type_impl(
64
+ R (F::*)(A...) const);
65
+
66
+ template <std::size_t I, typename F, typename R, typename... A>
67
+ static typename std::tuple_element<I, std::tuple<A...>>::type argument_type_impl(
68
+ R (F::*)(A...) &&);
69
+
70
+ template <typename F, typename R, typename... A>
71
+ static std::integral_constant<int, sizeof...(A)> argument_count_impl(R (F::*)(A...));
72
+
73
+ template <typename F, typename R, typename... A>
74
+ static std::integral_constant<int, sizeof...(A)> argument_count_impl(R (F::*)(A...)
75
+ const);
76
+
77
+ template <typename F, typename R, typename... A>
78
+ static std::integral_constant<int, sizeof...(A)> argument_count_impl(R (F::*)(A...) &&);
79
+
80
+ /// bool constant indicating whether F is a callable with more than one possible
81
+ /// signature. Will be true_type for objects which define multiple operator() or which
82
+ /// define a template operator()
83
+ template <typename F>
84
+ using is_overloaded =
85
+ decltype(is_overloaded_impl<typename std::decay<F>::type>(NULLPTR));
86
+
87
+ template <typename F, typename T = void>
88
+ using enable_if_overloaded = typename std::enable_if<is_overloaded<F>::value, T>::type;
89
+
90
+ template <typename F, typename T = void>
91
+ using disable_if_overloaded =
92
+ typename std::enable_if<!is_overloaded<F>::value, T>::type;
93
+
94
+ /// If F is not overloaded, the argument types of its call operator can be
95
+ /// extracted via call_traits::argument_type<Index, F>
96
+ template <std::size_t I, typename F>
97
+ using argument_type = decltype(argument_type_impl<I>(&std::decay<F>::type::operator()));
98
+
99
+ template <typename F>
100
+ using argument_count = decltype(argument_count_impl(&std::decay<F>::type::operator()));
101
+
102
+ template <typename F>
103
+ using return_type = decltype(return_type_impl(&std::decay<F>::type::operator()));
104
+
105
+ template <typename F, typename T, typename RT = T>
106
+ using enable_if_return =
107
+ typename std::enable_if<std::is_same<return_type<F>, T>::value, RT>;
108
+
109
+ template <typename T, typename R = void>
110
+ using enable_if_empty = typename std::enable_if<std::is_same<T, Empty>::value, R>::type;
111
+
112
+ template <typename T, typename R = void>
113
+ using enable_if_not_empty =
114
+ typename std::enable_if<!std::is_same<T, Empty>::value, R>::type;
115
+ };
116
+
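A brief sketch of what call_traits extracts from a non-overloaded callable (the static_asserts are illustrative):

    #include <string>
    #include <type_traits>

    #include "arrow/util/functional.h"

    void CallTraitsDemo() {
      auto fn = [](int x, const std::string& s) -> double { return x + s.size(); };
      using traits = arrow::internal::call_traits;

      static_assert(!traits::is_overloaded<decltype(fn)>::value, "plain lambda");
      static_assert(std::is_same<traits::return_type<decltype(fn)>, double>::value, "");
      static_assert(std::is_same<traits::argument_type<0, decltype(fn)>, int>::value, "");
      static_assert(traits::argument_count<decltype(fn)>::value == 2, "");
    }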
117
+ /// A type erased callable object which may only be invoked once.
118
+ /// It can be constructed from any lambda which matches the provided call signature.
119
+ /// Invoking it results in destruction of the lambda, freeing any state/references
120
+ /// immediately. Invoking a default constructed FnOnce or one which has already been
121
+ /// invoked will segfault.
122
+ template <typename Signature>
123
+ class FnOnce;
124
+
125
+ template <typename R, typename... A>
126
+ class FnOnce<R(A...)> {
127
+ public:
128
+ FnOnce() = default;
129
+
130
+ template <typename Fn,
131
+ typename = typename std::enable_if<std::is_convertible<
132
+ decltype(std::declval<Fn&&>()(std::declval<A>()...)), R>::value>::type>
133
+ FnOnce(Fn fn) : impl_(new FnImpl<Fn>(std::move(fn))) { // NOLINT runtime/explicit
134
+ }
135
+
136
+ explicit operator bool() const { return impl_ != NULLPTR; }
137
+
138
+ R operator()(A... a) && {
139
+ auto bye = std::move(impl_);
140
+ return bye->invoke(std::forward<A&&>(a)...);
141
+ }
142
+
143
+ private:
144
+ struct Impl {
145
+ virtual ~Impl() = default;
146
+ virtual R invoke(A&&... a) = 0;
147
+ };
148
+
149
+ template <typename Fn>
150
+ struct FnImpl : Impl {
151
+ explicit FnImpl(Fn fn) : fn_(std::move(fn)) {}
152
+ R invoke(A&&... a) override { return std::move(fn_)(std::forward<A&&>(a)...); }
153
+ Fn fn_;
154
+ };
155
+
156
+ std::unique_ptr<Impl> impl_;
157
+ };
158
+
159
+ } // namespace internal
160
+ } // namespace arrow
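And a usage sketch for FnOnce; the call operator is rvalue-qualified, which is why the invocation goes through std::move:

    #include <iostream>
    #include <memory>
    #include <string>
    #include <utility>

    #include "arrow/util/functional.h"

    void FnOnceDemo() {
      auto msg = std::make_unique<std::string>("hello");
      // The captured state is destroyed as soon as the call completes.
      arrow::internal::FnOnce<void(int)> once =
          [m = std::move(msg)](int n) { std::cout << *m << " " << n << "\n"; };
      if (once) {
        std::move(once)(42);  // consumes the wrapped callable
      }
    }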
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h ADDED
@@ -0,0 +1,944 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Private header, not to be exported
19
+
20
+ #pragma once
21
+
22
+ #include <algorithm>
23
+ #include <cassert>
24
+ #include <cmath>
25
+ #include <cstdint>
26
+ #include <cstring>
27
+ #include <limits>
28
+ #include <memory>
29
+ #include <string>
30
+ #include <type_traits>
31
+ #include <utility>
32
+ #include <vector>
33
+
34
+ #include "arrow/array/builder_binary.h"
35
+ #include "arrow/buffer_builder.h"
36
+ #include "arrow/result.h"
37
+ #include "arrow/status.h"
38
+ #include "arrow/type_fwd.h"
39
+ #include "arrow/type_traits.h"
40
+ #include "arrow/util/bit_util.h"
41
+ #include "arrow/util/bitmap_builders.h"
42
+ #include "arrow/util/endian.h"
43
+ #include "arrow/util/logging.h"
44
+ #include "arrow/util/macros.h"
45
+ #include "arrow/util/ubsan.h"
46
+
47
+ #define XXH_INLINE_ALL
48
+
49
+ #include "arrow/vendored/xxhash.h" // IWYU pragma: keep
50
+
51
+ namespace arrow {
52
+ namespace internal {
53
+
54
+ // XXX would it help to have a 32-bit hash value on large datasets?
55
+ typedef uint64_t hash_t;
56
+
57
+ // Notes about the choice of a hash function.
58
+ // - XXH3 is extremely fast on most data sizes, from small to huge;
59
+ // faster even than HW CRC-based hashing schemes
60
+ // - our custom hash function for tiny values (< 16 bytes) is still
61
+ // significantly faster (~30%), at least on this machine and compiler
62
+
63
+ template <uint64_t AlgNum>
64
+ inline hash_t ComputeStringHash(const void* data, int64_t length);
65
+
66
+ /// \brief A hash function for bitmaps that can handle offsets and lengths in
67
+ /// terms of number of bits. The hash only depends on the bits actually hashed.
68
+ ///
69
+ /// It's the caller's responsibility to ensure that bits_offset + num_bits are
70
+ /// readable from the bitmap.
71
+ ///
72
+ /// \pre bits_offset >= 0
73
+ /// \pre num_bits >= 0
74
+ /// \pre (bits_offset + num_bits + 7) / 8 <= readable length in bytes from bitmap
75
+ ///
76
+ /// \param bitmap The pointer to the bitmap.
77
+ /// \param seed The seed for the hash function (useful when chaining hash functions).
78
+ /// \param bits_offset The offset in bits relative to the start of the bitmap.
79
+ /// \param num_bits The number of bits after the offset to be hashed.
80
+ ARROW_EXPORT hash_t ComputeBitmapHash(const uint8_t* bitmap, hash_t seed,
81
+ int64_t bits_offset, int64_t num_bits);
82
+
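A sketch of the preconditions in practice (this is a private header, so the example is for illustration only; the bitmap bytes are arbitrary):

    #include <cstdint>

    #include "arrow/util/hashing.h"

    void BitmapHashDemo() {
      // 16 readable bits; hash the 5 bits starting at bit 3. Only those
      // bits (plus the seed) influence the result.
      const uint8_t bitmap[2] = {0xB4, 0x01};
      arrow::internal::hash_t h = arrow::internal::ComputeBitmapHash(
          bitmap, /*seed=*/0, /*bits_offset=*/3, /*num_bits=*/5);
      (void)h;
    }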
83
+ template <typename Scalar, uint64_t AlgNum>
84
+ struct ScalarHelperBase {
85
+ static bool CompareScalars(Scalar u, Scalar v) { return u == v; }
86
+
87
+ static hash_t ComputeHash(const Scalar& value) {
88
+ // Generic hash computation for scalars. Simply apply the string hash
89
+ // to the bit representation of the value.
90
+
91
+ // XXX in the case of FP values, we'd like equal values to have the same hash,
92
+ // even if they have different bit representations...
93
+ return ComputeStringHash<AlgNum>(&value, sizeof(value));
94
+ }
95
+ };
96
+
97
+ template <typename Scalar, uint64_t AlgNum = 0, typename Enable = void>
98
+ struct ScalarHelper : public ScalarHelperBase<Scalar, AlgNum> {};
99
+
100
+ template <typename Scalar, uint64_t AlgNum>
101
+ struct ScalarHelper<Scalar, AlgNum, enable_if_t<std::is_integral<Scalar>::value>>
102
+ : public ScalarHelperBase<Scalar, AlgNum> {
103
+ // ScalarHelper specialization for integers
104
+
105
+ static hash_t ComputeHash(const Scalar& value) {
106
+ // Faster hash computation for integers.
107
+
108
+ // Two of xxhash's prime multipliers (which are chosen for their
109
+ // bit dispersion properties)
110
+ static constexpr uint64_t multipliers[] = {11400714785074694791ULL,
111
+ 14029467366897019727ULL};
112
+
113
+ // Multiplying by the prime number mixes the low bits into the high bits,
114
+ // then byte-swapping (which is a single CPU instruction) allows the
115
+ // combined high and low bits to participate in the initial hash table index.
116
+ auto h = static_cast<hash_t>(value);
117
+ return bit_util::ByteSwap(multipliers[AlgNum] * h);
118
+ }
119
+ };
120
+
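For illustration, the two AlgNum values select the two multipliers above, giving two decorrelated hash families over the same input (a pattern the small-string hash below combines):

    #include <cstdint>

    #include "arrow/util/hashing.h"

    void IntHashDemo() {
      using arrow::internal::ScalarHelper;
      uint64_t v = 0x12345678;
      auto h0 = ScalarHelper<uint64_t, 0>::ComputeHash(v);
      auto h1 = ScalarHelper<uint64_t, 1>::ComputeHash(v);
      (void)h0;
      (void)h1;
    }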
121
+ template <typename Scalar, uint64_t AlgNum>
122
+ struct ScalarHelper<Scalar, AlgNum,
123
+ enable_if_t<std::is_same<std::string_view, Scalar>::value>>
124
+ : public ScalarHelperBase<Scalar, AlgNum> {
125
+ // ScalarHelper specialization for std::string_view
126
+
127
+ static hash_t ComputeHash(std::string_view value) {
128
+ return ComputeStringHash<AlgNum>(value.data(), static_cast<int64_t>(value.size()));
129
+ }
130
+ };
131
+
132
+ template <typename Scalar, uint64_t AlgNum>
133
+ struct ScalarHelper<Scalar, AlgNum, enable_if_t<std::is_floating_point<Scalar>::value>>
134
+ : public ScalarHelperBase<Scalar, AlgNum> {
135
+ // ScalarHelper specialization for reals
136
+
137
+ static bool CompareScalars(Scalar u, Scalar v) {
138
+ if (std::isnan(u)) {
139
+ // XXX should we do a bit-precise comparison?
140
+ return std::isnan(v);
141
+ }
142
+ return u == v;
143
+ }
144
+ };
145
+
146
+ template <uint64_t AlgNum = 0>
147
+ hash_t ComputeStringHash(const void* data, int64_t length) {
148
+ if (ARROW_PREDICT_TRUE(length <= 16)) {
149
+ // Specialize for small hash strings, as they are quite common as
150
+ // hash table keys. Even XXH3 isn't quite as fast.
151
+ auto p = reinterpret_cast<const uint8_t*>(data);
152
+ auto n = static_cast<uint32_t>(length);
153
+ if (n <= 8) {
154
+ if (n <= 3) {
155
+ if (n == 0) {
156
+ return 1U;
157
+ }
158
+ uint32_t x = (n << 24) ^ (p[0] << 16) ^ (p[n / 2] << 8) ^ p[n - 1];
159
+ return ScalarHelper<uint32_t, AlgNum>::ComputeHash(x);
160
+ }
161
+ // 4 <= length <= 8
162
+ // We can read the string as two overlapping 32-bit ints, apply
163
+ // different hash functions to each of them in parallel, then XOR
164
+ // the results
165
+ uint32_t x, y;
166
+ hash_t hx, hy;
167
+ x = util::SafeLoadAs<uint32_t>(p + n - 4);
168
+ y = util::SafeLoadAs<uint32_t>(p);
169
+ hx = ScalarHelper<uint32_t, AlgNum>::ComputeHash(x);
170
+ hy = ScalarHelper<uint32_t, AlgNum ^ 1>::ComputeHash(y);
171
+ return n ^ hx ^ hy;
172
+ }
173
+ // 9 <= length <= 16 (the n <= 8 cases were handled above)
174
+ // Apply the same principle as above
175
+ uint64_t x, y;
176
+ hash_t hx, hy;
177
+ x = util::SafeLoadAs<uint64_t>(p + n - 8);
178
+ y = util::SafeLoadAs<uint64_t>(p);
179
+ hx = ScalarHelper<uint64_t, AlgNum>::ComputeHash(x);
180
+ hy = ScalarHelper<uint64_t, AlgNum ^ 1>::ComputeHash(y);
181
+ return n ^ hx ^ hy;
182
+ }
183
+
184
+ #if XXH3_SECRET_SIZE_MIN != 136
185
+ #error XXH3_SECRET_SIZE_MIN changed, please fix kXxh3Secrets
186
+ #endif
187
+
188
+ // XXH3_64bits_withSeed generates a secret based on the seed, which is too slow.
189
+ // Instead, we use hard-coded random secrets. To maximize cache efficiency,
190
+ // they reuse the same memory area.
191
+ static constexpr unsigned char kXxh3Secrets[XXH3_SECRET_SIZE_MIN + 1] = {
192
+ 0xe7, 0x8b, 0x13, 0xf9, 0xfc, 0xb5, 0x8e, 0xef, 0x81, 0x48, 0x2c, 0xbf, 0xf9, 0x9f,
193
+ 0xc1, 0x1e, 0x43, 0x6d, 0xbf, 0xa6, 0x6d, 0xb5, 0x72, 0xbc, 0x97, 0xd8, 0x61, 0x24,
194
+ 0x0f, 0x12, 0xe3, 0x05, 0x21, 0xf7, 0x5c, 0x66, 0x67, 0xa5, 0x65, 0x03, 0x96, 0x26,
195
+ 0x69, 0xd8, 0x29, 0x20, 0xf8, 0xc7, 0xb0, 0x3d, 0xdd, 0x7d, 0x18, 0xa0, 0x60, 0x75,
196
+ 0x92, 0xa4, 0xce, 0xba, 0xc0, 0x77, 0xf4, 0xac, 0xb7, 0x03, 0x53, 0xf0, 0x98, 0xce,
197
+ 0xe6, 0x2b, 0x20, 0xc7, 0x82, 0x91, 0xab, 0xbf, 0x68, 0x5c, 0x62, 0x4d, 0x33, 0xa3,
198
+ 0xe1, 0xb3, 0xff, 0x97, 0x54, 0x4c, 0x44, 0x34, 0xb5, 0xb9, 0x32, 0x4c, 0x75, 0x42,
199
+ 0x89, 0x53, 0x94, 0xd4, 0x9f, 0x2b, 0x76, 0x4d, 0x4e, 0xe6, 0xfa, 0x15, 0x3e, 0xc1,
200
+ 0xdb, 0x71, 0x4b, 0x2c, 0x94, 0xf5, 0xfc, 0x8c, 0x89, 0x4b, 0xfb, 0xc1, 0x82, 0xa5,
201
+ 0x6a, 0x53, 0xf9, 0x4a, 0xba, 0xce, 0x1f, 0xc0, 0x97, 0x1a, 0x87};
202
+
203
+ static_assert(AlgNum < 2, "AlgNum too large");
204
+ static constexpr auto secret = kXxh3Secrets + AlgNum;
205
+ return XXH3_64bits_withSecret(data, static_cast<size_t>(length), secret,
206
+ XXH3_SECRET_SIZE_MIN);
207
+ }
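+
+ // Example (sketch): short keys take the custom fast path above, longer keys
+ // fall through to XXH3 with the hard-coded secret.
+ //
+ //   std::string_view k1 = "abc";                     // length <= 16: custom path
+ //   std::string_view k2 = "a-definitely-longer-key"; // length > 16: XXH3
+ //   hash_t h1 = ComputeStringHash<0>(k1.data(), static_cast<int64_t>(k1.size()));
+ //   hash_t h2 = ComputeStringHash<0>(k2.data(), static_cast<int64_t>(k2.size()));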
208
+
209
+ // XXX add a HashEq<ArrowType> struct with both hash and compare functions?
210
+
211
+ // ----------------------------------------------------------------------
212
+ // An open-addressing insert-only hash table (no deletes)
213
+
214
+ template <typename Payload>
215
+ class HashTable {
216
+ public:
217
+ static constexpr hash_t kSentinel = 0ULL;
218
+ static constexpr int64_t kLoadFactor = 2UL;
219
+
220
+ struct Entry {
221
+ hash_t h;
222
+ Payload payload;
223
+
224
+ // An entry is valid if the hash is different from the sentinel value
225
+ operator bool() const { return h != kSentinel; }
226
+ };
227
+
228
+ HashTable(MemoryPool* pool, uint64_t capacity) : entries_builder_(pool) {
229
+ DCHECK_NE(pool, nullptr);
230
+ // Minimum of 32 elements
231
+ capacity = std::max<uint64_t>(capacity, 32UL);
232
+ capacity_ = bit_util::NextPower2(capacity);
233
+ capacity_mask_ = capacity_ - 1;
234
+ size_ = 0;
235
+
236
+ DCHECK_OK(UpsizeBuffer(capacity_));
237
+ }
238
+
239
+ // Lookup with non-linear probing
240
+ // cmp_func should have signature bool(const Payload*).
241
+ // Return an (Entry*, found) pair.
242
+ template <typename CmpFunc>
243
+ std::pair<Entry*, bool> Lookup(hash_t h, CmpFunc&& cmp_func) {
244
+ auto p = Lookup<DoCompare, CmpFunc>(h, entries_, capacity_mask_,
245
+ std::forward<CmpFunc>(cmp_func));
246
+ return {&entries_[p.first], p.second};
247
+ }
248
+
249
+ template <typename CmpFunc>
250
+ std::pair<const Entry*, bool> Lookup(hash_t h, CmpFunc&& cmp_func) const {
251
+ auto p = Lookup<DoCompare, CmpFunc>(h, entries_, capacity_mask_,
252
+ std::forward<CmpFunc>(cmp_func));
253
+ return {&entries_[p.first], p.second};
254
+ }
255
+
256
+ Status Insert(Entry* entry, hash_t h, const Payload& payload) {
257
+ // Ensure entry is empty before inserting
258
+ assert(!*entry);
259
+ entry->h = FixHash(h);
260
+ entry->payload = payload;
261
+ ++size_;
262
+
263
+ if (ARROW_PREDICT_FALSE(NeedUpsizing())) {
264
+ // Resize less frequently since it is expensive
265
+ return Upsize(capacity_ * kLoadFactor * 2);
266
+ }
267
+ return Status::OK();
268
+ }
269
+
270
+ uint64_t size() const { return size_; }
271
+
272
+ // Visit all non-empty entries in the table
273
+ // The visit_func should have signature void(const Entry*)
274
+ template <typename VisitFunc>
275
+ void VisitEntries(VisitFunc&& visit_func) const {
276
+ for (uint64_t i = 0; i < capacity_; i++) {
277
+ const auto& entry = entries_[i];
278
+ if (entry) {
279
+ visit_func(&entry);
280
+ }
281
+ }
282
+ }
283
+
284
+ protected:
285
+ // NoCompare is for when the value is known not to exist in the table
286
+ enum CompareKind { DoCompare, NoCompare };
287
+
288
+ // The workhorse lookup function
289
+ template <CompareKind CKind, typename CmpFunc>
290
+ std::pair<uint64_t, bool> Lookup(hash_t h, const Entry* entries, uint64_t size_mask,
291
+ CmpFunc&& cmp_func) const {
292
+ static constexpr uint8_t perturb_shift = 5;
293
+
294
+ uint64_t index, perturb;
295
+ const Entry* entry;
296
+
297
+ h = FixHash(h);
298
+ index = h & size_mask;
299
+ perturb = (h >> perturb_shift) + 1U;
300
+
301
+ while (true) {
302
+ entry = &entries[index];
303
+ if (CompareEntry<CKind, CmpFunc>(h, entry, std::forward<CmpFunc>(cmp_func))) {
304
+ // Found
305
+ return {index, true};
306
+ }
307
+ if (entry->h == kSentinel) {
308
+ // Empty slot
309
+ return {index, false};
310
+ }
311
+
312
+ // Perturbation logic inspired from CPython's set / dict object.
313
+ // The goal is that all 64 bits of the unmasked hash value eventually
314
+ // participate in the probing sequence, to minimize clustering.
315
+ index = (index + perturb) & size_mask;
316
+ perturb = (perturb >> perturb_shift) + 1U;
317
+ }
318
+ }
319
+
320
+ template <CompareKind CKind, typename CmpFunc>
321
+ bool CompareEntry(hash_t h, const Entry* entry, CmpFunc&& cmp_func) const {
322
+ if (CKind == NoCompare) {
323
+ return false;
324
+ } else {
325
+ return entry->h == h && cmp_func(&entry->payload);
326
+ }
327
+ }
328
+
329
+ bool NeedUpsizing() const {
330
+ // Keep the load factor <= 1/2
331
+ return size_ * kLoadFactor >= capacity_;
332
+ }
333
+
334
+ Status UpsizeBuffer(uint64_t capacity) {
335
+ RETURN_NOT_OK(entries_builder_.Resize(capacity));
336
+ entries_ = entries_builder_.mutable_data();
337
+ memset(static_cast<void*>(entries_), 0, capacity * sizeof(Entry));
338
+
339
+ return Status::OK();
340
+ }
341
+
342
+ Status Upsize(uint64_t new_capacity) {
343
+ assert(new_capacity > capacity_);
344
+ uint64_t new_mask = new_capacity - 1;
345
+ assert((new_capacity & new_mask) == 0); // it's a power of two
346
+
347
+ // Stash old entries and seal builder, effectively resetting the Buffer
348
+ const Entry* old_entries = entries_;
349
+ ARROW_ASSIGN_OR_RAISE(auto previous, entries_builder_.FinishWithLength(capacity_));
350
+ // Allocate new buffer
351
+ RETURN_NOT_OK(UpsizeBuffer(new_capacity));
352
+
353
+ for (uint64_t i = 0; i < capacity_; i++) {
354
+ const auto& entry = old_entries[i];
355
+ if (entry) {
356
+ // Dummy compare function will not be called
357
+ auto p = Lookup<NoCompare>(entry.h, entries_, new_mask,
358
+ [](const Payload*) { return false; });
359
+ // Lookup<NoCompare> (and CompareEntry<NoCompare>) ensure that an
360
+ // empty slot is always returned
361
+ assert(!p.second);
362
+ entries_[p.first] = entry;
363
+ }
364
+ }
365
+ capacity_ = new_capacity;
366
+ capacity_mask_ = new_mask;
367
+
368
+ return Status::OK();
369
+ }
370
+
371
+ hash_t FixHash(hash_t h) const { return (h == kSentinel) ? 42U : h; }
372
+
373
+ // The number of slots available in the hash table array.
374
+ uint64_t capacity_;
375
+ uint64_t capacity_mask_;
376
+ // The number of used slots in the hash table array.
377
+ uint64_t size_;
378
+
379
+ Entry* entries_;
380
+ TypedBufferBuilder<Entry> entries_builder_;
381
+ };
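+
+ // Example (sketch; `pool`, `key` and `payload` are assumed): the typical
+ // Lookup-then-Insert pattern against this table.
+ //
+ //   HashTable<int32_t> table(pool, /*capacity=*/64);
+ //   hash_t h = ScalarHelper<int64_t, 0>::ComputeHash(key);
+ //   auto p = table.Lookup(h, [&](const int32_t* v) { return *v == payload; });
+ //   if (!p.second) {
+ //     RETURN_NOT_OK(table.Insert(p.first, h, payload));
+ //   }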
382
+
383
+ // XXX typedef memo_index_t int32_t ?
384
+
385
+ constexpr int32_t kKeyNotFound = -1;
386
+
387
+ // ----------------------------------------------------------------------
388
+ // A base class for memoization table.
389
+
390
+ class MemoTable {
391
+ public:
392
+ virtual ~MemoTable() = default;
393
+
394
+ virtual int32_t size() const = 0;
395
+ };
396
+
397
+ // ----------------------------------------------------------------------
398
+ // A memoization table for memory-cheap scalar values.
399
+
400
+ // The memoization table remembers, and allows looking up, the insertion
401
+ // index for each key.
402
+
403
+ template <typename Scalar, template <class> class HashTableTemplateType = HashTable>
404
+ class ScalarMemoTable : public MemoTable {
405
+ public:
406
+ explicit ScalarMemoTable(MemoryPool* pool, int64_t entries = 0)
407
+ : hash_table_(pool, static_cast<uint64_t>(entries)) {}
408
+
409
+ int32_t Get(const Scalar& value) const {
410
+ auto cmp_func = [value](const Payload* payload) -> bool {
411
+ return ScalarHelper<Scalar, 0>::CompareScalars(payload->value, value);
412
+ };
413
+ hash_t h = ComputeHash(value);
414
+ auto p = hash_table_.Lookup(h, cmp_func);
415
+ if (p.second) {
416
+ return p.first->payload.memo_index;
417
+ } else {
418
+ return kKeyNotFound;
419
+ }
420
+ }
421
+
422
+ template <typename Func1, typename Func2>
423
+ Status GetOrInsert(const Scalar& value, Func1&& on_found, Func2&& on_not_found,
424
+ int32_t* out_memo_index) {
425
+ auto cmp_func = [value](const Payload* payload) -> bool {
426
+ return ScalarHelper<Scalar, 0>::CompareScalars(value, payload->value);
427
+ };
428
+ hash_t h = ComputeHash(value);
429
+ auto p = hash_table_.Lookup(h, cmp_func);
430
+ int32_t memo_index;
431
+ if (p.second) {
432
+ memo_index = p.first->payload.memo_index;
433
+ on_found(memo_index);
434
+ } else {
435
+ memo_index = size();
436
+ RETURN_NOT_OK(hash_table_.Insert(p.first, h, {value, memo_index}));
437
+ on_not_found(memo_index);
438
+ }
439
+ *out_memo_index = memo_index;
440
+ return Status::OK();
441
+ }
442
+
443
+ Status GetOrInsert(const Scalar& value, int32_t* out_memo_index) {
444
+ return GetOrInsert(
445
+ value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
446
+ }
447
+
448
+ int32_t GetNull() const { return null_index_; }
449
+
450
+ template <typename Func1, typename Func2>
451
+ int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
452
+ int32_t memo_index = GetNull();
453
+ if (memo_index != kKeyNotFound) {
454
+ on_found(memo_index);
455
+ } else {
456
+ null_index_ = memo_index = size();
457
+ on_not_found(memo_index);
458
+ }
459
+ return memo_index;
460
+ }
461
+
462
+ int32_t GetOrInsertNull() {
463
+ return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
464
+ }
465
+
466
+ // The number of entries in the memo table, plus 1 if null was added
467
+ // (which is also 1 + the largest memo index)
468
+ int32_t size() const override {
469
+ return static_cast<int32_t>(hash_table_.size()) + (GetNull() != kKeyNotFound);
470
+ }
471
+
472
+ // Copy values starting from index `start` into `out_data`
473
+ void CopyValues(int32_t start, Scalar* out_data) const {
474
+ hash_table_.VisitEntries([=](const HashTableEntry* entry) {
475
+ int32_t index = entry->payload.memo_index - start;
476
+ if (index >= 0) {
477
+ out_data[index] = entry->payload.value;
478
+ }
479
+ });
480
+ // Zero-initialize the null entry
481
+ if (null_index_ != kKeyNotFound) {
482
+ int32_t index = null_index_ - start;
483
+ if (index >= 0) {
484
+ out_data[index] = Scalar{};
485
+ }
486
+ }
487
+ }
488
+
489
+ void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); }
490
+
491
+ protected:
492
+ struct Payload {
493
+ Scalar value;
494
+ int32_t memo_index;
495
+ };
496
+
497
+ using HashTableType = HashTableTemplateType<Payload>;
498
+ using HashTableEntry = typename HashTableType::Entry;
499
+ HashTableType hash_table_;
500
+ int32_t null_index_ = kKeyNotFound;
501
+
502
+ hash_t ComputeHash(const Scalar& value) const {
503
+ return ScalarHelper<Scalar, 0>::ComputeHash(value);
504
+ }
505
+
506
+ public:
507
+ // defined here so that `HashTableType` is visible
508
+ // Merge entries from `other_table` into `this->hash_table_`.
509
+ Status MergeTable(const ScalarMemoTable& other_table) {
510
+ const HashTableType& other_hashtable = other_table.hash_table_;
511
+
512
+ other_hashtable.VisitEntries([this](const HashTableEntry* other_entry) {
513
+ int32_t unused;
514
+ DCHECK_OK(this->GetOrInsert(other_entry->payload.value, &unused));
515
+ });
516
+ // TODO: ARROW-17074 - implement proper error handling
517
+ return Status::OK();
518
+ }
519
+ };
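+
+ // Example (sketch; `values` is an assumed input): dictionary-encoding a
+ // sequence of int64 values. The two callbacks distinguish repeats from
+ // first-time insertions.
+ //
+ //   ScalarMemoTable<int64_t> memo(default_memory_pool());
+ //   int32_t index;
+ //   for (int64_t v : values) {
+ //     RETURN_NOT_OK(memo.GetOrInsert(
+ //         v, [](int32_t) { /* seen before */ },
+ //         [](int32_t) { /* new dictionary entry */ }, &index));
+ //   }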
520
+
521
+ // ----------------------------------------------------------------------
522
+ // A memoization table for small scalar values, using direct indexing
523
+
524
+ template <typename Scalar, typename Enable = void>
525
+ struct SmallScalarTraits {};
526
+
527
+ template <>
528
+ struct SmallScalarTraits<bool> {
529
+ static constexpr int32_t cardinality = 2;
530
+
531
+ static uint32_t AsIndex(bool value) { return value ? 1 : 0; }
532
+ };
533
+
534
+ template <typename Scalar>
535
+ struct SmallScalarTraits<Scalar, enable_if_t<std::is_integral<Scalar>::value>> {
536
+ using Unsigned = typename std::make_unsigned<Scalar>::type;
537
+
538
+ static constexpr int32_t cardinality = 1U + std::numeric_limits<Unsigned>::max();
539
+
540
+ static uint32_t AsIndex(Scalar value) { return static_cast<Unsigned>(value); }
541
+ };
542
+
543
+ template <typename Scalar, template <class> class HashTableTemplateType = HashTable>
544
+ class SmallScalarMemoTable : public MemoTable {
545
+ public:
546
+ explicit SmallScalarMemoTable(MemoryPool* pool, int64_t entries = 0) {
547
+ std::fill(value_to_index_, value_to_index_ + cardinality + 1, kKeyNotFound);
548
+ index_to_value_.reserve(cardinality);
549
+ }
550
+
551
+ int32_t Get(const Scalar value) const {
552
+ auto value_index = AsIndex(value);
553
+ return value_to_index_[value_index];
554
+ }
555
+
556
+ template <typename Func1, typename Func2>
557
+ Status GetOrInsert(const Scalar value, Func1&& on_found, Func2&& on_not_found,
558
+ int32_t* out_memo_index) {
559
+ auto value_index = AsIndex(value);
560
+ auto memo_index = value_to_index_[value_index];
561
+ if (memo_index == kKeyNotFound) {
562
+ memo_index = static_cast<int32_t>(index_to_value_.size());
563
+ index_to_value_.push_back(value);
564
+ value_to_index_[value_index] = memo_index;
565
+ DCHECK_LT(memo_index, cardinality + 1);
566
+ on_not_found(memo_index);
567
+ } else {
568
+ on_found(memo_index);
569
+ }
570
+ *out_memo_index = memo_index;
571
+ return Status::OK();
572
+ }
573
+
574
+ Status GetOrInsert(const Scalar value, int32_t* out_memo_index) {
575
+ return GetOrInsert(
576
+ value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
577
+ }
578
+
579
+ int32_t GetNull() const { return value_to_index_[cardinality]; }
580
+
581
+ template <typename Func1, typename Func2>
582
+ int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
583
+ auto memo_index = GetNull();
584
+ if (memo_index == kKeyNotFound) {
585
+ memo_index = value_to_index_[cardinality] = size();
586
+ index_to_value_.push_back(0);
587
+ on_not_found(memo_index);
588
+ } else {
589
+ on_found(memo_index);
590
+ }
591
+ return memo_index;
592
+ }
593
+
594
+ int32_t GetOrInsertNull() {
595
+ return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
596
+ }
597
+
598
+ // The number of entries in the memo table
599
+ // (which is also 1 + the largest memo index)
600
+ int32_t size() const override { return static_cast<int32_t>(index_to_value_.size()); }
601
+
602
+ // Merge entries from `other_table` into `this`.
603
+ Status MergeTable(const SmallScalarMemoTable& other_table) {
604
+ for (const Scalar& other_val : other_table.index_to_value_) {
605
+ int32_t unused;
606
+ RETURN_NOT_OK(this->GetOrInsert(other_val, &unused));
607
+ }
608
+ return Status::OK();
609
+ }
610
+
611
+ // Copy values starting from index `start` into `out_data`
612
+ void CopyValues(int32_t start, Scalar* out_data) const {
613
+ DCHECK_GE(start, 0);
614
+ DCHECK_LE(static_cast<size_t>(start), index_to_value_.size());
615
+ // Note: index_to_value_.data() is a Scalar*, so we index in elements, not bytes
616
+ memcpy(out_data, index_to_value_.data() + start, (size() - start) * sizeof(Scalar));
617
+ }
618
+
619
+ void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); }
620
+
621
+ const std::vector<Scalar>& values() const { return index_to_value_; }
622
+
623
+ protected:
624
+ static constexpr auto cardinality = SmallScalarTraits<Scalar>::cardinality;
625
+ static_assert(cardinality <= 256, "cardinality too large for direct-addressed table");
626
+
627
+ uint32_t AsIndex(Scalar value) const {
628
+ return SmallScalarTraits<Scalar>::AsIndex(value);
629
+ }
630
+
631
+ // The last index is reserved for the null element.
632
+ int32_t value_to_index_[cardinality + 1];
633
+ std::vector<Scalar> index_to_value_;
634
+ };
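+
+ // Example (sketch): for 8-bit keys the table is a plain direct-addressed
+ // array, so lookups involve no hashing at all.
+ //
+ //   SmallScalarMemoTable<uint8_t> memo(default_memory_pool());
+ //   int32_t index;
+ //   RETURN_NOT_OK(memo.GetOrInsert(uint8_t{7}, &index));  // index == 0
+ //   RETURN_NOT_OK(memo.GetOrInsert(uint8_t{7}, &index));  // still 0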
635
+
636
+ // ----------------------------------------------------------------------
637
+ // A memoization table for variable-sized binary data.
638
+
639
+ template <typename BinaryBuilderT>
640
+ class BinaryMemoTable : public MemoTable {
641
+ public:
642
+ using builder_offset_type = typename BinaryBuilderT::offset_type;
643
+ explicit BinaryMemoTable(MemoryPool* pool, int64_t entries = 0,
644
+ int64_t values_size = -1)
645
+ : hash_table_(pool, static_cast<uint64_t>(entries)), binary_builder_(pool) {
646
+ const int64_t data_size = (values_size < 0) ? entries * 4 : values_size;
647
+ DCHECK_OK(binary_builder_.Resize(entries));
648
+ DCHECK_OK(binary_builder_.ReserveData(data_size));
649
+ }
650
+
651
+ int32_t Get(const void* data, builder_offset_type length) const {
652
+ hash_t h = ComputeStringHash<0>(data, length);
653
+ auto p = Lookup(h, data, length);
654
+ if (p.second) {
655
+ return p.first->payload.memo_index;
656
+ } else {
657
+ return kKeyNotFound;
658
+ }
659
+ }
660
+
661
+ int32_t Get(std::string_view value) const {
662
+ return Get(value.data(), static_cast<builder_offset_type>(value.length()));
663
+ }
664
+
665
+ template <typename Func1, typename Func2>
666
+ Status GetOrInsert(const void* data, builder_offset_type length, Func1&& on_found,
667
+ Func2&& on_not_found, int32_t* out_memo_index) {
668
+ hash_t h = ComputeStringHash<0>(data, length);
669
+ auto p = Lookup(h, data, length);
670
+ int32_t memo_index;
671
+ if (p.second) {
672
+ memo_index = p.first->payload.memo_index;
673
+ on_found(memo_index);
674
+ } else {
675
+ memo_index = size();
676
+ // Insert string value
677
+ RETURN_NOT_OK(binary_builder_.Append(static_cast<const char*>(data), length));
678
+ // Insert hash entry
679
+ RETURN_NOT_OK(
680
+ hash_table_.Insert(const_cast<HashTableEntry*>(p.first), h, {memo_index}));
681
+
682
+ on_not_found(memo_index);
683
+ }
684
+ *out_memo_index = memo_index;
685
+ return Status::OK();
686
+ }
687
+
688
+ template <typename Func1, typename Func2>
689
+ Status GetOrInsert(std::string_view value, Func1&& on_found, Func2&& on_not_found,
690
+ int32_t* out_memo_index) {
691
+ return GetOrInsert(value.data(), static_cast<builder_offset_type>(value.length()),
692
+ std::forward<Func1>(on_found), std::forward<Func2>(on_not_found),
693
+ out_memo_index);
694
+ }
695
+
696
+ Status GetOrInsert(const void* data, builder_offset_type length,
697
+ int32_t* out_memo_index) {
698
+ return GetOrInsert(
699
+ data, length, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
700
+ }
701
+
702
+ Status GetOrInsert(std::string_view value, int32_t* out_memo_index) {
703
+ return GetOrInsert(value.data(), static_cast<builder_offset_type>(value.length()),
704
+ out_memo_index);
705
+ }
706
+
707
+ int32_t GetNull() const { return null_index_; }
708
+
709
+ template <typename Func1, typename Func2>
710
+ int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
711
+ int32_t memo_index = GetNull();
712
+ if (memo_index == kKeyNotFound) {
713
+ memo_index = null_index_ = size();
714
+ DCHECK_OK(binary_builder_.AppendNull());
715
+ on_not_found(memo_index);
716
+ } else {
717
+ on_found(memo_index);
718
+ }
719
+ return memo_index;
720
+ }
721
+
722
+ int32_t GetOrInsertNull() {
723
+ return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
724
+ }
725
+
726
+ // The number of entries in the memo table
727
+ // (which is also 1 + the largest memo index)
728
+ int32_t size() const override {
729
+ return static_cast<int32_t>(hash_table_.size() + (GetNull() != kKeyNotFound));
730
+ }
731
+
732
+ int64_t values_size() const { return binary_builder_.value_data_length(); }
733
+
734
+ // Copy (n + 1) offsets starting from index `start` into `out_data`
735
+ template <class Offset>
736
+ void CopyOffsets(int32_t start, Offset* out_data) const {
737
+ DCHECK_LE(start, size());
738
+
739
+ const builder_offset_type* offsets = binary_builder_.offsets_data();
740
+ const builder_offset_type delta =
741
+ start < binary_builder_.length() ? offsets[start] : 0;
742
+ for (int32_t i = start; i < size(); ++i) {
743
+ const builder_offset_type adjusted_offset = offsets[i] - delta;
744
+ Offset cast_offset = static_cast<Offset>(adjusted_offset);
745
+ assert(static_cast<builder_offset_type>(cast_offset) ==
746
+ adjusted_offset); // avoid truncation
747
+ *out_data++ = cast_offset;
748
+ }
749
+
750
+ // Copy the final offset, since BinaryBuilder only materializes it in Finish()
751
+ *out_data = static_cast<Offset>(binary_builder_.value_data_length() - delta);
752
+ }
753
+
754
+ template <class Offset>
755
+ void CopyOffsets(Offset* out_data) const {
756
+ CopyOffsets(0, out_data);
757
+ }
758
+
759
+ // Copy values starting from index `start` into `out_data`
760
+ void CopyValues(int32_t start, uint8_t* out_data) const {
761
+ CopyValues(start, -1, out_data);
762
+ }
763
+
764
+ // Same as above, but check output size in debug mode
765
+ void CopyValues(int32_t start, int64_t out_size, uint8_t* out_data) const {
766
+ DCHECK_LE(start, size());
767
+
768
+ // The absolute byte offset of `start` value in the binary buffer.
769
+ const builder_offset_type offset = binary_builder_.offset(start);
770
+ const auto length = binary_builder_.value_data_length() - static_cast<size_t>(offset);
771
+
772
+ if (out_size != -1) {
773
+ assert(static_cast<int64_t>(length) <= out_size);
774
+ }
775
+
776
+ auto view = binary_builder_.GetView(start);
777
+ memcpy(out_data, view.data(), length);
778
+ }
779
+
780
+ void CopyValues(uint8_t* out_data) const { CopyValues(0, -1, out_data); }
781
+
782
+ void CopyValues(int64_t out_size, uint8_t* out_data) const {
783
+ CopyValues(0, out_size, out_data);
784
+ }
785
+
786
+ void CopyFixedWidthValues(int32_t start, int32_t width_size, int64_t out_size,
787
+ uint8_t* out_data) const {
788
+ // This method exists to cope with the fact that the BinaryMemoTable does
789
+ // not know the fixed width when inserting the null value. The data
790
+ // buffer holds a zero-length string for the null value (if found).
791
+ //
792
+ // Thus, the method will properly inject an empty value of the proper width
793
+ // in the output buffer.
794
+ //
795
+ if (start >= size()) {
796
+ return;
797
+ }
798
+
799
+ int32_t null_index = GetNull();
800
+ if (null_index < start) {
801
+ // Nothing to skip, proceed as usual.
802
+ CopyValues(start, out_size, out_data);
803
+ return;
804
+ }
805
+
806
+ builder_offset_type left_offset = binary_builder_.offset(start);
807
+
808
+ // Ensure that the data length is exactly missing width_size bytes to fit
809
+ // in the expected output (n_values * width_size).
810
+ #ifndef NDEBUG
811
+ int64_t data_length = values_size() - static_cast<size_t>(left_offset);
812
+ assert(data_length + width_size == out_size);
813
+ ARROW_UNUSED(data_length);
814
+ #endif
815
+
816
+ // in_data points at the buffer start; left_offset and null_data_offset are
+ // applied at the point of use below.
+ auto in_data = binary_builder_.value_data();
817
+ // The null entry uses a 0-length value in the data; slice the data in 2 and skip by
818
+ // width_size in out_data. [part_1][width_size][part_2]
819
+ auto null_data_offset = binary_builder_.offset(null_index);
820
+ auto left_size = null_data_offset - left_offset;
821
+ if (left_size > 0) {
822
+ memcpy(out_data, in_data + left_offset, left_size);
823
+ }
824
+ // Zero-initialize the null entry
825
+ memset(out_data + left_size, 0, width_size);
826
+
827
+ auto right_size = values_size() - static_cast<size_t>(null_data_offset);
828
+ if (right_size > 0) {
829
+ // Skip the null fixed-size value.
830
+ auto out_offset = left_size + width_size;
831
+ assert(out_data + out_offset + right_size == out_data + out_size);
832
+ memcpy(out_data + out_offset, in_data + null_data_offset, right_size);
833
+ }
834
+ }
835
+
836
+ // Visit the stored values in insertion order.
837
+ // The visitor function should have the signature `void(std::string_view)`
838
+ // or `void(const std::string_view&)`.
839
+ template <typename VisitFunc>
840
+ void VisitValues(int32_t start, VisitFunc&& visit) const {
841
+ for (int32_t i = start; i < size(); ++i) {
842
+ visit(binary_builder_.GetView(i));
843
+ }
844
+ }
845
+
846
+ protected:
847
+ struct Payload {
848
+ int32_t memo_index;
849
+ };
850
+
851
+ using HashTableType = HashTable<Payload>;
852
+ using HashTableEntry = typename HashTable<Payload>::Entry;
853
+ HashTableType hash_table_;
854
+ BinaryBuilderT binary_builder_;
855
+
856
+ int32_t null_index_ = kKeyNotFound;
857
+
858
+ std::pair<const HashTableEntry*, bool> Lookup(hash_t h, const void* data,
859
+ builder_offset_type length) const {
860
+ auto cmp_func = [&](const Payload* payload) {
861
+ std::string_view lhs = binary_builder_.GetView(payload->memo_index);
862
+ std::string_view rhs(static_cast<const char*>(data), length);
863
+ return lhs == rhs;
864
+ };
865
+ return hash_table_.Lookup(h, cmp_func);
866
+ }
867
+
868
+ public:
869
+ Status MergeTable(const BinaryMemoTable& other_table) {
870
+ other_table.VisitValues(0, [this](std::string_view other_value) {
871
+ int32_t unused;
872
+ DCHECK_OK(this->GetOrInsert(other_value, &unused));
873
+ });
874
+ return Status::OK();
875
+ }
876
+ };
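+
+ // Example (sketch): memoizing strings, then materializing the dictionary's
+ // offsets and value bytes the way a dictionary builder would.
+ //
+ //   BinaryMemoTable<BinaryBuilder> memo(default_memory_pool());
+ //   int32_t index;
+ //   RETURN_NOT_OK(memo.GetOrInsert(std::string_view("hello"), &index));
+ //   RETURN_NOT_OK(memo.GetOrInsert(std::string_view("world"), &index));
+ //   std::vector<int32_t> offsets(memo.size() + 1);
+ //   memo.CopyOffsets(offsets.data());
+ //   std::vector<uint8_t> values(memo.values_size());
+ //   memo.CopyValues(values.data());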
877
+
878
+ template <typename T, typename Enable = void>
879
+ struct HashTraits {};
880
+
881
+ template <>
882
+ struct HashTraits<BooleanType> {
883
+ using MemoTableType = SmallScalarMemoTable<bool>;
884
+ };
885
+
886
+ template <typename T>
887
+ struct HashTraits<T, enable_if_8bit_int<T>> {
888
+ using c_type = typename T::c_type;
889
+ using MemoTableType = SmallScalarMemoTable<typename T::c_type>;
890
+ };
891
+
892
+ template <typename T>
893
+ struct HashTraits<T, enable_if_t<has_c_type<T>::value && !is_8bit_int<T>::value>> {
894
+ using c_type = typename T::c_type;
895
+ using MemoTableType = ScalarMemoTable<c_type, HashTable>;
896
+ };
897
+
898
+ template <typename T>
899
+ struct HashTraits<T, enable_if_t<has_string_view<T>::value &&
900
+ !std::is_base_of<LargeBinaryType, T>::value>> {
901
+ using MemoTableType = BinaryMemoTable<BinaryBuilder>;
902
+ };
903
+
904
+ template <typename T>
905
+ struct HashTraits<T, enable_if_decimal<T>> {
906
+ using MemoTableType = BinaryMemoTable<BinaryBuilder>;
907
+ };
908
+
909
+ template <typename T>
910
+ struct HashTraits<T, enable_if_t<std::is_base_of<LargeBinaryType, T>::value>> {
911
+ using MemoTableType = BinaryMemoTable<LargeBinaryBuilder>;
912
+ };
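+
+ // Example (sketch): HashTraits selects the memo table implementation for an
+ // Arrow type at compile time.
+ //
+ //   using MemoType = typename HashTraits<Int64Type>::MemoTableType;
+ //   static_assert(std::is_same_v<MemoType, ScalarMemoTable<int64_t, HashTable>>);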
913
+
914
+ template <typename MemoTableType>
915
+ static inline Status ComputeNullBitmap(MemoryPool* pool, const MemoTableType& memo_table,
916
+ int64_t start_offset, int64_t* null_count,
917
+ std::shared_ptr<Buffer>* null_bitmap) {
918
+ int64_t dict_length = static_cast<int64_t>(memo_table.size()) - start_offset;
919
+ int64_t null_index = memo_table.GetNull();
920
+
921
+ *null_count = 0;
922
+ *null_bitmap = nullptr;
923
+
924
+ if (null_index != kKeyNotFound && null_index >= start_offset) {
925
+ null_index -= start_offset;
926
+ *null_count = 1;
927
+ ARROW_ASSIGN_OR_RAISE(*null_bitmap,
928
+ internal::BitmapAllButOne(pool, dict_length, null_index));
929
+ }
930
+
931
+ return Status::OK();
932
+ }
933
+
934
+ struct StringViewHash {
935
+ // std::hash compatible hasher for use with std::unordered_*
936
+ // (the std::hash specialization provided by nonstd constructs std::string
937
+ // temporaries then invokes std::hash<std::string> against those)
938
+ hash_t operator()(std::string_view value) const {
939
+ return ComputeStringHash<0>(value.data(), static_cast<int64_t>(value.size()));
940
+ }
941
+ };
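+
+ // Example (sketch): StringViewHash plugs directly into the standard unordered
+ // containers, hashing the viewed bytes without building a std::string first.
+ //
+ //   std::unordered_set<std::string_view, StringViewHash> seen;
+ //   seen.insert("alpha");
+ //   bool duplicate = !seen.insert("alpha").second;  // true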
942
+
943
+ } // namespace internal
944
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util_overflow.h ADDED
@@ -0,0 +1,118 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <limits>
22
+ #include <type_traits>
23
+
24
+ #include "arrow/status.h"
25
+ #include "arrow/util/macros.h"
26
+ #include "arrow/util/visibility.h"
27
+
28
+ // "safe-math.h" includes <intsafe.h> from the Windows headers.
29
+ #include "arrow/util/windows_compatibility.h"
30
+ #include "arrow/vendored/portable-snippets/safe-math.h"
31
+ // clang-format off (avoid include reordering)
32
+ #include "arrow/util/windows_fixup.h"
33
+ // clang-format on
34
+
35
+ namespace arrow {
36
+ namespace internal {
37
+
38
+ // Define functions AddWithOverflow, SubtractWithOverflow, MultiplyWithOverflow,
39
+ // and DivideWithOverflow with the signature `bool(T u, T v, T* out)` where T
+ // is an integer type.
40
+ // On overflow, these functions return true. Otherwise, false is returned
41
+ // and `out` is updated with the result of the operation.
42
+
43
+ #define OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type) \
44
+ [[nodiscard]] static inline bool _func_name(_type u, _type v, _type* out) { \
45
+ return !psnip_safe_##_psnip_type##_##_psnip_op(out, u, v); \
46
+ }
47
+
48
+ #define OPS_WITH_OVERFLOW(_func_name, _psnip_op) \
49
+ OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8) \
50
+ OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16) \
51
+ OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32) \
52
+ OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64) \
53
+ OP_WITH_OVERFLOW(_func_name, _psnip_op, uint8_t, uint8) \
54
+ OP_WITH_OVERFLOW(_func_name, _psnip_op, uint16_t, uint16) \
55
+ OP_WITH_OVERFLOW(_func_name, _psnip_op, uint32_t, uint32) \
56
+ OP_WITH_OVERFLOW(_func_name, _psnip_op, uint64_t, uint64)
57
+
58
+ OPS_WITH_OVERFLOW(AddWithOverflow, add)
59
+ OPS_WITH_OVERFLOW(SubtractWithOverflow, sub)
60
+ OPS_WITH_OVERFLOW(MultiplyWithOverflow, mul)
61
+ OPS_WITH_OVERFLOW(DivideWithOverflow, div)
62
+
63
+ #undef OP_WITH_OVERFLOW
64
+ #undef OPS_WITH_OVERFLOW
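+
+ // Example (sketch): the generated functions return true on overflow and
+ // write `out` only on success.
+ //
+ //   int32_t out;
+ //   bool overflow = AddWithOverflow(std::numeric_limits<int32_t>::max(),
+ //                                   int32_t{1}, &out);             // true
+ //   overflow = MultiplyWithOverflow(int32_t{6}, int32_t{7}, &out); // false, out == 42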
65
+
66
+ // Define function NegateWithOverflow with the signature `bool(T u, T* out)`
67
+ // where T is a signed integer type. On overflow, these functions return true.
68
+ // Otherwise, false is returned and `out` is updated with the result of the
69
+ // operation.
70
+
71
+ #define UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, _type, _psnip_type) \
72
+ [[nodiscard]] static inline bool _func_name(_type u, _type* out) { \
73
+ return !psnip_safe_##_psnip_type##_##_psnip_op(out, u); \
74
+ }
75
+
76
+ #define SIGNED_UNARY_OPS_WITH_OVERFLOW(_func_name, _psnip_op) \
77
+ UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int8_t, int8) \
78
+ UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int16_t, int16) \
79
+ UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int32_t, int32) \
80
+ UNARY_OP_WITH_OVERFLOW(_func_name, _psnip_op, int64_t, int64)
81
+
82
+ SIGNED_UNARY_OPS_WITH_OVERFLOW(NegateWithOverflow, neg)
83
+
84
+ #undef UNARY_OP_WITH_OVERFLOW
85
+ #undef SIGNED_UNARY_OPS_WITH_OVERFLOW
86
+
87
+ /// Signed addition with well-defined behaviour on overflow (as unsigned)
88
+ template <typename SignedInt>
89
+ SignedInt SafeSignedAdd(SignedInt u, SignedInt v) {
90
+ using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
91
+ return static_cast<SignedInt>(static_cast<UnsignedInt>(u) +
92
+ static_cast<UnsignedInt>(v));
93
+ }
94
+
95
+ /// Signed subtraction with well-defined behaviour on overflow (as unsigned)
96
+ template <typename SignedInt>
97
+ SignedInt SafeSignedSubtract(SignedInt u, SignedInt v) {
98
+ using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
99
+ return static_cast<SignedInt>(static_cast<UnsignedInt>(u) -
100
+ static_cast<UnsignedInt>(v));
101
+ }
102
+
103
+ /// Signed negation with well-defined behaviour on overflow (as unsigned)
104
+ template <typename SignedInt>
105
+ SignedInt SafeSignedNegate(SignedInt u) {
106
+ using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
107
+ return static_cast<SignedInt>(~static_cast<UnsignedInt>(u) + 1);
108
+ }
109
+
110
+ /// Signed left shift with well-defined behaviour on negative numbers or overflow
111
+ template <typename SignedInt, typename Shift>
112
+ SignedInt SafeLeftShift(SignedInt u, Shift shift) {
113
+ using UnsignedInt = typename std::make_unsigned<SignedInt>::type;
114
+ return static_cast<SignedInt>(static_cast<UnsignedInt>(u) << shift);
115
+ }
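+
+ // Example (sketch): unlike the *WithOverflow functions above, the Safe*
+ // helpers never report overflow; they define it to wrap as if computed on
+ // the corresponding unsigned type.
+ //
+ //   int8_t wrapped = SafeSignedAdd(int8_t{127}, int8_t{1});  // -128, no UB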
116
+
117
+ } // namespace internal
118
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h ADDED
@@ -0,0 +1,452 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #ifndef _WIN32
21
+ #define ARROW_HAVE_SIGACTION 1
22
+ #endif
23
+
24
+ #include <atomic>
25
+ #include <memory>
26
+ #include <optional>
27
+ #include <string>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ #if ARROW_HAVE_SIGACTION
32
+ #include <csignal> // Needed for struct sigaction
33
+ #endif
34
+
35
+ #include "arrow/result.h"
36
+ #include "arrow/status.h"
37
+ #include "arrow/type_fwd.h"
38
+ #include "arrow/util/macros.h"
39
+ #include "arrow/util/windows_fixup.h"
40
+
41
+ namespace arrow::internal {
42
+
43
+ // NOTE: 8-bit path strings on Windows are encoded using UTF-8.
44
+ // Using MBCS would fail encoding some paths.
45
+
46
+ #if defined(_WIN32)
47
+ using NativePathString = std::wstring;
48
+ #else
49
+ using NativePathString = std::string;
50
+ #endif
51
+
52
+ class ARROW_EXPORT PlatformFilename {
53
+ public:
54
+ struct Impl;
55
+
56
+ ~PlatformFilename();
57
+ PlatformFilename();
58
+ PlatformFilename(const PlatformFilename&);
59
+ PlatformFilename(PlatformFilename&&);
60
+ PlatformFilename& operator=(const PlatformFilename&);
61
+ PlatformFilename& operator=(PlatformFilename&&);
62
+ explicit PlatformFilename(NativePathString path);
63
+ explicit PlatformFilename(const NativePathString::value_type* path);
64
+
65
+ const NativePathString& ToNative() const;
66
+ std::string ToString() const;
67
+
68
+ PlatformFilename Parent() const;
69
+ Result<PlatformFilename> Real() const;
70
+
71
+ // These functions can fail for character encoding reasons.
72
+ static Result<PlatformFilename> FromString(std::string_view file_name);
73
+ Result<PlatformFilename> Join(std::string_view child_name) const;
74
+
75
+ PlatformFilename Join(const PlatformFilename& child_name) const;
76
+
77
+ bool operator==(const PlatformFilename& other) const;
78
+ bool operator!=(const PlatformFilename& other) const;
79
+
80
+ // Made public to avoid the proliferation of friend declarations.
81
+ const Impl* impl() const { return impl_.get(); }
82
+
83
+ private:
84
+ std::unique_ptr<Impl> impl_;
85
+
86
+ explicit PlatformFilename(Impl impl);
87
+ };
88
+
89
+ /// Create a directory if it doesn't exist.
90
+ ///
91
+ /// Return whether the directory was created.
92
+ ARROW_EXPORT
93
+ Result<bool> CreateDir(const PlatformFilename& dir_path);
94
+
95
+ /// Create a directory and its parents if they don't exist.
96
+ ///
97
+ /// Return whether the directory was created.
98
+ ARROW_EXPORT
99
+ Result<bool> CreateDirTree(const PlatformFilename& dir_path);
100
+
101
+ /// Delete a directory's contents (but not the directory itself) if it exists.
102
+ ///
103
+ /// Return whether the directory existed.
104
+ ARROW_EXPORT
105
+ Result<bool> DeleteDirContents(const PlatformFilename& dir_path,
106
+ bool allow_not_found = true);
107
+
108
+ /// Delete a directory tree if it exists.
109
+ ///
110
+ /// Return whether the directory existed.
111
+ ARROW_EXPORT
112
+ Result<bool> DeleteDirTree(const PlatformFilename& dir_path, bool allow_not_found = true);
113
+
114
+ // Non-recursively list the contents of the given directory.
115
+ // The returned names are the children's base names, not including dir_path.
116
+ ARROW_EXPORT
117
+ Result<std::vector<PlatformFilename>> ListDir(const PlatformFilename& dir_path);
118
+
119
+ /// Delete a file if it exists.
120
+ ///
121
+ /// Return whether the file existed.
122
+ ARROW_EXPORT
123
+ Result<bool> DeleteFile(const PlatformFilename& file_path, bool allow_not_found = true);
124
+
125
+ /// Return whether a file exists.
126
+ ARROW_EXPORT
127
+ Result<bool> FileExists(const PlatformFilename& path);
128
+
129
+ // TODO expose this more publicly to make it available from io/file.h?
130
+ /// A RAII wrapper for a file descriptor.
131
+ ///
132
+ /// The underlying file descriptor is automatically closed on destruction.
133
+ /// Moving is supported with well-defined semantics.
134
+ /// Furthermore, closing is idempotent.
135
+ class ARROW_EXPORT FileDescriptor {
136
+ public:
137
+ FileDescriptor() = default;
138
+ explicit FileDescriptor(int fd) : fd_(fd) {}
139
+ FileDescriptor(FileDescriptor&&);
140
+ FileDescriptor& operator=(FileDescriptor&&);
141
+
142
+ ~FileDescriptor();
143
+
144
+ Status Close();
145
+
146
+ /// May return -1 if closed or default-initialized
147
+ int fd() const { return fd_.load(); }
148
+
149
+ /// Detach and return the underlying file descriptor
150
+ int Detach();
151
+
152
+ bool closed() const { return fd_.load() == -1; }
153
+
154
+ protected:
155
+ static void CloseFromDestructor(int fd);
156
+
157
+ std::atomic<int> fd_{-1};
158
+ };
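+
+ // Example (sketch; "data.bin" is a made-up path): the descriptor closes
+ // itself when `fd` goes out of scope, and an explicit earlier Close() is
+ // harmless since closing is idempotent.
+ //
+ //   ARROW_ASSIGN_OR_RAISE(auto fn, PlatformFilename::FromString("data.bin"));
+ //   ARROW_ASSIGN_OR_RAISE(FileDescriptor fd, FileOpenReadable(fn));
+ //   uint8_t buf[4096];
+ //   ARROW_ASSIGN_OR_RAISE(int64_t n, FileRead(fd.fd(), buf, sizeof(buf)));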
159
+
160
+ /// Open a file for reading and return a file descriptor.
161
+ ARROW_EXPORT
162
+ Result<FileDescriptor> FileOpenReadable(const PlatformFilename& file_name);
163
+
164
+ /// Open a file for writing and return a file descriptor.
165
+ ARROW_EXPORT
166
+ Result<FileDescriptor> FileOpenWritable(const PlatformFilename& file_name,
167
+ bool write_only = true, bool truncate = true,
168
+ bool append = false);
169
+
170
+ /// Read from current file position. Return number of bytes read.
171
+ ARROW_EXPORT
172
+ Result<int64_t> FileRead(int fd, uint8_t* buffer, int64_t nbytes);
173
+ /// Read from given file position. Return number of bytes read.
174
+ ARROW_EXPORT
175
+ Result<int64_t> FileReadAt(int fd, uint8_t* buffer, int64_t position, int64_t nbytes);
176
+
177
+ ARROW_EXPORT
178
+ Status FileWrite(int fd, const uint8_t* buffer, const int64_t nbytes);
179
+ ARROW_EXPORT
180
+ Status FileTruncate(int fd, const int64_t size);
181
+
182
+ ARROW_EXPORT
183
+ Status FileSeek(int fd, int64_t pos);
184
+ ARROW_EXPORT
185
+ Status FileSeek(int fd, int64_t pos, int whence);
186
+ ARROW_EXPORT
187
+ Result<int64_t> FileTell(int fd);
188
+ ARROW_EXPORT
189
+ Result<int64_t> FileGetSize(int fd);
190
+
191
+ ARROW_EXPORT
192
+ Status FileClose(int fd);
193
+
194
+ struct Pipe {
195
+ FileDescriptor rfd;
196
+ FileDescriptor wfd;
197
+
198
+ Status Close() { return rfd.Close() & wfd.Close(); }
199
+ };
200
+
201
+ ARROW_EXPORT
202
+ Result<Pipe> CreatePipe();
203
+
204
+ ARROW_EXPORT
205
+ Status SetPipeFileDescriptorNonBlocking(int fd);
206
+
207
+ class ARROW_EXPORT SelfPipe {
208
+ public:
209
+ static Result<std::shared_ptr<SelfPipe>> Make(bool signal_safe);
210
+ virtual ~SelfPipe();
211
+
212
+ /// \brief Wait for a wakeup.
213
+ ///
214
+ /// Status::Invalid is returned if the pipe has been shutdown.
215
+ /// Otherwise the next sent payload is returned.
216
+ virtual Result<uint64_t> Wait() = 0;
217
+
218
+ /// \brief Wake up the pipe by sending a payload.
219
+ ///
220
+ /// This method is async-signal-safe if `signal_safe` was set to true.
221
+ virtual void Send(uint64_t payload) = 0;
222
+
223
+ /// \brief Wake up the pipe and shut it down.
224
+ virtual Status Shutdown() = 0;
225
+ };
226
+
227
+ ARROW_EXPORT
228
+ int64_t GetPageSize();
229
+
230
+ struct MemoryRegion {
231
+ void* addr;
232
+ size_t size;
233
+ };
234
+
235
+ ARROW_EXPORT
236
+ Status MemoryMapRemap(void* addr, size_t old_size, size_t new_size, int fildes,
237
+ void** new_addr);
238
+ ARROW_EXPORT
239
+ Status MemoryAdviseWillNeed(const std::vector<MemoryRegion>& regions);
240
+
241
+ ARROW_EXPORT
242
+ Result<std::string> GetEnvVar(const char* name);
243
+ ARROW_EXPORT
244
+ Result<std::string> GetEnvVar(const std::string& name);
245
+ ARROW_EXPORT
246
+ Result<NativePathString> GetEnvVarNative(const char* name);
247
+ ARROW_EXPORT
248
+ Result<NativePathString> GetEnvVarNative(const std::string& name);
249
+
250
+ ARROW_EXPORT
251
+ Status SetEnvVar(const char* name, const char* value);
252
+ ARROW_EXPORT
253
+ Status SetEnvVar(const std::string& name, const std::string& value);
254
+ ARROW_EXPORT
255
+ Status DelEnvVar(const char* name);
256
+ ARROW_EXPORT
257
+ Status DelEnvVar(const std::string& name);
258
+
259
+ ARROW_EXPORT
260
+ std::string ErrnoMessage(int errnum);
261
+ #if _WIN32
262
+ ARROW_EXPORT
263
+ std::string WinErrorMessage(int errnum);
264
+ #endif
265
+
266
+ ARROW_EXPORT
267
+ std::shared_ptr<StatusDetail> StatusDetailFromErrno(int errnum);
268
+ ARROW_EXPORT
269
+ std::optional<int> ErrnoFromStatusDetail(const StatusDetail& detail);
270
+ #if _WIN32
271
+ ARROW_EXPORT
272
+ std::shared_ptr<StatusDetail> StatusDetailFromWinError(int errnum);
273
+ #endif
274
+ ARROW_EXPORT
275
+ std::shared_ptr<StatusDetail> StatusDetailFromSignal(int signum);
276
+
277
+ template <typename... Args>
278
+ Status StatusFromErrno(int errnum, StatusCode code, Args&&... args) {
279
+ return Status::FromDetailAndArgs(code, StatusDetailFromErrno(errnum),
280
+ std::forward<Args>(args)...);
281
+ }
282
+
283
+ template <typename... Args>
284
+ Status IOErrorFromErrno(int errnum, Args&&... args) {
285
+ return StatusFromErrno(errnum, StatusCode::IOError, std::forward<Args>(args)...);
286
+ }
287
+
288
+ #if _WIN32
289
+ template <typename... Args>
290
+ Status StatusFromWinError(int errnum, StatusCode code, Args&&... args) {
291
+ return Status::FromDetailAndArgs(code, StatusDetailFromWinError(errnum),
292
+ std::forward<Args>(args)...);
293
+ }
294
+
295
+ template <typename... Args>
296
+ Status IOErrorFromWinError(int errnum, Args&&... args) {
297
+ return StatusFromWinError(errnum, StatusCode::IOError, std::forward<Args>(args)...);
298
+ }
299
+ #endif
300
+
301
+ template <typename... Args>
302
+ Status StatusFromSignal(int signum, StatusCode code, Args&&... args) {
303
+ return Status::FromDetailAndArgs(code, StatusDetailFromSignal(signum),
304
+ std::forward<Args>(args)...);
305
+ }
306
+
307
+ template <typename... Args>
308
+ Status CancelledFromSignal(int signum, Args&&... args) {
309
+ return StatusFromSignal(signum, StatusCode::Cancelled, std::forward<Args>(args)...);
310
+ }
311
+
312
+ ARROW_EXPORT
313
+ int ErrnoFromStatus(const Status&);
314
+
315
+ // Always returns 0 on non-Windows platforms (for Python).
316
+ ARROW_EXPORT
317
+ int WinErrorFromStatus(const Status&);
318
+
319
+ ARROW_EXPORT
320
+ int SignalFromStatus(const Status&);
321
+
322
+ class ARROW_EXPORT TemporaryDir {
323
+ public:
324
+ ~TemporaryDir();
325
+
326
+ /// '/'-terminated path to the temporary dir
327
+ const PlatformFilename& path() { return path_; }
328
+
329
+ /// Create a temporary subdirectory in the system temporary dir,
330
+ /// named starting with `prefix`.
331
+ static Result<std::unique_ptr<TemporaryDir>> Make(const std::string& prefix);
332
+
333
+ private:
334
+ PlatformFilename path_;
335
+
336
+ explicit TemporaryDir(PlatformFilename&&);
337
+ };
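+
+ // Example (sketch): the directory is deleted again when the TemporaryDir
+ // object is destroyed.
+ //
+ //   ARROW_ASSIGN_OR_RAISE(auto tmp, TemporaryDir::Make("arrow-test-"));
+ //   ARROW_ASSIGN_OR_RAISE(auto file, tmp->path().Join("scratch.bin"));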
338
+
339
+ class ARROW_EXPORT SignalHandler {
340
+ public:
341
+ using Callback = void (*)(int);
342
+
343
+ SignalHandler();
344
+ explicit SignalHandler(Callback cb);
345
+ #if ARROW_HAVE_SIGACTION
346
+ explicit SignalHandler(const struct sigaction& sa);
347
+ #endif
348
+
349
+ Callback callback() const;
350
+ #if ARROW_HAVE_SIGACTION
351
+ const struct sigaction& action() const;
352
+ #endif
353
+
354
+ protected:
355
+ #if ARROW_HAVE_SIGACTION
356
+ // Storing the full sigaction allows restoring the entire signal handling
357
+ // configuration.
358
+ struct sigaction sa_;
359
+ #else
360
+ Callback cb_;
361
+ #endif
362
+ };
363
+
364
+ /// \brief Return the current handler for the given signal number.
365
+ ARROW_EXPORT
366
+ Result<SignalHandler> GetSignalHandler(int signum);
367
+
368
+ /// \brief Set a new handler for the given signal number.
369
+ ///
370
+ /// The old signal handler is returned.
371
+ ARROW_EXPORT
372
+ Result<SignalHandler> SetSignalHandler(int signum, const SignalHandler& handler);
373
+
374
+ /// \brief Reinstate the signal handler
375
+ ///
376
+ /// For use in signal handlers. This is needed on platforms without sigaction()
377
+ /// such as Windows, as the default signal handler is restored there as
378
+ /// soon as a signal is raised.
379
+ ARROW_EXPORT
380
+ void ReinstateSignalHandler(int signum, SignalHandler::Callback handler);
381
+
382
+ /// \brief Send a signal to the current process
383
+ ///
384
+ /// The thread which will receive the signal is unspecified.
385
+ ARROW_EXPORT
386
+ Status SendSignal(int signum);
387
+
388
+ /// \brief Send a signal to the given thread
389
+ ///
390
+ /// This function isn't supported on Windows.
391
+ ARROW_EXPORT
392
+ Status SendSignalToThread(int signum, uint64_t thread_id);
393
+
394
+ /// \brief Get an unpredictable random seed
395
+ ///
396
+ /// This function may be slightly costly, so should only be used to initialize
397
+ /// a PRNG, not to generate a large amount of random numbers.
398
+ /// It is better to use this function rather than std::random_device, unless
399
+ /// absolutely necessary (e.g. to generate a cryptographic secret).
400
+ ARROW_EXPORT
401
+ int64_t GetRandomSeed();
402
+
403
+ /// \brief Get the current thread id
404
+ ///
405
+ /// In addition to having the same properties as std::thread::id, the returned value
406
+ /// is a regular integer value, which is more convenient than an opaque type.
407
+ ARROW_EXPORT
408
+ uint64_t GetThreadId();
409
+
410
+ /// \brief Get the current memory used by the current process in bytes
411
+ ///
412
+ /// This function supports Windows, Linux, and Mac and will return 0 otherwise
413
+ ARROW_EXPORT
414
+ int64_t GetCurrentRSS();
415
+
416
+ /// \brief Get the total memory available to the system in bytes
417
+ ///
418
+ /// This function supports Windows, Linux, and Mac and will return 0 otherwise
419
+ ARROW_EXPORT
420
+ int64_t GetTotalMemoryBytes();
421
+
422
+ /// \brief Load a dynamic library
423
+ ///
424
+ /// This wraps dlopen() except on Windows, where LoadLibrary() is called.
425
+ /// These two platforms handle absolute paths consistently; relative paths
426
+ /// or the library's bare name may also work, but inconsistently.
427
+ ///
428
+ /// \return An opaque handle for the dynamic library, which can be used for
429
+ /// subsequent symbol lookup. Nullptr will never be returned; instead
430
+ /// an error will be raised.
431
+ ARROW_EXPORT Result<void*> LoadDynamicLibrary(const PlatformFilename& path);
432
+
433
+ /// \brief Load a dynamic library
434
+ ///
435
+ /// An overload taking null terminated string.
436
+ ARROW_EXPORT Result<void*> LoadDynamicLibrary(const char* path);
437
+
438
+ /// \brief Retrieve a symbol by name from a library handle.
439
+ ///
440
+ /// This wraps dlsym() except on Windows, where GetProcAddress() is called.
441
+ ///
442
+ /// \return The address associated with the named symbol. Nullptr will never be
443
+ /// returned; instead an error will be raised.
444
+ ARROW_EXPORT Result<void*> GetSymbol(void* handle, const char* name);
445
+
446
+ template <typename T>
447
+ Result<T*> GetSymbolAs(void* handle, const char* name) {
448
+ ARROW_ASSIGN_OR_RAISE(void* sym, GetSymbol(handle, name));
449
+ return reinterpret_cast<T*>(sym);
450
+ }
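+
+ // Example (sketch; the library path and symbol name are made up): loading a
+ // typed function pointer from a shared library.
+ //
+ //   ARROW_ASSIGN_OR_RAISE(void* lib, LoadDynamicLibrary("/opt/ext/libplugin.so"));
+ //   ARROW_ASSIGN_OR_RAISE(auto init, GetSymbolAs<int(int)>(lib, "plugin_init"));
+ //   int rc = init(42);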
451
+
452
+ } // namespace arrow::internal
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/iterator.h ADDED
@@ -0,0 +1,568 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cassert>
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <optional>
24
+ #include <tuple>
25
+ #include <type_traits>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/result.h"
30
+ #include "arrow/status.h"
31
+ #include "arrow/util/compare.h"
32
+ #include "arrow/util/functional.h"
33
+ #include "arrow/util/macros.h"
34
+ #include "arrow/util/visibility.h"
35
+
36
+ namespace arrow {
37
+
38
+ template <typename T>
39
+ class Iterator;
40
+
41
+ template <typename T>
42
+ struct IterationTraits {
43
+ /// \brief a reserved value which indicates the end of iteration. By
44
+ /// default this is NULLPTR since most iterators yield pointer types.
45
+ /// Specialize IterationTraits if different end semantics are required.
46
+ ///
47
+ /// Note: This should not be used to determine if a given value is a
48
+ /// terminal value. Use IsIterationEnd (which uses IsEnd) instead. This
49
+ /// is only for returning terminal values.
50
+ static T End() { return T(NULLPTR); }
51
+
52
+ /// \brief Checks to see if the value is a terminal value.
53
+ /// A method is used here since T is not necessarily comparable in many
54
+ /// cases even though it has a distinct final value
55
+ static bool IsEnd(const T& val) { return val == End(); }
56
+ };
57
+
58
+ template <typename T>
59
+ T IterationEnd() {
60
+ return IterationTraits<T>::End();
61
+ }
62
+
63
+ template <typename T>
64
+ bool IsIterationEnd(const T& val) {
65
+ return IterationTraits<T>::IsEnd(val);
66
+ }
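+
+ // Example (sketch): for pointer-like T the end sentinel is NULLPTR; for
+ // std::optional<T> it is nullopt (see the specialization below).
+ //
+ //   auto end = IterationEnd<std::shared_ptr<int>>();  // nullptr
+ //   bool done = IsIterationEnd(end);                  // true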
67
+
68
+ template <typename T>
69
+ struct IterationTraits<std::optional<T>> {
70
+ /// \brief by default when iterating through a sequence of optional,
71
+ /// nullopt indicates the end of iteration.
72
+ /// Specialize IterationTraits if different end semantics are required.
73
+ static std::optional<T> End() { return std::nullopt; }
74
+
75
+ /// \brief by default when iterating through a sequence of optional,
76
+ /// nullopt (!has_value()) indicates the end of iteration.
77
+ /// Specialize IterationTraits if different end semantics are required.
78
+ static bool IsEnd(const std::optional<T>& val) { return !val.has_value(); }
79
+
80
+ // TODO(bkietz) The range-for loop over Iterator<optional<T>> yields
81
+ // Result<optional<T>> which is unnecessary (since only the unyielded end optional
82
+ // is nullopt. Add IterationTraits::GetRangeElement() to handle this case
83
+ };
84
+
85
+ /// \brief A generic Iterator that can return errors
86
+ template <typename T>
87
+ class Iterator : public util::EqualityComparable<Iterator<T>> {
88
+ public:
89
+ /// \brief Iterator may be constructed from any type which has a member function
90
+ /// with signature Result<T> Next();
91
+ /// End of iterator is signalled by returning IteratorTraits<T>::End();
92
+ ///
93
+ /// The argument is moved or copied to the heap and kept in a unique_ptr<void>. Only
94
+ /// its destructor and its Next method (which are stored in function pointers) are
95
+ /// referenced after construction.
96
+ ///
97
+ /// This approach is used to dodge MSVC linkage hell (ARROW-6244, ARROW-6558) when using
98
+ /// an abstract template base class: instead of being inlined as usual for a template
99
+ /// function the base's virtual destructor will be exported, leading to multiple
100
+ /// definition errors when linking to any other TU where the base is instantiated.
101
+ template <typename Wrapped>
102
+ explicit Iterator(Wrapped has_next)
103
+ : ptr_(new Wrapped(std::move(has_next)), Delete<Wrapped>), next_(Next<Wrapped>) {}
104
+
105
+ Iterator() : ptr_(NULLPTR, [](void*) {}) {}
106
+
107
+ /// \brief Return the next element of the sequence, IterationTraits<T>::End() when the
108
+ /// iteration is completed. Calling this on a default constructed Iterator
109
+ /// will result in undefined behavior.
110
+ Result<T> Next() { return next_(ptr_.get()); }
111
+
112
+ /// Pass each element of the sequence to a visitor. Will return any error status
113
+ /// returned by the visitor, terminating iteration.
114
+ template <typename Visitor>
115
+ Status Visit(Visitor&& visitor) {
116
+ for (;;) {
117
+ ARROW_ASSIGN_OR_RAISE(auto value, Next());
118
+
119
+ if (IsIterationEnd(value)) break;
120
+
121
+ ARROW_RETURN_NOT_OK(visitor(std::move(value)));
122
+ }
123
+
124
+ return Status::OK();
125
+ }
126
+
127
+ /// Iterators will only compare equal if they are both null.
128
+ /// Equality comparability is required to make an Iterator of Iterators
129
+ /// (to check for the end condition).
130
+ bool Equals(const Iterator& other) const { return ptr_ == other.ptr_; }
131
+
132
+ explicit operator bool() const { return ptr_ != NULLPTR; }
133
+
134
+ class RangeIterator {
135
+ public:
136
+ RangeIterator() : value_(IterationTraits<T>::End()) {}
137
+
138
+ explicit RangeIterator(Iterator i)
139
+ : value_(IterationTraits<T>::End()),
140
+ iterator_(std::make_shared<Iterator>(std::move(i))) {
141
+ Next();
142
+ }
143
+
144
+ bool operator!=(const RangeIterator& other) const { return value_ != other.value_; }
145
+
146
+ RangeIterator& operator++() {
147
+ Next();
148
+ return *this;
149
+ }
150
+
151
+ Result<T> operator*() {
152
+ ARROW_RETURN_NOT_OK(value_.status());
153
+
154
+ auto value = std::move(value_);
155
+ value_ = IterationTraits<T>::End();
156
+ return value;
157
+ }
158
+
159
+ private:
160
+ void Next() {
161
+ if (!value_.ok()) {
162
+ value_ = IterationTraits<T>::End();
163
+ return;
164
+ }
165
+ value_ = iterator_->Next();
166
+ }
167
+
168
+ Result<T> value_;
169
+ std::shared_ptr<Iterator> iterator_;
170
+ };
171
+
172
+ RangeIterator begin() { return RangeIterator(std::move(*this)); }
173
+
174
+ RangeIterator end() { return RangeIterator(); }
175
+
176
+ /// \brief Move every element of this iterator into a vector.
177
+ Result<std::vector<T>> ToVector() {
178
+ std::vector<T> out;
179
+ for (auto maybe_element : *this) {
180
+ ARROW_ASSIGN_OR_RAISE(auto element, maybe_element);
181
+ out.push_back(std::move(element));
182
+ }
183
+ // ARROW-8193: On gcc-4.8 without the explicit move it tries to use the
184
+ // copy constructor, which may be deleted on the elements of type T
185
+ return std::move(out);
186
+ }
187
+
188
+ private:
189
+ /// Implementation of deleter for ptr_: Casts from void* to the wrapped type and
190
+ /// deletes that.
191
+ template <typename HasNext>
192
+ static void Delete(void* ptr) {
193
+ delete static_cast<HasNext*>(ptr);
194
+ }
195
+
196
+ /// Implementation of Next: Casts from void* to the wrapped type and invokes that
197
+ /// type's Next member function.
198
+ template <typename HasNext>
199
+ static Result<T> Next(void* ptr) {
200
+ return static_cast<HasNext*>(ptr)->Next();
201
+ }
202
+
203
+ /// ptr_ is a unique_ptr to void with a custom deleter: a function pointer which first
204
+ /// casts from void* to a pointer to the wrapped type then deletes that.
205
+ std::unique_ptr<void, void (*)(void*)> ptr_;
206
+
207
+ /// next_ is a function pointer which first casts from void* to a pointer to the wrapped
208
+ /// type then invokes its Next member function.
209
+ Result<T> (*next_)(void*) = NULLPTR;
210
+ };
211
+
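// [Editor's sketch, not part of the header.] Any type with a
// `Result<T> Next()` member can be type-erased into an Iterator<T>.
// A minimal example, assuming the hypothetical CountingIterator below and
// using std::optional<int> so the default end sentinel (nullopt) applies:
//
//   struct CountingIterator {
//     int n = 0;
//     Result<std::optional<int>> Next() {
//       if (n == 3) return IterationTraits<std::optional<int>>::End();
//       return std::optional<int>(n++);
//     }
//   };
//
//   Status VisitAll() {
//     Iterator<std::optional<int>> it{CountingIterator{}};
//     return it.Visit([](std::optional<int> v) {
//       // ... use *v ...
//       return Status::OK();
//     });
//   }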
+ template <typename T>
+ struct TransformFlow {
+   using YieldValueType = T;
+
+   TransformFlow(YieldValueType value, bool ready_for_next)
+       : finished_(false),
+         ready_for_next_(ready_for_next),
+         yield_value_(std::move(value)) {}
+   TransformFlow(bool finished, bool ready_for_next)
+       : finished_(finished), ready_for_next_(ready_for_next), yield_value_() {}
+
+   bool HasValue() const { return yield_value_.has_value(); }
+   bool Finished() const { return finished_; }
+   bool ReadyForNext() const { return ready_for_next_; }
+   T Value() const { return *yield_value_; }
+
+   bool finished_ = false;
+   bool ready_for_next_ = false;
+   std::optional<YieldValueType> yield_value_;
+ };
+
+ struct TransformFinish {
+   template <typename T>
+   operator TransformFlow<T>() && {  // NOLINT explicit
+     return TransformFlow<T>(true, true);
+   }
+ };
+
+ struct TransformSkip {
+   template <typename T>
+   operator TransformFlow<T>() && {  // NOLINT explicit
+     return TransformFlow<T>(false, true);
+   }
+ };
+
+ template <typename T>
+ TransformFlow<T> TransformYield(T value = {}, bool ready_for_next = true) {
+   return TransformFlow<T>(std::move(value), ready_for_next);
+ }
+
+ template <typename T, typename V>
+ using Transformer = std::function<Result<TransformFlow<V>>(T)>;
+
+ template <typename T, typename V>
+ class TransformIterator {
+  public:
+   explicit TransformIterator(Iterator<T> it, Transformer<T, V> transformer)
+       : it_(std::move(it)),
+         transformer_(std::move(transformer)),
+         last_value_(),
+         finished_() {}
+
+   Result<V> Next() {
+     while (!finished_) {
+       ARROW_ASSIGN_OR_RAISE(std::optional<V> next, Pump());
+       if (next.has_value()) {
+         return std::move(*next);
+       }
+       ARROW_ASSIGN_OR_RAISE(last_value_, it_.Next());
+     }
+     return IterationTraits<V>::End();
+   }
+
+  private:
+   // Calls the transform function on the current value. Can return in several ways:
+   // * If the next value is requested (e.g. skip) it will return an empty optional
+   // * If an error status is encountered, that status will be returned
+   // * If finished it will return IterationTraits<V>::End()
+   // * If a value is returned by the transformer, that value will be returned
+   Result<std::optional<V>> Pump() {
+     if (!finished_ && last_value_.has_value()) {
+       auto next_res = transformer_(*last_value_);
+       if (!next_res.ok()) {
+         finished_ = true;
+         return next_res.status();
+       }
+       auto next = *next_res;
+       if (next.ReadyForNext()) {
+         if (IsIterationEnd(*last_value_)) {
+           finished_ = true;
+         }
+         last_value_.reset();
+       }
+       if (next.Finished()) {
+         finished_ = true;
+       }
+       if (next.HasValue()) {
+         return next.Value();
+       }
+     }
+     if (finished_) {
+       return IterationTraits<V>::End();
+     }
+     return std::nullopt;
+   }
+
+   Iterator<T> it_;
+   Transformer<T, V> transformer_;
+   std::optional<T> last_value_;
+   bool finished_ = false;
+ };
+
+ /// \brief Transforms an iterator according to a transformer, returning a new Iterator.
+ ///
+ /// The transformer will be called on each element of the source iterator and for each
+ /// call it can yield a value, skip, or finish the iteration. When yielding a value the
+ /// transformer can choose to consume the source item (the default, ready_for_next = true)
+ /// or to keep it, in which case it will be called again on the same value.
+ ///
+ /// This is essentially a more generic form of the map operation that can return 0, 1, or
+ /// many values for each of the source items.
+ ///
+ /// The transformer will be exposed to the end of the source sequence
+ /// (IterationTraits::End) in case it needs to yield some final item(s).
+ ///
+ /// Any error status returned by the transformer will be returned immediately.
+ template <typename T, typename V>
+ Iterator<V> MakeTransformedIterator(Iterator<T> it, Transformer<T, V> op) {
+   return Iterator<V>(TransformIterator<T, V>(std::move(it), std::move(op)));
+ }
+
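// [Editor's sketch, not part of the header.] A Transformer that keeps only
// even values, assuming a source Iterator<std::optional<int>> named `source`
// (so the transformer sees nullopt as the end sentinel):
//
//   Transformer<std::optional<int>, std::optional<int>> keep_even =
//       [](std::optional<int> v) -> Result<TransformFlow<std::optional<int>>> {
//     if (!v.has_value()) return TransformFinish();  // end of source reached
//     if (*v % 2 != 0) return TransformSkip();       // consume, yield nothing
//     return TransformYield(v);                      // consume and yield v
//   };
//   auto evens = MakeTransformedIterator(std::move(source), keep_even);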
+ template <typename T>
+ struct IterationTraits<Iterator<T>> {
+   // The end condition for an Iterator of Iterators is a default constructed (null)
+   // Iterator.
+   static Iterator<T> End() { return Iterator<T>(); }
+   static bool IsEnd(const Iterator<T>& val) { return !val; }
+ };
+
+ template <typename Fn, typename T>
+ class FunctionIterator {
+  public:
+   explicit FunctionIterator(Fn fn) : fn_(std::move(fn)) {}
+
+   Result<T> Next() { return fn_(); }
+
+  private:
+   Fn fn_;
+ };
+
+ /// \brief Construct an Iterator which invokes a callable on Next()
+ template <typename Fn,
+           typename Ret = typename internal::call_traits::return_type<Fn>::ValueType>
+ Iterator<Ret> MakeFunctionIterator(Fn fn) {
+   return Iterator<Ret>(FunctionIterator<Fn, Ret>(std::move(fn)));
+ }
+
+ template <typename T>
+ Iterator<T> MakeEmptyIterator() {
+   return MakeFunctionIterator([]() -> Result<T> { return IterationTraits<T>::End(); });
+ }
+
+ template <typename T>
+ Iterator<T> MakeErrorIterator(Status s) {
+   return MakeFunctionIterator([s]() -> Result<T> {
+     ARROW_RETURN_NOT_OK(s);
+     return IterationTraits<T>::End();
+   });
+ }
+
+ /// \brief Simple iterator which yields the elements of a std::vector
+ template <typename T>
+ class VectorIterator {
+  public:
+   explicit VectorIterator(std::vector<T> v) : elements_(std::move(v)) {}
+
+   Result<T> Next() {
+     if (i_ == elements_.size()) {
+       return IterationTraits<T>::End();
+     }
+     return std::move(elements_[i_++]);
+   }
+
+  private:
+   std::vector<T> elements_;
+   size_t i_ = 0;
+ };
+
+ template <typename T>
+ Iterator<T> MakeVectorIterator(std::vector<T> v) {
+   return Iterator<T>(VectorIterator<T>(std::move(v)));
+ }
+
+ /// \brief Simple iterator which yields *pointers* to the elements of a std::vector<T>.
+ /// This is provided to support T where IterationTraits<T>::End is not specialized
+ template <typename T>
+ class VectorPointingIterator {
+  public:
+   explicit VectorPointingIterator(std::vector<T> v) : elements_(std::move(v)) {}
+
+   Result<T*> Next() {
+     if (i_ == elements_.size()) {
+       return NULLPTR;
+     }
+     return &elements_[i_++];
+   }
+
+  private:
+   std::vector<T> elements_;
+   size_t i_ = 0;
+ };
+
+ template <typename T>
+ Iterator<T*> MakeVectorPointingIterator(std::vector<T> v) {
+   return Iterator<T*>(VectorPointingIterator<T>(std::move(v)));
+ }
+
+ /// \brief MapIterator takes ownership of an iterator and a function to apply
+ /// on every element. The mapped function is not allowed to fail.
+ template <typename Fn, typename I, typename O>
+ class MapIterator {
+  public:
+   explicit MapIterator(Fn map, Iterator<I> it)
+       : map_(std::move(map)), it_(std::move(it)) {}
+
+   Result<O> Next() {
+     ARROW_ASSIGN_OR_RAISE(I i, it_.Next());
+
+     if (IsIterationEnd(i)) {
+       return IterationTraits<O>::End();
+     }
+
+     return map_(std::move(i));
+   }
+
+  private:
+   Fn map_;
+   Iterator<I> it_;
+ };
+
+ /// \brief Construct a MapIterator from an iterator and a function to apply
+ /// on every element. The mapped function is not allowed to fail.
+ template <typename Fn, typename From = internal::call_traits::argument_type<0, Fn>,
+           typename To = internal::call_traits::return_type<Fn>>
+ Iterator<To> MakeMapIterator(Fn map, Iterator<From> it) {
+   return Iterator<To>(MapIterator<Fn, From, To>(std::move(map), std::move(it)));
+ }
+
+ /// \brief Like MapIterator, but where the function can fail.
+ template <typename Fn, typename From = internal::call_traits::argument_type<0, Fn>,
+           typename To = typename internal::call_traits::return_type<Fn>::ValueType>
+ Iterator<To> MakeMaybeMapIterator(Fn map, Iterator<From> it) {
+   return Iterator<To>(MapIterator<Fn, From, To>(std::move(map), std::move(it)));
+ }
+
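// [Editor's sketch, not part of the header.] MakeMapIterator applies an
// infallible function; MakeMaybeMapIterator accepts a Result-returning one.
// E.g., with a shared_ptr element type (whose default end sentinel is null)
// and a hypothetical source iterator `src`:
//
//   Iterator<std::shared_ptr<int>> src = /* ... */;
//   auto doubled = MakeMapIterator(
//       [](std::shared_ptr<int> i) { return std::make_shared<int>(*i * 2); },
//       std::move(src));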
+ struct FilterIterator {
+   enum Action { ACCEPT, REJECT };
+
+   template <typename To>
+   static Result<std::pair<To, Action>> Reject() {
+     return std::make_pair(IterationTraits<To>::End(), REJECT);
+   }
+
+   template <typename To>
+   static Result<std::pair<To, Action>> Accept(To out) {
+     return std::make_pair(std::move(out), ACCEPT);
+   }
+
+   template <typename To>
+   static Result<std::pair<To, Action>> MaybeAccept(Result<To> maybe_out) {
+     return std::move(maybe_out).Map(Accept<To>);
+   }
+
+   template <typename To>
+   static Result<std::pair<To, Action>> Error(Status s) {
+     return s;
+   }
+
+   template <typename Fn, typename From, typename To>
+   class Impl {
+    public:
+     explicit Impl(Fn filter, Iterator<From> it) : filter_(filter), it_(std::move(it)) {}
+
+     Result<To> Next() {
+       To out = IterationTraits<To>::End();
+       Action action;
+
+       for (;;) {
+         ARROW_ASSIGN_OR_RAISE(From i, it_.Next());
+
+         if (IsIterationEnd(i)) {
+           return IterationTraits<To>::End();
+         }
+
+         ARROW_ASSIGN_OR_RAISE(std::tie(out, action), filter_(std::move(i)));
+
+         if (action == ACCEPT) return out;
+       }
+     }
+
+    private:
+     Fn filter_;
+     Iterator<From> it_;
+   };
+ };
+
+ /// \brief Like MapIterator, but where the function can fail or reject elements.
+ template <
+     typename Fn, typename From = typename internal::call_traits::argument_type<0, Fn>,
+     typename Ret = typename internal::call_traits::return_type<Fn>::ValueType,
+     typename To = typename std::tuple_element<0, Ret>::type,
+     typename Enable = typename std::enable_if<std::is_same<
+         typename std::tuple_element<1, Ret>::type, FilterIterator::Action>::value>::type>
+ Iterator<To> MakeFilterIterator(Fn filter, Iterator<From> it) {
+   return Iterator<To>(
+       FilterIterator::Impl<Fn, From, To>(std::move(filter), std::move(it)));
+ }
+
+ /// \brief FlattenIterator takes an iterator generating iterators and yields a
+ /// unified iterator that flattens/concatenates them into a single stream.
+ template <typename T>
+ class FlattenIterator {
+  public:
+   explicit FlattenIterator(Iterator<Iterator<T>> it) : parent_(std::move(it)) {}
+
+   Result<T> Next() {
+     if (IsIterationEnd(child_)) {
+       // Pop from parent's iterator.
+       ARROW_ASSIGN_OR_RAISE(child_, parent_.Next());
+
+       // Check if final iteration reached.
+       if (IsIterationEnd(child_)) {
+         return IterationTraits<T>::End();
+       }
+
+       return Next();
+     }
+
+     // Pop from child_ and check for depletion.
+     ARROW_ASSIGN_OR_RAISE(T out, child_.Next());
+     if (IsIterationEnd(out)) {
+       // Reset state such that we pop from parent on the recursive call.
+       child_ = IterationTraits<Iterator<T>>::End();
+
+       return Next();
+     }
+
+     return out;
+   }
+
+  private:
+   Iterator<Iterator<T>> parent_;
+   Iterator<T> child_ = IterationTraits<Iterator<T>>::End();
+ };
+
+ template <typename T>
+ Iterator<T> MakeFlattenIterator(Iterator<Iterator<T>> it) {
+   return Iterator<T>(FlattenIterator<T>(std::move(it)));
+ }
+
+ template <typename Reader>
+ Iterator<typename Reader::ValueType> MakeIteratorFromReader(
+     const std::shared_ptr<Reader>& reader) {
+   return MakeFunctionIterator([reader] { return reader->Next(); });
+ }
+
+ }  // namespace arrow
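(Editor's note: a hedged usage sketch for the filter and flatten helpers above; `source` and `iterators` are illustrative names, not part of the header. A filter callable returns a (value, Action) pair via FilterIterator::Accept / FilterIterator::Reject:)

  auto positive = arrow::MakeFilterIterator(
      [](std::shared_ptr<int> i)
          -> arrow::Result<std::pair<std::shared_ptr<int>,
                                     arrow::FilterIterator::Action>> {
        if (*i > 0) return arrow::FilterIterator::Accept(std::move(i));
        return arrow::FilterIterator::Reject<std::shared_ptr<int>>();
      },
      std::move(source));

  // Concatenate a stream of streams into one stream:
  std::vector<arrow::Iterator<std::shared_ptr<int>>> iterators = /* ... */;
  auto flat = arrow::MakeFlattenIterator(
      arrow::MakeVectorIterator(std::move(iterators)));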
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/launder.h ADDED
@@ -0,0 +1,35 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <new>
+
+ namespace arrow {
+ namespace internal {
+
+ #if __cpp_lib_launder
+ using std::launder;
+ #else
+ template <class T>
+ constexpr T* launder(T* p) noexcept {
+   return p;
+ }
+ #endif
+
+ }  // namespace internal
+ }  // namespace arrow
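(Editor's note: a minimal sketch of why launder matters, added for this review; `Widget` is hypothetical. After reusing storage that held an object with a const member, the old pointer must be laundered before use:)

  struct Widget { const int id; };

  alignas(Widget) unsigned char buf[sizeof(Widget)];
  Widget* w = new (buf) Widget{1};
  w->~Widget();
  new (buf) Widget{2};
  // Using `w` directly here would be undefined behavior because Widget has a
  // const member; launder tells the compiler the pointer now designates the
  // new object.
  int id = arrow::internal::launder(w)->id;  // id == 2

On pre-C++17 toolchains the fallback above is a best-effort no-op, which is why the header only aliases std::launder when `__cpp_lib_launder` is available.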
venv/lib/python3.10/site-packages/pyarrow/include/arrow/util/logging.h ADDED
@@ -0,0 +1,259 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #ifdef GANDIVA_IR
+
+ // The LLVM IR code doesn't have an NDEBUG mode, and it shouldn't include references to
+ // streams or stdc++, so the DCHECK macros are made no-ops in that case.
+
+ #define ARROW_IGNORE_EXPR(expr) ((void)(expr))
+
+ #define DCHECK(condition) ARROW_IGNORE_EXPR(condition)
+ #define DCHECK_OK(status) ARROW_IGNORE_EXPR(status)
+ #define DCHECK_EQ(val1, val2) ARROW_IGNORE_EXPR(val1)
+ #define DCHECK_NE(val1, val2) ARROW_IGNORE_EXPR(val1)
+ #define DCHECK_LE(val1, val2) ARROW_IGNORE_EXPR(val1)
+ #define DCHECK_LT(val1, val2) ARROW_IGNORE_EXPR(val1)
+ #define DCHECK_GE(val1, val2) ARROW_IGNORE_EXPR(val1)
+ #define DCHECK_GT(val1, val2) ARROW_IGNORE_EXPR(val1)
+
+ #else  // !GANDIVA_IR
+
+ #include <memory>
+ #include <ostream>
+ #include <string>
+
+ #include "arrow/util/macros.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+ namespace util {
+
+ enum class ArrowLogLevel : int {
+   ARROW_DEBUG = -1,
+   ARROW_INFO = 0,
+   ARROW_WARNING = 1,
+   ARROW_ERROR = 2,
+   ARROW_FATAL = 3
+ };
+
+ #define ARROW_LOG_INTERNAL(level) ::arrow::util::ArrowLog(__FILE__, __LINE__, level)
+ #define ARROW_LOG(level) ARROW_LOG_INTERNAL(::arrow::util::ArrowLogLevel::ARROW_##level)
+
+ #define ARROW_IGNORE_EXPR(expr) ((void)(expr))
+
+ #define ARROW_CHECK_OR_LOG(condition, level) \
+   ARROW_PREDICT_TRUE(condition)              \
+   ? ARROW_IGNORE_EXPR(0)                     \
+   : ::arrow::util::Voidify() & ARROW_LOG(level) << " Check failed: " #condition " "
+
+ #define ARROW_CHECK(condition) ARROW_CHECK_OR_LOG(condition, FATAL)
+
+ // If 'to_call' returns a bad status, CHECK immediately with a logged message
+ // of 'msg' followed by the status.
+ #define ARROW_CHECK_OK_PREPEND(to_call, msg, level)                 \
+   do {                                                              \
+     ::arrow::Status _s = (to_call);                                 \
+     ARROW_CHECK_OR_LOG(_s.ok(), level)                              \
+         << "Operation failed: " << ARROW_STRINGIFY(to_call) << "\n" \
+         << (msg) << ": " << _s.ToString();                          \
+   } while (false)
+
+ // If the status is bad, CHECK immediately, appending the status to the
+ // logged message.
+ #define ARROW_CHECK_OK(s) ARROW_CHECK_OK_PREPEND(s, "Bad status", FATAL)
+
+ #define ARROW_CHECK_EQ(val1, val2) ARROW_CHECK((val1) == (val2))
+ #define ARROW_CHECK_NE(val1, val2) ARROW_CHECK((val1) != (val2))
+ #define ARROW_CHECK_LE(val1, val2) ARROW_CHECK((val1) <= (val2))
+ #define ARROW_CHECK_LT(val1, val2) ARROW_CHECK((val1) < (val2))
+ #define ARROW_CHECK_GE(val1, val2) ARROW_CHECK((val1) >= (val2))
+ #define ARROW_CHECK_GT(val1, val2) ARROW_CHECK((val1) > (val2))
+
+ #ifdef NDEBUG
+ #define ARROW_DFATAL ::arrow::util::ArrowLogLevel::ARROW_WARNING
+
+ // CAUTION: DCHECK_OK() always evaluates its argument, but other DCHECK*() macros
+ // only do so in debug mode.
+
+ #define ARROW_DCHECK(condition)               \
+   while (false) ARROW_IGNORE_EXPR(condition); \
+   while (false) ::arrow::util::detail::NullLog()
+ #define ARROW_DCHECK_OK(s) \
+   ARROW_IGNORE_EXPR(s);    \
+   while (false) ::arrow::util::detail::NullLog()
+ #define ARROW_DCHECK_EQ(val1, val2)      \
+   while (false) ARROW_IGNORE_EXPR(val1); \
+   while (false) ARROW_IGNORE_EXPR(val2); \
+   while (false) ::arrow::util::detail::NullLog()
+ #define ARROW_DCHECK_NE(val1, val2)      \
+   while (false) ARROW_IGNORE_EXPR(val1); \
+   while (false) ARROW_IGNORE_EXPR(val2); \
+   while (false) ::arrow::util::detail::NullLog()
+ #define ARROW_DCHECK_LE(val1, val2)      \
+   while (false) ARROW_IGNORE_EXPR(val1); \
+   while (false) ARROW_IGNORE_EXPR(val2); \
+   while (false) ::arrow::util::detail::NullLog()
+ #define ARROW_DCHECK_LT(val1, val2)      \
+   while (false) ARROW_IGNORE_EXPR(val1); \
+   while (false) ARROW_IGNORE_EXPR(val2); \
+   while (false) ::arrow::util::detail::NullLog()
+ #define ARROW_DCHECK_GE(val1, val2)      \
+   while (false) ARROW_IGNORE_EXPR(val1); \
+   while (false) ARROW_IGNORE_EXPR(val2); \
+   while (false) ::arrow::util::detail::NullLog()
+ #define ARROW_DCHECK_GT(val1, val2)      \
+   while (false) ARROW_IGNORE_EXPR(val1); \
+   while (false) ARROW_IGNORE_EXPR(val2); \
+   while (false) ::arrow::util::detail::NullLog()
+
+ #else
+ #define ARROW_DFATAL ::arrow::util::ArrowLogLevel::ARROW_FATAL
+
+ #define ARROW_DCHECK ARROW_CHECK
+ #define ARROW_DCHECK_OK ARROW_CHECK_OK
+ #define ARROW_DCHECK_EQ ARROW_CHECK_EQ
+ #define ARROW_DCHECK_NE ARROW_CHECK_NE
+ #define ARROW_DCHECK_LE ARROW_CHECK_LE
+ #define ARROW_DCHECK_LT ARROW_CHECK_LT
+ #define ARROW_DCHECK_GE ARROW_CHECK_GE
+ #define ARROW_DCHECK_GT ARROW_CHECK_GT
+
+ #endif  // NDEBUG
+
+ #define DCHECK ARROW_DCHECK
+ #define DCHECK_OK ARROW_DCHECK_OK
+ #define DCHECK_EQ ARROW_DCHECK_EQ
+ #define DCHECK_NE ARROW_DCHECK_NE
+ #define DCHECK_LE ARROW_DCHECK_LE
+ #define DCHECK_LT ARROW_DCHECK_LT
+ #define DCHECK_GE ARROW_DCHECK_GE
+ #define DCHECK_GT ARROW_DCHECK_GT
+
+ // This code is adapted from
+ // https://github.com/ray-project/ray/blob/master/src/ray/util/logging.h.
+
+ // To keep the logging library pluggable with other logging libraries and to
+ // hide the implementation from the user, ArrowLog is only a declaration; the
+ // implementation lives in logging.cc, where different log libraries can be
+ // selected using different macros.
+
+ // This is also a null log which does not output anything.
+ class ARROW_EXPORT ArrowLogBase {
+  public:
+   virtual ~ArrowLogBase() {}
+
+   virtual bool IsEnabled() const { return false; }
+
+   template <typename T>
+   ArrowLogBase& operator<<(const T& t) {
+     if (IsEnabled()) {
+       Stream() << t;
+     }
+     return *this;
+   }
+
+  protected:
+   virtual std::ostream& Stream() = 0;
+ };
+
+ class ARROW_EXPORT ArrowLog : public ArrowLogBase {
+  public:
+   ArrowLog(const char* file_name, int line_number, ArrowLogLevel severity);
+   ~ArrowLog() override;
+
+   /// Return whether or not the current logging instance is enabled.
+   ///
+   /// \return True if logging is enabled and false otherwise.
+   bool IsEnabled() const override;
+
+   /// The initialization function of Arrow log for a program; it should be called
+   /// only once.
+   ///
+   /// \param appName The app name which starts the log.
+   /// \param severity_threshold Logging threshold for the program.
+   /// \param logDir Logging output file name. If empty, the log won't output to a file.
+   static void StartArrowLog(const std::string& appName,
+                             ArrowLogLevel severity_threshold = ArrowLogLevel::ARROW_INFO,
+                             const std::string& logDir = "");
+
+   /// The shutdown function of Arrow log; it should be paired with StartArrowLog.
+   static void ShutDownArrowLog();
+
+   /// Install the failure signal handler to output the call stack on crash.
+   /// If glog is not installed, this function won't do anything.
+   static void InstallFailureSignalHandler();
+
+   /// Uninstall the signal actions installed by InstallFailureSignalHandler.
+   static void UninstallSignalAction();
+
+   /// Return whether or not the log level is enabled in the current setting.
+   ///
+   /// \param log_level The input log level to test.
+   /// \return True if the input log level is not lower than the threshold.
+   static bool IsLevelEnabled(ArrowLogLevel log_level);
+
+  private:
+   ARROW_DISALLOW_COPY_AND_ASSIGN(ArrowLog);
+
+   // Hide the implementation of the log provider behind a void*.
+   // Otherwise, library users might have to define the same macro in order to
+   // use the correct header file.
+   void* logging_provider_;
+   /// True if log messages should be logged and false if they should be ignored.
+   bool is_enabled_;
+
+   static ArrowLogLevel severity_threshold_;
+
+  protected:
+   std::ostream& Stream() override;
+ };
+
+ // This class makes ARROW_CHECK compile by changing the << operator to void.
+ // This class is copied from glog.
+ class ARROW_EXPORT Voidify {
+  public:
+   Voidify() {}
+   // This has to be an operator with a precedence lower than << but
+   // higher than ?:
+   void operator&(ArrowLogBase&) {}
+ };
+
+ namespace detail {
+
+ /// @brief A helper for the nil log sink.
+ ///
+ /// Using this helper is analogous to sending log messages to /dev/null:
+ /// nothing gets logged.
+ class NullLog {
+  public:
+   /// The no-op output operator.
+   ///
+   /// @param [in] t
+   ///   The object to send into the nil sink.
+   /// @return Reference to the updated object.
+   template <class T>
+   NullLog& operator<<(const T& t) {
+     return *this;
+   }
+ };
+
+ }  // namespace detail
+ }  // namespace util
+ }  // namespace arrow
+
+ #endif  // GANDIVA_IR
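(Editor's note: a short usage sketch of the macros above, added for this review; `Example` and `n` are illustrative:)

  #include "arrow/util/logging.h"

  void Example(int n) {
    ARROW_LOG(INFO) << "n = " << n;                    // leveled stream logging
    ARROW_CHECK_GE(n, 0) << "n must be non-negative";  // fatal if the check fails
    DCHECK_LT(n, 100);  // compiled out (no-op) in NDEBUG builds
  }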