applied-ai-018 committed
Commit d8fac73 · verified · 1 parent: e0df13e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/api.h +39 -0
  3. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset.h +481 -0
  4. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset_writer.h +103 -0
  5. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_base.h +495 -0
  6. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_csv.h +144 -0
  7. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_ipc.h +123 -0
  8. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_json.h +98 -0
  9. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_orc.h +75 -0
  10. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_parquet.h +404 -0
  11. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/parquet_encryption_config.h +75 -0
  12. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/partition.h +432 -0
  13. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/pch.h +27 -0
  14. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/plan.h +33 -0
  15. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/projector.h +32 -0
  16. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/scanner.h +578 -0
  17. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/type_fwd.h +113 -0
  18. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/visibility.h +50 -0
  19. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_parser.h +54 -0
  20. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/reader.h +118 -0
  21. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/type_fwd.h +26 -0
  22. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/executor_util.h +55 -0
  23. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/future_util.h +142 -0
  24. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/generator.h +321 -0
  25. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_util.h +557 -0
  26. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/matchers.h +467 -0
  27. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/pch.h +25 -0
  28. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/uniform_real.h +84 -0
  29. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/util.h +140 -0
  30. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/visibility.h +48 -0
  31. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h +145 -0
  32. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h +2058 -0
  33. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_util.h +457 -0
  34. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/basic_decimal.h +492 -0
  35. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_block_counter.h +570 -0
  36. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_stream_utils.h +529 -0
  37. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_reader.h +273 -0
  38. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h +88 -0
  39. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h +89 -0
  40. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx2.h +28 -0
  41. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h +28 -0
  42. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h +0 -0
  43. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h +28 -0
  44. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h +88 -0
  45. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/cancel.h +118 -0
  46. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/compare.h +62 -0
  47. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/concurrent_map.h +68 -0
  48. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/config.h +61 -0
  49. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/counting_semaphore.h +60 -0
  50. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h +114 -0
.gitattributes CHANGED
@@ -181,3 +181,4 @@ env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs d
  env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1500 filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/pyarrow/libparquet.so.1500 filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1500 filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/api.h ADDED
@@ -0,0 +1,39 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include "arrow/compute/expression.h"
+ #include "arrow/dataset/dataset.h"
+ #include "arrow/dataset/discovery.h"
+ #include "arrow/dataset/file_base.h"
+ #ifdef ARROW_CSV
+ #include "arrow/dataset/file_csv.h"
+ #endif
+ #ifdef ARROW_JSON
+ #include "arrow/dataset/file_json.h"
+ #endif
+ #include "arrow/dataset/file_ipc.h"
+ #ifdef ARROW_ORC
+ #include "arrow/dataset/file_orc.h"
+ #endif
+ #ifdef ARROW_PARQUET
+ #include "arrow/dataset/file_parquet.h"
+ #endif
+ #include "arrow/dataset/scanner.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset.h ADDED
@@ -0,0 +1,481 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <functional>
+ #include <memory>
+ #include <optional>
+ #include <string>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/compute/expression.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/util/async_generator_fwd.h"
+ #include "arrow/util/future.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/mutex.h"
+
+ namespace arrow {
+
+ namespace internal {
+ class Executor;
+ } // namespace internal
+
+ namespace dataset {
+
+ using RecordBatchGenerator = std::function<Future<std::shared_ptr<RecordBatch>>()>;
+
+ /// \brief Description of a column to scan
+ struct ARROW_DS_EXPORT FragmentSelectionColumn {
+ /// \brief The path to the column to load
+ FieldPath path;
+ /// \brief The type of the column in the dataset schema
+ ///
+ /// A format may choose to ignore this field completely. For example, when
+ /// reading from IPC the reader can just return the column in the data type
+ /// that is stored on disk. There is no point in doing anything special.
+ ///
+ /// However, some formats may be capable of casting on the fly. For example,
+ /// when reading from CSV, if we know the target type of the column, we can
+ /// convert from string to the target type as we read.
+ DataType* requested_type;
+ };
+
+ /// \brief A list of columns that should be loaded from a fragment
+ ///
+ /// The paths in this selection should be referring to the fragment schema. This class
+ /// contains a virtual destructor as it is expected evolution strategies will need to
+ /// extend this to add any information needed to later evolve the batches.
+ ///
+ /// For example, in the basic evolution strategy, we keep track of which columns
+ /// were missing from the file so that we can fill those in with null when evolving.
+ class ARROW_DS_EXPORT FragmentSelection {
+ public:
+ explicit FragmentSelection(std::vector<FragmentSelectionColumn> columns)
+ : columns_(std::move(columns)) {}
+ virtual ~FragmentSelection() = default;
+ /// The columns that should be loaded from the fragment
+ const std::vector<FragmentSelectionColumn>& columns() const { return columns_; }
+
+ private:
+ std::vector<FragmentSelectionColumn> columns_;
+ };
+
+ /// \brief Instructions for scanning a particular fragment
+ ///
+ /// The fragment scan request is derived from ScanV2Options. The main
+ /// difference is that the scan options are based on the dataset schema
+ /// while the fragment request is based on the fragment schema.
+ struct ARROW_DS_EXPORT FragmentScanRequest {
+ /// \brief A row filter
+ ///
+ /// The filter expression should be written against the fragment schema.
+ ///
+ /// \see ScanV2Options for details on how this filter should be applied
+ compute::Expression filter = compute::literal(true);
+
+ /// \brief The columns to scan
+ ///
+ /// These indices refer to the fragment schema
+ ///
+ /// Note: This is NOT a simple list of top-level column indices.
+ /// For more details \see ScanV2Options
+ ///
+ /// If possible a fragment should only read from disk the data needed
+ /// to satisfy these columns. If a format cannot partially read a nested
+ /// column (e.g. JSON) then it must apply the column selection (in memory)
+ /// before returning the scanned batch.
+ std::shared_ptr<FragmentSelection> fragment_selection;
+ /// \brief Options specific to the format being scanned
+ const FragmentScanOptions* format_scan_options;
+ };
+
+ /// \brief An iterator-like object that can yield batches created from a fragment
+ class ARROW_DS_EXPORT FragmentScanner {
+ public:
+ /// This instance will only be destroyed after all ongoing scan futures
+ /// have been completed.
+ ///
+ /// This means any callbacks created as part of the scan can safely
+ /// capture `this`
+ virtual ~FragmentScanner() = default;
+ /// \brief Scan a batch of data from the file
+ /// \param batch_number The index of the batch to read
+ virtual Future<std::shared_ptr<RecordBatch>> ScanBatch(int batch_number) = 0;
+ /// \brief Calculate an estimate of how many data bytes the given batch will represent
+ ///
+ /// "Data bytes" should be the total size of all the buffers once the data has been
+ /// decoded into the Arrow format.
+ virtual int64_t EstimatedDataBytes(int batch_number) = 0;
+ /// \brief The number of batches in the fragment to scan
+ virtual int NumBatches() = 0;
+ };
+
+ /// \brief Information learned about a fragment through inspection
+ ///
+ /// This information can be used to figure out which fields need
+ /// to be read from a file and how the data read in should be evolved
+ /// to match the dataset schema.
+ ///
+ /// For example, from a CSV file we can inspect and learn the column
+ /// names and use those column names to determine which columns to load
+ /// from the CSV file.
+ struct ARROW_DS_EXPORT InspectedFragment {
+ explicit InspectedFragment(std::vector<std::string> column_names)
+ : column_names(std::move(column_names)) {}
+ std::vector<std::string> column_names;
+ };
+
+ /// \brief A granular piece of a Dataset, such as an individual file.
+ ///
+ /// A Fragment can be read/scanned separately from other fragments. It yields a
+ /// collection of RecordBatches when scanned
+ ///
+ /// Note that Fragments have well defined physical schemas which are reconciled by
+ /// the Datasets which contain them; these physical schemas may differ from a parent
+ /// Dataset's schema and the physical schemas of sibling Fragments.
+ class ARROW_DS_EXPORT Fragment : public std::enable_shared_from_this<Fragment> {
+ public:
+ /// \brief An expression that represents no known partition information
+ static const compute::Expression kNoPartitionInformation;
+
+ /// \brief Return the physical schema of the Fragment.
+ ///
+ /// The physical schema is also called the writer schema.
+ /// This method is blocking and may suffer from high-latency filesystems.
+ /// The schema is cached after being read once, or may be specified at construction.
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchema();
+
+ /// An asynchronous version of Scan
+ virtual Result<RecordBatchGenerator> ScanBatchesAsync(
+ const std::shared_ptr<ScanOptions>& options) = 0;
+
+ /// \brief Inspect a fragment to learn basic information
+ ///
+ /// This will be called before a scan and a fragment should attach whatever
+ /// information will be needed to figure out an evolution strategy. This information
+ /// will then be passed to the call to BeginScan
+ virtual Future<std::shared_ptr<InspectedFragment>> InspectFragment(
+ const FragmentScanOptions* format_options, compute::ExecContext* exec_context);
+
+ /// \brief Start a scan operation
+ virtual Future<std::shared_ptr<FragmentScanner>> BeginScan(
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
+ const FragmentScanOptions* format_options, compute::ExecContext* exec_context);
+
+ /// \brief Count the number of rows in this fragment matching the filter using metadata
+ /// only. That is, this method may perform I/O, but will not load data.
+ ///
+ /// If this is not possible, resolve with an empty optional. The fragment can perform
+ /// I/O (e.g. to read metadata) before deciding whether it can satisfy the request.
+ virtual Future<std::optional<int64_t>> CountRows(
+ compute::Expression predicate, const std::shared_ptr<ScanOptions>& options);
+
+ virtual std::string type_name() const = 0;
+ virtual std::string ToString() const { return type_name(); }
+
+ /// \brief An expression which evaluates to true for all data viewed by this
+ /// Fragment.
+ const compute::Expression& partition_expression() const {
+ return partition_expression_;
+ }
+
+ virtual ~Fragment() = default;
+
+ protected:
+ Fragment() = default;
+ explicit Fragment(compute::Expression partition_expression,
+ std::shared_ptr<Schema> physical_schema);
+
+ virtual Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() = 0;
+
+ util::Mutex physical_schema_mutex_;
+ compute::Expression partition_expression_ = compute::literal(true);
+ std::shared_ptr<Schema> physical_schema_;
+ };
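Editor's note: the Fragment contract above is easiest to see in use. A minimal sketch, assuming a dataset built elsewhere and relying on arrow::Iterator's range-for support (each element is a Result); InspectFragments is a hypothetical helper:

// Enumerate fragments and read their physical (writer) schemas.
#include <arrow/dataset/api.h>

arrow::Status InspectFragments(const std::shared_ptr<arrow::dataset::Dataset>& dataset) {
  ARROW_ASSIGN_OR_RAISE(auto fragments, dataset->GetFragments());
  for (auto maybe_fragment : fragments) {
    ARROW_ASSIGN_OR_RAISE(auto fragment, maybe_fragment);
    // Blocking; the schema is cached on the fragment after the first read.
    ARROW_ASSIGN_OR_RAISE(auto physical_schema, fragment->ReadPhysicalSchema());
    // physical_schema may differ from dataset->schema(); the dataset's
    // evolution strategy reconciles the two during a scan.
  }
  return arrow::Status::OK();
}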
+
+ /// \brief Per-scan options for fragment(s) in a dataset.
+ ///
+ /// These options are not intrinsic to the format or fragment itself, but do affect
+ /// the results of a scan. These are options which make sense to change between
+ /// repeated reads of the same dataset, such as format-specific conversion options
+ /// (that do not affect the schema).
+ ///
+ /// \ingroup dataset-scanning
+ class ARROW_DS_EXPORT FragmentScanOptions {
+ public:
+ virtual std::string type_name() const = 0;
+ virtual std::string ToString() const { return type_name(); }
+ virtual ~FragmentScanOptions() = default;
+ };
+
+ /// \defgroup dataset-implementations Concrete implementations
+ ///
+ /// @{
+
+ /// \brief A trivial Fragment that yields ScanTask out of a fixed set of
+ /// RecordBatch.
+ class ARROW_DS_EXPORT InMemoryFragment : public Fragment {
+ public:
+ class Scanner;
+ InMemoryFragment(std::shared_ptr<Schema> schema, RecordBatchVector record_batches,
+ compute::Expression = compute::literal(true));
+ explicit InMemoryFragment(RecordBatchVector record_batches,
+ compute::Expression = compute::literal(true));
+
+ Result<RecordBatchGenerator> ScanBatchesAsync(
+ const std::shared_ptr<ScanOptions>& options) override;
+ Future<std::optional<int64_t>> CountRows(
+ compute::Expression predicate,
+ const std::shared_ptr<ScanOptions>& options) override;
+
+ Future<std::shared_ptr<InspectedFragment>> InspectFragment(
+ const FragmentScanOptions* format_options,
+ compute::ExecContext* exec_context) override;
+ Future<std::shared_ptr<FragmentScanner>> BeginScan(
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
+ const FragmentScanOptions* format_options,
+ compute::ExecContext* exec_context) override;
+
+ std::string type_name() const override { return "in-memory"; }
+
+ protected:
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() override;
+
+ RecordBatchVector record_batches_;
+ };
+
+ /// @}
+
+ using FragmentGenerator = AsyncGenerator<std::shared_ptr<Fragment>>;
+
+ /// \brief Rules for converting the dataset schema to and from fragment schemas
+ class ARROW_DS_EXPORT FragmentEvolutionStrategy {
+ public:
+ /// This instance will only be destroyed when all scan operations for the
+ /// fragment have completed.
+ virtual ~FragmentEvolutionStrategy() = default;
+ /// \brief A guarantee that applies to all batches of this fragment
+ ///
+ /// For example, if a fragment is missing one of the fields in the dataset
+ /// schema then a typical evolution strategy is to set that field to null.
+ ///
+ /// So if the column at index 3 is missing then the guarantee is
+ /// FieldRef(3) == null
+ ///
+ /// Individual field guarantees should be AND'd together and returned
+ /// as a single expression.
+ virtual Result<compute::Expression> GetGuarantee(
+ const std::vector<FieldPath>& dataset_schema_selection) const = 0;
+
+ /// \brief Return a fragment schema selection given a dataset schema selection
+ ///
+ /// For example, if the user wants fields 2 & 4 of the dataset schema and
+ /// in this fragment the field 2 is missing and the field 4 is at index 1 then
+ /// this should return {1}
+ virtual Result<std::unique_ptr<FragmentSelection>> DevolveSelection(
+ const std::vector<FieldPath>& dataset_schema_selection) const = 0;
+
+ /// \brief Return a filter expression bound to the fragment schema given
+ /// a filter expression bound to the dataset schema
+ ///
+ /// The dataset scan filter will first be simplified by the guarantee returned
+ /// by GetGuarantee. This means an evolution that only handles dropping or casting
+ /// fields doesn't need to do anything here except return the given filter.
+ ///
+ /// On the other hand, an evolution that is doing some kind of aliasing will likely
+ /// need to convert field references in the filter to the aliased field references
+ /// where appropriate.
+ virtual Result<compute::Expression> DevolveFilter(
+ const compute::Expression& filter) const = 0;
+
+ /// \brief Convert a batch from the fragment schema to the dataset schema
+ ///
+ /// Typically this involves casting columns from the data type stored on disk
+ /// to the data type of the dataset schema. For example, this fragment might
+ /// have columns stored as int32 and the dataset schema might have int64 for
+ /// the column. In this case we should cast the column from int32 to int64.
+ ///
+ /// Note: A fragment may perform this cast as the data is read from disk. In
+ /// that case a cast might not be needed.
+ virtual Result<compute::ExecBatch> EvolveBatch(
+ const std::shared_ptr<RecordBatch>& batch,
+ const std::vector<FieldPath>& dataset_selection,
+ const FragmentSelection& selection) const = 0;
+
+ /// \brief Return a string description of this strategy
+ virtual std::string ToString() const = 0;
+ };
+
+ /// \brief Lookup to create a FragmentEvolutionStrategy for a given fragment
+ class ARROW_DS_EXPORT DatasetEvolutionStrategy {
+ public:
+ virtual ~DatasetEvolutionStrategy() = default;
+ /// \brief Create a strategy for evolving from the given fragment
+ /// to the schema of the given dataset
+ virtual std::unique_ptr<FragmentEvolutionStrategy> GetStrategy(
+ const Dataset& dataset, const Fragment& fragment,
+ const InspectedFragment& inspected_fragment) = 0;
+
+ /// \brief Return a string description of this strategy
+ virtual std::string ToString() const = 0;
+ };
+
+ ARROW_DS_EXPORT std::unique_ptr<DatasetEvolutionStrategy>
+ MakeBasicDatasetEvolutionStrategy();
+
+ /// \brief A container of zero or more Fragments.
+ ///
+ /// A Dataset acts as a union of Fragments, e.g. files deeply nested in a
+ /// directory. A Dataset has a schema to which Fragments must align during a
+ /// scan operation. This is analogous to Avro's reader and writer schema.
+ class ARROW_DS_EXPORT Dataset : public std::enable_shared_from_this<Dataset> {
+ public:
+ /// \brief Begin to build a new Scan operation against this Dataset
+ Result<std::shared_ptr<ScannerBuilder>> NewScan();
+
+ /// \brief GetFragments returns an iterator of Fragments given a predicate.
+ Result<FragmentIterator> GetFragments(compute::Expression predicate);
+ Result<FragmentIterator> GetFragments();
+
+ /// \brief Async versions of `GetFragments`.
+ Result<FragmentGenerator> GetFragmentsAsync(compute::Expression predicate);
+ Result<FragmentGenerator> GetFragmentsAsync();
+
+ const std::shared_ptr<Schema>& schema() const { return schema_; }
+
+ /// \brief An expression which evaluates to true for all data viewed by this Dataset.
+ /// May be null, which indicates no information is available.
+ const compute::Expression& partition_expression() const {
+ return partition_expression_;
+ }
+
+ /// \brief The name identifying the kind of Dataset
+ virtual std::string type_name() const = 0;
+
+ /// \brief Return a copy of this Dataset with a different schema.
+ ///
+ /// The copy will view the same Fragments. If the new schema is not compatible with the
+ /// original dataset's schema then an error will be raised.
+ virtual Result<std::shared_ptr<Dataset>> ReplaceSchema(
+ std::shared_ptr<Schema> schema) const = 0;
+
+ /// \brief Rules used by this dataset to handle schema evolution
+ DatasetEvolutionStrategy* evolution_strategy() { return evolution_strategy_.get(); }
+
+ virtual ~Dataset() = default;
+
+ protected:
+ explicit Dataset(std::shared_ptr<Schema> schema) : schema_(std::move(schema)) {}
+
+ Dataset(std::shared_ptr<Schema> schema, compute::Expression partition_expression);
+
+ virtual Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) = 0;
+ /// \brief Default non-virtual implementation method for the base
+ /// `GetFragmentsAsyncImpl` method, which creates a fragment generator for
+ /// the dataset, possibly filtering results with a predicate (forwarding to
+ /// the synchronous `GetFragmentsImpl` method and moving the computations
+ /// to the background, using the IO thread pool).
+ ///
+ /// Currently, `executor` is always the same as `internal::GetCPUThreadPool()`,
+ /// which means the results from the underlying fragment generator will be
+ /// transferred to the default CPU thread pool. The generator itself is
+ /// offloaded to run on the default IO thread pool.
+ virtual Result<FragmentGenerator> GetFragmentsAsyncImpl(
+ compute::Expression predicate, arrow::internal::Executor* executor);
+
+ std::shared_ptr<Schema> schema_;
+ compute::Expression partition_expression_ = compute::literal(true);
+ std::unique_ptr<DatasetEvolutionStrategy> evolution_strategy_ =
+ MakeBasicDatasetEvolutionStrategy();
+ };
+
+ /// \addtogroup dataset-implementations
+ ///
+ /// @{
+
+ /// \brief A Source which yields fragments wrapping a stream of record batches.
+ ///
+ /// The record batches must match the schema provided to the source at construction.
+ class ARROW_DS_EXPORT InMemoryDataset : public Dataset {
+ public:
+ class RecordBatchGenerator {
+ public:
+ virtual ~RecordBatchGenerator() = default;
+ virtual RecordBatchIterator Get() const = 0;
+ };
+
+ /// Construct a dataset from a schema and a factory of record batch iterators.
+ InMemoryDataset(std::shared_ptr<Schema> schema,
+ std::shared_ptr<RecordBatchGenerator> get_batches)
+ : Dataset(std::move(schema)), get_batches_(std::move(get_batches)) {}
+
+ /// Convenience constructor taking a fixed list of batches
+ InMemoryDataset(std::shared_ptr<Schema> schema, RecordBatchVector batches);
+
+ /// Convenience constructor taking a Table
+ explicit InMemoryDataset(std::shared_ptr<Table> table);
+
+ std::string type_name() const override { return "in-memory"; }
+
+ Result<std::shared_ptr<Dataset>> ReplaceSchema(
+ std::shared_ptr<Schema> schema) const override;
+
+ protected:
+ Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;
+
+ std::shared_ptr<RecordBatchGenerator> get_batches_;
+ };
+
+ /// \brief A Dataset wrapping child Datasets.
+ class ARROW_DS_EXPORT UnionDataset : public Dataset {
+ public:
+ /// \brief Construct a UnionDataset wrapping child Datasets.
+ ///
+ /// \param[in] schema the schema of the resulting dataset.
+ /// \param[in] children one or more child Datasets. Their schemas must be identical to
+ /// schema.
+ static Result<std::shared_ptr<UnionDataset>> Make(std::shared_ptr<Schema> schema,
+ DatasetVector children);
+
+ const DatasetVector& children() const { return children_; }
+
+ std::string type_name() const override { return "union"; }
+
+ Result<std::shared_ptr<Dataset>> ReplaceSchema(
+ std::shared_ptr<Schema> schema) const override;
+
+ protected:
+ Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;
+
+ explicit UnionDataset(std::shared_ptr<Schema> schema, DatasetVector children)
+ : Dataset(std::move(schema)), children_(std::move(children)) {}
+
+ DatasetVector children_;
+
+ friend class UnionDatasetFactory;
+ };
+
+ /// @}
+
+ } // namespace dataset
+ } // namespace arrow
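Editor's note: a short sketch of the UnionDataset declared above, assuming two tables that share a schema; MakeUnion is a hypothetical helper:

// Combine two in-memory datasets; Make() rejects children whose schemas differ.
#include <arrow/dataset/api.h>

arrow::Result<std::shared_ptr<arrow::dataset::UnionDataset>> MakeUnion(
    const std::shared_ptr<arrow::Table>& a, const std::shared_ptr<arrow::Table>& b) {
  arrow::dataset::DatasetVector children = {
      std::make_shared<arrow::dataset::InMemoryDataset>(a),
      std::make_shared<arrow::dataset::InMemoryDataset>(b)};
  return arrow::dataset::UnionDataset::Make(a->schema(), std::move(children));
}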
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset_writer.h ADDED
@@ -0,0 +1,103 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <string>
+
+ #include "arrow/dataset/file_base.h"
+ #include "arrow/record_batch.h"
+ #include "arrow/status.h"
+ #include "arrow/util/async_util.h"
+ #include "arrow/util/future.h"
+
+ namespace arrow {
+ namespace dataset {
+ namespace internal {
+
+ // This lines up with our other defaults in the scanner and execution plan
+ constexpr uint64_t kDefaultDatasetWriterMaxRowsQueued = 8 * 1024 * 1024;
+
+ /// \brief Utility class that manages a set of writers to different paths
+ ///
+ /// Writers may be closed and reopened (and a new file created) based on the dataset
+ /// write options (for example, max_rows_per_file or max_open_files)
+ ///
+ /// The dataset writer enforces its own back pressure based on the # of rows (as opposed
+ /// to # of batches which is how it is typically enforced elsewhere) and # of files.
+ class ARROW_DS_EXPORT DatasetWriter {
+ public:
+ /// \brief Create a dataset writer
+ ///
+ /// Will fail if basename_template is invalid or if there is existing data and
+ /// existing_data_behavior is kError
+ ///
+ /// \param write_options options to control how the data should be written
+ /// \param max_rows_queued max # of rows allowed to be queued before the dataset_writer
+ /// will ask for backpressure
+ static Result<std::unique_ptr<DatasetWriter>> Make(
+ FileSystemDatasetWriteOptions write_options, util::AsyncTaskScheduler* scheduler,
+ std::function<void()> pause_callback, std::function<void()> resume_callback,
+ std::function<void()> finish_callback,
+ uint64_t max_rows_queued = kDefaultDatasetWriterMaxRowsQueued);
+
+ ~DatasetWriter();
+
+ /// \brief Write a batch to the dataset
+ /// \param[in] batch The batch to write
+ /// \param[in] directory The directory to write to
+ ///
+ /// Note: The written filename will be {directory}/{filename_factory(i)} where i is a
+ /// counter controlled by `max_open_files` and `max_rows_per_file`
+ ///
+ /// If multiple WriteRecordBatch calls arrive with the same `directory` then the batches
+ /// may be written to the same file.
+ ///
+ /// The returned future will be marked finished when the record batch has been queued
+ /// to be written. If the returned future is unfinished then this indicates the dataset
+ /// writer's queue is full and the data provider should pause.
+ ///
+ /// This method is NOT async reentrant. The returned future will only be unfinished
+ /// if back pressure needs to be applied. Async reentrancy is not necessary for
+ /// concurrent writes to happen. Calling this method again before the previous future
+ /// completes will not just violate max_rows_queued but likely lead to race conditions.
+ ///
+ /// One thing to note is that the ordering of your data can affect your maximum
+ /// potential parallelism. If this seems odd then consider a dataset where the first
+ /// 1000 batches go to the same directory and then the 1001st batch goes to a different
+ /// directory. The only way to get two parallel writes immediately would be to queue
+ /// all 1000 pending writes to the first directory.
+ void WriteRecordBatch(std::shared_ptr<RecordBatch> batch, const std::string& directory,
+ const std::string& prefix = "");
+
+ /// Finish all pending writes and close any open files
+ void Finish();
+
+ protected:
+ DatasetWriter(FileSystemDatasetWriteOptions write_options,
+ util::AsyncTaskScheduler* scheduler, std::function<void()> pause_callback,
+ std::function<void()> resume_callback,
+ std::function<void()> finish_callback,
+ uint64_t max_rows_queued = kDefaultDatasetWriterMaxRowsQueued);
+
+ class DatasetWriterImpl;
+ std::unique_ptr<DatasetWriterImpl> impl_;
+ };
+
+ } // namespace internal
+ } // namespace dataset
+ } // namespace arrow
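Editor's note: DatasetWriter is an internal utility; most callers go through FileSystemDataset::Write (declared in file_base.h below). A sketch of wiring its back-pressure callbacks, assuming an AsyncTaskScheduler obtained elsewhere; the paused flag and helper name are illustrative:

// Pause/resume callbacks fire as the queued row count crosses max_rows_queued.
#include <atomic>
#include <arrow/dataset/dataset_writer.h>

arrow::Result<std::unique_ptr<arrow::dataset::internal::DatasetWriter>> MakeThrottledWriter(
    arrow::dataset::FileSystemDatasetWriteOptions write_options,
    arrow::util::AsyncTaskScheduler* scheduler, std::atomic<bool>* paused) {
  return arrow::dataset::internal::DatasetWriter::Make(
      std::move(write_options), scheduler,
      /*pause_callback=*/[paused] { *paused = true; },   // queue is full, stop feeding
      /*resume_callback=*/[paused] { *paused = false; }, // queue drained, feed again
      /*finish_callback=*/[] {});
}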
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_base.h ADDED
@@ -0,0 +1,495 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <functional>
+ #include <memory>
+ #include <string>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/buffer.h"
+ #include "arrow/dataset/dataset.h"
+ #include "arrow/dataset/partition.h"
+ #include "arrow/dataset/scanner.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/filesystem/filesystem.h"
+ #include "arrow/io/file.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/compression.h"
+
+ namespace arrow {
+
+ namespace dataset {
+
+ /// \defgroup dataset-file-formats File formats for reading and writing datasets
+ /// \defgroup dataset-filesystem File system datasets
+ ///
+ /// @{
+
+ /// \brief The path and filesystem where an actual file is located or a buffer which can
+ /// be read like a file
+ class ARROW_DS_EXPORT FileSource : public util::EqualityComparable<FileSource> {
+ public:
+ FileSource(std::string path, std::shared_ptr<fs::FileSystem> filesystem,
+ Compression::type compression = Compression::UNCOMPRESSED)
+ : file_info_(std::move(path)),
+ filesystem_(std::move(filesystem)),
+ compression_(compression) {}
+
+ FileSource(fs::FileInfo info, std::shared_ptr<fs::FileSystem> filesystem,
+ Compression::type compression = Compression::UNCOMPRESSED)
+ : file_info_(std::move(info)),
+ filesystem_(std::move(filesystem)),
+ compression_(compression) {}
+
+ explicit FileSource(std::shared_ptr<Buffer> buffer,
+ Compression::type compression = Compression::UNCOMPRESSED)
+ : buffer_(std::move(buffer)), compression_(compression) {}
+
+ using CustomOpen = std::function<Result<std::shared_ptr<io::RandomAccessFile>>()>;
+ FileSource(CustomOpen open, int64_t size)
+ : custom_open_(std::move(open)), custom_size_(size) {}
+
+ using CustomOpenWithCompression =
+ std::function<Result<std::shared_ptr<io::RandomAccessFile>>(Compression::type)>;
+ FileSource(CustomOpenWithCompression open_with_compression, int64_t size,
+ Compression::type compression = Compression::UNCOMPRESSED)
+ : custom_open_(std::bind(std::move(open_with_compression), compression)),
+ custom_size_(size),
+ compression_(compression) {}
+
+ FileSource(std::shared_ptr<io::RandomAccessFile> file, int64_t size,
+ Compression::type compression = Compression::UNCOMPRESSED)
+ : custom_open_([=] { return ToResult(file); }),
+ custom_size_(size),
+ compression_(compression) {}
+
+ explicit FileSource(std::shared_ptr<io::RandomAccessFile> file,
+ Compression::type compression = Compression::UNCOMPRESSED);
+
+ FileSource() : custom_open_(CustomOpen{&InvalidOpen}) {}
+
+ static std::vector<FileSource> FromPaths(const std::shared_ptr<fs::FileSystem>& fs,
+ std::vector<std::string> paths) {
+ std::vector<FileSource> sources;
+ for (auto&& path : paths) {
+ sources.emplace_back(std::move(path), fs);
+ }
+ return sources;
+ }
+
+ /// \brief Return the type of raw compression on the file, if any.
+ Compression::type compression() const { return compression_; }
+
+ /// \brief Return the file path, if any. Only valid when file source wraps a path.
+ const std::string& path() const {
+ static std::string buffer_path = "<Buffer>";
+ static std::string custom_open_path = "<Buffer>";
+ return filesystem_ ? file_info_.path() : buffer_ ? buffer_path : custom_open_path;
+ }
+
+ /// \brief Return the filesystem, if any. Otherwise returns nullptr
+ const std::shared_ptr<fs::FileSystem>& filesystem() const { return filesystem_; }
+
+ /// \brief Return the buffer containing the file, if any. Otherwise returns nullptr
+ const std::shared_ptr<Buffer>& buffer() const { return buffer_; }
+
+ /// \brief Get a RandomAccessFile which views this file source
+ Result<std::shared_ptr<io::RandomAccessFile>> Open() const;
+ Future<std::shared_ptr<io::RandomAccessFile>> OpenAsync() const;
+
+ /// \brief Get the size (in bytes) of the file or buffer
+ /// If the file is compressed this should be the compressed (on-disk) size.
+ int64_t Size() const;
+
+ /// \brief Get an InputStream which views this file source (and decompresses if needed)
+ /// \param[in] compression If nullopt, guess the compression scheme from the
+ /// filename, else decompress with the given codec
+ Result<std::shared_ptr<io::InputStream>> OpenCompressed(
+ std::optional<Compression::type> compression = std::nullopt) const;
+
+ /// \brief equality comparison with another FileSource
+ bool Equals(const FileSource& other) const;
+
+ private:
+ static Result<std::shared_ptr<io::RandomAccessFile>> InvalidOpen() {
+ return Status::Invalid("Called Open() on an uninitialized FileSource");
+ }
+
+ fs::FileInfo file_info_;
+ std::shared_ptr<fs::FileSystem> filesystem_;
+ std::shared_ptr<Buffer> buffer_;
+ CustomOpen custom_open_;
+ int64_t custom_size_ = 0;
+ Compression::type compression_ = Compression::UNCOMPRESSED;
+ };
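Editor's note: a sketch of the two most common FileSource constructors above; the path and contents are illustrative:

// A FileSource can wrap a filesystem path or an in-memory buffer.
#include <arrow/buffer.h>
#include <arrow/dataset/file_base.h>
#include <arrow/filesystem/localfs.h>

void FileSourceExamples() {
  auto fs = std::make_shared<arrow::fs::LocalFileSystem>();
  // A (possibly compressed) file on a filesystem.
  arrow::dataset::FileSource from_path("/tmp/data.csv.gz", fs, arrow::Compression::GZIP);
  // An in-memory buffer read as if it were a file.
  auto buffer = arrow::Buffer::FromString("a,b\n1,2\n");
  arrow::dataset::FileSource from_buffer(buffer);
  auto maybe_file = from_buffer.Open();  // Result<shared_ptr<io::RandomAccessFile>>
}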
145
+
146
+ /// \brief Base class for file format implementation
147
+ class ARROW_DS_EXPORT FileFormat : public std::enable_shared_from_this<FileFormat> {
148
+ public:
149
+ /// Options affecting how this format is scanned.
150
+ ///
151
+ /// The options here can be overridden at scan time.
152
+ std::shared_ptr<FragmentScanOptions> default_fragment_scan_options;
153
+
154
+ virtual ~FileFormat() = default;
155
+
156
+ /// \brief The name identifying the kind of file format
157
+ virtual std::string type_name() const = 0;
158
+
159
+ virtual bool Equals(const FileFormat& other) const = 0;
160
+
161
+ /// \brief Indicate if the FileSource is supported/readable by this format.
162
+ virtual Result<bool> IsSupported(const FileSource& source) const = 0;
163
+
164
+ /// \brief Return the schema of the file if possible.
165
+ virtual Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const = 0;
166
+
167
+ /// \brief Learn what we need about the file before we start scanning it
168
+ virtual Future<std::shared_ptr<InspectedFragment>> InspectFragment(
169
+ const FileSource& source, const FragmentScanOptions* format_options,
170
+ compute::ExecContext* exec_context) const;
171
+
172
+ virtual Result<RecordBatchGenerator> ScanBatchesAsync(
173
+ const std::shared_ptr<ScanOptions>& options,
174
+ const std::shared_ptr<FileFragment>& file) const = 0;
175
+
176
+ virtual Future<std::optional<int64_t>> CountRows(
177
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
178
+ const std::shared_ptr<ScanOptions>& options);
179
+
180
+ virtual Future<std::shared_ptr<FragmentScanner>> BeginScan(
181
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
182
+ const FragmentScanOptions* format_options,
183
+ compute::ExecContext* exec_context) const;
184
+
185
+ /// \brief Open a fragment
186
+ virtual Result<std::shared_ptr<FileFragment>> MakeFragment(
187
+ FileSource source, compute::Expression partition_expression,
188
+ std::shared_ptr<Schema> physical_schema);
189
+
190
+ /// \brief Create a FileFragment for a FileSource.
191
+ Result<std::shared_ptr<FileFragment>> MakeFragment(
192
+ FileSource source, compute::Expression partition_expression);
193
+
194
+ /// \brief Create a FileFragment for a FileSource.
195
+ Result<std::shared_ptr<FileFragment>> MakeFragment(
196
+ FileSource source, std::shared_ptr<Schema> physical_schema = NULLPTR);
197
+
198
+ /// \brief Create a writer for this format.
199
+ virtual Result<std::shared_ptr<FileWriter>> MakeWriter(
200
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
201
+ std::shared_ptr<FileWriteOptions> options,
202
+ fs::FileLocator destination_locator) const = 0;
203
+
204
+ /// \brief Get default write options for this format.
205
+ ///
206
+ /// May return null shared_ptr if this file format does not yet support
207
+ /// writing datasets.
208
+ virtual std::shared_ptr<FileWriteOptions> DefaultWriteOptions() = 0;
209
+
210
+ protected:
211
+ explicit FileFormat(std::shared_ptr<FragmentScanOptions> default_fragment_scan_options)
212
+ : default_fragment_scan_options(std::move(default_fragment_scan_options)) {}
213
+ };
214
+
215
+ /// \brief A Fragment that is stored in a file with a known format
216
+ class ARROW_DS_EXPORT FileFragment : public Fragment,
217
+ public util::EqualityComparable<FileFragment> {
218
+ public:
219
+ Result<RecordBatchGenerator> ScanBatchesAsync(
220
+ const std::shared_ptr<ScanOptions>& options) override;
221
+ Future<std::optional<int64_t>> CountRows(
222
+ compute::Expression predicate,
223
+ const std::shared_ptr<ScanOptions>& options) override;
224
+ Future<std::shared_ptr<FragmentScanner>> BeginScan(
225
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
226
+ const FragmentScanOptions* format_options,
227
+ compute::ExecContext* exec_context) override;
228
+ Future<std::shared_ptr<InspectedFragment>> InspectFragment(
229
+ const FragmentScanOptions* format_options,
230
+ compute::ExecContext* exec_context) override;
231
+
232
+ std::string type_name() const override { return format_->type_name(); }
233
+ std::string ToString() const override { return source_.path(); };
234
+
235
+ const FileSource& source() const { return source_; }
236
+ const std::shared_ptr<FileFormat>& format() const { return format_; }
237
+
238
+ bool Equals(const FileFragment& other) const;
239
+
240
+ protected:
241
+ FileFragment(FileSource source, std::shared_ptr<FileFormat> format,
242
+ compute::Expression partition_expression,
243
+ std::shared_ptr<Schema> physical_schema)
244
+ : Fragment(std::move(partition_expression), std::move(physical_schema)),
245
+ source_(std::move(source)),
246
+ format_(std::move(format)) {}
247
+
248
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() override;
249
+
250
+ FileSource source_;
251
+ std::shared_ptr<FileFormat> format_;
252
+
253
+ friend class FileFormat;
254
+ };
255
+
256
+ /// \brief A Dataset of FileFragments.
257
+ ///
258
+ /// A FileSystemDataset is composed of one or more FileFragment. The fragments
259
+ /// are independent and don't need to share the same format and/or filesystem.
260
+ class ARROW_DS_EXPORT FileSystemDataset : public Dataset {
261
+ public:
262
+ /// \brief Create a FileSystemDataset.
263
+ ///
264
+ /// \param[in] schema the schema of the dataset
265
+ /// \param[in] root_partition the partition expression of the dataset
266
+ /// \param[in] format the format of each FileFragment.
267
+ /// \param[in] filesystem the filesystem of each FileFragment, or nullptr if the
268
+ /// fragments wrap buffers.
269
+ /// \param[in] fragments list of fragments to create the dataset from.
270
+ /// \param[in] partitioning the Partitioning object in case the dataset is created
271
+ /// with a known partitioning (e.g. from a discovered partitioning
272
+ /// through a DatasetFactory), or nullptr if not known.
273
+ ///
274
+ /// Note that fragments wrapping files resident in differing filesystems are not
275
+ /// permitted; to work with multiple filesystems use a UnionDataset.
276
+ ///
277
+ /// \return A constructed dataset.
278
+ static Result<std::shared_ptr<FileSystemDataset>> Make(
279
+ std::shared_ptr<Schema> schema, compute::Expression root_partition,
280
+ std::shared_ptr<FileFormat> format, std::shared_ptr<fs::FileSystem> filesystem,
281
+ std::vector<std::shared_ptr<FileFragment>> fragments,
282
+ std::shared_ptr<Partitioning> partitioning = NULLPTR);
283
+
284
+ /// \brief Write a dataset.
285
+ static Status Write(const FileSystemDatasetWriteOptions& write_options,
286
+ std::shared_ptr<Scanner> scanner);
287
+
288
+ /// \brief Return the type name of the dataset.
289
+ std::string type_name() const override { return "filesystem"; }
290
+
291
+ /// \brief Replace the schema of the dataset.
292
+ Result<std::shared_ptr<Dataset>> ReplaceSchema(
293
+ std::shared_ptr<Schema> schema) const override;
294
+
295
+ /// \brief Return the path of files.
296
+ std::vector<std::string> files() const;
297
+
298
+ /// \brief Return the format.
299
+ const std::shared_ptr<FileFormat>& format() const { return format_; }
300
+
301
+ /// \brief Return the filesystem. May be nullptr if the fragments wrap buffers.
302
+ const std::shared_ptr<fs::FileSystem>& filesystem() const { return filesystem_; }
303
+
304
+ /// \brief Return the partitioning. May be nullptr if the dataset was not constructed
305
+ /// with a partitioning.
306
+ const std::shared_ptr<Partitioning>& partitioning() const { return partitioning_; }
307
+
308
+ std::string ToString() const;
309
+
310
+ protected:
311
+ struct FragmentSubtrees;
312
+
313
+ explicit FileSystemDataset(std::shared_ptr<Schema> schema)
314
+ : Dataset(std::move(schema)) {}
315
+
316
+ FileSystemDataset(std::shared_ptr<Schema> schema,
317
+ compute::Expression partition_expression)
318
+ : Dataset(std::move(schema), partition_expression) {}
319
+
320
+ Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;
321
+
322
+ void SetupSubtreePruning();
323
+
324
+ std::shared_ptr<FileFormat> format_;
325
+ std::shared_ptr<fs::FileSystem> filesystem_;
326
+ std::vector<std::shared_ptr<FileFragment>> fragments_;
327
+ std::shared_ptr<Partitioning> partitioning_;
328
+
329
+ std::shared_ptr<FragmentSubtrees> subtrees_;
330
+ };
331
+
332
+ /// \brief Options for writing a file of this format.
333
+ class ARROW_DS_EXPORT FileWriteOptions {
334
+ public:
335
+ virtual ~FileWriteOptions() = default;
336
+
337
+ const std::shared_ptr<FileFormat>& format() const { return format_; }
338
+
339
+ std::string type_name() const { return format_->type_name(); }
340
+
341
+ protected:
342
+ explicit FileWriteOptions(std::shared_ptr<FileFormat> format)
343
+ : format_(std::move(format)) {}
344
+
345
+ std::shared_ptr<FileFormat> format_;
346
+ };
347
+
348
+ /// \brief A writer for this format.
349
+ class ARROW_DS_EXPORT FileWriter {
350
+ public:
351
+ virtual ~FileWriter() = default;
352
+
353
+ /// \brief Write the given batch.
354
+ virtual Status Write(const std::shared_ptr<RecordBatch>& batch) = 0;
355
+
356
+ /// \brief Write all batches from the reader.
357
+ Status Write(RecordBatchReader* batches);
358
+
359
+ /// \brief Indicate that writing is done.
360
+ virtual Future<> Finish();
361
+
362
+ const std::shared_ptr<FileFormat>& format() const { return options_->format(); }
363
+ const std::shared_ptr<Schema>& schema() const { return schema_; }
364
+ const std::shared_ptr<FileWriteOptions>& options() const { return options_; }
365
+ const fs::FileLocator& destination() const { return destination_locator_; }
366
+
367
+ /// \brief After Finish() is called, provides number of bytes written to file.
368
+ Result<int64_t> GetBytesWritten() const;
369
+
370
+ protected:
371
+ FileWriter(std::shared_ptr<Schema> schema, std::shared_ptr<FileWriteOptions> options,
372
+ std::shared_ptr<io::OutputStream> destination,
373
+ fs::FileLocator destination_locator)
374
+ : schema_(std::move(schema)),
375
+ options_(std::move(options)),
376
+ destination_(std::move(destination)),
377
+ destination_locator_(std::move(destination_locator)) {}
378
+
379
+ virtual Future<> FinishInternal() = 0;
380
+
381
+ std::shared_ptr<Schema> schema_;
382
+ std::shared_ptr<FileWriteOptions> options_;
383
+ std::shared_ptr<io::OutputStream> destination_;
384
+ fs::FileLocator destination_locator_;
385
+ std::optional<int64_t> bytes_written_;
386
+ };
387
+
388
+ /// \brief Options for writing a dataset.
+ struct ARROW_DS_EXPORT FileSystemDatasetWriteOptions {
+   /// Options for individual fragment writing.
+   std::shared_ptr<FileWriteOptions> file_write_options;
+
+   /// FileSystem into which a dataset will be written.
+   std::shared_ptr<fs::FileSystem> filesystem;
+
+   /// Root directory into which the dataset will be written.
+   std::string base_dir;
+
+   /// Partitioning used to generate fragment paths.
+   std::shared_ptr<Partitioning> partitioning;
+
+   /// Maximum number of partitions any batch may be written into; the default is 1024.
+   int max_partitions = 1024;
+
+   /// Template string used to generate fragment basenames.
+   /// {i} will be replaced by an auto-incremented integer.
+   std::string basename_template;
+
+   /// A functor which will be applied on an incremented counter. The result will be
+   /// inserted into the basename_template in place of {i}.
+   ///
+   /// This can be used, for example, to left-pad the file counter.
+   std::function<std::string(int)> basename_template_functor;
+
+   /// If greater than 0 then this will limit the maximum number of files that can be
+   /// left open. If an attempt is made to open too many files then the least recently
+   /// used file will be closed. If this setting is too low you may end up fragmenting
+   /// your data into many small files.
+   ///
+   /// The default is 900, which also allows some number of files to be open by the
+   /// scanner before hitting the default Linux limit of 1024.
+   uint32_t max_open_files = 900;
+
+   /// If greater than 0 then this will limit how many rows are placed in any single
+   /// file. Otherwise there will be no limit and one file will be created in each
+   /// output directory unless files need to be closed to respect max_open_files.
+   uint64_t max_rows_per_file = 0;
+
+   /// If greater than 0 then this will cause the dataset writer to batch incoming data
+   /// and only write the row groups to the disk when sufficient rows have accumulated.
+   /// The final row group size may be less than this value; other options such as
+   /// `max_open_files` or `max_rows_per_file` may also lead to smaller row group sizes.
+   uint64_t min_rows_per_group = 0;
+
+   /// If greater than 0 then the dataset writer may split up large incoming batches
+   /// into multiple row groups. If this value is set then min_rows_per_group should
+   /// also be set, or else you may end up with very small row groups (e.g. if the
+   /// incoming row group size is just barely larger than this value).
+   uint64_t max_rows_per_group = 1 << 20;
+
+   /// Controls what happens if an output directory already exists.
+   ExistingDataBehavior existing_data_behavior = ExistingDataBehavior::kError;
+
+   /// \brief If false the dataset writer will not create directories.
+   /// This is mainly intended for filesystems that do not require directories, such as
+   /// S3.
+   bool create_dir = true;
+
+   /// Callback to be invoked against all FileWriters before
+   /// they are finalized with FileWriter::Finish().
+   std::function<Status(FileWriter*)> writer_pre_finish = [](FileWriter*) {
+     return Status::OK();
+   };
+
+   /// Callback to be invoked against all FileWriters after they have
+   /// called FileWriter::Finish().
+   std::function<Status(FileWriter*)> writer_post_finish = [](FileWriter*) {
+     return Status::OK();
+   };
+
+   const std::shared_ptr<FileFormat>& format() const {
+     return file_write_options->format();
+   }
+ };
+
+ /// \brief Wraps FileSystemDatasetWriteOptions for consumption as
+ /// compute::ExecNodeOptions
+ class ARROW_DS_EXPORT WriteNodeOptions : public acero::ExecNodeOptions {
+  public:
+   explicit WriteNodeOptions(
+       FileSystemDatasetWriteOptions options,
+       std::shared_ptr<const KeyValueMetadata> custom_metadata = NULLPTR)
+       : write_options(std::move(options)), custom_metadata(std::move(custom_metadata)) {}
+
+   /// \brief Options to control how to write the dataset
+   FileSystemDatasetWriteOptions write_options;
+   /// \brief Optional schema to attach to all written batches
+   ///
+   /// By default, we will use the output schema of the input.
+   ///
+   /// This can be used to alter schema metadata, field nullability, or field metadata.
+   /// However, this cannot be used to change the type of data. If the custom schema
+   /// does not have the same number of fields and the same data types as the input
+   /// then the plan will fail.
+   std::shared_ptr<Schema> custom_schema;
+   /// \brief Optional metadata to attach to written batches
+   std::shared_ptr<const KeyValueMetadata> custom_metadata;
+ };
+
+ /// @}
+
+ namespace internal {
+ ARROW_DS_EXPORT void InitializeDatasetWriter(arrow::acero::ExecFactoryRegistry* registry);
+ }
+
+ } // namespace dataset
+ } // namespace arrow
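Not part of the header itself: a minimal usage sketch of the options above, assuming an already-constructed Dataset and FileSystem. The output path, the IPC format choice, and the 5-digit counter padding are illustrative assumptions.

```cpp
#include <memory>
#include <string>
#include <utility>

#include "arrow/dataset/dataset.h"
#include "arrow/dataset/file_base.h"
#include "arrow/dataset/file_ipc.h"
#include "arrow/dataset/partition.h"
#include "arrow/dataset/scanner.h"
#include "arrow/filesystem/filesystem.h"

namespace ds = arrow::dataset;

// Write `dataset` under /tmp/out as IPC files (paths are illustrative).
arrow::Status WriteExample(const std::shared_ptr<ds::Dataset>& dataset,
                           std::shared_ptr<arrow::fs::FileSystem> fs) {
  auto format = std::make_shared<ds::IpcFileFormat>();

  ds::FileSystemDatasetWriteOptions write_options;
  write_options.file_write_options = format->DefaultWriteOptions();
  write_options.filesystem = std::move(fs);
  write_options.base_dir = "/tmp/out";
  write_options.partitioning = ds::Partitioning::Default();
  write_options.basename_template = "part-{i}.arrow";
  // Left-pad the {i} counter so the generated files sort lexicographically.
  write_options.basename_template_functor = [](int i) {
    std::string s = std::to_string(i);
    return s.size() >= 5 ? s : std::string(5 - s.size(), '0') + s;
  };
  write_options.max_rows_per_file = 1 << 20;
  write_options.existing_data_behavior = ds::ExistingDataBehavior::kOverwriteOrIgnore;

  ARROW_ASSIGN_OR_RAISE(auto builder, dataset->NewScan());
  ARROW_ASSIGN_OR_RAISE(auto scanner, builder->Finish());
  return ds::FileSystemDataset::Write(write_options, std::move(scanner));
}
```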
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_csv.h ADDED
@@ -0,0 +1,144 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+
+ #include "arrow/csv/options.h"
+ #include "arrow/dataset/dataset.h"
+ #include "arrow/dataset/file_base.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/ipc/type_fwd.h"
+ #include "arrow/status.h"
+ #include "arrow/util/compression.h"
+
+ namespace arrow {
+ namespace dataset {
+
+ constexpr char kCsvTypeName[] = "csv";
+
+ /// \addtogroup dataset-file-formats
+ ///
+ /// @{
+
+ /// \brief A FileFormat implementation that reads from and writes to CSV files
+ class ARROW_DS_EXPORT CsvFileFormat : public FileFormat {
+  public:
+   // TODO(ARROW-18328) Remove this; moved to CsvFragmentScanOptions
+   /// Options affecting the parsing of CSV files
+   csv::ParseOptions parse_options = csv::ParseOptions::Defaults();
+
+   CsvFileFormat();
+
+   std::string type_name() const override { return kCsvTypeName; }
+
+   bool Equals(const FileFormat& other) const override;
+
+   Result<bool> IsSupported(const FileSource& source) const override;
+
+   /// \brief Return the schema of the file if possible.
+   Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
+
+   Future<std::shared_ptr<FragmentScanner>> BeginScan(
+       const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
+       const FragmentScanOptions* format_options,
+       compute::ExecContext* exec_context) const override;
+
+   Result<RecordBatchGenerator> ScanBatchesAsync(
+       const std::shared_ptr<ScanOptions>& scan_options,
+       const std::shared_ptr<FileFragment>& file) const override;
+
+   Future<std::shared_ptr<InspectedFragment>> InspectFragment(
+       const FileSource& source, const FragmentScanOptions* format_options,
+       compute::ExecContext* exec_context) const override;
+
+   Future<std::optional<int64_t>> CountRows(
+       const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
+       const std::shared_ptr<ScanOptions>& options) override;
+
+   Result<std::shared_ptr<FileWriter>> MakeWriter(
+       std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
+       std::shared_ptr<FileWriteOptions> options,
+       fs::FileLocator destination_locator) const override;
+
+   std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
+ };
+
+ /// \brief Per-scan options for CSV fragments
+ struct ARROW_DS_EXPORT CsvFragmentScanOptions : public FragmentScanOptions {
+   std::string type_name() const override { return kCsvTypeName; }
+
+   using StreamWrapFunc = std::function<Result<std::shared_ptr<io::InputStream>>(
+       std::shared_ptr<io::InputStream>)>;
+
+   /// CSV conversion options
+   csv::ConvertOptions convert_options = csv::ConvertOptions::Defaults();
+
+   /// CSV reading options
+   ///
+   /// Note that use_threads is always ignored.
+   csv::ReadOptions read_options = csv::ReadOptions::Defaults();
+
+   /// CSV parse options
+   csv::ParseOptions parse_options = csv::ParseOptions::Defaults();
+
+   /// Optional stream wrapping function
+   ///
+   /// If defined, all open dataset file fragments will be passed
+   /// through this function. One possible use case is to transparently
+   /// transcode all input files from a given character set to utf8.
+   StreamWrapFunc stream_transform_func{};
+ };
+
+ class ARROW_DS_EXPORT CsvFileWriteOptions : public FileWriteOptions {
+  public:
+   /// Options passed to csv::MakeCSVWriter.
+   std::shared_ptr<csv::WriteOptions> write_options;
+
+  protected:
+   explicit CsvFileWriteOptions(std::shared_ptr<FileFormat> format)
+       : FileWriteOptions(std::move(format)) {}
+
+   friend class CsvFileFormat;
+ };
+
+ class ARROW_DS_EXPORT CsvFileWriter : public FileWriter {
+  public:
+   Status Write(const std::shared_ptr<RecordBatch>& batch) override;
+
+  private:
+   CsvFileWriter(std::shared_ptr<io::OutputStream> destination,
+                 std::shared_ptr<ipc::RecordBatchWriter> writer,
+                 std::shared_ptr<Schema> schema,
+                 std::shared_ptr<CsvFileWriteOptions> options,
+                 fs::FileLocator destination_locator);
+
+   Future<> FinishInternal() override;
+
+   std::shared_ptr<io::OutputStream> destination_;
+   std::shared_ptr<ipc::RecordBatchWriter> batch_writer_;
+
+   friend class CsvFileFormat;
+ };
+
+ /// @}
+
+ } // namespace dataset
+ } // namespace arrow
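A minimal sketch of how the CSV options above are typically populated: format-level parse options (which apply to every fragment of that format) versus per-scan CsvFragmentScanOptions. The pipe delimiter, the "NA" null spelling, and the skipped header row are illustrative assumptions.

```cpp
#include <memory>

#include "arrow/csv/options.h"
#include "arrow/dataset/file_csv.h"

namespace ds = arrow::dataset;

// Format-level option: parse all CSV fragments as pipe-delimited.
std::shared_ptr<ds::CsvFileFormat> MakePipeDelimitedCsvFormat() {
  auto format = std::make_shared<ds::CsvFileFormat>();
  format->parse_options.delimiter = '|';
  return format;
}

// Per-scan options: conversion and reading tweaks for a single scan.
std::shared_ptr<ds::CsvFragmentScanOptions> MakeCsvScanOptions() {
  auto scan_opts = std::make_shared<ds::CsvFragmentScanOptions>();
  scan_opts->convert_options.null_values.push_back("NA");  // treat "NA" as null
  scan_opts->read_options.skip_rows = 1;                   // skip a header banner line
  return scan_opts;
}
```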
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_ipc.h ADDED
@@ -0,0 +1,123 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+
+ #include "arrow/dataset/file_base.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/io/type_fwd.h"
+ #include "arrow/ipc/type_fwd.h"
+ #include "arrow/result.h"
+
+ namespace arrow {
+ namespace dataset {
+
+ /// \addtogroup dataset-file-formats
+ ///
+ /// @{
+
+ constexpr char kIpcTypeName[] = "ipc";
+
+ /// \brief A FileFormat implementation that reads from and writes to IPC files
+ class ARROW_DS_EXPORT IpcFileFormat : public FileFormat {
+  public:
+   std::string type_name() const override { return kIpcTypeName; }
+
+   IpcFileFormat();
+
+   bool Equals(const FileFormat& other) const override {
+     return type_name() == other.type_name();
+   }
+
+   Result<bool> IsSupported(const FileSource& source) const override;
+
+   /// \brief Return the schema of the file if possible.
+   Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
+
+   Result<RecordBatchGenerator> ScanBatchesAsync(
+       const std::shared_ptr<ScanOptions>& options,
+       const std::shared_ptr<FileFragment>& file) const override;
+
+   Future<std::optional<int64_t>> CountRows(
+       const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
+       const std::shared_ptr<ScanOptions>& options) override;
+
+   Result<std::shared_ptr<FileWriter>> MakeWriter(
+       std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
+       std::shared_ptr<FileWriteOptions> options,
+       fs::FileLocator destination_locator) const override;
+
+   std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
+ };
+
+ /// \brief Per-scan options for IPC fragments
+ class ARROW_DS_EXPORT IpcFragmentScanOptions : public FragmentScanOptions {
+  public:
+   std::string type_name() const override { return kIpcTypeName; }
+
+   /// Options passed to the IPC file reader.
+   /// included_fields, memory_pool, and use_threads are ignored.
+   std::shared_ptr<ipc::IpcReadOptions> options;
+   /// If present, the async scanner will enable I/O coalescing.
+   /// This is ignored by the sync scanner.
+   std::shared_ptr<io::CacheOptions> cache_options;
+ };
+
+ class ARROW_DS_EXPORT IpcFileWriteOptions : public FileWriteOptions {
+  public:
+   /// Options passed to ipc::MakeFileWriter. use_threads is ignored.
+   std::shared_ptr<ipc::IpcWriteOptions> options;
+
+   /// custom_metadata written to the file's footer
+   std::shared_ptr<const KeyValueMetadata> metadata;
+
+  protected:
+   explicit IpcFileWriteOptions(std::shared_ptr<FileFormat> format)
+       : FileWriteOptions(std::move(format)) {}
+
+   friend class IpcFileFormat;
+ };
+
+ class ARROW_DS_EXPORT IpcFileWriter : public FileWriter {
+  public:
+   Status Write(const std::shared_ptr<RecordBatch>& batch) override;
+
+  private:
+   IpcFileWriter(std::shared_ptr<io::OutputStream> destination,
+                 std::shared_ptr<ipc::RecordBatchWriter> writer,
+                 std::shared_ptr<Schema> schema,
+                 std::shared_ptr<IpcFileWriteOptions> options,
+                 fs::FileLocator destination_locator);
+
+   Future<> FinishInternal() override;
+
+   std::shared_ptr<io::OutputStream> destination_;
+   std::shared_ptr<ipc::RecordBatchWriter> batch_writer_;
+
+   friend class IpcFileFormat;
+ };
+
+ /// @}
+
+ } // namespace dataset
+ } // namespace arrow
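A minimal sketch of customizing IpcFileWriteOptions: DefaultWriteOptions() yields a FileWriteOptions that can be downcast to the concrete type. The LZ4 codec choice is an assumption that requires an Arrow build with LZ4 support.

```cpp
#include <memory>

#include "arrow/dataset/file_ipc.h"
#include "arrow/ipc/writer.h"
#include "arrow/util/compression.h"

namespace ds = arrow::dataset;

arrow::Result<std::shared_ptr<ds::FileWriteOptions>> MakeIpcWriteOptions() {
  auto format = std::make_shared<ds::IpcFileFormat>();
  auto ipc_opts = std::static_pointer_cast<ds::IpcFileWriteOptions>(
      format->DefaultWriteOptions());
  // Compress record batch bodies with LZ4 (requires LZ4-enabled build).
  ARROW_ASSIGN_OR_RAISE(ipc_opts->options->codec,
                        arrow::util::Codec::Create(arrow::Compression::LZ4_FRAME));
  return ipc_opts;
}
```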
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_json.h ADDED
@@ -0,0 +1,98 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+ #include <optional>
+ #include <string>
+
+ #include "arrow/dataset/dataset.h"
+ #include "arrow/dataset/file_base.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/ipc/type_fwd.h"
+ #include "arrow/json/options.h"
+ #include "arrow/result.h"
+ #include "arrow/status.h"
+ #include "arrow/util/future.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow::dataset {
+
+ /// \addtogroup dataset-file-formats
+ ///
+ /// @{
+
+ constexpr char kJsonTypeName[] = "json";
+
+ /// \brief A FileFormat implementation that reads from JSON files
+ class ARROW_DS_EXPORT JsonFileFormat : public FileFormat {
+  public:
+   JsonFileFormat();
+
+   std::string type_name() const override { return kJsonTypeName; }
+
+   bool Equals(const FileFormat& other) const override;
+
+   Result<bool> IsSupported(const FileSource& source) const override;
+
+   Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
+
+   Future<std::shared_ptr<InspectedFragment>> InspectFragment(
+       const FileSource& source, const FragmentScanOptions* format_options,
+       compute::ExecContext* exec_context) const override;
+
+   Future<std::shared_ptr<FragmentScanner>> BeginScan(
+       const FragmentScanRequest& scan_request, const InspectedFragment& inspected,
+       const FragmentScanOptions* format_options,
+       compute::ExecContext* exec_context) const override;
+
+   Result<RecordBatchGenerator> ScanBatchesAsync(
+       const std::shared_ptr<ScanOptions>& scan_options,
+       const std::shared_ptr<FileFragment>& file) const override;
+
+   Future<std::optional<int64_t>> CountRows(
+       const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
+       const std::shared_ptr<ScanOptions>& scan_options) override;
+
+   Result<std::shared_ptr<FileWriter>> MakeWriter(
+       std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
+       std::shared_ptr<FileWriteOptions> options,
+       fs::FileLocator destination_locator) const override {
+     return Status::NotImplemented("Writing JSON files is not currently supported");
+   }
+
+   std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override { return NULLPTR; }
+ };
+
+ /// \brief Per-scan options for JSON fragments
+ struct ARROW_DS_EXPORT JsonFragmentScanOptions : public FragmentScanOptions {
+   std::string type_name() const override { return kJsonTypeName; }
+
+   /// \brief Options that affect JSON parsing
+   ///
+   /// Note: `explicit_schema` and `unexpected_field_behavior` are ignored.
+   json::ParseOptions parse_options = json::ParseOptions::Defaults();
+
+   /// \brief Options that affect JSON reading
+   json::ReadOptions read_options = json::ReadOptions::Defaults();
+ };
+
+ /// @}
+
+ } // namespace arrow::dataset
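A minimal sketch of per-scan JSON options. JSON is read-only here (MakeWriter returns NotImplemented above), so only the scan side is shown; the block size and newline setting are illustrative.

```cpp
#include <memory>

#include "arrow/dataset/file_json.h"

namespace ds = arrow::dataset;

std::shared_ptr<ds::JsonFragmentScanOptions> MakeJsonScanOptions() {
  auto opts = std::make_shared<ds::JsonFragmentScanOptions>();
  opts->parse_options.newlines_in_values = true;  // allow newlines inside values
  opts->read_options.block_size = 1 << 24;        // 16 MiB blocks for large records
  return opts;
}
```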
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_orc.h ADDED
@@ -0,0 +1,75 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+
+ #include "arrow/dataset/file_base.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/io/type_fwd.h"
+ #include "arrow/result.h"
+
+ namespace arrow {
+ namespace dataset {
+
+ /// \addtogroup dataset-file-formats
+ ///
+ /// @{
+
+ constexpr char kOrcTypeName[] = "orc";
+
+ /// \brief A FileFormat implementation that reads from and writes to ORC files
+ class ARROW_DS_EXPORT OrcFileFormat : public FileFormat {
+  public:
+   OrcFileFormat();
+
+   std::string type_name() const override { return kOrcTypeName; }
+
+   bool Equals(const FileFormat& other) const override {
+     return type_name() == other.type_name();
+   }
+
+   Result<bool> IsSupported(const FileSource& source) const override;
+
+   /// \brief Return the schema of the file if possible.
+   Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
+
+   Result<RecordBatchGenerator> ScanBatchesAsync(
+       const std::shared_ptr<ScanOptions>& options,
+       const std::shared_ptr<FileFragment>& file) const override;
+
+   Future<std::optional<int64_t>> CountRows(
+       const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
+       const std::shared_ptr<ScanOptions>& options) override;
+
+   Result<std::shared_ptr<FileWriter>> MakeWriter(
+       std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
+       std::shared_ptr<FileWriteOptions> options,
+       fs::FileLocator destination_locator) const override;
+
+   std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
+ };
+
+ /// @}
+
+ } // namespace dataset
+ } // namespace arrow
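A minimal sketch probing a file with OrcFileFormat before building a dataset, using IsSupported() and Inspect() from the class above. The caller-supplied path and filesystem are assumptions.

```cpp
#include <memory>
#include <string>
#include <utility>

#include "arrow/dataset/file_base.h"
#include "arrow/dataset/file_orc.h"
#include "arrow/filesystem/filesystem.h"

arrow::Result<std::shared_ptr<arrow::Schema>> InspectOrc(
    const std::string& path, std::shared_ptr<arrow::fs::FileSystem> fs) {
  arrow::dataset::FileSource source(path, std::move(fs));
  arrow::dataset::OrcFileFormat format;
  ARROW_ASSIGN_OR_RAISE(bool supported, format.IsSupported(source));
  if (!supported) return arrow::Status::Invalid(path, " is not an ORC file");
  return format.Inspect(source);  // read only the schema, not the data
}
```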
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_parquet.h ADDED
@@ -0,0 +1,404 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <memory>
+ #include <optional>
+ #include <string>
+ #include <unordered_set>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/dataset/discovery.h"
+ #include "arrow/dataset/file_base.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/io/caching.h"
+
+ namespace parquet {
+ class ParquetFileReader;
+ class Statistics;
+ class ColumnChunkMetaData;
+ class RowGroupMetaData;
+ class FileMetaData;
+ class FileDecryptionProperties;
+ class FileEncryptionProperties;
+
+ class ReaderProperties;
+ class ArrowReaderProperties;
+
+ class WriterProperties;
+ class ArrowWriterProperties;
+
+ namespace arrow {
+ class FileReader;
+ class FileWriter;
+ struct SchemaManifest;
+ } // namespace arrow
+ } // namespace parquet
+
+ namespace arrow {
+ namespace dataset {
+
+ struct ParquetDecryptionConfig;
+ struct ParquetEncryptionConfig;
+
+ /// \addtogroup dataset-file-formats
+ ///
+ /// @{
+
+ constexpr char kParquetTypeName[] = "parquet";
+
+ /// \brief A FileFormat implementation that reads from Parquet files
+ class ARROW_DS_EXPORT ParquetFileFormat : public FileFormat {
+  public:
+   ParquetFileFormat();
+
+   /// Convenience constructor which copies properties from a parquet::ReaderProperties.
+   /// memory_pool will be ignored.
+   explicit ParquetFileFormat(const parquet::ReaderProperties& reader_properties);
+
+   std::string type_name() const override { return kParquetTypeName; }
+
+   bool Equals(const FileFormat& other) const override;
+
+   struct ReaderOptions {
+     /// \defgroup parquet-file-format-arrow-reader-properties properties which correspond
+     /// to members of parquet::ArrowReaderProperties.
+     ///
+     /// We don't embed parquet::ReaderProperties directly because column names (rather
+     /// than indices) are used to indicate dictionary columns, and other options are
+     /// deferred to scan time.
+     ///
+     /// @{
+     std::unordered_set<std::string> dict_columns;
+     arrow::TimeUnit::type coerce_int96_timestamp_unit = arrow::TimeUnit::NANO;
+     /// @}
+   } reader_options;
+
+   Result<bool> IsSupported(const FileSource& source) const override;
+
+   /// \brief Return the schema of the file if possible.
+   Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
+
+   Result<RecordBatchGenerator> ScanBatchesAsync(
+       const std::shared_ptr<ScanOptions>& options,
+       const std::shared_ptr<FileFragment>& file) const override;
+
+   Future<std::optional<int64_t>> CountRows(
+       const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
+       const std::shared_ptr<ScanOptions>& options) override;
+
+   using FileFormat::MakeFragment;
+
+   /// \brief Create a Fragment targeting all RowGroups.
+   Result<std::shared_ptr<FileFragment>> MakeFragment(
+       FileSource source, compute::Expression partition_expression,
+       std::shared_ptr<Schema> physical_schema) override;
+
+   /// \brief Create a Fragment, restricted to the specified row groups.
+   Result<std::shared_ptr<ParquetFileFragment>> MakeFragment(
+       FileSource source, compute::Expression partition_expression,
+       std::shared_ptr<Schema> physical_schema, std::vector<int> row_groups);
+
+   /// \brief Return a FileReader on the given source.
+   Result<std::shared_ptr<parquet::arrow::FileReader>> GetReader(
+       const FileSource& source, const std::shared_ptr<ScanOptions>& options) const;
+
+   Result<std::shared_ptr<parquet::arrow::FileReader>> GetReader(
+       const FileSource& source, const std::shared_ptr<ScanOptions>& options,
+       const std::shared_ptr<parquet::FileMetaData>& metadata) const;
+
+   Future<std::shared_ptr<parquet::arrow::FileReader>> GetReaderAsync(
+       const FileSource& source, const std::shared_ptr<ScanOptions>& options) const;
+
+   Future<std::shared_ptr<parquet::arrow::FileReader>> GetReaderAsync(
+       const FileSource& source, const std::shared_ptr<ScanOptions>& options,
+       const std::shared_ptr<parquet::FileMetaData>& metadata) const;
+
+   Result<std::shared_ptr<FileWriter>> MakeWriter(
+       std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
+       std::shared_ptr<FileWriteOptions> options,
+       fs::FileLocator destination_locator) const override;
+
+   std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
+ };
+
+ /// \brief A FileFragment with parquet logic.
+ ///
+ /// ParquetFileFragment provides a lazy (with respect to IO) interface to
+ /// scan parquet files. Any heavy IO calls are deferred to the Scan() method.
+ ///
+ /// The caller can provide an optional list of selected RowGroups to limit the
+ /// number of scanned RowGroups, or to partition the scans across multiple
+ /// threads.
+ ///
+ /// Metadata can be explicitly provided, enabling pushdown predicate benefits without
+ /// the potentially heavy IO of loading Metadata from the file system. This can yield
+ /// a significant performance boost when scanning high-latency file systems.
+ class ARROW_DS_EXPORT ParquetFileFragment : public FileFragment {
+  public:
+   Result<FragmentVector> SplitByRowGroup(compute::Expression predicate);
+
+   /// \brief Return the RowGroups selected by this fragment.
+   const std::vector<int>& row_groups() const {
+     if (row_groups_) return *row_groups_;
+     static std::vector<int> empty;
+     return empty;
+   }
+
+   /// \brief Return the FileMetaData associated with this fragment.
+   std::shared_ptr<parquet::FileMetaData> metadata();
+
+   /// \brief Ensure this fragment's FileMetaData is in memory.
+   Status EnsureCompleteMetadata(parquet::arrow::FileReader* reader = NULLPTR);
+
+   /// \brief Return fragment which selects a filtered subset of this fragment's RowGroups.
+   Result<std::shared_ptr<Fragment>> Subset(compute::Expression predicate);
+   Result<std::shared_ptr<Fragment>> Subset(std::vector<int> row_group_ids);
+
+   static std::optional<compute::Expression> EvaluateStatisticsAsExpression(
+       const Field& field, const parquet::Statistics& statistics);
+
+   static std::optional<compute::Expression> EvaluateStatisticsAsExpression(
+       const Field& field, const FieldRef& field_ref,
+       const parquet::Statistics& statistics);
+
+  private:
+   ParquetFileFragment(FileSource source, std::shared_ptr<FileFormat> format,
+                       compute::Expression partition_expression,
+                       std::shared_ptr<Schema> physical_schema,
+                       std::optional<std::vector<int>> row_groups);
+
+   Status SetMetadata(std::shared_ptr<parquet::FileMetaData> metadata,
+                      std::shared_ptr<parquet::arrow::SchemaManifest> manifest,
+                      std::shared_ptr<parquet::FileMetaData> original_metadata = {});
+
+   // Overridden to opportunistically set metadata since a reader must be opened anyway.
+   Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() override {
+     ARROW_RETURN_NOT_OK(EnsureCompleteMetadata());
+     return physical_schema_;
+   }
+
+   /// Return a filtered subset of row group indices.
+   Result<std::vector<int>> FilterRowGroups(compute::Expression predicate);
+   /// Simplify the predicate against the statistics of each row group.
+   Result<std::vector<compute::Expression>> TestRowGroups(compute::Expression predicate);
+   /// Try to count rows matching the predicate using metadata. Expects
+   /// metadata to be present, and expects the predicate to have been
+   /// simplified against the partition expression already.
+   Result<std::optional<int64_t>> TryCountRows(compute::Expression predicate);
+
+   ParquetFileFormat& parquet_format_;
+
+   /// Indices of row groups selected by this fragment,
+   /// or std::nullopt if all row groups are selected.
+   std::optional<std::vector<int>> row_groups_;
+
+   // the expressions (combined for all columns for which statistics have been
+   // processed) are stored per column group
+   std::vector<compute::Expression> statistics_expressions_;
+   // statistics status is tracked by Parquet Schema column index
+   // (i.e. not Arrow schema field index)
+   std::vector<bool> statistics_expressions_complete_;
+   std::shared_ptr<parquet::FileMetaData> metadata_;
+   std::shared_ptr<parquet::arrow::SchemaManifest> manifest_;
+   // The FileMetaData that owns the SchemaDescriptor pointed to by SchemaManifest.
+   std::shared_ptr<parquet::FileMetaData> original_metadata_;
+
+   friend class ParquetFileFormat;
+   friend class ParquetDatasetFactory;
+ };
+
+ /// \brief Per-scan options for Parquet fragments
+ class ARROW_DS_EXPORT ParquetFragmentScanOptions : public FragmentScanOptions {
+  public:
+   ParquetFragmentScanOptions();
+   std::string type_name() const override { return kParquetTypeName; }
+
+   /// Reader properties. Not all properties are respected: memory_pool comes from
+   /// ScanOptions.
+   std::shared_ptr<parquet::ReaderProperties> reader_properties;
+   /// Arrow reader properties. Not all properties are respected: batch_size comes from
+   /// ScanOptions. Additionally, dictionary columns come from
+   /// ParquetFileFormat::ReaderOptions::dict_columns.
+   std::shared_ptr<parquet::ArrowReaderProperties> arrow_reader_properties;
+   /// A configuration structure that provides decryption properties for a dataset
+   std::shared_ptr<ParquetDecryptionConfig> parquet_decryption_config = NULLPTR;
+ };
+
+ class ARROW_DS_EXPORT ParquetFileWriteOptions : public FileWriteOptions {
+  public:
+   /// \brief Parquet writer properties.
+   std::shared_ptr<parquet::WriterProperties> writer_properties;
+
+   /// \brief Parquet Arrow writer properties.
+   std::shared_ptr<parquet::ArrowWriterProperties> arrow_writer_properties;
+
+   // A configuration structure that provides encryption properties for a dataset
+   std::shared_ptr<ParquetEncryptionConfig> parquet_encryption_config = NULLPTR;
+
+  protected:
+   explicit ParquetFileWriteOptions(std::shared_ptr<FileFormat> format)
+       : FileWriteOptions(std::move(format)) {}
+
+   friend class ParquetFileFormat;
+ };
+
+ class ARROW_DS_EXPORT ParquetFileWriter : public FileWriter {
+  public:
+   const std::shared_ptr<parquet::arrow::FileWriter>& parquet_writer() const {
+     return parquet_writer_;
+   }
+
+   Status Write(const std::shared_ptr<RecordBatch>& batch) override;
+
+  private:
+   ParquetFileWriter(std::shared_ptr<io::OutputStream> destination,
+                     std::shared_ptr<parquet::arrow::FileWriter> writer,
+                     std::shared_ptr<ParquetFileWriteOptions> options,
+                     fs::FileLocator destination_locator);
+
+   Future<> FinishInternal() override;
+
+   std::shared_ptr<parquet::arrow::FileWriter> parquet_writer_;
+
+   friend class ParquetFileFormat;
+ };
+
+ /// \brief Options for making a FileSystemDataset from a Parquet _metadata file.
+ struct ParquetFactoryOptions {
+   /// Either an explicit Partitioning or a PartitioningFactory to discover one.
+   ///
+   /// If a factory is provided, it will be used to infer a schema for partition fields
+   /// based on file and directory paths then construct a Partitioning. The default
+   /// is a Partitioning which will yield no partition information.
+   ///
+   /// The (explicit or discovered) partitioning will be applied to discovered files
+   /// and the resulting partition information embedded in the Dataset.
+   PartitioningOrFactory partitioning{Partitioning::Default()};
+
+   /// For the purposes of applying the partitioning, paths will be stripped
+   /// of the partition_base_dir. Files not matching the partition_base_dir
+   /// prefix will be skipped for partition discovery. The ignored files will still
+   /// be part of the Dataset, but will not have partition information.
+   ///
+   /// Example:
+   /// partition_base_dir = "/dataset";
+   ///
+   /// - "/dataset/US/sales.csv" -> "US/sales.csv" will be given to the partitioning
+   ///
+   /// - "/home/john/late_sales.csv" -> Will be ignored for partition discovery.
+   ///
+   /// This is useful for partitionings that parse directory segments where ordering
+   /// is important, e.g. DirectoryPartitioning.
+   std::string partition_base_dir;
+
+   /// Assert that all ColumnChunk paths are consistent. The parquet spec allows for
+   /// ColumnChunk data to be stored in multiple files, but ParquetDatasetFactory
+   /// supports only a single file with all ColumnChunk data. If this flag is set,
+   /// construction of a ParquetDatasetFactory will raise an error if ColumnChunk
+   /// data is not resident in a single file.
+   bool validate_column_chunk_paths = false;
+ };
+
+ /// \brief Create FileSystemDataset from custom `_metadata` cache file.
+ ///
+ /// Dask and other systems will generate a cache metadata file by concatenating
+ /// the RowGroupMetaData of multiple parquet files into a single parquet file
+ /// that only contains metadata and no ColumnChunk data.
+ ///
+ /// ParquetDatasetFactory creates a FileSystemDataset composed of
+ /// ParquetFileFragment where each fragment is pre-populated with the exact
+ /// number of row groups and statistics for each column.
+ class ARROW_DS_EXPORT ParquetDatasetFactory : public DatasetFactory {
+  public:
+   /// \brief Create a ParquetDatasetFactory from a metadata path.
+   ///
+   /// The `metadata_path` will be read from `filesystem`. Each RowGroup
+   /// contained in the metadata file will be relative to `dirname(metadata_path)`.
+   ///
+   /// \param[in] metadata_path path of the metadata parquet file
+   /// \param[in] filesystem from which to open/read the path
+   /// \param[in] format to read the file with.
+   /// \param[in] options see ParquetFactoryOptions
+   static Result<std::shared_ptr<DatasetFactory>> Make(
+       const std::string& metadata_path, std::shared_ptr<fs::FileSystem> filesystem,
+       std::shared_ptr<ParquetFileFormat> format, ParquetFactoryOptions options);
+
+   /// \brief Create a ParquetDatasetFactory from a metadata source.
+   ///
+   /// Similar to the previous Make definition, but the metadata can be a Buffer
+   /// and the base_path is explicit instead of inferred from the metadata
+   /// path.
+   ///
+   /// \param[in] metadata source to open the metadata parquet file from
+   /// \param[in] base_path used as the prefix of every parquet file referenced
+   /// \param[in] filesystem from which to read the files referenced.
+   /// \param[in] format to read the file with.
+   /// \param[in] options see ParquetFactoryOptions
+   static Result<std::shared_ptr<DatasetFactory>> Make(
+       const FileSource& metadata, const std::string& base_path,
+       std::shared_ptr<fs::FileSystem> filesystem,
+       std::shared_ptr<ParquetFileFormat> format, ParquetFactoryOptions options);
+
+   Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
+       InspectOptions options) override;
+
+   Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) override;
+
+  protected:
+   ParquetDatasetFactory(
+       std::shared_ptr<fs::FileSystem> filesystem,
+       std::shared_ptr<ParquetFileFormat> format,
+       std::shared_ptr<parquet::FileMetaData> metadata,
+       std::shared_ptr<parquet::arrow::SchemaManifest> manifest,
+       std::shared_ptr<Schema> physical_schema, std::string base_path,
+       ParquetFactoryOptions options,
+       std::vector<std::pair<std::string, std::vector<int>>> paths_with_row_group_ids)
+       : filesystem_(std::move(filesystem)),
+         format_(std::move(format)),
+         metadata_(std::move(metadata)),
+         manifest_(std::move(manifest)),
+         physical_schema_(std::move(physical_schema)),
+         base_path_(std::move(base_path)),
+         options_(std::move(options)),
+         paths_with_row_group_ids_(std::move(paths_with_row_group_ids)) {}
+
+   std::shared_ptr<fs::FileSystem> filesystem_;
+   std::shared_ptr<ParquetFileFormat> format_;
+   std::shared_ptr<parquet::FileMetaData> metadata_;
+   std::shared_ptr<parquet::arrow::SchemaManifest> manifest_;
+   std::shared_ptr<Schema> physical_schema_;
+   std::string base_path_;
+   ParquetFactoryOptions options_;
+   std::vector<std::pair<std::string, std::vector<int>>> paths_with_row_group_ids_;
+
+  private:
+   Result<std::vector<std::shared_ptr<FileFragment>>> CollectParquetFragments(
+       const Partitioning& partitioning);
+
+   Result<std::shared_ptr<Schema>> PartitionSchema();
+ };
+
+ /// @}
+
+ } // namespace dataset
+ } // namespace arrow
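A minimal sketch of the `_metadata` workflow declared above, using the first Make() overload from this header; the metadata path is illustrative and the filesystem is assumed to be provided by the caller.

```cpp
#include <memory>
#include <utility>

#include "arrow/dataset/file_parquet.h"
#include "arrow/filesystem/filesystem.h"

namespace ds = arrow::dataset;

arrow::Result<std::shared_ptr<ds::Dataset>> FromMetadataFile(
    std::shared_ptr<arrow::fs::FileSystem> fs) {
  auto format = std::make_shared<ds::ParquetFileFormat>();
  ds::ParquetFactoryOptions options;
  options.validate_column_chunk_paths = true;  // fail fast on multi-file ColumnChunks
  ARROW_ASSIGN_OR_RAISE(auto factory,
                        ds::ParquetDatasetFactory::Make("/data/_metadata", std::move(fs),
                                                        format, std::move(options)));
  // Fragments come back pre-populated with row group counts and statistics.
  return factory->Finish();
}
```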
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/parquet_encryption_config.h ADDED
@@ -0,0 +1,75 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include "arrow/dataset/type_fwd.h"
+
+ namespace parquet::encryption {
+ class CryptoFactory;
+ struct KmsConnectionConfig;
+ struct EncryptionConfiguration;
+ struct DecryptionConfiguration;
+ } // namespace parquet::encryption
+
+ namespace arrow {
+ namespace dataset {
+
+ /// \brief Core configuration class encapsulating parameters for high-level encryption
+ /// within the Parquet framework.
+ ///
+ /// ParquetEncryptionConfig serves as a bridge, passing encryption-related
+ /// parameters to the appropriate components within the Parquet library. It holds
+ /// references to objects defining the encryption strategy, Key Management Service
+ /// (KMS) configuration, and specific encryption configurations for Parquet data.
+ struct ARROW_DS_EXPORT ParquetEncryptionConfig {
+   /// Shared pointer to the CryptoFactory object, responsible for creating
+   /// cryptographic components like encryptors and decryptors.
+   std::shared_ptr<parquet::encryption::CryptoFactory> crypto_factory;
+
+   /// Shared pointer to the KmsConnectionConfig object, holding configuration
+   /// parameters for connecting to a Key Management Service (KMS).
+   std::shared_ptr<parquet::encryption::KmsConnectionConfig> kms_connection_config;
+
+   /// Shared pointer to the EncryptionConfiguration object, defining specific
+   /// encryption settings for Parquet data, like keys for different columns.
+   std::shared_ptr<parquet::encryption::EncryptionConfiguration> encryption_config;
+ };
+
+ /// \brief Core configuration class encapsulating parameters for high-level decryption
+ /// within the Parquet framework.
+ ///
+ /// ParquetDecryptionConfig is designed to pass decryption-related parameters to the
+ /// appropriate decryption components within the Parquet library. It holds references
+ /// to objects defining the decryption strategy, Key Management Service (KMS)
+ /// configuration, and specific decryption configurations for reading encrypted
+ /// Parquet data.
+ struct ARROW_DS_EXPORT ParquetDecryptionConfig {
+   /// Shared pointer to the CryptoFactory object, pivotal in creating cryptographic
+   /// components for the decryption process.
+   std::shared_ptr<parquet::encryption::CryptoFactory> crypto_factory;
+
+   /// Shared pointer to the KmsConnectionConfig object, containing parameters for
+   /// connecting to a Key Management Service (KMS) during decryption.
+   std::shared_ptr<parquet::encryption::KmsConnectionConfig> kms_connection_config;
+
+   /// Shared pointer to the DecryptionConfiguration object, specifying decryption
+   /// settings for reading encrypted Parquet data.
+   std::shared_ptr<parquet::encryption::DecryptionConfiguration> decryption_config;
+ };
+
+ } // namespace dataset
+ } // namespace arrow
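A minimal sketch wiring ParquetEncryptionConfig into the ParquetFileWriteOptions declared in file_parquet.h. The EncryptionConfiguration constructor and column_keys format reflect parquet::encryption::CryptoFactory as I understand it and should be checked against the installed Parquet version; the key ids and column name are illustrative assumptions, and the CryptoFactory/KMS setup is left to the caller.

```cpp
#include <memory>
#include <utility>

#include "arrow/dataset/file_parquet.h"
#include "arrow/dataset/parquet_encryption_config.h"
#include "parquet/encryption/crypto_factory.h"

namespace ds = arrow::dataset;

std::shared_ptr<ds::ParquetFileWriteOptions> MakeEncryptedWriteOptions(
    std::shared_ptr<parquet::encryption::CryptoFactory> crypto_factory,
    std::shared_ptr<parquet::encryption::KmsConnectionConfig> kms_config) {
  // "footer_key_id"/"col_key_id" are placeholder master key ids known to the KMS.
  auto encryption_config =
      std::make_shared<parquet::encryption::EncryptionConfiguration>(
          /*footer_key=*/"footer_key_id");
  encryption_config->column_keys = "col_key_id: secret_column";

  auto dataset_config = std::make_shared<ds::ParquetEncryptionConfig>();
  dataset_config->crypto_factory = std::move(crypto_factory);
  dataset_config->kms_connection_config = std::move(kms_config);
  dataset_config->encryption_config = std::move(encryption_config);

  auto format = std::make_shared<ds::ParquetFileFormat>();
  auto options = std::static_pointer_cast<ds::ParquetFileWriteOptions>(
      format->DefaultWriteOptions());
  options->parquet_encryption_config = std::move(dataset_config);
  return options;
}
```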
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/partition.h ADDED
@@ -0,0 +1,432 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <iosfwd>
24
+ #include <memory>
25
+ #include <optional>
26
+ #include <string>
27
+ #include <unordered_map>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ #include "arrow/compute/expression.h"
32
+ #include "arrow/dataset/type_fwd.h"
33
+ #include "arrow/dataset/visibility.h"
34
+ #include "arrow/util/compare.h"
35
+
36
+ namespace arrow {
37
+
38
+ namespace dataset {
39
+
40
+ constexpr char kFilenamePartitionSep = '_';
41
+
42
+ struct ARROW_DS_EXPORT PartitionPathFormat {
43
+ std::string directory, filename;
44
+ };
45
+
46
+ // ----------------------------------------------------------------------
47
+ // Partitioning
48
+
49
+ /// \defgroup dataset-partitioning Partitioning API
50
+ ///
51
+ /// @{
52
+
53
+ /// \brief Interface for parsing partition expressions from string partition
54
+ /// identifiers.
55
+ ///
56
+ /// For example, the identifier "foo=5" might be parsed to an equality expression
57
+ /// between the "foo" field and the value 5.
58
+ ///
59
+ /// Some partitionings may store the field names in a metadata
60
+ /// store instead of in file paths, for example
61
+ /// dataset_root/2009/11/... could be used when the partition fields
62
+ /// are "year" and "month"
63
+ ///
64
+ /// Paths are consumed from left to right. Paths must be relative to
65
+ /// the root of a partition; path prefixes must be removed before passing
66
+ /// the path to a partitioning for parsing.
67
+ class ARROW_DS_EXPORT Partitioning : public util::EqualityComparable<Partitioning> {
68
+ public:
69
+ virtual ~Partitioning() = default;
70
+
71
+ /// \brief The name identifying the kind of partitioning
72
+ virtual std::string type_name() const = 0;
73
+
74
+ //// \brief Return whether the partitionings are equal
75
+ virtual bool Equals(const Partitioning& other) const {
76
+ return schema_->Equals(other.schema_, /*check_metadata=*/false);
77
+ }
78
+
79
+ /// \brief If the input batch shares any fields with this partitioning,
80
+ /// produce sub-batches which satisfy mutually exclusive Expressions.
81
+ struct PartitionedBatches {
82
+ RecordBatchVector batches;
83
+ std::vector<compute::Expression> expressions;
84
+ };
85
+ virtual Result<PartitionedBatches> Partition(
86
+ const std::shared_ptr<RecordBatch>& batch) const = 0;
87
+
88
+ /// \brief Parse a path into a partition expression
89
+ virtual Result<compute::Expression> Parse(const std::string& path) const = 0;
90
+
91
+ virtual Result<PartitionPathFormat> Format(const compute::Expression& expr) const = 0;
92
+
93
+ /// \brief A default Partitioning which is a DirectoryPartitioning
94
+ /// with an empty schema.
95
+ static std::shared_ptr<Partitioning> Default();
96
+
97
+ /// \brief The partition schema.
98
+ const std::shared_ptr<Schema>& schema() const { return schema_; }
99
+
100
+ protected:
101
+ explicit Partitioning(std::shared_ptr<Schema> schema) : schema_(std::move(schema)) {}
102
+
103
+ std::shared_ptr<Schema> schema_;
104
+ };
105
+
106
+ /// \brief The encoding of partition segments.
107
+ enum class SegmentEncoding : int8_t {
108
+ /// No encoding.
109
+ None = 0,
110
+ /// Segment values are URL-encoded.
111
+ Uri = 1,
112
+ };
113
+
114
+ ARROW_DS_EXPORT
115
+ std::ostream& operator<<(std::ostream& os, SegmentEncoding segment_encoding);
116
+
117
+ /// \brief Options for key-value based partitioning (hive/directory).
118
+ struct ARROW_DS_EXPORT KeyValuePartitioningOptions {
119
+ /// After splitting a path into components, decode the path components
120
+ /// before parsing according to this scheme.
121
+ SegmentEncoding segment_encoding = SegmentEncoding::Uri;
122
+ };
123
+
124
+ /// \brief Options for inferring a partitioning.
125
+ struct ARROW_DS_EXPORT PartitioningFactoryOptions {
126
+ /// When inferring a schema for partition fields, yield dictionary encoded types
127
+ /// instead of plain. This can be more efficient when materializing virtual
128
+ /// columns, and Expressions parsed by the finished Partitioning will include
129
+ /// dictionaries of all unique inspected values for each field.
130
+ bool infer_dictionary = false;
131
+ /// Optionally, an expected schema can be provided, in which case inference
132
+ /// will only check discovered fields against the schema and update internal
133
+ /// state (such as dictionaries).
134
+ std::shared_ptr<Schema> schema;
135
+ /// After splitting a path into components, decode the path components
136
+ /// before parsing according to this scheme.
137
+ SegmentEncoding segment_encoding = SegmentEncoding::Uri;
138
+
139
+ KeyValuePartitioningOptions AsPartitioningOptions() const;
140
+ };
141
+
142
+ /// \brief Options for inferring a hive-style partitioning.
143
+ struct ARROW_DS_EXPORT HivePartitioningFactoryOptions : PartitioningFactoryOptions {
144
+ /// The hive partitioning scheme maps null to a hard coded fallback string.
145
+ std::string null_fallback;
146
+
147
+ HivePartitioningOptions AsHivePartitioningOptions() const;
148
+ };
149
+
150
+ /// \brief PartitioningFactory provides creation of a partitioning when the
151
+ /// specific schema must be inferred from available paths (no explicit schema is known).
152
+ class ARROW_DS_EXPORT PartitioningFactory {
153
+ public:
154
+ virtual ~PartitioningFactory() = default;
155
+
156
+ /// \brief The name identifying the kind of partitioning
157
+ virtual std::string type_name() const = 0;
158
+
159
+ /// Get the schema for the resulting Partitioning.
160
+ /// This may reset internal state, for example dictionaries of unique representations.
161
+ virtual Result<std::shared_ptr<Schema>> Inspect(
162
+ const std::vector<std::string>& paths) = 0;
163
+
164
+ /// Create a partitioning using the provided schema
165
+ /// (fields may be dropped).
166
+ virtual Result<std::shared_ptr<Partitioning>> Finish(
167
+ const std::shared_ptr<Schema>& schema) const = 0;
168
+ };
169
+
170
+ /// \brief Subclass for the common case of a partitioning which yields an equality
171
+ /// expression for each segment
172
+ class ARROW_DS_EXPORT KeyValuePartitioning : public Partitioning {
173
+ public:
174
+ /// An unconverted equality expression consisting of a field name and the representation
175
+ /// of a scalar value
176
+ struct Key {
177
+ std::string name;
178
+ std::optional<std::string> value;
179
+ };
180
+
181
+ Result<PartitionedBatches> Partition(
182
+ const std::shared_ptr<RecordBatch>& batch) const override;
183
+
184
+ Result<compute::Expression> Parse(const std::string& path) const override;
185
+
186
+ Result<PartitionPathFormat> Format(const compute::Expression& expr) const override;
187
+
188
+ const ArrayVector& dictionaries() const { return dictionaries_; }
189
+
190
+ SegmentEncoding segment_encoding() const { return options_.segment_encoding; }
191
+
192
+ bool Equals(const Partitioning& other) const override;
193
+
194
+ protected:
195
+ KeyValuePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries,
196
+ KeyValuePartitioningOptions options)
197
+ : Partitioning(std::move(schema)),
198
+ dictionaries_(std::move(dictionaries)),
199
+ options_(options) {
200
+ if (dictionaries_.empty()) {
201
+ dictionaries_.resize(schema_->num_fields());
202
+ }
203
+ }
204
+
205
+ virtual Result<std::vector<Key>> ParseKeys(const std::string& path) const = 0;
206
+
207
+ virtual Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const = 0;
208
+
209
+ /// Convert a Key to a full expression.
210
+ Result<compute::Expression> ConvertKey(const Key& key) const;
211
+
212
+ Result<std::vector<std::string>> FormatPartitionSegments(
213
+ const ScalarVector& values) const;
214
+ Result<std::vector<Key>> ParsePartitionSegments(
215
+ const std::vector<std::string>& segments) const;
216
+
217
+ ArrayVector dictionaries_;
218
+ KeyValuePartitioningOptions options_;
219
+ };
220
+
221
+ /// \brief DirectoryPartitioning parses one segment of a path for each field in its
222
+ /// schema. All fields are required, so paths passed to DirectoryPartitioning::Parse
223
+ /// must contain segments for each field.
224
+ ///
225
+ /// For example given schema<year:int16, month:int8> the path "/2009/11" would be
226
+ /// parsed to ("year"_ == 2009 and "month"_ == 11)
227
+ class ARROW_DS_EXPORT DirectoryPartitioning : public KeyValuePartitioning {
228
+ public:
229
+ /// If a field in schema is of dictionary type, the corresponding element of
230
+ /// dictionaries must be contain the dictionary of values for that field.
231
+ explicit DirectoryPartitioning(std::shared_ptr<Schema> schema,
232
+ ArrayVector dictionaries = {},
233
+ KeyValuePartitioningOptions options = {});
234
+
235
+ std::string type_name() const override { return "directory"; }
236
+
237
+ bool Equals(const Partitioning& other) const override;
238
+
239
+ /// \brief Create a factory for a directory partitioning.
240
+ ///
241
+ /// \param[in] field_names The names for the partition fields. Types will be
242
+ /// inferred.
243
+ static std::shared_ptr<PartitioningFactory> MakeFactory(
244
+ std::vector<std::string> field_names, PartitioningFactoryOptions = {});
245
+
246
+ private:
247
+ Result<std::vector<Key>> ParseKeys(const std::string& path) const override;
248
+
249
+ Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
250
+ };
251
+
252
+ /// \brief The default fallback used for null values in a Hive-style partitioning.
253
+ static constexpr char kDefaultHiveNullFallback[] = "__HIVE_DEFAULT_PARTITION__";
254
+
255
+ struct ARROW_DS_EXPORT HivePartitioningOptions : public KeyValuePartitioningOptions {
256
+ std::string null_fallback = kDefaultHiveNullFallback;
257
+
258
+ static HivePartitioningOptions DefaultsWithNullFallback(std::string fallback) {
259
+ HivePartitioningOptions options;
260
+ options.null_fallback = std::move(fallback);
261
+ return options;
262
+ }
263
+ };
264
+
265
+ /// \brief Multi-level, directory based partitioning
266
+ /// originating from Apache Hive with all data files stored in the
267
+ /// leaf directories. Data is partitioned by static values of a
268
+ /// particular column in the schema. Partition keys are represented in
269
+ /// the form $key=$value in directory names.
270
+ /// Field order is ignored, as are missing or unrecognized field names.
271
+ ///
272
+ /// For example given schema<year:int16, month:int8, day:int8> the path
273
+ /// "/day=321/ignored=3.4/year=2009" parses to ("year"_ == 2009 and "day"_ == 321)
274
+ class ARROW_DS_EXPORT HivePartitioning : public KeyValuePartitioning {
275
+ public:
276
+ /// If a field in schema is of dictionary type, the corresponding element of
277
+ /// dictionaries must be contain the dictionary of values for that field.
278
+ explicit HivePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries = {},
279
+ std::string null_fallback = kDefaultHiveNullFallback)
280
+ : KeyValuePartitioning(std::move(schema), std::move(dictionaries),
281
+ KeyValuePartitioningOptions()),
282
+ hive_options_(
283
+ HivePartitioningOptions::DefaultsWithNullFallback(std::move(null_fallback))) {
284
+ }
285
+
286
+ explicit HivePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries,
287
+ HivePartitioningOptions options)
288
+ : KeyValuePartitioning(std::move(schema), std::move(dictionaries), options),
289
+ hive_options_(options) {}
290
+
291
+ std::string type_name() const override { return "hive"; }
292
+ std::string null_fallback() const { return hive_options_.null_fallback; }
293
+ const HivePartitioningOptions& options() const { return hive_options_; }
294
+
295
+ static Result<std::optional<Key>> ParseKey(const std::string& segment,
296
+ const HivePartitioningOptions& options);
297
+
298
+ bool Equals(const Partitioning& other) const override;
299
+
300
+ /// \brief Create a factory for a hive partitioning.
301
+ static std::shared_ptr<PartitioningFactory> MakeFactory(
302
+ HivePartitioningFactoryOptions = {});
303
+
304
+ private:
305
+ const HivePartitioningOptions hive_options_;
306
+ Result<std::vector<Key>> ParseKeys(const std::string& path) const override;
307
+
308
+ Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
309
+ };
310
+
311
+ /// \brief Implementation provided by lambda or other callable
312
+ class ARROW_DS_EXPORT FunctionPartitioning : public Partitioning {
313
+ public:
314
+ using ParseImpl = std::function<Result<compute::Expression>(const std::string&)>;
315
+
316
+ using FormatImpl =
317
+ std::function<Result<PartitionPathFormat>(const compute::Expression&)>;
318
+
319
+ FunctionPartitioning(std::shared_ptr<Schema> schema, ParseImpl parse_impl,
320
+ FormatImpl format_impl = NULLPTR, std::string name = "function")
321
+ : Partitioning(std::move(schema)),
322
+ parse_impl_(std::move(parse_impl)),
323
+ format_impl_(std::move(format_impl)),
324
+ name_(std::move(name)) {}
325
+
326
+ std::string type_name() const override { return name_; }
327
+
328
+ bool Equals(const Partitioning& other) const override { return false; }
329
+
330
+ Result<compute::Expression> Parse(const std::string& path) const override {
331
+ return parse_impl_(path);
332
+ }
333
+
334
+ Result<PartitionPathFormat> Format(const compute::Expression& expr) const override {
335
+ if (format_impl_) {
336
+ return format_impl_(expr);
337
+ }
338
+ return Status::NotImplemented("formatting paths from ", type_name(), " Partitioning");
339
+ }
340
+
341
+ Result<PartitionedBatches> Partition(
342
+ const std::shared_ptr<RecordBatch>& batch) const override {
343
+ return Status::NotImplemented("partitioning batches from ", type_name(),
344
+ " Partitioning");
345
+ }
346
+
347
+ private:
348
+ ParseImpl parse_impl_;
349
+ FormatImpl format_impl_;
350
+ std::string name_;
351
+ };
352
+
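A sketch of the callable-based escape hatch above (editorial; the "dir" field and the parse rule are illustrative). A lambda supplies the parse logic while formatting stays unimplemented:

    #include <memory>
    #include <string>
    #include "arrow/compute/expression.h"
    #include "arrow/dataset/partition.h"

    std::shared_ptr<arrow::dataset::Partitioning> MakeDirLiteralPartitioning() {
      // Treat each whole path as the literal value of a single "dir" field.
      auto schema = arrow::schema({arrow::field("dir", arrow::utf8())});
      auto parse =
          [](const std::string& path) -> arrow::Result<arrow::compute::Expression> {
        return arrow::compute::equal(arrow::compute::field_ref("dir"),
                                     arrow::compute::literal(path));
      };
      // No FormatImpl is supplied, so Format() will return NotImplemented.
      return std::make_shared<arrow::dataset::FunctionPartitioning>(schema, parse);
    }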
353
+ class ARROW_DS_EXPORT FilenamePartitioning : public KeyValuePartitioning {
354
+ public:
355
+ /// \brief Construct a FilenamePartitioning from its components.
356
+ ///
357
+ /// If a field in schema is of dictionary type, the corresponding element of
358
+ /// dictionaries must contain the dictionary of values for that field.
359
+ explicit FilenamePartitioning(std::shared_ptr<Schema> schema,
360
+ ArrayVector dictionaries = {},
361
+ KeyValuePartitioningOptions options = {});
362
+
363
+ std::string type_name() const override { return "filename"; }
364
+
365
+ /// \brief Create a factory for a filename partitioning.
366
+ ///
367
+ /// \param[in] field_names The names for the partition fields. Types will be
368
+ /// inferred.
369
+ static std::shared_ptr<PartitioningFactory> MakeFactory(
370
+ std::vector<std::string> field_names, PartitioningFactoryOptions = {});
371
+
372
+ bool Equals(const Partitioning& other) const override;
373
+
374
+ private:
375
+ Result<std::vector<Key>> ParseKeys(const std::string& path) const override;
376
+
377
+ Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
378
+ };
379
+
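A one-line sketch of the factory path for filename partitioning (editorial; the file-name shape shown in the comment is illustrative, with keys embedded in the file name itself):

    // During dataset discovery the factory inspects file names such as
    // "2009_01_part-0.parquet" and infers the types of "year" and "month".
    auto factory = arrow::dataset::FilenamePartitioning::MakeFactory({"year", "month"});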
380
+ ARROW_DS_EXPORT std::string StripPrefix(const std::string& path,
381
+ const std::string& prefix);
382
+
383
+ /// \brief Extracts the directory part of a path, removing the given prefix and the filename
384
+ ///
385
+ /// e.g., `StripPrefixAndFilename("/data/year=2019/c.txt", "/data") ->
386
+ /// "year=2019"`
387
+ ARROW_DS_EXPORT std::string StripPrefixAndFilename(const std::string& path,
388
+ const std::string& prefix);
389
+
390
+ /// \brief Vector version of StripPrefixAndFilename.
391
+ ARROW_DS_EXPORT std::vector<std::string> StripPrefixAndFilename(
392
+ const std::vector<std::string>& paths, const std::string& prefix);
393
+
394
+ /// \brief Vector version of StripPrefixAndFilename.
395
+ ARROW_DS_EXPORT std::vector<std::string> StripPrefixAndFilename(
396
+ const std::vector<fs::FileInfo>& files, const std::string& prefix);
397
+
398
+ /// \brief Either a Partitioning or a PartitioningFactory
399
+ class ARROW_DS_EXPORT PartitioningOrFactory {
400
+ public:
401
+ explicit PartitioningOrFactory(std::shared_ptr<Partitioning> partitioning)
402
+ : partitioning_(std::move(partitioning)) {}
403
+
404
+ explicit PartitioningOrFactory(std::shared_ptr<PartitioningFactory> factory)
405
+ : factory_(std::move(factory)) {}
406
+
407
+ PartitioningOrFactory& operator=(std::shared_ptr<Partitioning> partitioning) {
408
+ return *this = PartitioningOrFactory(std::move(partitioning));
409
+ }
410
+
411
+ PartitioningOrFactory& operator=(std::shared_ptr<PartitioningFactory> factory) {
412
+ return *this = PartitioningOrFactory(std::move(factory));
413
+ }
414
+
415
+ /// \brief The partitioning (if given).
416
+ const std::shared_ptr<Partitioning>& partitioning() const { return partitioning_; }
417
+
418
+ /// \brief The partition factory (if given).
419
+ const std::shared_ptr<PartitioningFactory>& factory() const { return factory_; }
420
+
421
+ /// \brief Get the partition schema, inferring it with the given factory if needed.
422
+ Result<std::shared_ptr<Schema>> GetOrInferSchema(const std::vector<std::string>& paths);
423
+
424
+ private:
425
+ std::shared_ptr<PartitioningFactory> factory_;
426
+ std::shared_ptr<Partitioning> partitioning_;
427
+ };
428
+
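A sketch (editorial) of how the two alternatives converge on a schema; the paths are illustrative and are first reduced to partition segments with StripPrefixAndFilename:

    #include <string>
    #include <vector>
    #include "arrow/dataset/partition.h"

    arrow::Result<std::shared_ptr<arrow::Schema>> InferPartitionSchemaSketch() {
      arrow::dataset::PartitioningOrFactory por(
          arrow::dataset::HivePartitioning::MakeFactory());
      auto segments = arrow::dataset::StripPrefixAndFilename(
          std::vector<std::string>{"/data/year=2009/part-0.parquet",
                                   "/data/year=2010/part-0.parquet"},
          "/data");
      // With a factory, the partition schema is inferred from the segments;
      // with a concrete Partitioning it would be returned directly.
      return por.GetOrInferSchema(segments);
    }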
429
+ /// @}
430
+
431
+ } // namespace dataset
432
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/pch.h ADDED
@@ -0,0 +1,27 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Often-used headers, for precompiling.
19
+ // If updating this header, please make sure you check compilation speed
20
+ // before checking in. Adding headers which are not used extremely often
21
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
22
+
23
+ // This API is EXPERIMENTAL.
24
+
25
+ #include "arrow/dataset/dataset.h"
26
+ #include "arrow/dataset/scanner.h"
27
+ #include "arrow/pch.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/plan.h ADDED
@@ -0,0 +1,33 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #include "arrow/dataset/visibility.h"
21
+
22
+ namespace arrow {
23
+ namespace dataset {
24
+ namespace internal {
25
+
26
+ /// Register dataset-based exec nodes with the exec node registry
27
+ ///
28
+ /// This function must be called before using dataset ExecNode factories
29
+ ARROW_DS_EXPORT void Initialize();
30
+
31
+ } // namespace internal
32
+ } // namespace dataset
33
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/projector.h ADDED
@@ -0,0 +1,32 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include "arrow/dataset/visibility.h"
23
+ #include "arrow/type_fwd.h"
24
+
25
+ namespace arrow {
26
+ namespace dataset {
27
+
28
+ // FIXME this is superseded by compute::Expression::Bind
29
+ ARROW_DS_EXPORT Status CheckProjectable(const Schema& from, const Schema& to);
30
+
31
+ } // namespace dataset
32
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/scanner.h ADDED
@@ -0,0 +1,578 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+ #include <vector>
27
+
28
+ #include "arrow/acero/options.h"
29
+ #include "arrow/compute/expression.h"
30
+ #include "arrow/compute/type_fwd.h"
31
+ #include "arrow/dataset/dataset.h"
32
+ #include "arrow/dataset/projector.h"
33
+ #include "arrow/dataset/type_fwd.h"
34
+ #include "arrow/dataset/visibility.h"
35
+ #include "arrow/io/interfaces.h"
36
+ #include "arrow/memory_pool.h"
37
+ #include "arrow/type_fwd.h"
38
+ #include "arrow/util/async_generator.h"
39
+ #include "arrow/util/iterator.h"
40
+ #include "arrow/util/thread_pool.h"
41
+ #include "arrow/util/type_fwd.h"
42
+
43
+ namespace arrow {
44
+
45
+ using RecordBatchGenerator = std::function<Future<std::shared_ptr<RecordBatch>>()>;
46
+
47
+ namespace dataset {
48
+
49
+ /// \defgroup dataset-scanning Scanning API
50
+ ///
51
+ /// @{
52
+
53
+ constexpr int64_t kDefaultBatchSize = 1 << 17; // 128Ki rows
54
+ // Together with kDefaultFragmentReadahead below, this yields up to 64 batches (~8Mi rows)
55
+ constexpr int32_t kDefaultBatchReadahead = 16;
56
+ constexpr int32_t kDefaultFragmentReadahead = 4;
57
+ constexpr int32_t kDefaultBytesReadahead = 1 << 25; // 32MiB
58
+
59
+ /// Scan-specific options, which can be changed between scans of the same dataset.
60
+ struct ARROW_DS_EXPORT ScanOptions {
61
+ /// A row filter (which will be pushed down to partitioning/reading if supported).
62
+ compute::Expression filter = compute::literal(true);
63
+ /// A projection expression (which can add/remove/rename columns).
64
+ compute::Expression projection;
65
+
66
+ /// Schema with which batches will be read from fragments. This is also known as the
67
+ /// "reader schema"; it will be used (for example) in constructing CSV file readers to
68
+ /// identify column types for parsing. Usually only a subset of its fields (see
69
+ /// MaterializedFields) will be materialized during a scan.
70
+ std::shared_ptr<Schema> dataset_schema;
71
+
72
+ /// Schema of projected record batches. This is independent of dataset_schema as its
73
+ /// fields are derived from the projection. For example, let
74
+ ///
75
+ /// dataset_schema = {"a": int32, "b": int32, "id": utf8}
76
+ /// projection = project({equal(field_ref("a"), field_ref("b"))}, {"a_plus_b"})
77
+ ///
78
+ /// (no filter specified). In this case, the projected_schema would be
79
+ ///
80
+ /// {"a_plus_b": int32}
81
+ std::shared_ptr<Schema> projected_schema;
82
+
83
+ /// Maximum row count for scanned batches.
84
+ int64_t batch_size = kDefaultBatchSize;
85
+
86
+ /// How many batches to read ahead within a fragment.
87
+ ///
88
+ /// Set to 0 to disable batch readahead
89
+ ///
90
+ /// Note: May not be supported by all formats
91
+ /// Note: Will be ignored if use_threads is set to false
92
+ int32_t batch_readahead = kDefaultBatchReadahead;
93
+
94
+ /// How many files to read ahead
95
+ ///
96
+ /// Set to 0 to disable fragment readahead
97
+ ///
98
+ /// Note: May not be enforced by all scanners
99
+ /// Note: Will be ignored if use_threads is set to false
100
+ int32_t fragment_readahead = kDefaultFragmentReadahead;
101
+
102
+ /// A pool from which materialized and scanned arrays will be allocated.
103
+ MemoryPool* pool = arrow::default_memory_pool();
104
+
105
+ /// IOContext for any IO tasks
106
+ ///
107
+ /// Note: The IOContext executor will be ignored if use_threads is set to false
108
+ io::IOContext io_context;
109
+
110
+ /// If true the scanner will scan in parallel
111
+ ///
112
+ /// Note: If true, this will use threads from both the cpu_executor and the
113
+ /// io_context.executor
114
+ /// Note: This must be true in order for any readahead to happen
115
+ bool use_threads = false;
116
+
117
+ /// Fragment-specific scan options.
118
+ std::shared_ptr<FragmentScanOptions> fragment_scan_options;
119
+
120
+ /// Return a vector of FieldRefs that require materialization.
121
+ ///
122
+ /// This is usually the union of the fields referenced in the projection and the
123
+ /// filter expression. Examples:
124
+ ///
125
+ /// - `SELECT a, b WHERE a < 2 && c > 1` => ["a", "b", "a", "c"]
126
+ /// - `SELECT a + b < 3 WHERE a > 1` => ["a", "b", "a"]
127
+ ///
128
+ /// This is needed for expressions where a field may not be directly
129
+ /// used in the final projection but is still required to evaluate the
130
+ /// expression.
131
+ ///
132
+ /// This is used by Fragment implementations to apply the column
133
+ /// sub-selection optimization.
134
+ std::vector<FieldRef> MaterializedFields() const;
135
+
136
+ /// Parameters which control when the plan should pause for a slow consumer
137
+ acero::BackpressureOptions backpressure =
138
+ acero::BackpressureOptions::DefaultBackpressure();
139
+ };
140
+
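A minimal sketch (editorial) of populating these options directly; in practice they are usually filled in via ScannerBuilder, declared later in this header, and the field name "a" is illustrative:

    #include <memory>
    #include "arrow/compute/expression.h"
    #include "arrow/dataset/scanner.h"

    std::shared_ptr<arrow::dataset::ScanOptions> MakeScanOptionsSketch() {
      auto options = std::make_shared<arrow::dataset::ScanOptions>();
      options->filter = arrow::compute::greater(arrow::compute::field_ref("a"),
                                                arrow::compute::literal(1));
      options->batch_size = 64 * 1024;  // cap each batch at 64Ki rows
      options->use_threads = true;      // readahead only happens when true
      return options;
    }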
141
+ /// Scan-specific options, which can be changed between scans of the same dataset.
142
+ ///
143
+ /// A dataset consists of one or more individual fragments. A fragment is anything
144
+ /// that is independently scannable, often a file.
145
+ ///
146
+ /// Batches from all fragments will be converted to a single schema. This unified
147
+ /// schema is referred to as the "dataset schema" and is the output schema for
148
+ /// this node.
149
+ ///
150
+ /// Individual fragments may have schemas that are different from the dataset
151
+ /// schema. This is sometimes referred to as the physical or fragment schema.
152
+ /// Conversion from the fragment schema to the dataset schema is a process
153
+ /// known as evolution.
154
+ struct ARROW_DS_EXPORT ScanV2Options : public acero::ExecNodeOptions {
155
+ explicit ScanV2Options(std::shared_ptr<Dataset> dataset)
156
+ : dataset(std::move(dataset)) {}
157
+
158
+ /// \brief The dataset to scan
159
+ std::shared_ptr<Dataset> dataset;
160
+ /// \brief A row filter
161
+ ///
162
+ /// The filter expression should be written against the dataset schema.
163
+ /// The filter must be unbound.
164
+ ///
165
+ /// This is an opportunistic pushdown filter. Filtering capabilities will
166
+ /// vary between formats. If a format is not capable of applying the filter
167
+ /// then it will ignore it.
168
+ ///
169
+ /// Each fragment will do its best to filter the data based on the information
170
+ /// (partitioning guarantees, statistics) available to it. If it is able to
171
+ /// apply some filtering then it will indicate what filtering it was able to
172
+ /// apply by attaching a guarantee to the batch.
173
+ ///
174
+ /// For example, if a filter is x < 50 && y > 40 then a batch may be able to
175
+ /// apply a guarantee x < 50. Post-scan filtering would then only need to
176
+ /// consider y > 40 (for this specific batch). The next batch may not be able
177
+ /// to attach any guarantee and both clauses would need to be applied to that batch.
178
+ ///
179
+ /// A single guarantee-aware filtering operation should generally be applied to all
180
+ /// resulting batches. The scan node is not responsible for this.
181
+ ///
182
+ /// Fields that are referenced by the filter should be included in the `columns` vector.
183
+ /// The scan node will not automatically fetch fields referenced by the filter
184
+ /// expression. \see AddFieldsNeededForFilter
185
+ ///
186
+ /// If the filter references fields that are not included in `columns` this may or may
187
+ /// not be an error, depending on the format.
188
+ compute::Expression filter = compute::literal(true);
189
+
190
+ /// \brief The columns to scan
191
+ ///
192
+ /// This is not a simple list of top-level column indices but instead a set of paths
193
+ /// allowing for partial selection of columns
194
+ ///
195
+ /// These paths refer to the dataset schema
196
+ ///
197
+ /// For example, consider the following dataset schema:
198
+ /// schema({
199
+ /// field("score", int32()),
200
+ /// field("marker", struct_({
201
+ /// field("color", utf8()),
202
+ /// field("location", struct_({
203
+ /// field("x", float64()),
204
+ /// field("y", float64())
205
+ /// }))
206
+ /// }))
207
+ /// })
208
+ ///
209
+ /// If `columns` is {{0}, {1,1,0}} then the output schema is:
210
+ /// schema({field("score", int32()), field("x", float64())})
211
+ ///
212
+ /// If `columns` is {{1,1,1}, {1,1}} then the output schema is:
213
+ /// schema({
214
+ /// field("y", float64()),
215
+ /// field("location", struct_({
216
+ /// field("x", float64()),
217
+ /// field("y", float64())
218
+ /// }))
219
+ /// })
220
+ std::vector<FieldPath> columns;
221
+
222
+ /// \brief Target number of bytes to read ahead in a fragment
223
+ ///
224
+ /// This limit involves some amount of estimation. Formats typically only know
225
+ /// batch boundaries in terms of rows (not decoded bytes) and so an estimation
226
+ /// must be done to guess the average row size. Other formats like CSV and JSON
227
+ /// must make even more generalized guesses.
228
+ ///
229
+ /// This is a best-effort guide. Some formats may need to read ahead further,
230
+ /// for example, if scanning a parquet file that has batches with 100MiB of data
231
+ /// then the actual readahead will be at least 100MiB
232
+ ///
233
+ /// Set to 0 to disable readahead. When disabled, the scanner will read the
234
+ /// dataset one batch at a time
235
+ ///
236
+ /// This limit applies across all fragments. If the limit is 32MiB and the
237
+ /// fragment readahead allows for 20 fragments to be read at once then the
238
+ /// total readahead will still be 32MiB and NOT 20 * 32MiB.
239
+ int32_t target_bytes_readahead = kDefaultBytesReadahead;
240
+
241
+ /// \brief Number of fragments to read ahead
242
+ ///
243
+ /// Higher readahead will potentially lead to more efficient I/O but will lead
244
+ /// to the scan operation using more RAM. The default is fairly conservative
245
+ /// and designed for fast local disks (or slow local spinning disks which cannot
246
+ /// handle much parallelism anyway). When using a highly parallel remote filesystem
247
+ /// you will likely want to increase these values.
248
+ ///
249
+ /// Set to 0 to disable fragment readahead. When disabled the dataset will be scanned
250
+ /// one fragment at a time.
251
+ int32_t fragment_readahead = kDefaultFragmentReadahead;
252
+ /// \brief Options specific to the file format
253
+ const FragmentScanOptions* format_options = NULLPTR;
254
+
255
+ /// \brief Utility method to get a selection representing all columns in a dataset
256
+ static std::vector<FieldPath> AllColumns(const Schema& dataset_schema);
257
+
258
+ /// \brief Utility method to add fields needed for the current filter
259
+ ///
260
+ /// This method adds any fields that are needed by `filter` which are not already
261
+ /// included in the list of columns. Any new fields added will be added to the end
262
+ /// in no particular order.
263
+ static Status AddFieldsNeededForFilter(ScanV2Options* options);
264
+ };
265
+
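A sketch (editorial) of selecting columns and reconciling them with the filter; it assumes an existing `dataset` (std::shared_ptr<Dataset>), and the field name "x" is illustrative:

    #include "arrow/compute/expression.h"
    #include "arrow/dataset/scanner.h"

    arrow::Status ConfigureScanV2Sketch(
        const std::shared_ptr<arrow::dataset::Dataset>& dataset) {
      arrow::dataset::ScanV2Options scan_opts(dataset);
      scan_opts.columns =
          arrow::dataset::ScanV2Options::AllColumns(*dataset->schema());
      scan_opts.filter = arrow::compute::less(arrow::compute::field_ref("x"),
                                              arrow::compute::literal(50));
      // Ensure fields referenced by the filter are part of the selection,
      // since the scan node will not fetch them automatically.
      ARROW_RETURN_NOT_OK(
          arrow::dataset::ScanV2Options::AddFieldsNeededForFilter(&scan_opts));
      return arrow::Status::OK();
    }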
266
+ /// \brief Describes a projection
267
+ struct ARROW_DS_EXPORT ProjectionDescr {
268
+ /// \brief The projection expression itself
269
+ /// This expression must be a call to make_struct
270
+ compute::Expression expression;
271
+ /// \brief The output schema of the projection.
272
+ ///
273
+ /// This can be calculated from the input schema and the expression but it
274
+ /// is cached here for convenience.
275
+ std::shared_ptr<Schema> schema;
276
+
277
+ /// \brief Create a ProjectionDescr by binding an expression to the dataset schema
278
+ ///
279
+ /// expression must return a struct type
280
+ static Result<ProjectionDescr> FromStructExpression(
281
+ const compute::Expression& expression, const Schema& dataset_schema);
282
+
283
+ /// \brief Create a ProjectionDescr from expressions/names for each field
284
+ static Result<ProjectionDescr> FromExpressions(std::vector<compute::Expression> exprs,
285
+ std::vector<std::string> names,
286
+ const Schema& dataset_schema);
287
+
288
+ /// \brief Create a default projection referencing fields in the dataset schema
289
+ static Result<ProjectionDescr> FromNames(std::vector<std::string> names,
290
+ const Schema& dataset_schema);
291
+
292
+ /// \brief Make a projection that projects every field in the dataset schema
293
+ static Result<ProjectionDescr> Default(const Schema& dataset_schema);
294
+ };
295
+
296
+ /// \brief Utility method to set the projection expression and schema
297
+ ARROW_DS_EXPORT void SetProjection(ScanOptions* options, ProjectionDescr projection);
298
+
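A sketch (editorial) tying ProjectionDescr and SetProjection together, continuing the ScanOptions sketch earlier; the column names are illustrative:

    arrow::Status ProjectColumnsSketch(arrow::dataset::ScanOptions* options) {
      ARROW_ASSIGN_OR_RAISE(
          auto projection,
          arrow::dataset::ProjectionDescr::FromNames({"a", "b"},
                                                     *options->dataset_schema));
      // Installs both the expression and the cached projected schema.
      arrow::dataset::SetProjection(options, std::move(projection));
      return arrow::Status::OK();
    }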
299
+ /// \brief Combines a record batch with the fragment that the record batch originated
300
+ /// from
301
+ ///
302
+ /// Knowing the source fragment can be useful for debugging & understanding loaded
303
+ /// data
304
+ struct TaggedRecordBatch {
305
+ std::shared_ptr<RecordBatch> record_batch;
306
+ std::shared_ptr<Fragment> fragment;
307
+ };
308
+ using TaggedRecordBatchGenerator = std::function<Future<TaggedRecordBatch>()>;
309
+ using TaggedRecordBatchIterator = Iterator<TaggedRecordBatch>;
310
+
311
+ /// \brief Combines a tagged batch with positional information
312
+ ///
313
+ /// This is returned when scanning batches in an unordered fashion. This information is
314
+ /// needed if you ever want to reassemble the batches in order
315
+ struct EnumeratedRecordBatch {
316
+ Enumerated<std::shared_ptr<RecordBatch>> record_batch;
317
+ Enumerated<std::shared_ptr<Fragment>> fragment;
318
+ };
319
+ using EnumeratedRecordBatchGenerator = std::function<Future<EnumeratedRecordBatch>()>;
320
+ using EnumeratedRecordBatchIterator = Iterator<EnumeratedRecordBatch>;
321
+
322
+ /// @}
323
+
324
+ } // namespace dataset
325
+
326
+ template <>
327
+ struct IterationTraits<dataset::TaggedRecordBatch> {
328
+ static dataset::TaggedRecordBatch End() {
329
+ return dataset::TaggedRecordBatch{NULLPTR, NULLPTR};
330
+ }
331
+ static bool IsEnd(const dataset::TaggedRecordBatch& val) {
332
+ return val.record_batch == NULLPTR;
333
+ }
334
+ };
335
+
336
+ template <>
337
+ struct IterationTraits<dataset::EnumeratedRecordBatch> {
338
+ static dataset::EnumeratedRecordBatch End() {
339
+ return dataset::EnumeratedRecordBatch{
340
+ IterationEnd<Enumerated<std::shared_ptr<RecordBatch>>>(),
341
+ IterationEnd<Enumerated<std::shared_ptr<dataset::Fragment>>>()};
342
+ }
343
+ static bool IsEnd(const dataset::EnumeratedRecordBatch& val) {
344
+ return IsIterationEnd(val.fragment);
345
+ }
346
+ };
347
+
348
+ namespace dataset {
349
+
350
+ /// \defgroup dataset-scanning Scanning API
351
+ ///
352
+ /// @{
353
+
354
+ /// \brief A scanner glues together several dataset classes to load in data.
355
+ /// The dataset contains a collection of fragments and partitioning rules.
356
+ ///
357
+ /// The fragments identify independently loadable units of data (i.e. each fragment has
358
+ /// a potentially unique schema and possibly even a unique format; it should be possible to read
359
+ /// fragments in parallel if desired).
360
+ ///
361
+ /// The fragment's format contains the logic necessary to actually create a task to load
362
+ /// the fragment into memory. That task may or may not support parallel execution of
363
+ /// its own.
364
+ ///
365
+ /// The scanner is then responsible for creating scan tasks from every fragment in the
366
+ /// dataset and (potentially) sequencing the loaded record batches together.
367
+ ///
368
+ /// The scanner should not buffer the entire dataset in memory (unless asked); instead it
369
+ /// yields record batches as soon as they are ready to scan. Various readahead
370
+ /// properties control how much data is allowed to be scanned before pausing to let a
371
+ /// slow consumer catch up.
372
+ ///
373
+ /// Today the scanner also handles projection & filtering although that may change in
374
+ /// the future.
375
+ class ARROW_DS_EXPORT Scanner {
376
+ public:
377
+ virtual ~Scanner() = default;
378
+
379
+ /// \brief Apply a visitor to each RecordBatch as it is scanned. If multiple threads
380
+ /// are used (via use_threads), the visitor will be invoked from those threads and is
381
+ /// responsible for any synchronization.
382
+ virtual Status Scan(std::function<Status(TaggedRecordBatch)> visitor) = 0;
383
+ /// \brief Convert a Scanner into a Table.
384
+ ///
385
+ /// Use this convenience utility with care. This will serially materialize the
386
+ /// Scan result in memory before creating the Table.
387
+ virtual Result<std::shared_ptr<Table>> ToTable() = 0;
388
+ /// \brief Scan the dataset into a stream of record batches. Each batch is tagged
389
+ /// with the fragment it originated from. The batches will arrive in order. The
390
+ /// order of fragments is determined by the dataset.
391
+ ///
392
+ /// Note: The scanner will perform some readahead but will avoid materializing too
393
+ /// much in memory (this is governed by the readahead options and use_threads option).
394
+ /// If the readahead queue fills up then I/O will pause until the calling thread catches
395
+ /// up.
396
+ virtual Result<TaggedRecordBatchIterator> ScanBatches() = 0;
397
+ virtual Result<TaggedRecordBatchGenerator> ScanBatchesAsync() = 0;
398
+ virtual Result<TaggedRecordBatchGenerator> ScanBatchesAsync(
399
+ ::arrow::internal::Executor* cpu_thread_pool) = 0;
400
+ /// \brief Scan the dataset into a stream of record batches. Unlike ScanBatches this
401
+ /// method may allow record batches to be returned out of order. This allows for more
402
+ /// efficient scanning: some fragments may be accessed more quickly than others (e.g.
403
+ /// may be cached in RAM or just happen to get scheduled earlier by the I/O)
404
+ ///
405
+ /// To make up for the out-of-order iteration each batch is further tagged with
406
+ /// positional information.
407
+ virtual Result<EnumeratedRecordBatchIterator> ScanBatchesUnordered() = 0;
408
+ virtual Result<EnumeratedRecordBatchGenerator> ScanBatchesUnorderedAsync() = 0;
409
+ virtual Result<EnumeratedRecordBatchGenerator> ScanBatchesUnorderedAsync(
410
+ ::arrow::internal::Executor* cpu_thread_pool) = 0;
411
+ /// \brief A convenience to synchronously load the given rows by index.
412
+ ///
413
+ /// Will only consume as many batches as needed from ScanBatches().
414
+ virtual Result<std::shared_ptr<Table>> TakeRows(const Array& indices) = 0;
415
+ /// \brief Get the first N rows.
416
+ virtual Result<std::shared_ptr<Table>> Head(int64_t num_rows) = 0;
417
+ /// \brief Count rows matching a predicate.
418
+ ///
419
+ /// This method will push down the predicate and compute the result based on fragment
420
+ /// metadata if possible.
421
+ virtual Result<int64_t> CountRows() = 0;
422
+ virtual Future<int64_t> CountRowsAsync() = 0;
423
+ /// \brief Convert the Scanner to a RecordBatchReader so it can be
424
+ /// easily used with APIs that expect a reader.
425
+ virtual Result<std::shared_ptr<RecordBatchReader>> ToRecordBatchReader() = 0;
426
+
427
+ /// \brief Get the options for this scan.
428
+ const std::shared_ptr<ScanOptions>& options() const { return scan_options_; }
429
+ /// \brief Get the dataset that this scanner will scan
430
+ virtual const std::shared_ptr<Dataset>& dataset() const = 0;
431
+
432
+ protected:
433
+ explicit Scanner(std::shared_ptr<ScanOptions> scan_options)
434
+ : scan_options_(std::move(scan_options)) {}
435
+
436
+ Result<EnumeratedRecordBatchIterator> AddPositioningToInOrderScan(
437
+ TaggedRecordBatchIterator scan);
438
+
439
+ const std::shared_ptr<ScanOptions> scan_options_;
440
+ };
441
+
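A sketch (editorial) of the two simplest consumption patterns; it assumes `scanner` was produced by the ScannerBuilder declared next:

    arrow::Result<std::shared_ptr<arrow::Table>> ConsumeScannerSketch(
        const std::shared_ptr<arrow::dataset::Scanner>& scanner) {
      // Visit each batch as it is scanned; with use_threads enabled the visitor
      // may run on scan threads and must synchronize any shared state itself.
      ARROW_RETURN_NOT_OK(
          scanner->Scan([](arrow::dataset::TaggedRecordBatch tagged) {
            // tagged.record_batch holds the data; tagged.fragment its origin.
            return arrow::Status::OK();
          }));
      // Or materialize everything at once (use with care on large datasets).
      return scanner->ToTable();
    }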
442
+ /// \brief ScannerBuilder is a factory class to construct a Scanner. It is used
443
+ /// to pass information, notably a potential filter expression and a subset of
444
+ /// columns to materialize.
445
+ class ARROW_DS_EXPORT ScannerBuilder {
446
+ public:
447
+ explicit ScannerBuilder(std::shared_ptr<Dataset> dataset);
448
+
449
+ ScannerBuilder(std::shared_ptr<Dataset> dataset,
450
+ std::shared_ptr<ScanOptions> scan_options);
451
+
452
+ ScannerBuilder(std::shared_ptr<Schema> schema, std::shared_ptr<Fragment> fragment,
453
+ std::shared_ptr<ScanOptions> scan_options);
454
+
455
+ /// \brief Make a scanner from a record batch reader.
456
+ ///
457
+ /// The resulting scanner can be scanned only once. This is intended
458
+ /// to support writing data from streaming sources or other sources
459
+ /// that can be iterated only once.
460
+ static std::shared_ptr<ScannerBuilder> FromRecordBatchReader(
461
+ std::shared_ptr<RecordBatchReader> reader);
462
+
463
+ /// \brief Set the subset of columns to materialize.
464
+ ///
465
+ /// Columns which are not referenced may not be read from fragments.
466
+ ///
467
+ /// \param[in] columns list of columns to project. Order and duplicates will
468
+ /// be preserved.
469
+ ///
470
+ /// \return Failure if any column name does not exist in the dataset's
471
+ /// Schema.
472
+ Status Project(std::vector<std::string> columns);
473
+
474
+ /// \brief Set expressions which will be evaluated to produce the materialized
475
+ /// columns.
476
+ ///
477
+ /// Columns which are not referenced may not be read from fragments.
478
+ ///
479
+ /// \param[in] exprs expressions to evaluate to produce columns.
480
+ /// \param[in] names list of names for the resulting columns.
481
+ ///
482
+ /// \return Failure if any referenced column does not exist in the dataset's
483
+ /// Schema.
484
+ Status Project(std::vector<compute::Expression> exprs, std::vector<std::string> names);
485
+
486
+ /// \brief Set the filter expression to return only rows matching the filter.
487
+ ///
488
+ /// The predicate will be passed down to Sources and corresponding
489
+ /// Fragments to exploit predicate pushdown if possible using
490
+ /// partition information or Fragment internal metadata, e.g. Parquet statistics.
491
+ /// Columns which are not referenced may not be read from fragments.
492
+ ///
493
+ /// \param[in] filter expression to filter rows with.
494
+ ///
495
+ /// \return Failure if any referenced column does not exist in the dataset's
496
+ /// Schema.
497
+ Status Filter(const compute::Expression& filter);
498
+
499
+ /// \brief Indicate if the Scanner should make use of the available
500
+ /// ThreadPool found in ScanOptions.
501
+ Status UseThreads(bool use_threads = true);
502
+
503
+ /// \brief Set the maximum number of rows per RecordBatch.
504
+ ///
505
+ /// \param[in] batch_size the maximum number of rows.
506
+ /// \returns An error if batch_size is not greater than 0.
507
+ ///
508
+ /// This option provides a control limiting the memory owned by any RecordBatch.
509
+ Status BatchSize(int64_t batch_size);
510
+
511
+ /// \brief Set the number of batches to read ahead within a fragment.
512
+ ///
513
+ /// \param[in] batch_readahead How many batches to read ahead within a fragment
514
+ /// \returns an error if this number is less than 0.
515
+ ///
516
+ /// This option provides a control on the RAM vs I/O tradeoff.
517
+ /// It might not be supported by all file formats, in which case it will
518
+ /// simply be ignored.
519
+ Status BatchReadahead(int32_t batch_readahead);
520
+
521
+ /// \brief Set the number of fragments to read ahead
522
+ ///
523
+ /// \param[in] fragment_readahead How many fragments to read ahead
524
+ /// \returns an error if this number is less than 0.
525
+ ///
526
+ /// This option provides a control on the RAM vs I/O tradeoff.
527
+ Status FragmentReadahead(int32_t fragment_readahead);
528
+
529
+ /// \brief Set the pool from which materialized and scanned arrays will be allocated.
530
+ Status Pool(MemoryPool* pool);
531
+
532
+ /// \brief Set fragment-specific scan options.
533
+ Status FragmentScanOptions(std::shared_ptr<FragmentScanOptions> fragment_scan_options);
534
+
535
+ /// \brief Override default backpressure configuration
536
+ Status Backpressure(acero::BackpressureOptions backpressure);
537
+
538
+ /// \brief Return the current scan options for the builder.
539
+ Result<std::shared_ptr<ScanOptions>> GetScanOptions();
540
+
541
+ /// \brief Return the constructed now-immutable Scanner object
542
+ Result<std::shared_ptr<Scanner>> Finish();
543
+
544
+ const std::shared_ptr<Schema>& schema() const;
545
+ const std::shared_ptr<Schema>& projected_schema() const;
546
+
547
+ private:
548
+ std::shared_ptr<Dataset> dataset_;
549
+ std::shared_ptr<ScanOptions> scan_options_ = std::make_shared<ScanOptions>();
550
+ };
551
+
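A sketch (editorial) of the typical build sequence; it assumes an existing `dataset`, and the column names are illustrative:

    arrow::Result<std::shared_ptr<arrow::dataset::Scanner>> BuildScannerSketch(
        std::shared_ptr<arrow::dataset::Dataset> dataset) {
      arrow::dataset::ScannerBuilder builder(std::move(dataset));
      ARROW_RETURN_NOT_OK(builder.Project({"a", "b"}));
      ARROW_RETURN_NOT_OK(builder.Filter(arrow::compute::greater(
          arrow::compute::field_ref("a"), arrow::compute::literal(10))));
      ARROW_RETURN_NOT_OK(builder.UseThreads(true));
      // Finish() validates the accumulated options and freezes them.
      return builder.Finish();
    }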
552
+ /// \brief Construct a source ExecNode which yields batches from a dataset scan.
553
+ ///
554
+ /// Does not construct associated filter or project nodes.
555
+ /// Yielded batches will be augmented with fragment/batch indices to enable stable
556
+ /// ordering for simple ExecPlans.
557
+ class ARROW_DS_EXPORT ScanNodeOptions : public acero::ExecNodeOptions {
558
+ public:
559
+ explicit ScanNodeOptions(std::shared_ptr<Dataset> dataset,
560
+ std::shared_ptr<ScanOptions> scan_options,
561
+ bool require_sequenced_output = false)
562
+ : dataset(std::move(dataset)),
563
+ scan_options(std::move(scan_options)),
564
+ require_sequenced_output(require_sequenced_output) {}
565
+
566
+ std::shared_ptr<Dataset> dataset;
567
+ std::shared_ptr<ScanOptions> scan_options;
568
+ bool require_sequenced_output;
569
+ };
570
+
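A sketch (editorial) of using the scan node as an Acero source; it assumes `dataset` and `scan_options` built as in the earlier sketches, and relies on the "scan" factory registered by dataset::internal::Initialize() (declared in plan.h earlier in this commit):

    #include "arrow/acero/exec_plan.h"

    arrow::Result<std::shared_ptr<arrow::Table>> ScanToTableSketch(
        std::shared_ptr<arrow::dataset::Dataset> dataset,
        std::shared_ptr<arrow::dataset::ScanOptions> scan_options) {
      arrow::dataset::internal::Initialize();
      arrow::acero::Declaration scan{
          "scan", arrow::dataset::ScanNodeOptions(std::move(dataset),
                                                  std::move(scan_options))};
      return arrow::acero::DeclarationToTable(std::move(scan));
    }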
571
+ /// @}
572
+
573
+ namespace internal {
574
+ ARROW_DS_EXPORT void InitializeScanner(arrow::acero::ExecFactoryRegistry* registry);
575
+ ARROW_DS_EXPORT void InitializeScannerV2(arrow::acero::ExecFactoryRegistry* registry);
576
+ } // namespace internal
577
+ } // namespace dataset
578
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/type_fwd.h ADDED
@@ -0,0 +1,113 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <vector>
24
+
25
+ #include "arrow/compute/type_fwd.h" // IWYU pragma: export
26
+ #include "arrow/dataset/visibility.h"
27
+ #include "arrow/filesystem/type_fwd.h" // IWYU pragma: export
28
+ #include "arrow/type_fwd.h" // IWYU pragma: export
29
+
30
+ namespace arrow {
31
+ namespace dataset {
32
+
33
+ class Dataset;
34
+ class DatasetFactory;
35
+ using DatasetVector = std::vector<std::shared_ptr<Dataset>>;
36
+
37
+ class UnionDataset;
38
+ class UnionDatasetFactory;
39
+
40
+ class Fragment;
41
+ using FragmentIterator = Iterator<std::shared_ptr<Fragment>>;
42
+ using FragmentVector = std::vector<std::shared_ptr<Fragment>>;
43
+
44
+ class FragmentScanOptions;
45
+
46
+ class FileSource;
47
+ class FileFormat;
48
+ class FileFragment;
49
+ class FileWriter;
50
+ class FileWriteOptions;
51
+ class FileSystemDataset;
52
+ class FileSystemDatasetFactory;
53
+ struct FileSystemDatasetWriteOptions;
54
+ class WriteNodeOptions;
55
+
56
+ /// \brief Controls what happens if files exist in an output directory during a dataset
57
+ /// write
58
+ enum class ExistingDataBehavior : int8_t {
59
+ /// Deletes all files in a directory the first time that directory is encountered
60
+ kDeleteMatchingPartitions,
61
+ /// Ignores existing files, overwriting any that happen to have the same name as an
62
+ /// output file
63
+ kOverwriteOrIgnore,
64
+ /// Returns an error if there are any files or subdirectories in the output directory
65
+ kError,
66
+ };
67
+
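A sketch (editorial) of selecting the behavior on a write; FileSystemDatasetWriteOptions is only forward-declared here, and the existing_data_behavior member is assumed from its full definition in file_base.h:

    arrow::dataset::FileSystemDatasetWriteOptions write_options;
    // Wipe each partition directory the first time the writer touches it.
    write_options.existing_data_behavior =
        arrow::dataset::ExistingDataBehavior::kDeleteMatchingPartitions;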
68
+ class InMemoryDataset;
69
+
70
+ class CsvFileFormat;
71
+ class CsvFileWriter;
72
+ class CsvFileWriteOptions;
73
+ struct CsvFragmentScanOptions;
74
+
75
+ class JsonFileFormat;
76
+ class JsonFileWriter;
77
+ class JsonFileWriteOptions;
78
+ struct JsonFragmentScanOptions;
79
+
80
+ class IpcFileFormat;
81
+ class IpcFileWriter;
82
+ class IpcFileWriteOptions;
83
+ class IpcFragmentScanOptions;
84
+
85
+ class ParquetFileFormat;
86
+ class ParquetFileFragment;
87
+ class ParquetFragmentScanOptions;
88
+ class ParquetFileWriter;
89
+ class ParquetFileWriteOptions;
90
+
91
+ class Partitioning;
92
+ class PartitioningFactory;
93
+ class PartitioningOrFactory;
94
+ struct KeyValuePartitioningOptions;
95
+ class DirectoryPartitioning;
96
+ class HivePartitioning;
97
+ struct HivePartitioningOptions;
98
+ class FilenamePartitioning;
99
+ struct FilenamePartitioningOptions;
100
+
101
+ class ScanNodeOptions;
102
+ struct ScanOptions;
103
+
104
+ class Scanner;
105
+
106
+ class ScannerBuilder;
107
+
108
+ class ScanTask;
109
+ using ScanTaskVector = std::vector<std::shared_ptr<ScanTask>>;
110
+ using ScanTaskIterator = Iterator<std::shared_ptr<ScanTask>>;
111
+
112
+ } // namespace dataset
113
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/visibility.h ADDED
@@ -0,0 +1,50 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #if defined(_WIN32) || defined(__CYGWIN__)
23
+ #if defined(_MSC_VER)
24
+ #pragma warning(push)
25
+ #pragma warning(disable : 4251)
26
+ #else
27
+ #pragma GCC diagnostic ignored "-Wattributes"
28
+ #endif
29
+
30
+ #ifdef ARROW_DS_STATIC
31
+ #define ARROW_DS_EXPORT
32
+ #elif defined(ARROW_DS_EXPORTING)
33
+ #define ARROW_DS_EXPORT __declspec(dllexport)
34
+ #else
35
+ #define ARROW_DS_EXPORT __declspec(dllimport)
36
+ #endif
37
+
38
+ #define ARROW_DS_NO_EXPORT
39
+ #else // Not Windows
40
+ #ifndef ARROW_DS_EXPORT
41
+ #define ARROW_DS_EXPORT __attribute__((visibility("default")))
42
+ #endif
43
+ #ifndef ARROW_DS_NO_EXPORT
44
+ #define ARROW_DS_NO_EXPORT __attribute__((visibility("hidden")))
45
+ #endif
46
+ #endif // Non-Windows
47
+
48
+ #if defined(_MSC_VER)
49
+ #pragma warning(pop)
50
+ #endif
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_parser.h ADDED
@@ -0,0 +1,54 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string_view>
22
+ #include <unordered_map>
23
+
24
+ #include "arrow/result.h"
25
+ #include "arrow/util/visibility.h"
26
+
27
+ namespace arrow {
28
+ namespace json {
29
+ namespace internal {
30
+
31
+ /// This class is a helper to parse a JSON object from a string.
32
+ /// It uses rapidjson::Document in its implementation.
33
+ class ARROW_EXPORT ObjectParser {
34
+ public:
35
+ ObjectParser();
36
+ ~ObjectParser();
37
+
38
+ Status Parse(std::string_view json);
39
+
40
+ Result<std::string> GetString(const char* key) const;
41
+
42
+ Result<bool> GetBool(const char* key) const;
43
+
44
+ // Get all members of the object as a map from string keys to string values
45
+ Result<std::unordered_map<std::string, std::string>> GetStringMap() const;
46
+
47
+ private:
48
+ class Impl;
49
+ std::unique_ptr<Impl> impl_;
50
+ };
51
+
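A sketch (editorial) of the helper in use; it lives in an internal namespace, so this is illustrative rather than public API, and the JSON keys are invented:

    arrow::Status ParseObjectSketch() {
      arrow::json::internal::ObjectParser parser;
      ARROW_RETURN_NOT_OK(parser.Parse(R"({"codec": "lz4", "enabled": true})"));
      ARROW_ASSIGN_OR_RAISE(std::string codec, parser.GetString("codec"));
      ARROW_ASSIGN_OR_RAISE(bool enabled, parser.GetBool("enabled"));
      return arrow::Status::OK();
    }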
52
+ } // namespace internal
53
+ } // namespace json
54
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/reader.h ADDED
@@ -0,0 +1,118 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+
22
+ #include "arrow/io/type_fwd.h"
23
+ #include "arrow/json/options.h"
24
+ #include "arrow/record_batch.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/util/macros.h"
28
+ #include "arrow/util/type_fwd.h"
29
+ #include "arrow/util/visibility.h"
30
+
31
+ namespace arrow {
32
+ namespace json {
33
+
34
+ /// A class that reads an entire JSON file into an Arrow Table
35
+ ///
36
+ /// The file is expected to consist of individual line-separated JSON objects
37
+ class ARROW_EXPORT TableReader {
38
+ public:
39
+ virtual ~TableReader() = default;
40
+
41
+ /// Read the entire JSON file and convert it to an Arrow Table
42
+ virtual Result<std::shared_ptr<Table>> Read() = 0;
43
+
44
+ /// Create a TableReader instance
45
+ static Result<std::shared_ptr<TableReader>> Make(MemoryPool* pool,
46
+ std::shared_ptr<io::InputStream> input,
47
+ const ReadOptions&,
48
+ const ParseOptions&);
49
+ };
50
+
51
+ ARROW_EXPORT Result<std::shared_ptr<RecordBatch>> ParseOne(ParseOptions options,
52
+ std::shared_ptr<Buffer> json);
53
+
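A sketch (editorial) of reading a line-delimited JSON file eagerly with the TableReader above; the file path is illustrative:

    #include "arrow/api.h"
    #include "arrow/io/file.h"
    #include "arrow/json/api.h"

    arrow::Result<std::shared_ptr<arrow::Table>> ReadJsonSketch() {
      ARROW_ASSIGN_OR_RAISE(auto input,
                            arrow::io::ReadableFile::Open("rows.jsonl"));
      ARROW_ASSIGN_OR_RAISE(
          auto reader,
          arrow::json::TableReader::Make(arrow::default_memory_pool(), input,
                                         arrow::json::ReadOptions::Defaults(),
                                         arrow::json::ParseOptions::Defaults()));
      return reader->Read();
    }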
54
+ /// \brief A class that reads a JSON file incrementally
55
+ ///
56
+ /// JSON data is read from a stream in fixed-size blocks (configurable with
57
+ /// `ReadOptions::block_size`). Each block is converted to a `RecordBatch`. Yielded
58
+ /// batches have a consistent schema but may differ in row count.
59
+ ///
60
+ /// The supplied `ParseOptions` are used to determine a schema, based either on a
61
+ /// provided explicit schema or inferred from the first non-empty block.
62
+ /// Afterwards, the target schema is frozen. If `UnexpectedFieldBehavior::InferType` is
63
+ /// specified, unexpected fields will only be inferred for the first block. Afterwards
64
+ /// they'll be treated as errors.
65
+ ///
66
+ /// If `ReadOptions::use_threads` is `true`, each block's parsing/decoding task will be
67
+ /// parallelized on the given `cpu_executor` (with readahead corresponding to the
68
+ /// executor's capacity). If an executor isn't provided, the global thread pool will be
69
+ /// used.
70
+ ///
71
+ /// If `ReadOptions::use_threads` is `false`, computations will be run on the calling
72
+ /// thread and `cpu_executor` will be ignored.
73
+ class ARROW_EXPORT StreamingReader : public RecordBatchReader {
74
+ public:
75
+ virtual ~StreamingReader() = default;
76
+
77
+ /// \brief Read the next `RecordBatch` asynchronously
78
+ /// This function is async-reentrant (but not synchronously reentrant). However, if
79
+ /// threading is disabled, this will block until completion.
80
+ virtual Future<std::shared_ptr<RecordBatch>> ReadNextAsync() = 0;
81
+
82
+ /// Get the number of bytes which have been successfully converted to record batches
83
+ /// and consumed
84
+ [[nodiscard]] virtual int64_t bytes_processed() const = 0;
85
+
86
+ /// \brief Create a `StreamingReader` from an `InputStream`
87
+ /// Blocks until the initial batch is loaded
88
+ ///
89
+ /// \param[in] stream JSON source stream
90
+ /// \param[in] read_options Options for reading
91
+ /// \param[in] parse_options Options for chunking, parsing, and conversion
92
+ /// \param[in] io_context Context for IO operations (optional)
93
+ /// \param[in] cpu_executor Executor for computation tasks (optional)
94
+ /// \return The initialized reader
95
+ static Result<std::shared_ptr<StreamingReader>> Make(
96
+ std::shared_ptr<io::InputStream> stream, const ReadOptions& read_options,
97
+ const ParseOptions& parse_options,
98
+ const io::IOContext& io_context = io::default_io_context(),
99
+ ::arrow::internal::Executor* cpu_executor = NULLPTR);
100
+
101
+ /// \brief Create a `StreamingReader` from an `InputStream` asynchronously
102
+ /// Returned future completes after loading the first batch
103
+ ///
104
+ /// \param[in] stream JSON source stream
105
+ /// \param[in] read_options Options for reading
106
+ /// \param[in] parse_options Options for chunking, parsing, and conversion
107
+ /// \param[in] io_context Context for IO operations (optional)
108
+ /// \param[in] cpu_executor Executor for computation tasks (optional)
109
+ /// \return Future for the initialized reader
110
+ static Future<std::shared_ptr<StreamingReader>> MakeAsync(
111
+ std::shared_ptr<io::InputStream> stream, const ReadOptions& read_options,
112
+ const ParseOptions& parse_options,
113
+ const io::IOContext& io_context = io::default_io_context(),
114
+ ::arrow::internal::Executor* cpu_executor = NULLPTR);
115
+ };
116
+
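Since StreamingReader is also a RecordBatchReader, the simplest consumption loop is synchronous. A sketch (editorial), where `input` is any open InputStream:

    arrow::Status StreamJsonSketch(std::shared_ptr<arrow::io::InputStream> input) {
      ARROW_ASSIGN_OR_RAISE(
          auto reader,
          arrow::json::StreamingReader::Make(std::move(input),
                                             arrow::json::ReadOptions::Defaults(),
                                             arrow::json::ParseOptions::Defaults()));
      std::shared_ptr<arrow::RecordBatch> batch;
      while (true) {
        ARROW_RETURN_NOT_OK(reader->ReadNext(&batch));
        if (!batch) break;  // end of stream
        // ... process batch ...
      }
      return arrow::Status::OK();
    }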
117
+ } // namespace json
118
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/type_fwd.h ADDED
@@ -0,0 +1,26 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ namespace arrow {
19
+ namespace json {
20
+
21
+ class TableReader;
22
+ struct ReadOptions;
23
+ struct ParseOptions;
24
+
25
+ } // namespace json
26
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/executor_util.h ADDED
@@ -0,0 +1,55 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/thread_pool.h"
21
+
22
+ namespace arrow {
23
+
24
+ /// An executor which synchronously runs the task as part of the SpawnReal call.
25
+ class MockExecutor : public internal::Executor {
26
+ public:
27
+ int GetCapacity() override { return 0; }
28
+
29
+ Status SpawnReal(internal::TaskHints hints, internal::FnOnce<void()> task, StopToken,
30
+ StopCallback&&) override {
31
+ spawn_count++;
32
+ std::move(task)();
33
+ return Status::OK();
34
+ }
35
+
36
+ int spawn_count = 0;
37
+ };
38
+
39
+ /// An executor which does not actually run the task. Can be used to simulate situations
40
+ /// where the executor schedules a task in a long queue and doesn't get around to running
41
+ /// it for a while.
42
+ class DelayedExecutor : public internal::Executor {
43
+ public:
44
+ int GetCapacity() override { return 0; }
45
+
46
+ Status SpawnReal(internal::TaskHints hints, internal::FnOnce<void()> task, StopToken,
47
+ StopCallback&&) override {
48
+ captured_tasks.push_back(std::move(task));
49
+ return Status::OK();
50
+ }
51
+
52
+ std::vector<internal::FnOnce<void()>> captured_tasks;
53
+ };
54
+
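A sketch (editorial) of the intended test pattern: spawn work, observe that nothing has run, then drain the captured queue:

    arrow::Status DelayedExecutorSketch() {
      arrow::DelayedExecutor executor;
      bool ran = false;
      ARROW_RETURN_NOT_OK(executor.Spawn([&ran] { ran = true; }));
      // ran is still false: the task was only queued in captured_tasks.
      for (auto& task : executor.captured_tasks) {
        std::move(task)();  // now the work actually executes
      }
      return arrow::Status::OK();
    }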
55
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/future_util.h ADDED
@@ -0,0 +1,142 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/testing/gtest_util.h"
21
+ #include "arrow/util/future.h"
22
+
23
+ // This macro should be used with futures that are expected to
24
+ // complete pretty quickly. arrow::kDefaultAssertFinishesWaitSeconds is the
25
+ // default max wait here. Anything longer than that and it's a questionable unit test
26
+ // anyway.
27
+ #define ASSERT_FINISHES_IMPL(fut) \
28
+ do { \
29
+ ASSERT_TRUE(fut.Wait(::arrow::kDefaultAssertFinishesWaitSeconds)); \
30
+ if (!fut.is_finished()) { \
31
+ FAIL() << "Future did not finish in a timely fashion"; \
32
+ } \
33
+ } while (false)
34
+
35
+ #define ASSERT_FINISHES_OK(expr) \
36
+ do { \
37
+ auto&& _fut = (expr); \
38
+ ASSERT_TRUE(_fut.Wait(::arrow::kDefaultAssertFinishesWaitSeconds)); \
39
+ if (!_fut.is_finished()) { \
40
+ FAIL() << "Future did not finish in a timely fashion"; \
41
+ } \
42
+ auto& _st = _fut.status(); \
43
+ if (!_st.ok()) { \
44
+ FAIL() << "'" ARROW_STRINGIFY(expr) "' failed with " << _st.ToString(); \
45
+ } \
46
+ } while (false)
47
+
48
+ #define ASSERT_FINISHES_AND_RAISES(ENUM, expr) \
49
+ do { \
50
+ auto&& _fut = (expr); \
51
+ ASSERT_FINISHES_IMPL(_fut); \
52
+ ASSERT_RAISES(ENUM, _fut.status()); \
53
+ } while (false)
54
+
55
+ #define EXPECT_FINISHES_AND_RAISES_WITH_MESSAGE_THAT(ENUM, matcher, expr) \
56
+ do { \
57
+ auto&& fut = (expr); \
58
+ ASSERT_FINISHES_IMPL(fut); \
59
+ EXPECT_RAISES_WITH_MESSAGE_THAT(ENUM, matcher, fut.status()); \
60
+ } while (false)
61
+
62
+ #define ASSERT_FINISHES_OK_AND_ASSIGN_IMPL(lhs, rexpr, _future_name) \
63
+ auto _future_name = (rexpr); \
64
+ ASSERT_FINISHES_IMPL(_future_name); \
65
+ ASSERT_OK_AND_ASSIGN(lhs, _future_name.result());
66
+
67
+ #define ASSERT_FINISHES_OK_AND_ASSIGN(lhs, rexpr) \
68
+ ASSERT_FINISHES_OK_AND_ASSIGN_IMPL(lhs, rexpr, \
69
+ ARROW_ASSIGN_OR_RAISE_NAME(_fut, __COUNTER__))
70
+
71
+ #define ASSERT_FINISHES_OK_AND_EQ(expected, expr) \
72
+ do { \
73
+ ASSERT_FINISHES_OK_AND_ASSIGN(auto _actual, (expr)); \
74
+ ASSERT_EQ(expected, _actual); \
75
+ } while (0)
76
+
77
+ #define EXPECT_FINISHES_IMPL(fut) \
78
+ do { \
79
+ EXPECT_TRUE(fut.Wait(::arrow::kDefaultAssertFinishesWaitSeconds)); \
80
+ if (!fut.is_finished()) { \
81
+ ADD_FAILURE() << "Future did not finish in a timely fashion"; \
82
+ } \
83
+ } while (false)
84
+
85
+ #define ON_FINISH_ASSIGN_OR_HANDLE_ERROR_IMPL(handle_error, future_name, lhs, rexpr) \
86
+ auto future_name = (rexpr); \
87
+ EXPECT_FINISHES_IMPL(future_name); \
88
+ handle_error(future_name.status()); \
89
+ EXPECT_OK_AND_ASSIGN(lhs, future_name.result());
90
+
91
+ #define EXPECT_FINISHES(expr) \
92
+ do { \
93
+ EXPECT_FINISHES_IMPL(expr); \
94
+ } while (0)
95
+
96
+ #define EXPECT_FINISHES_OK_AND_ASSIGN(lhs, rexpr) \
97
+ ON_FINISH_ASSIGN_OR_HANDLE_ERROR_IMPL( \
98
+ ARROW_EXPECT_OK, ARROW_ASSIGN_OR_RAISE_NAME(_fut, __COUNTER__), lhs, rexpr);
99
+
100
+ #define EXPECT_FINISHES_OK_AND_EQ(expected, expr) \
101
+ do { \
102
+ EXPECT_FINISHES_OK_AND_ASSIGN(auto _actual, (expr)); \
103
+ EXPECT_EQ(expected, _actual); \
104
+ } while (0)
105
+
106
+ namespace arrow {
107
+
108
+ constexpr double kDefaultAssertFinishesWaitSeconds = 64;
109
+
110
+ template <typename T>
111
+ void AssertNotFinished(const Future<T>& fut) {
112
+ ASSERT_FALSE(IsFutureFinished(fut.state()));
113
+ }
114
+
115
+ template <typename T>
116
+ void AssertFinished(const Future<T>& fut) {
117
+ ASSERT_TRUE(IsFutureFinished(fut.state()));
118
+ }
119
+
120
+ // Assert the future is successful *now*
121
+ template <typename T>
122
+ void AssertSuccessful(const Future<T>& fut) {
123
+ if (IsFutureFinished(fut.state())) {
124
+ ASSERT_EQ(fut.state(), FutureState::SUCCESS);
125
+ ASSERT_OK(fut.status());
126
+ } else {
127
+ FAIL() << "Expected future to be completed successfully but it was still pending";
128
+ }
129
+ }
130
+
131
+ // Assert the future is failed *now*
132
+ template <typename T>
133
+ void AssertFailed(const Future<T>& fut) {
134
+ if (IsFutureFinished(fut.state())) {
135
+ ASSERT_EQ(fut.state(), FutureState::FAILURE);
136
+ ASSERT_FALSE(fut.status().ok());
137
+ } else {
138
+ FAIL() << "Expected future to have failed but it was still pending";
139
+ }
140
+ }
141
+
142
+ } // namespace arrow
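
For orientation, a sketch of how these assertion macros read in a test body; MakeDeferredResult and MakeFailingResult are hypothetical async functions standing in for code under test:

    Future<int> fut = MakeDeferredResult();         // hypothetical async call
    ASSERT_FINISHES_OK_AND_ASSIGN(int value, fut);  // waits up to 64 seconds, then unwraps
    ASSERT_EQ(42, value);

    Future<int> failing = MakeFailingResult();      // hypothetical failing call
    ASSERT_FINISHES_AND_RAISES(Invalid, failing);   // must finish with Status::Invalid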
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/generator.h ADDED
@@ -0,0 +1,321 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ #include "arrow/array/array_base.h"
+ #include "arrow/compute/type_fwd.h"
+ #include "arrow/testing/gtest_util.h"
+ #include "arrow/testing/visibility.h"
+ #include "arrow/type_fwd.h"
+
+ namespace arrow {
+
+ class ARROW_TESTING_EXPORT ConstantArrayGenerator {
+ public:
+ /// \brief Generates a constant BooleanArray
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> Boolean(int64_t size, bool value = false);
+
+ /// \brief Generates a constant UInt8Array
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> UInt8(int64_t size, uint8_t value = 0);
+
+ /// \brief Generates a constant Int8Array
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> Int8(int64_t size, int8_t value = 0);
+
+ /// \brief Generates a constant UInt16Array
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> UInt16(int64_t size, uint16_t value = 0);
+
+ /// \brief Generates a constant Int16Array
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> Int16(int64_t size, int16_t value = 0);
+
+ /// \brief Generates a constant UInt32Array
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> UInt32(int64_t size, uint32_t value = 0);
+
+ /// \brief Generates a constant Int32Array
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> Int32(int64_t size, int32_t value = 0);
+
+ /// \brief Generates a constant UInt64Array
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> UInt64(int64_t size, uint64_t value = 0);
+
+ /// \brief Generates a constant Int64Array
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> Int64(int64_t size, int64_t value = 0);
+
+ /// \brief Generates a constant Float32Array
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> Float32(int64_t size, float value = 0);
+
+ /// \brief Generates a constant Float64Array
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> Float64(int64_t size, double value = 0);
+
+ /// \brief Generates a constant StringArray
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] value to repeat
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> String(int64_t size, std::string value = "");
+
+ template <typename ArrowType, typename CType = typename ArrowType::c_type>
+ static std::shared_ptr<Array> Numeric(int64_t size, CType value = 0) {
+ switch (ArrowType::type_id) {
+ case Type::BOOL:
+ return Boolean(size, static_cast<bool>(value));
+ case Type::UINT8:
+ return UInt8(size, static_cast<uint8_t>(value));
+ case Type::INT8:
+ return Int8(size, static_cast<int8_t>(value));
+ case Type::UINT16:
+ return UInt16(size, static_cast<uint16_t>(value));
+ case Type::INT16:
+ return Int16(size, static_cast<int16_t>(value));
+ case Type::UINT32:
+ return UInt32(size, static_cast<uint32_t>(value));
+ case Type::INT32:
+ return Int32(size, static_cast<int32_t>(value));
+ case Type::UINT64:
+ return UInt64(size, static_cast<uint64_t>(value));
+ case Type::INT64:
+ return Int64(size, static_cast<int64_t>(value));
+ case Type::FLOAT:
+ return Float32(size, static_cast<float>(value));
+ case Type::DOUBLE:
+ return Float64(size, static_cast<double>(value));
+ case Type::INTERVAL_DAY_TIME:
+ case Type::DATE32: {
+ EXPECT_OK_AND_ASSIGN(auto viewed,
+ Int32(size, static_cast<uint32_t>(value))->View(date32()));
+ return viewed;
+ }
+ case Type::INTERVAL_MONTHS: {
+ EXPECT_OK_AND_ASSIGN(auto viewed,
+ Int32(size, static_cast<uint32_t>(value))
+ ->View(std::make_shared<MonthIntervalType>()));
+ return viewed;
+ }
+ case Type::TIME32: {
+ EXPECT_OK_AND_ASSIGN(auto viewed,
+ Int32(size, static_cast<uint32_t>(value))
+ ->View(std::make_shared<Time32Type>(TimeUnit::SECOND)));
+ return viewed;
+ }
+ case Type::TIME64: {
+ EXPECT_OK_AND_ASSIGN(auto viewed, Int64(size, static_cast<uint64_t>(value))
+ ->View(std::make_shared<Time64Type>()));
+ return viewed;
+ }
+ case Type::DATE64: {
+ EXPECT_OK_AND_ASSIGN(auto viewed,
+ Int64(size, static_cast<uint64_t>(value))->View(date64()));
+ return viewed;
+ }
+ case Type::TIMESTAMP: {
+ EXPECT_OK_AND_ASSIGN(
+ auto viewed, Int64(size, static_cast<int64_t>(value))
+ ->View(std::make_shared<TimestampType>(TimeUnit::SECOND)));
+ return viewed;
+ }
+ default:
+ return nullptr;
+ }
+ }
+
+ /// \brief Generates a constant Array of zeroes
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] type the type of the Array
+ ///
+ /// \return a generated Array
+ static std::shared_ptr<Array> Zeroes(int64_t size,
+ const std::shared_ptr<DataType>& type);
+
+ /// \brief Generates a RecordBatch of zeroes
+ ///
+ /// \param[in] size the size of the array to generate
+ /// \param[in] schema to conform to
+ ///
+ /// This function is handy for returning a RecordBatch of a desired shape.
+ ///
+ /// \return a generated RecordBatch
+ static std::shared_ptr<RecordBatch> Zeroes(int64_t size,
+ const std::shared_ptr<Schema>& schema);
+
+ /// \brief Generates a RecordBatchReader by repeating a RecordBatch
+ ///
+ /// \param[in] n_batch the number of times it repeats batch
+ /// \param[in] batch the RecordBatch to repeat
+ ///
+ /// \return a generated RecordBatchReader
+ static std::shared_ptr<RecordBatchReader> Repeat(
+ int64_t n_batch, const std::shared_ptr<RecordBatch> batch);
+
+ /// \brief Generates a RecordBatchReader of zeroes batches
+ ///
+ /// \param[in] n_batch the number of RecordBatch
+ /// \param[in] batch_size the size of each RecordBatch
+ /// \param[in] schema to conform to
+ ///
+ /// \return a generated RecordBatchReader
+ static std::shared_ptr<RecordBatchReader> Zeroes(int64_t n_batch, int64_t batch_size,
+ const std::shared_ptr<Schema>& schema);
+ };
+
+ ARROW_TESTING_EXPORT
+ Result<std::shared_ptr<Array>> ScalarVectorToArray(const ScalarVector& scalars);
+
+ namespace gen {
+
+ class ARROW_TESTING_EXPORT ArrayGenerator {
+ public:
+ virtual ~ArrayGenerator() = default;
+ virtual Result<std::shared_ptr<Array>> Generate(int64_t num_rows) = 0;
+ virtual std::shared_ptr<DataType> type() const = 0;
+ };
+
+ // Same as DataGenerator below but instead of returning Result an ok status is EXPECT'd
+ class ARROW_TESTING_EXPORT GTestDataGenerator {
+ public:
+ virtual ~GTestDataGenerator() = default;
+ virtual std::shared_ptr<::arrow::RecordBatch> RecordBatch(int64_t num_rows) = 0;
+ virtual std::vector<std::shared_ptr<::arrow::RecordBatch>> RecordBatches(
+ int64_t rows_per_batch, int num_batches) = 0;
+
+ virtual ::arrow::compute::ExecBatch ExecBatch(int64_t num_rows) = 0;
+ virtual std::vector<::arrow::compute::ExecBatch> ExecBatches(int64_t rows_per_batch,
+ int num_batches) = 0;
+
+ virtual std::shared_ptr<::arrow::Table> Table(int64_t rows_per_chunk,
+ int num_chunks = 1) = 0;
+ virtual std::shared_ptr<::arrow::Schema> Schema() = 0;
+ };
+
+ class ARROW_TESTING_EXPORT DataGenerator {
+ public:
+ virtual ~DataGenerator() = default;
+ virtual Result<std::shared_ptr<::arrow::RecordBatch>> RecordBatch(int64_t num_rows) = 0;
+ virtual Result<std::vector<std::shared_ptr<::arrow::RecordBatch>>> RecordBatches(
+ int64_t rows_per_batch, int num_batches) = 0;
+
+ virtual Result<::arrow::compute::ExecBatch> ExecBatch(int64_t num_rows) = 0;
+ virtual Result<std::vector<::arrow::compute::ExecBatch>> ExecBatches(
+ int64_t rows_per_batch, int num_batches) = 0;
+
+ virtual Result<std::shared_ptr<::arrow::Table>> Table(int64_t rows_per_chunk,
+ int num_chunks = 1) = 0;
+ virtual std::shared_ptr<::arrow::Schema> Schema() = 0;
+ /// @brief Converts this generator to a variant that fails (in a googletest sense)
+ /// if any error is encountered.
+ virtual std::unique_ptr<GTestDataGenerator> FailOnError() = 0;
+ };
+
+ /// @brief A potentially named field
+ ///
+ /// If name is not specified then a name will be generated automatically (e.g. f0, f1)
+ struct ARROW_TESTING_EXPORT GeneratorField {
+ public:
+ GeneratorField(std::shared_ptr<ArrayGenerator> gen) // NOLINT implicit conversion
+ : name(), gen(std::move(gen)) {}
+ GeneratorField(std::string name, std::shared_ptr<ArrayGenerator> gen)
+ : name(std::move(name)), gen(std::move(gen)) {}
+
+ std::optional<std::string> name;
+ std::shared_ptr<ArrayGenerator> gen;
+ };
+
+ /// Create a table generator with the given fields
+ ARROW_TESTING_EXPORT std::shared_ptr<DataGenerator> Gen(
+ std::vector<GeneratorField> column_gens);
+
+ /// make a generator that returns a constant value
+ ARROW_TESTING_EXPORT std::shared_ptr<ArrayGenerator> Constant(
+ std::shared_ptr<Scalar> value);
+ /// make a generator that returns an incrementing value
+ ///
+ /// Note: overflow is not prevented; standard unsigned integer overflow applies
+ ARROW_TESTING_EXPORT std::shared_ptr<ArrayGenerator> Step(uint32_t start = 0,
+ uint32_t step = 1,
+ bool signed_int = false);
+ /// make a generator that returns a random value
+ ARROW_TESTING_EXPORT std::shared_ptr<ArrayGenerator> Random(
+ std::shared_ptr<DataType> type);
+ /// TODO(if-needed) could add a repeat-scalars generator, e.g. Repeat({1, 2, 3}) for
+ /// 1,2,3,1,2,3,1
+ ///
+ /// TODO(if-needed) could add a repeat-from-json generator e.g. Repeat(int32(), "[1, 2,
+ /// 3]")), same behavior as repeat-scalars
+
+ } // namespace gen
+
+ } // namespace arrow
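
A sketch of how these generators compose in a test fixture (the schema, row counts, and values below are arbitrary example choices, not part of the diff):

    // Constant data of a given shape:
    auto test_schema = schema({field("a", int32()), field("b", float64())});
    std::shared_ptr<RecordBatch> batch = ConstantArrayGenerator::Zeroes(100, test_schema);
    std::shared_ptr<RecordBatchReader> reader = ConstantArrayGenerator::Repeat(10, batch);

    // Column-wise generators combined into a table generator:
    auto data_gen = gen::Gen({{"id", gen::Step()},                         // 0, 1, 2, ...
                              {"x", gen::Random(float64())},               // random doubles
                              {"tag", gen::Constant(MakeScalar(int32_t{7}))}});
    ASSERT_OK_AND_ASSIGN(auto table, data_gen->Table(/*rows_per_chunk=*/64,
                                                     /*num_chunks=*/4));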
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_util.h ADDED
@@ -0,0 +1,557 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <algorithm>
+ #include <cstdint>
+ #include <cstdlib>
+ #include <cstring>
+ #include <functional>
+ #include <memory>
+ #include <optional>
+ #include <string>
+ #include <string_view>
+ #include <type_traits>
+ #include <utility>
+ #include <vector>
+
+ #include <gtest/gtest.h>
+
+ #include "arrow/compare.h"
+ #include "arrow/result.h"
+ #include "arrow/status.h"
+ #include "arrow/testing/gtest_compat.h"
+ #include "arrow/testing/visibility.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/type_traits.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/string_builder.h"
+ #include "arrow/util/type_fwd.h"
+
+ // NOTE: failing must be inline in the macros below, to get correct file / line number
+ // reporting on test failures.
+
+ // NOTE: using a for loop for this macro allows extra failure messages to be
+ // appended with operator<<
+ #define ASSERT_RAISES(ENUM, expr) \
+ for (::arrow::Status _st = ::arrow::internal::GenericToStatus((expr)); \
+ !_st.Is##ENUM();) \
+ FAIL() << "Expected '" ARROW_STRINGIFY(expr) "' to fail with " ARROW_STRINGIFY( \
+ ENUM) ", but got " \
+ << _st.ToString()
+
+ #define ASSERT_RAISES_WITH_MESSAGE(ENUM, message, expr) \
+ do { \
+ auto _res = (expr); \
+ ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \
+ if (!_st.Is##ENUM()) { \
+ FAIL() << "Expected '" ARROW_STRINGIFY(expr) "' to fail with " ARROW_STRINGIFY( \
+ ENUM) ", but got " \
+ << _st.ToString(); \
+ } \
+ ASSERT_EQ((message), _st.ToStringWithoutContextLines()); \
+ } while (false)
+
+ #define EXPECT_RAISES_WITH_MESSAGE_THAT(ENUM, matcher, expr) \
+ do { \
+ auto _res = (expr); \
+ ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \
+ EXPECT_TRUE(_st.Is##ENUM()) << "Expected '" ARROW_STRINGIFY(expr) "' to fail with " \
+ << ARROW_STRINGIFY(ENUM) ", but got " << _st.ToString(); \
+ EXPECT_THAT(_st.ToStringWithoutContextLines(), (matcher)); \
+ } while (false)
+
+ #define EXPECT_RAISES_WITH_CODE_AND_MESSAGE_THAT(code, matcher, expr) \
+ do { \
+ auto _res = (expr); \
+ ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \
+ EXPECT_EQ(_st.CodeAsString(), Status::CodeAsString(code)); \
+ EXPECT_THAT(_st.ToStringWithoutContextLines(), (matcher)); \
+ } while (false)
+
+ #define ASSERT_OK(expr) \
+ for (::arrow::Status _st = ::arrow::internal::GenericToStatus((expr)); !_st.ok();) \
+ FAIL() << "'" ARROW_STRINGIFY(expr) "' failed with " << _st.ToString()
+
+ #define ASSERT_OK_NO_THROW(expr) ASSERT_NO_THROW(ASSERT_OK(expr))
+
+ #define ARROW_EXPECT_OK(expr) \
+ do { \
+ auto _res = (expr); \
+ ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \
+ EXPECT_TRUE(_st.ok()) << "'" ARROW_STRINGIFY(expr) "' failed with " \
+ << _st.ToString(); \
+ } while (false)
+
+ #define ASSERT_NOT_OK(expr) \
+ for (::arrow::Status _st = ::arrow::internal::GenericToStatus((expr)); _st.ok();) \
+ FAIL() << "'" ARROW_STRINGIFY(expr) "' did not fail: " << _st.ToString()
+
+ #define ABORT_NOT_OK(expr) \
+ do { \
+ auto _res = (expr); \
+ ::arrow::Status _st = ::arrow::internal::GenericToStatus(_res); \
+ if (ARROW_PREDICT_FALSE(!_st.ok())) { \
+ _st.Abort(); \
+ } \
+ } while (false);
+
+ #define ASSIGN_OR_HANDLE_ERROR_IMPL(handle_error, status_name, lhs, rexpr) \
+ auto&& status_name = (rexpr); \
+ handle_error(status_name.status()); \
+ lhs = std::move(status_name).ValueOrDie();
+
+ #define ASSERT_OK_AND_ASSIGN(lhs, rexpr) \
+ ASSIGN_OR_HANDLE_ERROR_IMPL( \
+ ASSERT_OK, ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), lhs, rexpr);
+
+ #define ASSIGN_OR_ABORT(lhs, rexpr) \
+ ASSIGN_OR_HANDLE_ERROR_IMPL(ABORT_NOT_OK, \
+ ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), \
+ lhs, rexpr);
+
+ #define EXPECT_OK_AND_ASSIGN(lhs, rexpr) \
+ ASSIGN_OR_HANDLE_ERROR_IMPL(ARROW_EXPECT_OK, \
+ ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), \
+ lhs, rexpr);
+
+ #define ASSERT_OK_AND_EQ(expected, expr) \
+ do { \
+ ASSERT_OK_AND_ASSIGN(auto _actual, (expr)); \
+ ASSERT_EQ(expected, _actual); \
+ } while (0)
+
+ // A generalized version of GTest's SCOPED_TRACE that takes arbitrary arguments.
+ // ARROW_SCOPED_TRACE("some variable = ", some_variable, ...)
+
+ #define ARROW_SCOPED_TRACE(...) SCOPED_TRACE(::arrow::util::StringBuilder(__VA_ARGS__))
+
+ namespace arrow {
+
+ // ----------------------------------------------------------------------
+ // Useful testing::Types declarations
+
+ inline void PrintTo(StatusCode code, std::ostream* os) {
+ *os << Status::CodeAsString(code);
+ }
+
+ using NumericArrowTypes =
+ ::testing::Types<UInt8Type, UInt16Type, UInt32Type, UInt64Type, Int8Type, Int16Type,
+ Int32Type, Int64Type, FloatType, DoubleType>;
+
+ using RealArrowTypes = ::testing::Types<FloatType, DoubleType>;
+
+ using IntegralArrowTypes = ::testing::Types<UInt8Type, UInt16Type, UInt32Type, UInt64Type,
+ Int8Type, Int16Type, Int32Type, Int64Type>;
+
+ using PhysicalIntegralArrowTypes =
+ ::testing::Types<UInt8Type, UInt16Type, UInt32Type, UInt64Type, Int8Type, Int16Type,
+ Int32Type, Int64Type, Date32Type, Date64Type, Time32Type, Time64Type,
+ TimestampType, MonthIntervalType>;
+
+ using PrimitiveArrowTypes =
+ ::testing::Types<BooleanType, Int8Type, UInt8Type, Int16Type, UInt16Type, Int32Type,
+ UInt32Type, Int64Type, UInt64Type, FloatType, DoubleType>;
+
+ using TemporalArrowTypes =
+ ::testing::Types<Date32Type, Date64Type, TimestampType, Time32Type, Time64Type>;
+
+ using DecimalArrowTypes = ::testing::Types<Decimal128Type, Decimal256Type>;
+
+ using BaseBinaryArrowTypes =
+ ::testing::Types<BinaryType, LargeBinaryType, StringType, LargeStringType>;
+
+ using BaseBinaryOrBinaryViewLikeArrowTypes =
+ ::testing::Types<BinaryType, LargeBinaryType, BinaryViewType, StringType,
+ LargeStringType, StringViewType>;
+
+ using BinaryArrowTypes = ::testing::Types<BinaryType, LargeBinaryType>;
+
+ using StringArrowTypes = ::testing::Types<StringType, LargeStringType>;
+
+ using StringOrStringViewArrowTypes =
+ ::testing::Types<StringType, LargeStringType, StringViewType>;
+
+ using ListArrowTypes = ::testing::Types<ListType, LargeListType>;
+
+ using UnionArrowTypes = ::testing::Types<SparseUnionType, DenseUnionType>;
+
+ class Array;
+ class ChunkedArray;
+ class RecordBatch;
+ class Table;
+ struct Datum;
+
+ #define ASSERT_ARRAYS_EQUAL(lhs, rhs) AssertArraysEqual((lhs), (rhs))
+ #define ASSERT_BATCHES_EQUAL(lhs, rhs) AssertBatchesEqual((lhs), (rhs))
+ #define ASSERT_BATCHES_APPROX_EQUAL(lhs, rhs) AssertBatchesApproxEqual((lhs), (rhs))
+ #define ASSERT_TABLES_EQUAL(lhs, rhs) AssertTablesEqual((lhs), (rhs))
+
+ // Default EqualOptions for testing
+ static inline EqualOptions TestingEqualOptions() {
+ return EqualOptions{}.nans_equal(true).signed_zeros_equal(false);
+ }
+
+ // If verbose is true, then the arrays will be pretty printed
+ ARROW_TESTING_EXPORT void AssertArraysEqual(
+ const Array& expected, const Array& actual, bool verbose = false,
+ const EqualOptions& options = TestingEqualOptions());
+ ARROW_TESTING_EXPORT void AssertArraysApproxEqual(
+ const Array& expected, const Array& actual, bool verbose = false,
+ const EqualOptions& options = TestingEqualOptions());
+ // Returns true when values are both null
+ ARROW_TESTING_EXPORT void AssertScalarsEqual(
+ const Scalar& expected, const Scalar& actual, bool verbose = false,
+ const EqualOptions& options = TestingEqualOptions());
+ ARROW_TESTING_EXPORT void AssertScalarsApproxEqual(
+ const Scalar& expected, const Scalar& actual, bool verbose = false,
+ const EqualOptions& options = TestingEqualOptions());
+ ARROW_TESTING_EXPORT void AssertBatchesEqual(
+ const RecordBatch& expected, const RecordBatch& actual, bool check_metadata = false,
+ const EqualOptions& options = TestingEqualOptions());
+ ARROW_TESTING_EXPORT void AssertBatchesApproxEqual(
+ const RecordBatch& expected, const RecordBatch& actual,
+ const EqualOptions& options = TestingEqualOptions());
+ ARROW_TESTING_EXPORT void AssertChunkedEqual(
+ const ChunkedArray& expected, const ChunkedArray& actual,
+ const EqualOptions& options = TestingEqualOptions());
+ ARROW_TESTING_EXPORT void AssertChunkedEqual(
+ const ChunkedArray& actual, const ArrayVector& expected,
+ const EqualOptions& options = TestingEqualOptions());
+ // Like ChunkedEqual, but permits different chunk layout
+ ARROW_TESTING_EXPORT void AssertChunkedEquivalent(
+ const ChunkedArray& expected, const ChunkedArray& actual,
+ const EqualOptions& options = TestingEqualOptions());
+ ARROW_TESTING_EXPORT void AssertChunkedApproxEquivalent(
+ const ChunkedArray& expected, const ChunkedArray& actual,
+ const EqualOptions& options = TestingEqualOptions());
+ ARROW_TESTING_EXPORT void AssertBufferEqual(const Buffer& buffer,
+ const std::vector<uint8_t>& expected);
+ ARROW_TESTING_EXPORT void AssertBufferEqual(const Buffer& buffer,
+ std::string_view expected);
+ ARROW_TESTING_EXPORT void AssertBufferEqual(const Buffer& buffer, const Buffer& expected);
+
+ ARROW_TESTING_EXPORT void AssertTypeEqual(const DataType& lhs, const DataType& rhs,
+ bool check_metadata = false);
+ ARROW_TESTING_EXPORT void AssertTypeEqual(const std::shared_ptr<DataType>& lhs,
+ const std::shared_ptr<DataType>& rhs,
+ bool check_metadata = false);
+ ARROW_TESTING_EXPORT void AssertFieldEqual(const Field& lhs, const Field& rhs,
+ bool check_metadata = false);
+ ARROW_TESTING_EXPORT void AssertFieldEqual(const std::shared_ptr<Field>& lhs,
+ const std::shared_ptr<Field>& rhs,
+ bool check_metadata = false);
+ ARROW_TESTING_EXPORT void AssertSchemaEqual(const Schema& lhs, const Schema& rhs,
+ bool check_metadata = false);
+ ARROW_TESTING_EXPORT void AssertSchemaEqual(const std::shared_ptr<Schema>& lhs,
+ const std::shared_ptr<Schema>& rhs,
+ bool check_metadata = false);
+
+ ARROW_TESTING_EXPORT void AssertTypeNotEqual(const DataType& lhs, const DataType& rhs,
+ bool check_metadata = false);
+ ARROW_TESTING_EXPORT void AssertTypeNotEqual(const std::shared_ptr<DataType>& lhs,
+ const std::shared_ptr<DataType>& rhs,
+ bool check_metadata = false);
+ ARROW_TESTING_EXPORT void AssertFieldNotEqual(const Field& lhs, const Field& rhs,
+ bool check_metadata = false);
+ ARROW_TESTING_EXPORT void AssertFieldNotEqual(const std::shared_ptr<Field>& lhs,
+ const std::shared_ptr<Field>& rhs,
+ bool check_metadata = false);
+ ARROW_TESTING_EXPORT void AssertSchemaNotEqual(const Schema& lhs, const Schema& rhs,
+ bool check_metadata = false);
+ ARROW_TESTING_EXPORT void AssertSchemaNotEqual(const std::shared_ptr<Schema>& lhs,
+ const std::shared_ptr<Schema>& rhs,
+ bool check_metadata = false);
+
+ ARROW_TESTING_EXPORT Result<std::optional<std::string>> PrintArrayDiff(
+ const ChunkedArray& expected, const ChunkedArray& actual);
+
+ ARROW_TESTING_EXPORT void AssertTablesEqual(
+ const Table& expected, const Table& actual, bool same_chunk_layout = true,
+ bool flatten = false, const EqualOptions& options = TestingEqualOptions());
+
+ ARROW_TESTING_EXPORT void AssertDatumsEqual(
+ const Datum& expected, const Datum& actual, bool verbose = false,
+ const EqualOptions& options = TestingEqualOptions());
+ ARROW_TESTING_EXPORT void AssertDatumsApproxEqual(
+ const Datum& expected, const Datum& actual, bool verbose = false,
+ const EqualOptions& options = TestingEqualOptions());
+
+ template <typename C_TYPE>
+ void AssertNumericDataEqual(const C_TYPE* raw_data,
+ const std::vector<C_TYPE>& expected_values) {
+ for (auto expected : expected_values) {
+ ASSERT_EQ(expected, *raw_data);
+ ++raw_data;
+ }
+ }
+
+ ARROW_TESTING_EXPORT void CompareBatch(
+ const RecordBatch& left, const RecordBatch& right, bool compare_metadata = true,
+ const EqualOptions& options = TestingEqualOptions());
+
+ ARROW_TESTING_EXPORT void ApproxCompareBatch(
+ const RecordBatch& left, const RecordBatch& right, bool compare_metadata = true,
+ const EqualOptions& options = TestingEqualOptions());
+
+ // Check if the padding of the buffers of the array is zero.
+ // Also cause valgrind warnings if the padding bytes are uninitialized.
+ ARROW_TESTING_EXPORT void AssertZeroPadded(const Array& array);
+
+ // Check if the valid buffer bytes are initialized
+ // and cause valgrind warnings otherwise.
+ ARROW_TESTING_EXPORT void TestInitialized(const ArrayData& array);
+ ARROW_TESTING_EXPORT void TestInitialized(const Array& array);
+
+ #define DECL_T() typedef typename TestFixture::T T;
+
+ #define DECL_TYPE() typedef typename TestFixture::Type Type;
+
+ // ArrayFromJSON: construct an Array from a simple JSON representation
+
+ ARROW_TESTING_EXPORT
+ std::shared_ptr<Array> ArrayFromJSON(const std::shared_ptr<DataType>&,
+ std::string_view json);
+
+ ARROW_TESTING_EXPORT
+ std::shared_ptr<Array> DictArrayFromJSON(const std::shared_ptr<DataType>& type,
+ std::string_view indices_json,
+ std::string_view dictionary_json);
+
+ ARROW_TESTING_EXPORT
+ std::shared_ptr<RecordBatch> RecordBatchFromJSON(const std::shared_ptr<Schema>&,
+ std::string_view);
+
+ ARROW_TESTING_EXPORT
+ std::shared_ptr<ChunkedArray> ChunkedArrayFromJSON(const std::shared_ptr<DataType>&,
+ const std::vector<std::string>& json);
+
+ ARROW_TESTING_EXPORT
+ std::shared_ptr<Scalar> ScalarFromJSON(const std::shared_ptr<DataType>&,
+ std::string_view json);
+
+ ARROW_TESTING_EXPORT
+ std::shared_ptr<Scalar> DictScalarFromJSON(const std::shared_ptr<DataType>&,
+ std::string_view index_json,
+ std::string_view dictionary_json);
+
+ ARROW_TESTING_EXPORT
+ std::shared_ptr<Table> TableFromJSON(const std::shared_ptr<Schema>&,
+ const std::vector<std::string>& json);
+
+ ARROW_TESTING_EXPORT
+ Result<std::shared_ptr<Table>> RunEndEncodeTableColumns(
+ const Table& table, const std::vector<int>& column_indices);
+
+ // Given an array, return a new identical array except for one validity bit
+ // set to a new value.
+ // This is useful to force the underlying "value" of null entries to otherwise
+ // invalid data and check that errors don't get reported.
+ ARROW_TESTING_EXPORT
+ std::shared_ptr<Array> TweakValidityBit(const std::shared_ptr<Array>& array,
+ int64_t index, bool validity);
+
+ ARROW_TESTING_EXPORT
+ void SleepFor(double seconds);
+
+ // Sleeps for a very small amount of time. The thread will be yielded
+ // at least once ensuring that context switches could happen. It is intended
+ // to be used for stress testing parallel code and shouldn't be assumed to do any
+ // reliable timing.
+ ARROW_TESTING_EXPORT
+ void SleepABit();
+
+ // Wait until predicate is true or timeout in seconds expires.
+ ARROW_TESTING_EXPORT
+ void BusyWait(double seconds, std::function<bool()> predicate);
+
+ // \see SleepABit
+ ARROW_TESTING_EXPORT
+ Future<> SleepABitAsync();
+
+ ARROW_TESTING_EXPORT bool FileIsClosed(int fd);
+
+ template <typename T>
+ std::vector<T> IteratorToVector(Iterator<T> iterator) {
+ EXPECT_OK_AND_ASSIGN(auto out, iterator.ToVector());
+ return out;
+ }
+
+ ARROW_TESTING_EXPORT
+ bool LocaleExists(const char* locale);
+
+ #ifndef _WIN32
+ ARROW_TESTING_EXPORT
+ void AssertChildExit(int child_pid, int expected_exit_status = 0);
+ #endif
+
+ // A RAII-style object that switches to a new locale, and switches back
+ // to the old locale when going out of scope. Doesn't do anything if the
+ // new locale doesn't exist on the local machine.
+ // ATTENTION: may crash with an assertion failure on Windows debug builds.
+ // See ARROW-6108, also https://gerrit.libreoffice.org/#/c/54110/
+ class ARROW_TESTING_EXPORT LocaleGuard {
+ public:
+ explicit LocaleGuard(const char* new_locale);
+ ~LocaleGuard();
+
+ protected:
+ class Impl;
+ std::unique_ptr<Impl> impl_;
+ };
+
+ class ARROW_TESTING_EXPORT EnvVarGuard {
+ public:
+ EnvVarGuard(const std::string& name, const std::string& value);
+ ~EnvVarGuard();
+
+ protected:
+ const std::string name_;
+ std::string old_value_;
+ bool was_set_;
+ };
+
+ namespace internal {
+ class SignalHandler;
+ }
+
+ class ARROW_TESTING_EXPORT SignalHandlerGuard {
+ public:
+ typedef void (*Callback)(int);
+
+ SignalHandlerGuard(int signum, Callback cb);
+ SignalHandlerGuard(int signum, const internal::SignalHandler& handler);
+ ~SignalHandlerGuard();
+
+ protected:
+ struct Impl;
+ std::unique_ptr<Impl> impl_;
+ };
+
+ #ifndef ARROW_LARGE_MEMORY_TESTS
+ #define LARGE_MEMORY_TEST(name) DISABLED_##name
+ #else
+ #define LARGE_MEMORY_TEST(name) name
+ #endif
+
+ inline void PrintTo(const Status& st, std::ostream* os) { *os << st.ToString(); }
+
+ template <typename T>
+ void PrintTo(const Result<T>& result, std::ostream* os) {
+ if (result.ok()) {
+ ::testing::internal::UniversalPrint(result.ValueOrDie(), os);
+ } else {
+ *os << result.status();
+ }
+ }
+
+ // A data type with only move constructors (no copy, no default).
+ struct MoveOnlyDataType {
+ explicit MoveOnlyDataType(int x) : data(new int(x)) {}
+
+ MoveOnlyDataType(const MoveOnlyDataType& other) = delete;
+ MoveOnlyDataType& operator=(const MoveOnlyDataType& other) = delete;
+
+ MoveOnlyDataType(MoveOnlyDataType&& other) { MoveFrom(&other); }
+ MoveOnlyDataType& operator=(MoveOnlyDataType&& other) {
+ MoveFrom(&other);
+ return *this;
+ }
+
+ MoveOnlyDataType& operator=(int x) {
+ if (data != nullptr) {
+ delete data;
+ }
+ data = new int(x);
+ return *this;
+ }
+
+ ~MoveOnlyDataType() { Destroy(); }
+
+ void Destroy() {
+ if (data != nullptr) {
+ delete data;
+ data = nullptr;
+ moves = -1;
+ }
+ }
+
+ void MoveFrom(MoveOnlyDataType* other) {
+ Destroy();
+ data = other->data;
+ other->data = nullptr;
+ moves = other->moves + 1;
+ }
+
+ int ToInt() const { return data == nullptr ? -42 : *data; }
+
+ bool operator==(const MoveOnlyDataType& other) const {
+ return data != nullptr && other.data != nullptr && *data == *other.data;
+ }
+ bool operator<(const MoveOnlyDataType& other) const {
+ return data == nullptr || (other.data != nullptr && *data < *other.data);
+ }
+
+ bool operator==(int other) const { return data != nullptr && *data == other; }
+ friend bool operator==(int left, const MoveOnlyDataType& right) {
+ return right == left;
+ }
+
+ int* data = nullptr;
+ int moves = 0;
+ };
+
+ // A task that blocks until unlocked. Useful for timing tests.
+ class ARROW_TESTING_EXPORT GatingTask {
+ public:
+ explicit GatingTask(double timeout_seconds = 10);
+ /// \brief During destruction we wait for all pending tasks to finish
+ ~GatingTask();
+
+ /// \brief Creates a new waiting task (presumably to spawn on a thread). It will return
+ /// invalid if the timeout arrived before the unlock. The task will not complete until
+ /// unlocked or timed out
+ ///
+ /// Note: The GatingTask must outlive any Task instances
+ std::function<void()> Task();
+ /// \brief Creates a new waiting task as a future. The future will not complete
+ /// until unlocked.
+ Future<> AsyncTask();
+ /// \brief Waits until at least count tasks are running.
+ Status WaitForRunning(int count);
+ /// \brief Unlocks all waiting tasks. Returns an invalid status if any waiting task has
+ /// timed out
+ Status Unlock();
+
+ static std::shared_ptr<GatingTask> Make(double timeout_seconds = 10);
+
+ private:
+ class Impl;
+ std::shared_ptr<Impl> impl_;
+ };
+
+ /// \brief create an exact copy of the data where each buffer has a max alignment of 1
+ ///
+ /// This method does not recurse into the dictionary or children
+ ARROW_TESTING_EXPORT std::shared_ptr<ArrayData> UnalignBuffers(const ArrayData& array);
+ /// \brief create an exact copy of the array where each buffer has a max alignment of 1
+ ///
+ /// This method does not recurse into the dictionary or children
+ ARROW_TESTING_EXPORT std::shared_ptr<Array> UnalignBuffers(const Array& array);
+
+ } // namespace arrow
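
A sketch tying the JSON helpers and assertion macros above together; SomeKernelUnderTest is a hypothetical Result-returning function standing in for code under test:

    auto expected = ArrayFromJSON(int32(), "[1, 2, null, 4]");
    ASSERT_OK_AND_ASSIGN(auto actual, SomeKernelUnderTest(expected));
    AssertArraysEqual(*expected, *actual, /*verbose=*/true);

    // Error path: the call must yield Status::Invalid.
    ASSERT_RAISES(Invalid, SomeKernelUnderTest(/*input=*/nullptr));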
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/matchers.h ADDED
@@ -0,0 +1,467 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <utility>
21
+
22
+ #include <gmock/gmock-matchers.h>
23
+
24
+ #include "arrow/datum.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/stl_iterator.h"
28
+ #include "arrow/testing/future_util.h"
29
+ #include "arrow/testing/gtest_util.h"
30
+ #include "arrow/util/future.h"
31
+ #include "arrow/util/unreachable.h"
32
+
33
+ namespace arrow {
34
+
35
+ class PointeesEqualMatcher {
36
+ public:
37
+ template <typename PtrPair>
38
+ operator testing::Matcher<PtrPair>() const { // NOLINT runtime/explicit
39
+ struct Impl : testing::MatcherInterface<const PtrPair&> {
40
+ void DescribeTo(::std::ostream* os) const override { *os << "pointees are equal"; }
41
+
42
+ void DescribeNegationTo(::std::ostream* os) const override {
43
+ *os << "pointees are not equal";
44
+ }
45
+
46
+ bool MatchAndExplain(const PtrPair& pair,
47
+ testing::MatchResultListener* listener) const override {
48
+ const auto& first = *std::get<0>(pair);
49
+ const auto& second = *std::get<1>(pair);
50
+ const bool match = first.Equals(second);
51
+ *listener << "whose pointees " << testing::PrintToString(first) << " and "
52
+ << testing::PrintToString(second)
53
+ << (match ? " are equal" : " are not equal");
54
+ return match;
55
+ }
56
+ };
57
+
58
+ return testing::Matcher<PtrPair>(new Impl());
59
+ }
60
+ };
61
+
62
+ // A matcher that checks that the values pointed to are Equals().
63
+ // Useful in conjunction with other googletest matchers.
64
+ inline PointeesEqualMatcher PointeesEqual() { return {}; }
65
+
66
+ class AnyOfJSONMatcher {
67
+ public:
68
+ AnyOfJSONMatcher(std::shared_ptr<DataType> type, std::string array_json)
69
+ : type_(std::move(type)), array_json_(std::move(array_json)) {}
70
+
71
+ template <typename arg_type>
72
+ operator testing::Matcher<arg_type>() const { // NOLINT runtime/explicit
73
+ struct Impl : testing::MatcherInterface<const arg_type&> {
74
+ static_assert(std::is_same<arg_type, std::shared_ptr<Scalar>>(),
75
+ "AnyOfJSON only supported for std::shared_ptr<Scalar>");
76
+ Impl(std::shared_ptr<DataType> type, std::string array_json)
77
+ : type_(std::move(type)), array_json_(std::move(array_json)) {
78
+ array = ArrayFromJSON(type_, array_json_);
79
+ }
80
+ void DescribeTo(std::ostream* os) const override {
81
+ *os << "matches at least one scalar from ";
82
+ *os << array->ToString();
83
+ }
84
+ void DescribeNegationTo(::std::ostream* os) const override {
85
+ *os << "matches no scalar from ";
86
+ *os << array->ToString();
87
+ }
88
+ bool MatchAndExplain(
89
+ const arg_type& arg,
90
+ ::testing::MatchResultListener* result_listener) const override {
91
+ for (int64_t i = 0; i < array->length(); ++i) {
92
+ std::shared_ptr<Scalar> scalar;
93
+ auto maybe_scalar = array->GetScalar(i);
94
+ if (maybe_scalar.ok()) {
95
+ scalar = maybe_scalar.ValueOrDie();
96
+ } else {
97
+ *result_listener << "GetScalar() had status "
98
+ << maybe_scalar.status().ToString() << "at index " << i
99
+ << " in the input JSON Array";
100
+ return false;
101
+ }
102
+
103
+ if (scalar->Equals(*arg)) return true;
104
+ }
105
+ *result_listener << "Argument scalar: '" << arg->ToString()
106
+ << "' matches no scalar from " << array->ToString();
107
+ return false;
108
+ }
109
+ const std::shared_ptr<DataType> type_;
110
+ const std::string array_json_;
111
+ std::shared_ptr<Array> array;
112
+ };
113
+
114
+ return testing::Matcher<arg_type>(new Impl(type_, array_json_));
115
+ }
116
+
117
+ private:
118
+ const std::shared_ptr<DataType> type_;
119
+ const std::string array_json_;
120
+ };
121
+
122
+ inline AnyOfJSONMatcher AnyOfJSON(std::shared_ptr<DataType> type,
123
+ std::string array_json) {
124
+ return {std::move(type), std::move(array_json)};
125
+ }
126
+
127
+ template <typename ResultMatcher>
128
+ class FutureMatcher {
129
+ public:
130
+ explicit FutureMatcher(ResultMatcher result_matcher, double wait_seconds)
131
+ : result_matcher_(std::move(result_matcher)), wait_seconds_(wait_seconds) {}
132
+
133
+ template <typename Fut,
134
+ typename ValueType = typename std::decay<Fut>::type::ValueType>
135
+ operator testing::Matcher<Fut>() const { // NOLINT runtime/explicit
136
+ struct Impl : testing::MatcherInterface<const Fut&> {
137
+ explicit Impl(const ResultMatcher& result_matcher, double wait_seconds)
138
+ : result_matcher_(testing::MatcherCast<Result<ValueType>>(result_matcher)),
139
+ wait_seconds_(wait_seconds) {}
140
+
141
+ void DescribeTo(::std::ostream* os) const override {
142
+ *os << "value ";
143
+ result_matcher_.DescribeTo(os);
144
+ }
145
+
146
+ void DescribeNegationTo(::std::ostream* os) const override {
147
+ *os << "value ";
148
+ result_matcher_.DescribeNegationTo(os);
149
+ }
150
+
151
+ bool MatchAndExplain(const Fut& fut,
152
+ testing::MatchResultListener* listener) const override {
153
+ if (!fut.Wait(wait_seconds_)) {
154
+ *listener << "which didn't finish within " << wait_seconds_ << " seconds";
155
+ return false;
156
+ }
157
+ return result_matcher_.MatchAndExplain(fut.result(), listener);
158
+ }
159
+
160
+ const testing::Matcher<Result<ValueType>> result_matcher_;
161
+ const double wait_seconds_;
162
+ };
163
+
164
+ return testing::Matcher<Fut>(new Impl(result_matcher_, wait_seconds_));
165
+ }
166
+
167
+ private:
168
+ const ResultMatcher result_matcher_;
169
+ const double wait_seconds_;
170
+ };
171
+
172
+ template <typename ValueMatcher>
173
+ class ResultMatcher {
174
+ public:
175
+ explicit ResultMatcher(ValueMatcher value_matcher)
176
+ : value_matcher_(std::move(value_matcher)) {}
177
+
178
+ template <typename Res,
179
+ typename ValueType = typename std::decay<Res>::type::ValueType>
180
+ operator testing::Matcher<Res>() const { // NOLINT runtime/explicit
181
+ struct Impl : testing::MatcherInterface<const Res&> {
182
+ explicit Impl(const ValueMatcher& value_matcher)
183
+ : value_matcher_(testing::MatcherCast<ValueType>(value_matcher)) {}
184
+
185
+ void DescribeTo(::std::ostream* os) const override {
186
+ *os << "value ";
187
+ value_matcher_.DescribeTo(os);
188
+ }
189
+
190
+ void DescribeNegationTo(::std::ostream* os) const override {
191
+ *os << "value ";
192
+ value_matcher_.DescribeNegationTo(os);
193
+ }
194
+
195
+ bool MatchAndExplain(const Res& maybe_value,
196
+ testing::MatchResultListener* listener) const override {
197
+ if (!maybe_value.status().ok()) {
198
+ *listener << "whose error "
199
+ << testing::PrintToString(maybe_value.status().ToString())
200
+ << " doesn't match";
201
+ return false;
202
+ }
203
+ const ValueType& value = maybe_value.ValueOrDie();
204
+ testing::StringMatchResultListener value_listener;
205
+ const bool match = value_matcher_.MatchAndExplain(value, &value_listener);
206
+ *listener << "whose value " << testing::PrintToString(value)
207
+ << (match ? " matches" : " doesn't match");
208
+ testing::internal::PrintIfNotEmpty(value_listener.str(), listener->stream());
209
+ return match;
210
+ }
211
+
212
+ const testing::Matcher<ValueType> value_matcher_;
213
+ };
214
+
215
+ return testing::Matcher<Res>(new Impl(value_matcher_));
216
+ }
217
+
218
+ private:
219
+ const ValueMatcher value_matcher_;
220
+ };
221
+
222
+ class ErrorMatcher {
223
+ public:
224
+ explicit ErrorMatcher(StatusCode code,
225
+ std::optional<testing::Matcher<std::string>> message_matcher)
226
+ : code_(code), message_matcher_(std::move(message_matcher)) {}
227
+
228
+ template <typename Res>
229
+ operator testing::Matcher<Res>() const { // NOLINT runtime/explicit
230
+ struct Impl : testing::MatcherInterface<const Res&> {
231
+ explicit Impl(StatusCode code,
232
+ std::optional<testing::Matcher<std::string>> message_matcher)
233
+ : code_(code), message_matcher_(std::move(message_matcher)) {}
234
+
235
+ void DescribeTo(::std::ostream* os) const override {
236
+ *os << "raises StatusCode::" << Status::CodeAsString(code_);
237
+ if (message_matcher_) {
238
+ *os << " and message ";
239
+ message_matcher_->DescribeTo(os);
240
+ }
241
+ }
242
+
243
+ void DescribeNegationTo(::std::ostream* os) const override {
244
+ *os << "does not raise StatusCode::" << Status::CodeAsString(code_);
245
+ if (message_matcher_) {
246
+ *os << " or message ";
247
+ message_matcher_->DescribeNegationTo(os);
248
+ }
249
+ }
250
+
251
+ bool MatchAndExplain(const Res& maybe_value,
252
+ testing::MatchResultListener* listener) const override {
253
+ const Status& status = internal::GenericToStatus(maybe_value);
254
+ testing::StringMatchResultListener value_listener;
255
+
256
+ bool match = status.code() == code_;
257
+ if (message_matcher_) {
258
+ match = match &&
259
+ message_matcher_->MatchAndExplain(status.message(), &value_listener);
260
+ }
261
+
262
+ if (match) {
263
+ *listener << "whose error matches";
264
+ } else if (status.ok()) {
265
+ *listener << "whose non-error doesn't match";
266
+ } else {
267
+ *listener << "whose error doesn't match";
268
+ }
269
+
270
+ testing::internal::PrintIfNotEmpty(value_listener.str(), listener->stream());
271
+ return match;
272
+ }
273
+
274
+ const StatusCode code_;
275
+ const std::optional<testing::Matcher<std::string>> message_matcher_;
276
+ };
277
+
278
+ return testing::Matcher<Res>(new Impl(code_, message_matcher_));
279
+ }
280
+
281
+ private:
282
+ const StatusCode code_;
283
+ const std::optional<testing::Matcher<std::string>> message_matcher_;
284
+ };
285
+
286
+ class OkMatcher {
287
+ public:
288
+ template <typename Res>
289
+ operator testing::Matcher<Res>() const { // NOLINT runtime/explicit
290
+ struct Impl : testing::MatcherInterface<const Res&> {
291
+ void DescribeTo(::std::ostream* os) const override { *os << "is ok"; }
292
+
293
+ void DescribeNegationTo(::std::ostream* os) const override { *os << "is not ok"; }
294
+
295
+ bool MatchAndExplain(const Res& maybe_value,
296
+ testing::MatchResultListener* listener) const override {
297
+ const Status& status = internal::GenericToStatus(maybe_value);
298
+
299
+ const bool match = status.ok();
300
+ *listener << "whose " << (match ? "non-error matches" : "error doesn't match");
301
+ return match;
302
+ }
303
+ };
304
+
305
+ return testing::Matcher<Res>(new Impl());
306
+ }
307
+ };
308
+
309
+ // Returns a matcher that waits on a Future (by default for 16 seconds)
310
+ // then applies a matcher to the result.
311
+ template <typename ResultMatcher>
312
+ FutureMatcher<ResultMatcher> Finishes(
313
+ const ResultMatcher& result_matcher,
314
+ double wait_seconds = kDefaultAssertFinishesWaitSeconds) {
315
+ return FutureMatcher<ResultMatcher>(result_matcher, wait_seconds);
316
+ }
317
+
318
+ // Returns a matcher that matches the value of a successful Result<T>.
319
+ template <typename ValueMatcher>
320
+ ResultMatcher<ValueMatcher> ResultWith(const ValueMatcher& value_matcher) {
321
+ return ResultMatcher<ValueMatcher>(value_matcher);
322
+ }
323
+
324
+ // Returns a matcher that matches an ok Status or Result<T>.
325
+ inline OkMatcher Ok() { return {}; }
326
+
327
+ // Returns a matcher that matches the StatusCode of a Status or Result<T>.
328
+ // Do not use Raises(StatusCode::OK) to match a non error code.
329
+ inline ErrorMatcher Raises(StatusCode code) { return ErrorMatcher(code, std::nullopt); }
330
+
331
+ // Returns a matcher that matches the StatusCode and message of a Status or Result<T>.
332
+ template <typename MessageMatcher>
333
+ ErrorMatcher Raises(StatusCode code, const MessageMatcher& message_matcher) {
334
+ return ErrorMatcher(code, testing::MatcherCast<std::string>(message_matcher));
335
+ }
336
+
337
+ class DataEqMatcher {
338
+ public:
339
+ // TODO(bkietz) support EqualOptions, ApproxEquals, etc
340
+ // Probably it's better to use something like config-through-key_value_metadata
341
+ // as with the random generators to decouple this from EqualOptions etc.
342
+   explicit DataEqMatcher(Datum expected) : expected_(std::move(expected)) {}
+
+   template <typename Data>
+   operator testing::Matcher<Data>() const {  // NOLINT runtime/explicit
+     struct Impl : testing::MatcherInterface<const Data&> {
+       explicit Impl(Datum expected) : expected_(std::move(expected)) {}
+
+       void DescribeTo(::std::ostream* os) const override {
+         *os << "has data ";
+         PrintTo(expected_, os);
+       }
+
+       void DescribeNegationTo(::std::ostream* os) const override {
+         *os << "doesn't have data ";
+         PrintTo(expected_, os);
+       }
+
+       bool MatchAndExplain(const Data& data,
+                            testing::MatchResultListener* listener) const override {
+         Datum boxed(data);
+
+         if (boxed.kind() != expected_.kind()) {
+           *listener << "whose Datum::kind " << boxed.ToString() << " doesn't match "
+                     << expected_.ToString();
+           return false;
+         }
+
+         if (const auto& boxed_type = boxed.type()) {
+           if (*boxed_type != *expected_.type()) {
+             *listener << "whose DataType " << boxed_type->ToString() << " doesn't match "
+                       << expected_.type()->ToString();
+             return false;
+           }
+         } else if (const auto& boxed_schema = boxed.schema()) {
+           if (*boxed_schema != *expected_.schema()) {
+             *listener << "whose Schema " << boxed_schema->ToString() << " doesn't match "
+                       << expected_.schema()->ToString();
+             return false;
+           }
+         } else {
+           Unreachable();
+         }
+
+         if (boxed == expected_) {
+           *listener << "whose value matches";
+           return true;
+         }
+
+         if (listener->IsInterested() && boxed.kind() == Datum::ARRAY) {
+           *listener << "whose value differs from the expected value by "
+                     << boxed.make_array()->Diff(*expected_.make_array());
+         } else {
+           *listener << "whose value doesn't match";
+         }
+         return false;
+       }
+
+       Datum expected_;
+     };
+
+     return testing::Matcher<Data>(new Impl(expected_));
+   }
+
+  private:
+   Datum expected_;
+ };
+
+ /// Constructs a datum against which arguments are matched
+ template <typename Data>
+ DataEqMatcher DataEq(Data&& dat) {
+   return DataEqMatcher(Datum(std::forward<Data>(dat)));
+ }
+
+ /// Constructs an array with ArrayFromJSON against which arguments are matched
+ inline DataEqMatcher DataEqArray(const std::shared_ptr<DataType>& type,
+                                  std::string_view json) {
+   return DataEq(ArrayFromJSON(type, json));
+ }
+
+ /// Constructs an array from a vector of optionals against which arguments are matched
+ template <typename T, typename ArrayType = typename TypeTraits<T>::ArrayType,
+           typename BuilderType = typename TypeTraits<T>::BuilderType,
+           typename ValueType =
+               typename ::arrow::stl::detail::DefaultValueAccessor<ArrayType>::ValueType>
+ DataEqMatcher DataEqArray(T type, const std::vector<std::optional<ValueType>>& values) {
+   // FIXME(bkietz) broken until DataType is move constructible
+   BuilderType builder(std::make_shared<T>(std::move(type)), default_memory_pool());
+   DCHECK_OK(builder.Reserve(static_cast<int64_t>(values.size())));
+
+   // pseudo constexpr:
+   static const bool need_safe_append = !is_fixed_width(T::type_id);
+
+   for (auto value : values) {
+     if (need_safe_append) {
+       DCHECK_OK(builder.AppendOrNull(value));
+     } else {
+       builder.UnsafeAppendOrNull(value);
+     }
+   }
+
+   return DataEq(builder.Finish().ValueOrDie());
+ }
+
+ /// Constructs a scalar with ScalarFromJSON against which arguments are matched
+ inline DataEqMatcher DataEqScalar(const std::shared_ptr<DataType>& type,
+                                   std::string_view json) {
+   return DataEq(ScalarFromJSON(type, json));
+ }
+
+ /// Constructs a scalar against which arguments are matched
+ template <typename T, typename ScalarType = typename TypeTraits<T>::ScalarType,
+           typename ValueType = typename ScalarType::ValueType>
+ DataEqMatcher DataEqScalar(T type, std::optional<ValueType> value) {
+   ScalarType expected(std::make_shared<T>(std::move(type)));
+
+   if (value) {
+     expected.is_valid = true;
+     expected.value = std::move(*value);
+   }
+
+   return DataEq(std::move(expected));
+ }
+
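+ // A minimal usage sketch (assuming gtest/gmock is linked): these matchers
+ // compose with gmock expectations, e.g.
+ //
+ //   EXPECT_THAT(ArrayFromJSON(int32(), "[1, 2, 3]"),
+ //               DataEqArray(int32(), "[1, 2, 3]"));
+ //   EXPECT_THAT(ScalarFromJSON(int8(), "16"), DataEqScalar(int8(), "16"));
+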
+ // HasType, HasSchema matchers
+
+ }  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/pch.h ADDED
@@ -0,0 +1,25 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // Often-used headers, for precompiling.
+ // If updating this header, please make sure you check compilation speed
+ // before checking in. Adding headers which are not used extremely often
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
+
+ #include "arrow/pch.h"
+ #include "arrow/testing/gtest_util.h"
+ #include "arrow/testing/util.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/uniform_real.h ADDED
@@ -0,0 +1,84 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // Random real generation is very slow on Arm if built with clang + libstdc++
+ // due to software emulated long double arithmetic.
+ // This file ports some random real libs from llvm libc++ library, which are
+ // free from long double calculation.
+ // It improves performance significantly on both Arm (~100x) and x86 (~8x) in
+ // generating random reals when built with clang + gnu libstdc++.
+ // Based on: https://github.com/llvm/llvm-project/tree/main/libcxx
+
+ #pragma once
+
+ #include <limits>
+
+ #include <arrow/util/bit_util.h>
+
+ namespace arrow {
+ namespace random {
+
+ namespace detail {
+
+ // std::generate_canonical, simplified
+ // https://en.cppreference.com/w/cpp/numeric/random/generate_canonical
+ template <typename RealType, typename Rng>
+ RealType generate_canonical(Rng& rng) {
+   const size_t b = std::numeric_limits<RealType>::digits;
+   const size_t log2R = 63 - ::arrow::bit_util::CountLeadingZeros(
+                                 static_cast<uint64_t>(Rng::max() - Rng::min()) + 1);
+   const size_t k = b / log2R + (b % log2R != 0) + (b == 0);
+   const RealType r = static_cast<RealType>(Rng::max() - Rng::min()) + 1;
+   RealType base = r;
+   RealType sp = static_cast<RealType>(rng() - Rng::min());
+   for (size_t i = 1; i < k; ++i, base *= r) {
+     sp += (rng() - Rng::min()) * base;
+   }
+   return sp / base;
+ }
+
+ }  // namespace detail
+
+ // std::uniform_real_distribution, simplified
+ // https://en.cppreference.com/w/cpp/numeric/random/uniform_real_distribution
+ template <typename RealType = double>
+ struct uniform_real_distribution {
+   const RealType a, b;
+
+   explicit uniform_real_distribution(RealType a = 0, RealType b = 1) : a(a), b(b) {}
+
+   template <typename Rng>
+   RealType operator()(Rng& rng) {
+     return (b - a) * detail::generate_canonical<RealType>(rng) + a;
+   }
+ };
+
+ // std::bernoulli_distribution, simplified
+ // https://en.cppreference.com/w/cpp/numeric/random/bernoulli_distribution
+ struct bernoulli_distribution {
+   const double p;
+
+   explicit bernoulli_distribution(double p = 0.5) : p(p) {}
+
+   template <class Rng>
+   bool operator()(Rng& rng) {
+     return detail::generate_canonical<double>(rng) < p;
+   }
+ };
+
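+ // Usage sketch (assuming a 32-bit standard engine such as std::mt19937):
+ //
+ //   std::mt19937 rng(42);
+ //   uniform_real_distribution<double> dist(0.0, 1.0);
+ //   bernoulli_distribution coin(0.25);
+ //   double x = dist(rng);    // in [0, 1)
+ //   bool heads = coin(rng);  // true with probability ~0.25
+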
+ }  // namespace random
+ }  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/util.h ADDED
@@ -0,0 +1,140 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <algorithm>
+ #include <cstdint>
+ #include <cstdlib>
+ #include <cstring>
+ #include <limits>
+ #include <memory>
+ #include <optional>
+ #include <string>
+ #include <type_traits>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/buffer.h"
+ #include "arrow/record_batch.h"
+ #include "arrow/status.h"
+ #include "arrow/testing/visibility.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+
+ template <typename T>
+ Status CopyBufferFromVector(const std::vector<T>& values, MemoryPool* pool,
+                             std::shared_ptr<Buffer>* result) {
+   int64_t nbytes = static_cast<int>(values.size()) * sizeof(T);
+
+   ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateBuffer(nbytes, pool));
+   auto immutable_data = reinterpret_cast<const uint8_t*>(values.data());
+   std::copy(immutable_data, immutable_data + nbytes, buffer->mutable_data());
+   memset(buffer->mutable_data() + nbytes, 0,
+          static_cast<size_t>(buffer->capacity() - nbytes));
+
+   *result = std::move(buffer);
+   return Status::OK();
+ }
+
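+ // Usage sketch: copying a typed vector into a freshly allocated Buffer:
+ //
+ //   std::vector<int32_t> values = {1, 2, 3};
+ //   std::shared_ptr<Buffer> buffer;
+ //   ARROW_RETURN_NOT_OK(CopyBufferFromVector(values, default_memory_pool(), &buffer));
+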
+ // Sets approximately pct_null of the first n bytes in null_bytes to zero
+ // and the rest to non-zero (true) values.
+ ARROW_TESTING_EXPORT void random_null_bytes(int64_t n, double pct_null,
+                                             uint8_t* null_bytes);
+ ARROW_TESTING_EXPORT void random_is_valid(int64_t n, double pct_null,
+                                           std::vector<bool>* is_valid,
+                                           int random_seed = 0);
+ ARROW_TESTING_EXPORT void random_bytes(int64_t n, uint32_t seed, uint8_t* out);
+ ARROW_TESTING_EXPORT std::string random_string(int64_t n, uint32_t seed);
+ ARROW_TESTING_EXPORT int32_t DecimalSize(int32_t precision);
+ ARROW_TESTING_EXPORT void random_ascii(int64_t n, uint32_t seed, uint8_t* out);
+ ARROW_TESTING_EXPORT int64_t CountNulls(const std::vector<uint8_t>& valid_bytes);
+
+ ARROW_TESTING_EXPORT Status MakeRandomByteBuffer(int64_t length, MemoryPool* pool,
+                                                  std::shared_ptr<ResizableBuffer>* out,
+                                                  uint32_t seed = 0);
+
+ ARROW_TESTING_EXPORT uint64_t random_seed();
+
+ #define DECL_T() typedef typename TestFixture::T T;
+
+ #define DECL_TYPE() typedef typename TestFixture::Type Type;
+
+ // ----------------------------------------------------------------------
+ // A RecordBatchReader for serving a sequence of in-memory record batches
+
+ class BatchIterator : public RecordBatchReader {
+  public:
+   BatchIterator(const std::shared_ptr<Schema>& schema,
+                 const std::vector<std::shared_ptr<RecordBatch>>& batches)
+       : schema_(schema), batches_(batches), position_(0) {}
+
+   std::shared_ptr<Schema> schema() const override { return schema_; }
+
+   Status ReadNext(std::shared_ptr<RecordBatch>* out) override {
+     if (position_ >= batches_.size()) {
+       *out = nullptr;
+     } else {
+       *out = batches_[position_++];
+     }
+     return Status::OK();
+   }
+
+  private:
+   std::shared_ptr<Schema> schema_;
+   std::vector<std::shared_ptr<RecordBatch>> batches_;
+   size_t position_;
+ };
+
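+ // Usage sketch (schema and batches assumed): BatchIterator behaves like any
+ // other RecordBatchReader, with a null batch signalling end-of-stream:
+ //
+ //   BatchIterator reader(schema, {batch1, batch2});
+ //   std::shared_ptr<RecordBatch> batch;
+ //   do {
+ //     ARROW_RETURN_NOT_OK(reader.ReadNext(&batch));
+ //   } while (batch != nullptr);
+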
+ static inline std::vector<std::shared_ptr<DataType> (*)(FieldVector, std::vector<int8_t>)>
+ UnionTypeFactories() {
+   return {sparse_union, dense_union};
+ }
+
+ // Return the value of the ARROW_TEST_DATA environment variable or return error
+ // Status
+ ARROW_TESTING_EXPORT Status GetTestResourceRoot(std::string*);
+
+ // Return the value of the ARROW_TIMEZONE_DATABASE environment variable
+ ARROW_TESTING_EXPORT std::optional<std::string> GetTestTimezoneDatabaseRoot();
+
+ // Set the Timezone database based on the ARROW_TIMEZONE_DATABASE env variable.
+ // This is only relevant on Windows, since other OSs have compatible databases built-in.
+ ARROW_TESTING_EXPORT Status InitTestTimezoneDatabase();
+
+ // Get a TCP port number to listen on. This is a different number every time,
+ // as reusing the same port across tests can produce spurious bind errors on
+ // Windows.
+ ARROW_TESTING_EXPORT int GetListenPort();
+
+ // Get an IPv4 "address:port" to listen on. The address will be a loopback
+ // address. Compared to GetListenPort(), this will minimize the risk of
+ // port conflicts.
+ ARROW_TESTING_EXPORT std::string GetListenAddress();
+
+ ARROW_TESTING_EXPORT
+ const std::vector<std::shared_ptr<DataType>>& all_dictionary_index_types();
+
+ // Get a list of supported hardware flags from the given candidates.
+ // The result will always contain 0, meaning no optional CPU feature enabled at all.
+ ARROW_TESTING_EXPORT
+ std::vector<int64_t> GetSupportedHardwareFlags(
+     const std::vector<int64_t>& candidate_flags);
+
+ }  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/testing/visibility.h ADDED
@@ -0,0 +1,48 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #if defined(_WIN32) || defined(__CYGWIN__)
+ #if defined(_MSC_VER)
+ #pragma warning(push)
+ #pragma warning(disable : 4251)
+ #else
+ #pragma GCC diagnostic ignored "-Wattributes"
+ #endif
+
+ #ifdef ARROW_TESTING_STATIC
+ #define ARROW_TESTING_EXPORT
+ #elif defined(ARROW_TESTING_EXPORTING)
+ #define ARROW_TESTING_EXPORT __declspec(dllexport)
+ #else
+ #define ARROW_TESTING_EXPORT __declspec(dllimport)
+ #endif
+
+ #define ARROW_TESTING_NO_EXPORT
+ #else  // Not Windows
+ #ifndef ARROW_TESTING_EXPORT
+ #define ARROW_TESTING_EXPORT __attribute__((visibility("default")))
+ #endif
+ #ifndef ARROW_TESTING_NO_EXPORT
+ #define ARROW_TESTING_NO_EXPORT __attribute__((visibility("hidden")))
+ #endif
+ #endif  // Non-Windows
+
+ #if defined(_MSC_VER)
+ #pragma warning(pop)
+ #endif
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/aligned_storage.h ADDED
@@ -0,0 +1,145 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstring>
+ #include <type_traits>
+ #include <utility>
+
+ #include "arrow/util/launder.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace internal {
+
+ template <typename T>
+ class AlignedStorage {
+  public:
+   static constexpr bool can_memcpy = std::is_trivial<T>::value;
+
+   constexpr T* get() noexcept {
+     return arrow::internal::launder(reinterpret_cast<T*>(&data_));
+   }
+
+   constexpr const T* get() const noexcept {
+     // Use fully qualified name to avoid ambiguities with MSVC (ARROW-14800)
+     return arrow::internal::launder(reinterpret_cast<const T*>(&data_));
+   }
+
+   void destroy() noexcept {
+     if (!std::is_trivially_destructible<T>::value) {
+       get()->~T();
+     }
+   }
+
+   template <typename... A>
+   void construct(A&&... args) noexcept {
+     new (&data_) T(std::forward<A>(args)...);
+   }
+
+   template <typename V>
+   void assign(V&& v) noexcept {
+     *get() = std::forward<V>(v);
+   }
+
+   void move_construct(AlignedStorage* other) noexcept {
+     new (&data_) T(std::move(*other->get()));
+   }
+
+   void move_assign(AlignedStorage* other) noexcept { *get() = std::move(*other->get()); }
+
+   template <bool CanMemcpy = can_memcpy>
+   static typename std::enable_if<CanMemcpy>::type move_construct_several(
+       AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, size_t n,
+       size_t memcpy_length) noexcept {
+     memcpy(dest->get(), src->get(), memcpy_length * sizeof(T));
+   }
+
+   template <bool CanMemcpy = can_memcpy>
+   static typename std::enable_if<CanMemcpy>::type
+   move_construct_several_and_destroy_source(AlignedStorage* ARROW_RESTRICT src,
+                                             AlignedStorage* ARROW_RESTRICT dest, size_t n,
+                                             size_t memcpy_length) noexcept {
+     memcpy(dest->get(), src->get(), memcpy_length * sizeof(T));
+   }
+
+   template <bool CanMemcpy = can_memcpy>
+   static typename std::enable_if<!CanMemcpy>::type move_construct_several(
+       AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest, size_t n,
+       size_t memcpy_length) noexcept {
+     for (size_t i = 0; i < n; ++i) {
+       new (dest[i].get()) T(std::move(*src[i].get()));
+     }
+   }
+
+   template <bool CanMemcpy = can_memcpy>
+   static typename std::enable_if<!CanMemcpy>::type
+   move_construct_several_and_destroy_source(AlignedStorage* ARROW_RESTRICT src,
+                                             AlignedStorage* ARROW_RESTRICT dest, size_t n,
+                                             size_t memcpy_length) noexcept {
+     for (size_t i = 0; i < n; ++i) {
+       new (dest[i].get()) T(std::move(*src[i].get()));
+       src[i].destroy();
+     }
+   }
+
+   static void move_construct_several(AlignedStorage* ARROW_RESTRICT src,
+                                      AlignedStorage* ARROW_RESTRICT dest,
+                                      size_t n) noexcept {
+     move_construct_several(src, dest, n, n);
+   }
+
+   static void move_construct_several_and_destroy_source(
+       AlignedStorage* ARROW_RESTRICT src, AlignedStorage* ARROW_RESTRICT dest,
+       size_t n) noexcept {
+     move_construct_several_and_destroy_source(src, dest, n, n);
+   }
+
+   static void destroy_several(AlignedStorage* p, size_t n) noexcept {
+     if (!std::is_trivially_destructible<T>::value) {
+       for (size_t i = 0; i < n; ++i) {
+         p[i].destroy();
+       }
+     }
+   }
+
+  private:
+ #if !defined(__clang__) && defined(__GNUC__) && defined(__i386__)
+   // Workaround for GCC bug on i386:
+   // alignof(int64 | float64) can give different results depending on the
+   // compilation context, leading to internal ABI mismatch manifesting
+   // in incorrect propagation of Result<int64 | float64> between
+   // compilation units.
+   // (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88115)
+   static constexpr size_t alignment() {
+     if (std::is_integral_v<T> && sizeof(T) == 8) {
+       return 4;
+     } else if (std::is_floating_point_v<T> && sizeof(T) == 8) {
+       return 4;
+     }
+     return alignof(T);
+   }
+
+   typename std::aligned_storage<sizeof(T), alignment()>::type data_;
+ #else
+   typename std::aligned_storage<sizeof(T), alignof(T)>::type data_;
+ #endif
+ };
+
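+ // Usage sketch: the storage is raw and correctly aligned; the caller pairs
+ // construct() with destroy() explicitly:
+ //
+ //   AlignedStorage<std::string> slot;
+ //   slot.construct("hello");       // placement-new into the storage
+ //   slot.get()->append(" world");  // access through get()
+ //   slot.destroy();                // explicit destructor call
+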
+ }  // namespace internal
+ }  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator.h ADDED
@@ -0,0 +1,2058 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <atomic>
+ #include <cassert>
+ #include <cstring>
+ #include <deque>
+ #include <limits>
+ #include <optional>
+ #include <queue>
+
+ #include "arrow/util/async_generator_fwd.h"
+ #include "arrow/util/async_util.h"
+ #include "arrow/util/functional.h"
+ #include "arrow/util/future.h"
+ #include "arrow/util/io_util.h"
+ #include "arrow/util/iterator.h"
+ #include "arrow/util/mutex.h"
+ #include "arrow/util/queue.h"
+ #include "arrow/util/thread_pool.h"
+
+ namespace arrow {
+
+ // The methods in this file create, modify, and utilize AsyncGenerator which is an
+ // iterator of futures. This allows an asynchronous source (like file input) to be run
+ // through a pipeline in the same way that iterators can be used to create pipelined
+ // workflows.
+ //
+ // In order to support pipeline parallelism we introduce the concept of asynchronous
+ // reentrancy. This is different from synchronous reentrancy. With synchronous code a
+ // function is reentrant if the function can be called again while a previous call to
+ // that function is still running. Unless otherwise specified, none of these generators
+ // are synchronously reentrant. Care should be taken to avoid calling them in such a way
+ // (and the utilities Visit/Collect/Await take care to do this).
+ //
+ // Asynchronous reentrancy, on the other hand, means the function is called again before
+ // the future returned by the function is marked finished (but after the call to get the
+ // future returns). Some of these generators are async-reentrant while others (e.g.
+ // those that depend on ordered processing like decompression) are not. Read the MakeXYZ
+ // function comments to determine which generators support async reentrancy.
+ //
+ // Note: Generators that are not asynchronously reentrant can still support readahead
+ // (\see MakeSerialReadaheadGenerator).
+ //
+ // Readahead operators, and some other operators, may introduce queueing. Any operators
+ // that introduce buffering should detail the amount of buffering they introduce in their
+ // MakeXYZ function comments.
+ //
+ // A generator should always be fully consumed before it is destroyed.
+ // A generator should not mark a future complete with an error status or a terminal value
+ // until all outstanding futures have completed. Generators that spawn multiple
+ // concurrent futures may need to hold onto an error while other concurrent futures wrap
+ // up.
+ template <typename T>
+ struct IterationTraits<AsyncGenerator<T>> {
+   /// \brief by default when iterating through a sequence of AsyncGenerator<T>,
+   /// an empty function indicates the end of iteration.
+   static AsyncGenerator<T> End() { return AsyncGenerator<T>(); }
+
+   static bool IsEnd(const AsyncGenerator<T>& val) { return !val; }
+ };
+
+ template <typename T>
+ Future<T> AsyncGeneratorEnd() {
+   return Future<T>::MakeFinished(IterationTraits<T>::End());
+ }
+
+ /// \brief Visit each item from an async generator with a visitor,
+ /// returning a future that completes when all have been visited
+ template <typename T, typename Visitor>
+ Future<> VisitAsyncGenerator(AsyncGenerator<T> generator, Visitor visitor) {
+   struct LoopBody {
+     struct Callback {
+       Result<ControlFlow<>> operator()(const T& next) {
+         if (IsIterationEnd(next)) {
+           return Break();
+         } else {
+           auto visited = visitor(next);
+           if (visited.ok()) {
+             return Continue();
+           } else {
+             return visited;
+           }
+         }
+       }
+
+       Visitor visitor;
+     };
+
+     Future<ControlFlow<>> operator()() {
+       Callback callback{visitor};
+       auto next = generator();
+       return next.Then(std::move(callback));
+     }
+
+     AsyncGenerator<T> generator;
+     Visitor visitor;
+   };
+
+   return Loop(LoopBody{std::move(generator), std::move(visitor)});
+ }
+
+ /// \brief Wait for an async generator to complete, discarding results.
+ template <typename T>
+ Future<> DiscardAllFromAsyncGenerator(AsyncGenerator<T> generator) {
+   std::function<Status(T)> visitor = [](const T&) { return Status::OK(); };
+   return VisitAsyncGenerator(generator, visitor);
+ }
+
+ /// \brief Collect the results of an async generator into a vector
+ template <typename T>
+ Future<std::vector<T>> CollectAsyncGenerator(AsyncGenerator<T> generator) {
+   auto vec = std::make_shared<std::vector<T>>();
+   auto loop_body = [generator = std::move(generator),
+                     vec = std::move(vec)]() -> Future<ControlFlow<std::vector<T>>> {
+     auto next = generator();
+     return next.Then([vec](const T& result) -> Result<ControlFlow<std::vector<T>>> {
+       if (IsIterationEnd(result)) {
+         return Break(*vec);
+       } else {
+         vec->push_back(result);
+         return Continue();
+       }
+     });
+   };
+   return Loop(std::move(loop_body));
+ }
+
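+ // Usage sketch: MakeVectorGenerator (defined later in this header) yields
+ // pre-finished futures, so the collected vector below resolves immediately:
+ //
+ //   AsyncGenerator<int> gen = MakeVectorGenerator(std::vector<int>{1, 2, 3});
+ //   Future<std::vector<int>> collected = CollectAsyncGenerator(std::move(gen));
+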
+ /// \see MakeMappedGenerator
+ template <typename T, typename V>
+ class MappingGenerator {
+  public:
+   MappingGenerator(AsyncGenerator<T> source, std::function<Future<V>(const T&)> map)
+       : state_(std::make_shared<State>(std::move(source), std::move(map))) {}
+
+   Future<V> operator()() {
+     auto future = Future<V>::Make();
+     bool should_trigger;
+     {
+       auto guard = state_->mutex.Lock();
+       if (state_->finished) {
+         return AsyncGeneratorEnd<V>();
+       }
+       should_trigger = state_->waiting_jobs.empty();
+       state_->waiting_jobs.push_back(future);
+     }
+     if (should_trigger) {
+       state_->source().AddCallback(Callback{state_});
+     }
+     return future;
+   }
+
+  private:
+   struct State {
+     State(AsyncGenerator<T> source, std::function<Future<V>(const T&)> map)
+         : source(std::move(source)),
+           map(std::move(map)),
+           waiting_jobs(),
+           mutex(),
+           finished(false) {}
+
+     void Purge() {
+       // This might be called by an original callback (if the source iterator fails or
+       // ends) or by a mapped callback (if the map function fails or ends prematurely).
+       // Either way it should only be called once and after finished is set so there is
+       // no need to guard access to `waiting_jobs`.
+       while (!waiting_jobs.empty()) {
+         waiting_jobs.front().MarkFinished(IterationTraits<V>::End());
+         waiting_jobs.pop_front();
+       }
+     }
+
+     AsyncGenerator<T> source;
+     std::function<Future<V>(const T&)> map;
+     std::deque<Future<V>> waiting_jobs;
+     util::Mutex mutex;
+     bool finished;
+   };
+
+   struct Callback;
+
+   struct MappedCallback {
+     void operator()(const Result<V>& maybe_next) {
+       bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next);
+       bool should_purge = false;
+       if (end) {
+         {
+           auto guard = state->mutex.Lock();
+           should_purge = !state->finished;
+           state->finished = true;
+         }
+       }
+       sink.MarkFinished(maybe_next);
+       if (should_purge) {
+         state->Purge();
+       }
+     }
+     std::shared_ptr<State> state;
+     Future<V> sink;
+   };
+
+   struct Callback {
+     void operator()(const Result<T>& maybe_next) {
+       Future<V> sink;
+       bool end = !maybe_next.ok() || IsIterationEnd(*maybe_next);
+       bool should_purge = false;
+       bool should_trigger;
+       {
+         auto guard = state->mutex.Lock();
+         // A MappedCallback may have purged or be purging the queue;
+         // we shouldn't do anything here.
+         if (state->finished) return;
+         if (end) {
+           should_purge = !state->finished;
+           state->finished = true;
+         }
+         sink = state->waiting_jobs.front();
+         state->waiting_jobs.pop_front();
+         should_trigger = !end && !state->waiting_jobs.empty();
+       }
+       if (should_purge) {
+         state->Purge();
+       }
+       if (should_trigger) {
+         state->source().AddCallback(Callback{state});
+       }
+       if (maybe_next.ok()) {
+         const T& val = maybe_next.ValueUnsafe();
+         if (IsIterationEnd(val)) {
+           sink.MarkFinished(IterationTraits<V>::End());
+         } else {
+           Future<V> mapped_fut = state->map(val);
+           mapped_fut.AddCallback(MappedCallback{std::move(state), std::move(sink)});
+         }
+       } else {
+         sink.MarkFinished(maybe_next.status());
+       }
+     }
+
+     std::shared_ptr<State> state;
+   };
+
+   std::shared_ptr<State> state_;
+ };
+
+ /// \brief Create a generator that will apply the map function to each element of
+ /// source. The map function is not called on the end token.
+ ///
+ /// Note: This function makes a copy of `map` for each item
+ /// Note: Errors returned from the `map` function will be propagated
+ ///
+ /// If the source generator is async-reentrant then this generator will be also
+ template <typename T, typename MapFn,
+           typename Mapped = detail::result_of_t<MapFn(const T&)>,
+           typename V = typename EnsureFuture<Mapped>::type::ValueType>
+ AsyncGenerator<V> MakeMappedGenerator(AsyncGenerator<T> source_generator, MapFn map) {
+   auto map_callback = [map = std::move(map)](const T& val) mutable -> Future<V> {
+     return ToFuture(map(val));
+   };
+   return MappingGenerator<T, V>(std::move(source_generator), std::move(map_callback));
+ }
+
+ /// \brief Create a generator that will apply the map function to
+ /// each element of source. The map function is not called on the end
+ /// token. The result of the map function should be another
+ /// generator; all these generators will then be flattened to produce
+ /// a single stream of items.
+ ///
+ /// Note: This function makes a copy of `map` for each item
+ /// Note: Errors returned from the `map` function will be propagated
+ ///
+ /// If the source generator is async-reentrant then this generator will be also
+ template <typename T, typename MapFn,
+           typename Mapped = detail::result_of_t<MapFn(const T&)>,
+           typename V = typename EnsureFuture<Mapped>::type::ValueType>
+ AsyncGenerator<T> MakeFlatMappedGenerator(AsyncGenerator<T> source_generator, MapFn map) {
+   return MakeConcatenatedGenerator(
+       MakeMappedGenerator(std::move(source_generator), std::move(map)));
+ }
+
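+ // Usage sketch: the map function may return either V or Future<V>; both are
+ // normalized through ToFuture above:
+ //
+ //   AsyncGenerator<int> source = MakeVectorGenerator(std::vector<int>{1, 2, 3});
+ //   AsyncGenerator<int> doubled =
+ //       MakeMappedGenerator(std::move(source), [](const int& v) { return v * 2; });
+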
+ /// \see MakeSequencingGenerator
+ template <typename T, typename ComesAfter, typename IsNext>
+ class SequencingGenerator {
+  public:
+   SequencingGenerator(AsyncGenerator<T> source, ComesAfter compare, IsNext is_next,
+                       T initial_value)
+       : state_(std::make_shared<State>(std::move(source), std::move(compare),
+                                        std::move(is_next), std::move(initial_value))) {}
+
+   Future<T> operator()() {
+     {
+       auto guard = state_->mutex.Lock();
+       // We can send a result immediately if the top of the queue is either an
+       // error or the next item
+       if (!state_->queue.empty() &&
+           (!state_->queue.top().ok() ||
+            state_->is_next(state_->previous_value, *state_->queue.top()))) {
+         auto result = std::move(state_->queue.top());
+         if (result.ok()) {
+           state_->previous_value = *result;
+         }
+         state_->queue.pop();
+         return Future<T>::MakeFinished(result);
+       }
+       if (state_->finished) {
+         return AsyncGeneratorEnd<T>();
+       }
+       // The next item is not in the queue so we will need to wait
+       auto new_waiting_fut = Future<T>::Make();
+       state_->waiting_future = new_waiting_fut;
+       guard.Unlock();
+       state_->source().AddCallback(Callback{state_});
+       return new_waiting_fut;
+     }
+   }
+
+  private:
+   struct WrappedComesAfter {
+     bool operator()(const Result<T>& left, const Result<T>& right) {
+       if (!left.ok() || !right.ok()) {
+         // Should never happen
+         return false;
+       }
+       return compare(*left, *right);
+     }
+     ComesAfter compare;
+   };
+
+   struct State {
+     State(AsyncGenerator<T> source, ComesAfter compare, IsNext is_next, T initial_value)
+         : source(std::move(source)),
+           is_next(std::move(is_next)),
+           previous_value(std::move(initial_value)),
+           waiting_future(),
+           queue(WrappedComesAfter{compare}),
+           finished(false),
+           mutex() {}
+
+     AsyncGenerator<T> source;
+     IsNext is_next;
+     T previous_value;
+     Future<T> waiting_future;
+     std::priority_queue<Result<T>, std::vector<Result<T>>, WrappedComesAfter> queue;
+     bool finished;
+     util::Mutex mutex;
+   };
+
+   class Callback {
+    public:
+     explicit Callback(std::shared_ptr<State> state) : state_(std::move(state)) {}
+
+     void operator()(const Result<T> result) {
+       Future<T> to_deliver;
+       bool finished;
+       {
+         auto guard = state_->mutex.Lock();
+         bool ready_to_deliver = false;
+         if (!result.ok()) {
+           // Clear any cached results
+           while (!state_->queue.empty()) {
+             state_->queue.pop();
+           }
+           ready_to_deliver = true;
+           state_->finished = true;
+         } else if (IsIterationEnd<T>(result.ValueUnsafe())) {
+           ready_to_deliver = state_->queue.empty();
+           state_->finished = true;
+         } else {
+           ready_to_deliver = state_->is_next(state_->previous_value, *result);
+         }
+
+         if (ready_to_deliver && state_->waiting_future.is_valid()) {
+           to_deliver = state_->waiting_future;
+           if (result.ok()) {
+             state_->previous_value = *result;
+           }
+         } else {
+           state_->queue.push(result);
+         }
+         // Capture state_->finished so we can access it outside the mutex
+         finished = state_->finished;
+       }
+       // Must deliver result outside of the mutex
+       if (to_deliver.is_valid()) {
+         to_deliver.MarkFinished(result);
+       } else {
+         // Otherwise, if we didn't get the next item (or a terminal item), we
+         // need to keep looking
+         if (!finished) {
+           state_->source().AddCallback(Callback{state_});
+         }
+       }
+     }
+
+    private:
+     const std::shared_ptr<State> state_;
+   };
+
+   const std::shared_ptr<State> state_;
+ };
+
+ /// \brief Buffer an AsyncGenerator to return values in sequence order.
+ /// ComesAfter and IsNext determine the sequence order.
+ ///
+ /// ComesAfter should be a BinaryPredicate that only returns true if a comes after b
+ ///
+ /// IsNext should be a BinaryPredicate that returns true, given `a` and `b`, only if
+ /// `b` follows immediately after `a`. It should return true given `initial_value` and
+ /// `b` if `b` is the first item in the sequence.
+ ///
+ /// This operator will queue unboundedly while waiting for the next item. It is intended
+ /// for jittery sources that might scatter an ordered sequence. It is NOT intended to
+ /// sort. Using it to try and sort could result in excessive RAM usage. This generator
+ /// will queue up to N blocks where N is the max "out of order"ness of the source.
+ ///
+ /// For example, if the source is 1,6,2,5,4,3 it will queue 3 blocks because 3 is 3
+ /// blocks beyond where it belongs.
+ ///
+ /// This generator is not async-reentrant but it consists only of a simple log(n)
+ /// insertion into a priority queue.
+ template <typename T, typename ComesAfter, typename IsNext>
+ AsyncGenerator<T> MakeSequencingGenerator(AsyncGenerator<T> source_generator,
+                                           ComesAfter compare, IsNext is_next,
+                                           T initial_value) {
+   return SequencingGenerator<T, ComesAfter, IsNext>(
+       std::move(source_generator), std::move(compare), std::move(is_next),
+       std::move(initial_value));
+ }
+
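+ // Usage sketch (assuming the int values themselves are sequence numbers):
+ //
+ //   auto ordered = MakeSequencingGenerator(
+ //       std::move(scattered),  // hypothetical jittery source, e.g. 0,2,1,3,...
+ //       [](const int& a, const int& b) { return a > b; },       // ComesAfter
+ //       [](const int& a, const int& b) { return b == a + 1; },  // IsNext
+ //       /*initial_value=*/-1);
+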
+ /// \see MakeTransformedGenerator
+ template <typename T, typename V>
+ class TransformingGenerator {
+   // The transforming generator state will be referenced as an async generator but will
+   // also be referenced via callback to various futures. If the async generator owner
+   // moves it around we need the state to be consistent for future callbacks.
+   struct TransformingGeneratorState
+       : std::enable_shared_from_this<TransformingGeneratorState> {
+     TransformingGeneratorState(AsyncGenerator<T> generator, Transformer<T, V> transformer)
+         : generator_(std::move(generator)),
+           transformer_(std::move(transformer)),
+           last_value_(),
+           finished_() {}
+
+     Future<V> operator()() {
+       while (true) {
+         auto maybe_next_result = Pump();
+         if (!maybe_next_result.ok()) {
+           return Future<V>::MakeFinished(maybe_next_result.status());
+         }
+         auto maybe_next = std::move(maybe_next_result).ValueUnsafe();
+         if (maybe_next.has_value()) {
+           return Future<V>::MakeFinished(*std::move(maybe_next));
+         }
+
+         auto next_fut = generator_();
+         // If finished already, process results immediately inside the loop to avoid
+         // stack overflow
+         if (next_fut.is_finished()) {
+           auto next_result = next_fut.result();
+           if (next_result.ok()) {
+             last_value_ = *next_result;
+           } else {
+             return Future<V>::MakeFinished(next_result.status());
+           }
+           // Otherwise, if not finished immediately, add callback to process results
+         } else {
+           auto self = this->shared_from_this();
+           return next_fut.Then([self](const T& next_result) {
+             self->last_value_ = next_result;
+             return (*self)();
+           });
+         }
+       }
+     }
+
+     // See comment on TransformingIterator::Pump
+     Result<std::optional<V>> Pump() {
+       if (!finished_ && last_value_.has_value()) {
+         ARROW_ASSIGN_OR_RAISE(TransformFlow<V> next, transformer_(*last_value_));
+         if (next.ReadyForNext()) {
+           if (IsIterationEnd(*last_value_)) {
+             finished_ = true;
+           }
+           last_value_.reset();
+         }
+         if (next.Finished()) {
+           finished_ = true;
+         }
+         if (next.HasValue()) {
+           return next.Value();
+         }
+       }
+       if (finished_) {
+         return IterationTraits<V>::End();
+       }
+       return std::nullopt;
+     }
+
+     AsyncGenerator<T> generator_;
+     Transformer<T, V> transformer_;
+     std::optional<T> last_value_;
+     bool finished_;
+   };
+
+  public:
+   explicit TransformingGenerator(AsyncGenerator<T> generator,
+                                  Transformer<T, V> transformer)
+       : state_(std::make_shared<TransformingGeneratorState>(std::move(generator),
+                                                             std::move(transformer))) {}
+
+   Future<V> operator()() { return (*state_)(); }
+
+  protected:
+   std::shared_ptr<TransformingGeneratorState> state_;
+ };
+
+ /// \brief Transform an async generator using a transformer function returning a new
+ /// AsyncGenerator
+ ///
+ /// The transform function here behaves exactly the same as the transform function in
+ /// MakeTransformedIterator and you can safely use the same transform function to
+ /// transform both synchronous and asynchronous streams.
+ ///
+ /// This generator is not async-reentrant
+ ///
+ /// This generator may queue up to 1 instance of T but will not delay
+ template <typename T, typename V>
+ AsyncGenerator<V> MakeTransformedGenerator(AsyncGenerator<T> generator,
+                                            Transformer<T, V> transformer) {
+   return TransformingGenerator<T, V>(generator, transformer);
+ }
+
+ /// \see MakeSerialReadaheadGenerator
+ template <typename T>
+ class SerialReadaheadGenerator {
+  public:
+   SerialReadaheadGenerator(AsyncGenerator<T> source_generator, int max_readahead)
+       : state_(std::make_shared<State>(std::move(source_generator), max_readahead)) {}
+
+   Future<T> operator()() {
+     if (state_->first_) {
+       // Lazy generator, need to wait for the first ask to prime the pump
+       state_->first_ = false;
+       auto next = state_->source_();
+       return next.Then(Callback{state_}, ErrCallback{state_});
+     }
+
+     // This generator is not async-reentrant. We won't be called until the last
+     // future finished so we know there is something in the queue
+     auto finished = state_->finished_.load();
+     if (finished && state_->readahead_queue_.IsEmpty()) {
+       return AsyncGeneratorEnd<T>();
+     }
+
+     std::shared_ptr<Future<T>> next;
+     if (!state_->readahead_queue_.Read(next)) {
+       return Status::UnknownError("Could not read from readahead_queue");
+     }
+
+     auto last_available = state_->spaces_available_.fetch_add(1);
+     if (last_available == 0 && !finished) {
+       // Reader idled out, we need to restart it
+       ARROW_RETURN_NOT_OK(state_->Pump(state_));
+     }
+     return *next;
+   }
+
+  private:
+   struct State {
+     State(AsyncGenerator<T> source, int max_readahead)
+         : first_(true),
+           source_(std::move(source)),
+           finished_(false),
+           // There is one extra "space" for the in-flight request
+           spaces_available_(max_readahead + 1),
+           // The SPSC queue has size-1 "usable" slots so we need to overallocate 1
+           readahead_queue_(max_readahead + 1) {}
+
+     Status Pump(const std::shared_ptr<State>& self) {
+       // Can't do readahead_queue.write(source().Then(...)) because then the
+       // callback might run immediately and add itself to the queue before this gets
+       // added to the queue, messing up the order.
+       auto next_slot = std::make_shared<Future<T>>();
+       auto written = readahead_queue_.Write(next_slot);
+       if (!written) {
+         return Status::UnknownError("Could not write to readahead_queue");
+       }
+       // If this Pump is being called from a callback it is possible for the source to
+       // poll and read from the queue between the Write and this spot where we fill the
+       // value in. However, it is not possible for the future to read this value we are
+       // writing. That is because this callback (the callback for future X) must be
+       // finished before future X is marked complete and this source is not pulled
+       // reentrantly so it will not poll for future X+1 until this callback has completed.
+       *next_slot = source_().Then(Callback{self}, ErrCallback{self});
+       return Status::OK();
+     }
+
+     // Only accessed by the consumer end
+     bool first_;
+     // Accessed by both threads
+     AsyncGenerator<T> source_;
+     std::atomic<bool> finished_;
+     // The queue has a size but it is not atomic. We keep track of how many spaces are
+     // left in the queue here so we know if we've just written the last value and we need
+     // to stop reading ahead or if we've just read from a full queue and we need to
+     // restart reading ahead
+     std::atomic<uint32_t> spaces_available_;
+     // Needs to be a queue of shared_ptr and not Future because we set the value of the
+     // future after we add it to the queue
+     util::SpscQueue<std::shared_ptr<Future<T>>> readahead_queue_;
+   };
+
+   struct Callback {
+     Result<T> operator()(const T& next) {
+       if (IsIterationEnd(next)) {
+         state_->finished_.store(true);
+         return next;
+       }
+       auto last_available = state_->spaces_available_.fetch_sub(1);
+       if (last_available > 1) {
+         ARROW_RETURN_NOT_OK(state_->Pump(state_));
+       }
+       return next;
+     }
+
+     std::shared_ptr<State> state_;
+   };
+
+   struct ErrCallback {
+     Result<T> operator()(const Status& st) {
+       state_->finished_.store(true);
+       return st;
+     }
+
+     std::shared_ptr<State> state_;
+   };
+
+   std::shared_ptr<State> state_;
+ };
+
+ /// \see MakeFromFuture
+ template <typename T>
+ class FutureFirstGenerator {
+  public:
+   explicit FutureFirstGenerator(Future<AsyncGenerator<T>> future)
+       : state_(std::make_shared<State>(std::move(future))) {}
+
+   Future<T> operator()() {
+     if (state_->source_) {
+       return state_->source_();
+     } else {
+       auto state = state_;
+       return state_->future_.Then([state](const AsyncGenerator<T>& source) {
+         state->source_ = source;
+         return state->source_();
+       });
+     }
+   }
+
+  private:
+   struct State {
+     explicit State(Future<AsyncGenerator<T>> future) : future_(future), source_() {}
+
+     Future<AsyncGenerator<T>> future_;
+     AsyncGenerator<T> source_;
+   };
+
+   std::shared_ptr<State> state_;
+ };
+
+ /// \brief Transform a Future<AsyncGenerator<T>> into an AsyncGenerator<T>
+ /// that waits for the future to complete as part of the first item.
+ ///
+ /// This generator is not async-reentrant (even if the generator yielded by future is)
+ ///
+ /// This generator does not queue
+ template <typename T>
+ AsyncGenerator<T> MakeFromFuture(Future<AsyncGenerator<T>> future) {
+   return FutureFirstGenerator<T>(std::move(future));
+ }
+
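+ // Usage sketch: unwrapping a future-of-generator, e.g. from an asynchronous
+ // open call (OpenStreamAsync is hypothetical):
+ //
+ //   Future<AsyncGenerator<int>> opening = OpenStreamAsync();
+ //   AsyncGenerator<int> gen = MakeFromFuture(std::move(opening));
+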
+ /// \brief Create a generator that will pull from the source into a queue. Unlike
+ /// MakeReadaheadGenerator this will not pull reentrantly from the source.
+ ///
+ /// The source generator does not need to be async-reentrant
+ ///
+ /// This generator is not async-reentrant (even if the source is)
+ ///
+ /// This generator may queue up to max_readahead additional instances of T
+ template <typename T>
+ AsyncGenerator<T> MakeSerialReadaheadGenerator(AsyncGenerator<T> source_generator,
+                                                int max_readahead) {
+   return SerialReadaheadGenerator<T>(std::move(source_generator), max_readahead);
+ }
+
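+ // Usage sketch: buffer up to 4 items ahead of the consumer without pulling
+ // reentrantly from a non-reentrant source (slow_source is hypothetical):
+ //
+ //   AsyncGenerator<int> buffered =
+ //       MakeSerialReadaheadGenerator(std::move(slow_source), /*max_readahead=*/4);
+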
711
+ /// \brief Create a generator that immediately pulls from the source
712
+ ///
713
+ /// Typical generators do not pull from their source until they themselves
714
+ /// are pulled. This generator does not follow that convention and will call
715
+ /// generator() once before it returns. The returned generator will otherwise
716
+ /// mirror the source.
717
+ ///
718
+ /// This generator forwards async-reentrant pressure to the source
719
+ /// This generator buffers one item (the first result) until it is delivered.
720
+ template <typename T>
721
+ AsyncGenerator<T> MakeAutoStartingGenerator(AsyncGenerator<T> generator) {
722
+ struct AutostartGenerator {
723
+ Future<T> operator()() {
724
+ if (first_future->is_valid()) {
725
+ Future<T> result = *first_future;
726
+ *first_future = Future<T>();
727
+ return result;
728
+ }
729
+ return source();
730
+ }
731
+
732
+ std::shared_ptr<Future<T>> first_future;
733
+ AsyncGenerator<T> source;
734
+ };
735
+
736
+ std::shared_ptr<Future<T>> first_future = std::make_shared<Future<T>>(generator());
737
+ return AutostartGenerator{std::move(first_future), std::move(generator)};
738
+ }
739
+
740
+ /// \see MakeReadaheadGenerator
741
+ template <typename T>
742
+ class ReadaheadGenerator {
743
+ public:
744
+ ReadaheadGenerator(AsyncGenerator<T> source_generator, int max_readahead)
745
+ : state_(std::make_shared<State>(std::move(source_generator), max_readahead)) {}
746
+
747
+ Future<T> AddMarkFinishedContinuation(Future<T> fut) {
748
+ auto state = state_;
749
+ return fut.Then(
750
+ [state](const T& result) -> Future<T> {
751
+ state->MarkFinishedIfDone(result);
752
+ if (state->finished.load()) {
753
+ if (state->num_running.fetch_sub(1) == 1) {
754
+ state->final_future.MarkFinished();
755
+ }
756
+ } else {
757
+ state->num_running.fetch_sub(1);
758
+ }
759
+ return result;
760
+ },
761
+ [state](const Status& err) -> Future<T> {
762
+ // If there is an error we need to make sure all running
763
+ // tasks finish before we return the error.
764
+ state->finished.store(true);
765
+ if (state->num_running.fetch_sub(1) == 1) {
766
+ state->final_future.MarkFinished();
767
+ }
768
+ return state->final_future.Then([err]() -> Result<T> { return err; });
769
+ });
770
+ }
771
+
772
+ Future<T> operator()() {
773
+ if (state_->readahead_queue.empty()) {
774
+ // This is the first request, let's pump the underlying queue
775
+ state_->num_running.store(state_->max_readahead);
776
+ for (int i = 0; i < state_->max_readahead; i++) {
777
+ auto next = state_->source_generator();
778
+ auto next_after_check = AddMarkFinishedContinuation(std::move(next));
779
+ state_->readahead_queue.push(std::move(next_after_check));
780
+ }
781
+ }
782
+ // Pop one and add one
783
+ auto result = state_->readahead_queue.front();
784
+ state_->readahead_queue.pop();
785
+ if (state_->finished.load()) {
786
+ state_->readahead_queue.push(AsyncGeneratorEnd<T>());
787
+ } else {
788
+ state_->num_running.fetch_add(1);
789
+ auto back_of_queue = state_->source_generator();
790
+ auto back_of_queue_after_check =
791
+      AddMarkFinishedContinuation(std::move(back_of_queue));
+      state_->readahead_queue.push(std::move(back_of_queue_after_check));
+    }
+    return result;
+  }
+
+ private:
+  struct State {
+    State(AsyncGenerator<T> source_generator, int max_readahead)
+        : source_generator(std::move(source_generator)), max_readahead(max_readahead) {}
+
+    void MarkFinishedIfDone(const T& next_result) {
+      if (IsIterationEnd(next_result)) {
+        finished.store(true);
+      }
+    }
+
+    AsyncGenerator<T> source_generator;
+    int max_readahead;
+    Future<> final_future = Future<>::Make();
+    std::atomic<int> num_running{0};
+    std::atomic<bool> finished{false};
+    std::queue<Future<T>> readahead_queue;
+  };
+
+  std::shared_ptr<State> state_;
+};
+
+/// \brief A generator where the producer pushes items on a queue.
+///
+/// No back-pressure is applied, so this generator is mostly useful when
+/// producing the values is neither CPU- nor memory-expensive (e.g. fetching
+/// filesystem metadata).
+///
+/// This generator is not async-reentrant.
+template <typename T>
+class PushGenerator {
+  struct State {
+    State() {}
+
+    util::Mutex mutex;
+    std::deque<Result<T>> result_q;
+    std::optional<Future<T>> consumer_fut;
+    bool finished = false;
+  };
+
+ public:
+  /// Producer API for PushGenerator
+  class Producer {
+   public:
+    explicit Producer(const std::shared_ptr<State>& state) : weak_state_(state) {}
+
+    /// \brief Push a value on the queue
+    ///
+    /// True is returned if the value was pushed, false if the generator is
+    /// already closed or destroyed. If the latter, it is recommended to stop
+    /// producing any further values.
+    bool Push(Result<T> result) {
+      auto state = weak_state_.lock();
+      if (!state) {
+        // Generator was destroyed
+        return false;
+      }
+      auto lock = state->mutex.Lock();
+      if (state->finished) {
+        // Closed early
+        return false;
+      }
+      if (state->consumer_fut.has_value()) {
+        auto fut = std::move(state->consumer_fut.value());
+        state->consumer_fut.reset();
+        lock.Unlock();  // unlock before potentially invoking a callback
+        fut.MarkFinished(std::move(result));
+      } else {
+        state->result_q.push_back(std::move(result));
+      }
+      return true;
+    }
+
+    /// \brief Tell the consumer we have finished producing
+    ///
+    /// It is allowed to call this and later call Push() again ("early close").
+    /// In this case, calls to Push() after the queue is closed are silently
+    /// ignored. This can help implement non-trivial cancellation cases.
+    ///
+    /// True is returned on success, false if the generator is already closed
+    /// or destroyed.
+    bool Close() {
+      auto state = weak_state_.lock();
+      if (!state) {
+        // Generator was destroyed
+        return false;
+      }
+      auto lock = state->mutex.Lock();
+      if (state->finished) {
+        // Already closed
+        return false;
+      }
+      state->finished = true;
+      if (state->consumer_fut.has_value()) {
+        auto fut = std::move(state->consumer_fut.value());
+        state->consumer_fut.reset();
+        lock.Unlock();  // unlock before potentially invoking a callback
+        fut.MarkFinished(IterationTraits<T>::End());
+      }
+      return true;
+    }
+
+    /// Return whether the generator was closed or destroyed.
+    bool is_closed() const {
+      auto state = weak_state_.lock();
+      if (!state) {
+        // Generator was destroyed
+        return true;
+      }
+      auto lock = state->mutex.Lock();
+      return state->finished;
+    }
+
+   private:
+    const std::weak_ptr<State> weak_state_;
+  };
+
+  PushGenerator() : state_(std::make_shared<State>()) {}
+
+  /// Read an item from the queue
+  Future<T> operator()() const {
+    auto lock = state_->mutex.Lock();
+    assert(!state_->consumer_fut.has_value());  // Non-reentrant
+    if (!state_->result_q.empty()) {
+      auto fut = Future<T>::MakeFinished(std::move(state_->result_q.front()));
+      state_->result_q.pop_front();
+      return fut;
+    }
+    if (state_->finished) {
+      return AsyncGeneratorEnd<T>();
+    }
+    auto fut = Future<T>::Make();
+    state_->consumer_fut = fut;
+    return fut;
+  }
+
+  /// \brief Return the producer-side interface
+  ///
+  /// The returned object must be used by the producer to push values on the queue.
+  /// Only a single Producer object should be instantiated.
+  Producer producer() { return Producer{state_}; }
+
+ private:
+  const std::shared_ptr<State> state_;
+};
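+
+// Hypothetical usage sketch (editorial illustration, not upstream code): one
+// producer thread pushes values and closes, while the consumer polls the
+// generator. The thread and the value type `int` are assumptions.
+//
+//   PushGenerator<int> gen;
+//   PushGenerator<int>::Producer producer = gen.producer();
+//   std::thread feeder([producer]() mutable {
+//     for (int i = 0; i < 3; i++) {
+//       producer.Push(i);  // false would mean the generator was closed/destroyed
+//     }
+//     producer.Close();    // the consumer will then receive IterationEnd
+//   });
+//   Future<int> first = gen();  // completes with 0 once it has been pushed
+//   feeder.join();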
+
+/// \brief Create a generator that pulls reentrantly from a source
+/// This generator will pull reentrantly from a source, ensuring that max_readahead
+/// requests are active at any given time.
+///
+/// The source generator must be async-reentrant
+///
+/// This generator itself is async-reentrant.
+///
+/// This generator may queue up to max_readahead instances of T
+template <typename T>
+AsyncGenerator<T> MakeReadaheadGenerator(AsyncGenerator<T> source_generator,
+                                         int max_readahead) {
+  return ReadaheadGenerator<T>(std::move(source_generator), max_readahead);
+}
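+
+// Hypothetical sketch of composing readahead over an async-reentrant source;
+// the vector source and the readahead depth of 4 are assumptions for
+// illustration.
+//
+//   AsyncGenerator<int> source = MakeVectorGenerator(std::vector<int>{1, 2, 3, 4, 5});
+//   AsyncGenerator<int> readahead =
+//       MakeReadaheadGenerator(std::move(source), /*max_readahead=*/4);
+//   // Up to 4 pulls on the source are now kept in flight between calls.
+//   Future<int> next = readahead();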
+
+/// \brief Creates a generator that will yield finished futures from a vector
+///
+/// This generator is async-reentrant
+template <typename T>
+AsyncGenerator<T> MakeVectorGenerator(std::vector<T> vec) {
+  struct State {
+    explicit State(std::vector<T> vec_) : vec(std::move(vec_)), vec_idx(0) {}
+
+    std::vector<T> vec;
+    std::atomic<std::size_t> vec_idx;
+  };
+
+  auto state = std::make_shared<State>(std::move(vec));
+  return [state]() {
+    auto idx = state->vec_idx.fetch_add(1);
+    if (idx >= state->vec.size()) {
+      // Eagerly return memory
+      state->vec.clear();
+      return AsyncGeneratorEnd<T>();
+    }
+    return Future<T>::MakeFinished(state->vec[idx]);
+  };
+}
+
+/// \see MakeMergedGenerator
+template <typename T>
+class MergedGenerator {
+  // Note, the implementation of this class is quite complex at the moment (PRs to
+  // simplify are always welcome)
+  //
+  // Terminology is borrowed from rxjs. This is a pull based implementation of the
+  // mergeAll operator. The "outer subscription" refers to the async
+  // generator that the caller provided when creating this. The outer subscription
+  // yields generators.
+  //
+  // Each of these generators is then subscribed to (up to max_subscriptions) and these
+  // are referred to as "inner subscriptions".
+  //
+  // As soon as we start we try to establish `max_subscriptions` inner subscriptions. For
+  // each inner subscription we will cache up to 1 value. This means we may have more
+  // values than we have been asked for. In our example, if a caller asks for one record
+  // batch we will start scanning `max_subscriptions` different files. For each file we
+  // will only queue up to 1 batch (so a separate readahead is needed on the file if batch
+  // readahead is desired).
+  //
+  // If the caller is slow we may accumulate ready-to-deliver items. These are stored
+  // in `delivered_jobs`.
+  //
+  // If the caller is very quick we may accumulate requests. These are stored in
+  // `waiting_jobs`.
+  //
+  // It may be helpful to consider an example: in the scanner the outer subscription
+  // is some kind of asynchronous directory listing. The inner subscription is
+  // then a scan on a file yielded by the directory listing.
+  //
+  // An "outstanding" request is when we have polled either the inner or outer
+  // subscription but that future hasn't completed yet.
+  //
+  // There are three possible "events" that can happen.
+  // * A caller could request the next future
+  // * An outer callback occurs when the next subscription is ready (e.g. the directory
+  //   listing has produced a new file)
+  // * An inner callback occurs when one of the inner subscriptions emits a value (e.g.
+  //   a file scan emits a record batch)
+  //
+  // Any time an event happens the logic is broken into two phases. First, we grab the
+  // lock and modify the shared state. While doing this we figure out what callbacks we
+  // will need to execute. Then, we give up the lock and execute these callbacks. It is
+  // important to execute these callbacks without the lock to avoid deadlock.
+ public:
+  explicit MergedGenerator(AsyncGenerator<AsyncGenerator<T>> source,
+                           int max_subscriptions)
+      : state_(std::make_shared<State>(std::move(source), max_subscriptions)) {}
+
+  Future<T> operator()() {
+    // A caller has requested a future
+    Future<T> waiting_future;
+    std::shared_ptr<DeliveredJob> delivered_job;
+    bool mark_generator_complete = false;
+    {
+      auto guard = state_->mutex.Lock();
+      if (!state_->delivered_jobs.empty()) {
+        // If we have a job sitting around we can deliver it
+        delivered_job = std::move(state_->delivered_jobs.front());
+        state_->delivered_jobs.pop_front();
+        if (state_->IsCompleteUnlocked(guard)) {
+          // It's possible this waiting job was the only thing left to handle and
+          // we have now completed the generator.
+          mark_generator_complete = true;
+        } else {
+          // Since we had a job sitting around we also had an inner subscription
+          // that had paused. We are going to restart this inner subscription and
+          // so there will be a new outstanding request.
+          state_->outstanding_requests++;
+        }
+      } else if (state_->broken ||
+                 (!state_->first && state_->num_running_subscriptions == 0)) {
+        // If we are broken or exhausted then prepare a terminal item but
+        // we won't complete it until we've finished.
+        Result<T> end_res = IterationEnd<T>();
+        if (!state_->final_error.ok()) {
+          end_res = state_->final_error;
+          state_->final_error = Status::OK();
+        }
+        return state_->all_finished.Then([end_res]() -> Result<T> { return end_res; });
+      } else {
+        // Otherwise we just queue the request and it will be completed when one of the
+        // ongoing inner subscriptions delivers a result
+        waiting_future = Future<T>::Make();
+        state_->waiting_jobs.push_back(std::make_shared<Future<T>>(waiting_future));
+      }
+      if (state_->first) {
+        // On the first request we are going to try to immediately fill our queue
+        // of subscriptions. We assume we are going to be able to start them all.
+        state_->outstanding_requests +=
+            static_cast<int>(state_->active_subscriptions.size());
+        state_->num_running_subscriptions +=
+            static_cast<int>(state_->active_subscriptions.size());
+      }
+    }
+    // If we grabbed a finished item from the delivered_jobs queue then we may need
+    // to mark the generator finished or issue a request for a new item to fill in
+    // the spot we just vacated. Notice that we issue that request to the same
+    // subscription that delivered it (deliverer).
+    if (delivered_job) {
+      if (mark_generator_complete) {
+        state_->all_finished.MarkFinished();
+      } else {
+        delivered_job->deliverer().AddCallback(
+            InnerCallback(state_, delivered_job->index));
+      }
+      return std::move(delivered_job->value);
+    }
+    // On the first call we try to fill up our subscriptions. It's possible the outer
+    // generator only has a few items and we can't fill up to what we were hoping. In
+    // that case we have to bail early.
+    if (state_->first) {
+      state_->first = false;
+      mark_generator_complete = false;
+      for (int i = 0; i < static_cast<int>(state_->active_subscriptions.size()); i++) {
+        state_->PullSource().AddCallback(
+            OuterCallback{state_, static_cast<std::size_t>(i)});
+        // If we have to bail early then we need to update the shared state again so
+        // we need to reacquire the lock.
+        auto guard = state_->mutex.Lock();
+        if (state_->source_exhausted) {
+          int excess_requests =
+              static_cast<int>(state_->active_subscriptions.size()) - i - 1;
+          state_->outstanding_requests -= excess_requests;
+          state_->num_running_subscriptions -= excess_requests;
+          if (excess_requests > 0) {
+            // It's possible that we are completing the generator by reducing the number
+            // of outstanding requests (e.g. this happens when the outer subscription and
+            // all inner subscriptions are synchronous)
+            mark_generator_complete = state_->IsCompleteUnlocked(guard);
+          }
+          break;
+        }
+      }
+      if (mark_generator_complete) {
+        state_->MarkFinishedAndPurge();
+      }
+    }
+    return waiting_future;
+  }
+
+ private:
+  struct DeliveredJob {
+    explicit DeliveredJob(AsyncGenerator<T> deliverer_, Result<T> value_,
+                          std::size_t index_)
+        : deliverer(deliverer_), value(std::move(value_)), index(index_) {}
+
+    // The generator that delivered this result, we will request another item
+    // from this generator once the result is delivered
+    AsyncGenerator<T> deliverer;
+    // The result we received from the generator
+    Result<T> value;
+    // The index of the generator (in active_subscriptions) that delivered this
+    // result. This is used if we need to replace a finished generator.
+    std::size_t index;
+  };
+
+  struct State {
+    State(AsyncGenerator<AsyncGenerator<T>> source, int max_subscriptions)
+        : source(std::move(source)),
+          active_subscriptions(max_subscriptions),
+          delivered_jobs(),
+          waiting_jobs(),
+          mutex(),
+          first(true),
+          broken(false),
+          source_exhausted(false),
+          outstanding_requests(0),
+          num_running_subscriptions(0),
+          final_error(Status::OK()) {}
+
+    Future<AsyncGenerator<T>> PullSource() {
+      // Need to guard access to source() so we don't pull sync-reentrantly which
+      // is never valid.
+      auto lock = mutex.Lock();
+      return source();
+    }
+
+    void SignalErrorUnlocked(const util::Mutex::Guard& guard) {
+      broken = true;
+      // Empty any results that have arrived but not been asked for.
+      while (!delivered_jobs.empty()) {
+        delivered_jobs.pop_front();
+      }
+    }
+
+    // This function is called outside the mutex but it will only ever be
+    // called once
+    void MarkFinishedAndPurge() {
+      all_finished.MarkFinished();
+      while (!waiting_jobs.empty()) {
+        waiting_jobs.front()->MarkFinished(IterationEnd<T>());
+        waiting_jobs.pop_front();
+      }
+    }
+
+    // This is called outside the mutex but it is only ever called
+    // once and Future<>::AddCallback is thread-safe
+    void MarkFinalError(const Status& err, Future<T> maybe_sink) {
+      if (maybe_sink.is_valid()) {
+        // Someone is waiting for this error so let's mark it complete when
+        // all the work is done
+        all_finished.AddCallback([maybe_sink, err](const Status& status) mutable {
+          maybe_sink.MarkFinished(err);
+        });
+      } else {
+        // No one is waiting for this error right now so it will be delivered
+        // next.
+        final_error = err;
+      }
+    }
+
+    bool IsCompleteUnlocked(const util::Mutex::Guard& guard) {
+      return outstanding_requests == 0 &&
+             (broken || (source_exhausted && num_running_subscriptions == 0 &&
+                         delivered_jobs.empty()));
+    }
+
+    bool MarkTaskFinishedUnlocked(const util::Mutex::Guard& guard) {
+      --outstanding_requests;
+      return IsCompleteUnlocked(guard);
+    }
+
+    // The outer generator. Each item we pull from this will be its own generator
+    // and become an inner subscription
+    AsyncGenerator<AsyncGenerator<T>> source;
+    // active_subscriptions and delivered_jobs will be bounded by max_subscriptions
+    std::vector<AsyncGenerator<T>> active_subscriptions;
+    // Results delivered by the inner subscriptions that weren't yet asked for by the
+    // caller
+    std::deque<std::shared_ptr<DeliveredJob>> delivered_jobs;
+    // waiting_jobs is unbounded, reentrant pulls (e.g. AddReadahead) will provide the
+    // backpressure
+    std::deque<std::shared_ptr<Future<T>>> waiting_jobs;
+    // A future that will be marked complete when the terminal item has arrived and all
+    // outstanding futures have completed. It is used to hold off emission of an error
+    // until all outstanding work is done.
+    Future<> all_finished = Future<>::Make();
+    util::Mutex mutex;
+    // A flag cleared when the caller first asks for a future. Used to start polling.
+    bool first;
+    // A flag set when an error arrives, prevents us from issuing new requests.
+    bool broken;
+    // A flag set when the outer subscription has been exhausted. Prevents us from
+    // pulling it further (even though it would be generally harmless) and lets us know we
+    // are finishing up.
+    bool source_exhausted;
+    // The number of futures that we have requested from either the outer or inner
+    // subscriptions that have not yet completed. We cannot mark all_finished until this
+    // reaches 0. This will never be greater than max_subscriptions
+    int outstanding_requests;
+    // The number of running subscriptions. We ramp this up to `max_subscriptions` as
+    // soon as the first item is requested and then it stays at that level (each exhausted
+    // inner subscription is replaced by a new inner subscription) until the outer
+    // subscription is exhausted, at which point this descends to 0 (and source_exhausted
+    // is then set to true).
+    int num_running_subscriptions;
+    // If an error arrives, and the caller hasn't asked for that item, we store the error
+    // here. It is analogous to delivered_jobs but for errors instead of finished
+    // results.
+    Status final_error;
+  };
+
+  struct InnerCallback {
+    InnerCallback(std::shared_ptr<State> state, std::size_t index, bool recursive = false)
+        : state(std::move(state)), index(index), recursive(recursive) {}
+
+    void operator()(const Result<T>& maybe_next_ref) {
+      // An item has been delivered by one of the inner subscriptions
+      Future<T> next_fut;
+      const Result<T>* maybe_next = &maybe_next_ref;
+
+      // When an item is delivered (and the caller has asked for it) we grab the
+      // next item from the inner subscription. To avoid this behavior leading to an
+      // infinite loop (this can happen if the caller's callback asks for the next item)
+      // we use a while loop.
+      while (true) {
+        Future<T> sink;
+        bool sub_finished = maybe_next->ok() && IsIterationEnd(**maybe_next);
+        bool pull_next_sub = false;
+        bool was_broken = false;
+        bool should_mark_gen_complete = false;
+        bool should_mark_final_error = false;
+        {
+          auto guard = state->mutex.Lock();
+          if (state->broken) {
+            // We've errored out previously so ignore the result. If anyone was waiting
+            // for this they will get IterationEnd when we purge
+            was_broken = true;
+          } else {
+            if (!sub_finished) {
+              // There is a result to deliver. Either we can deliver it now or we will
+              // queue it up
+              if (state->waiting_jobs.empty()) {
+                state->delivered_jobs.push_back(std::make_shared<DeliveredJob>(
+                    state->active_subscriptions[index], *maybe_next, index));
+              } else {
+                sink = std::move(*state->waiting_jobs.front());
+                state->waiting_jobs.pop_front();
+              }
+            }
+
+            // If this is the first error then we transition the state to a broken state
+            if (!maybe_next->ok()) {
+              should_mark_final_error = true;
+              state->SignalErrorUnlocked(guard);
+            }
+          }
+
+          // If we finished this inner subscription then we need to grab a new inner
+          // subscription to take its spot. If we can't (because we're broken or
+          // exhausted) then we aren't going to be starting any new futures and so
+          // the number of running subscriptions drops.
+          pull_next_sub = sub_finished && !state->source_exhausted && !was_broken;
+          if (sub_finished && !pull_next_sub) {
+            state->num_running_subscriptions--;
+          }
+          // There are three situations where we won't pull again: an error occurred, we
+          // are already finished, or no one was waiting for our result and so we queued
+          // it up. We will decrement outstanding_requests and possibly mark the
+          // generator completed.
+          if (state->broken || (!sink.is_valid() && !sub_finished) ||
+              (sub_finished && state->source_exhausted)) {
+            if (state->MarkTaskFinishedUnlocked(guard)) {
+              should_mark_gen_complete = true;
+            }
+          }
+        }
+
+        // Now we have given up the lock and we can take all the actions we decided we
+        // need to take.
+        if (should_mark_final_error) {
+          state->MarkFinalError(maybe_next->status(), std::move(sink));
+        }
+
+        if (should_mark_gen_complete) {
+          state->MarkFinishedAndPurge();
+        }
+
+        // An error occurred elsewhere so there is no need to mark any future
+        // finished (will happen during the purge) or pull from anything
+        if (was_broken) {
+          return;
+        }
+
+        if (pull_next_sub) {
+          if (recursive) {
+            was_empty = true;
+            return;
+          }
+          // We pulled an end token so we need to start a new subscription
+          // in our spot
+          state->PullSource().AddCallback(OuterCallback{state, index});
+        } else if (sink.is_valid()) {
+          // We pulled a valid result and there was someone waiting for it
+          // so let's fetch the next result from our subscription
+          sink.MarkFinished(*maybe_next);
+          next_fut = state->active_subscriptions[index]();
+          if (next_fut.TryAddCallback([this]() { return InnerCallback(state, index); })) {
+            return;
+          }
+          // Already completed. Avoid very deep recursion by looping
+          // here instead of relying on the callback.
+          maybe_next = &next_fut.result();
+          continue;
+        }
+        // else: We pulled a valid result but no one was waiting for it so
+        // we can just stop.
+        return;
+      }
+    }
+    std::shared_ptr<State> state;
+    std::size_t index;
+    bool recursive;
+    bool was_empty = false;
+  };
+
+  struct OuterCallback {
+    void operator()(const Result<AsyncGenerator<T>>& initial_maybe_next) {
+      Result<AsyncGenerator<T>> maybe_next = initial_maybe_next;
+      while (true) {
+        // We have been given a new inner subscription
+        bool should_continue = false;
+        bool should_mark_gen_complete = false;
+        bool should_deliver_error = false;
+        bool source_exhausted = maybe_next.ok() && IsIterationEnd(*maybe_next);
+        Future<T> error_sink;
+        {
+          auto guard = state->mutex.Lock();
+          if (!maybe_next.ok() || source_exhausted || state->broken) {
+            // If here then we will not pull any more from the outer source
+            if (!state->broken && !maybe_next.ok()) {
+              state->SignalErrorUnlocked(guard);
+              // If here then we are the first error so we need to deliver it
+              should_deliver_error = true;
+              if (!state->waiting_jobs.empty()) {
+                error_sink = std::move(*state->waiting_jobs.front());
+                state->waiting_jobs.pop_front();
+              }
+            }
+            if (source_exhausted) {
+              state->source_exhausted = true;
+              state->num_running_subscriptions--;
+            }
+            if (state->MarkTaskFinishedUnlocked(guard)) {
+              should_mark_gen_complete = true;
+            }
+          } else {
+            state->active_subscriptions[index] = *maybe_next;
+            should_continue = true;
+          }
+        }
+        if (should_deliver_error) {
+          state->MarkFinalError(maybe_next.status(), std::move(error_sink));
+        }
+        if (should_mark_gen_complete) {
+          state->MarkFinishedAndPurge();
+        }
+        if (should_continue) {
+          // There is a possibility that a large sequence of immediately available inner
+          // callbacks could lead to a stack overflow. To avoid this we need to
+          // synchronously loop through inner/outer callbacks until we either find an
+          // unfinished future or we find an actual item to deliver.
+          Future<T> next_item = (*maybe_next)();
+          if (!next_item.TryAddCallback([this] { return InnerCallback(state, index); })) {
+            // By setting recursive to true we signal to the inner callback that, if it is
+            // empty, instead of adding a new outer callback, it should just immediately
+            // return, flagging was_empty so that we know we need to check the next
+            // subscription.
+            InnerCallback immediate_inner(state, index, /*recursive=*/true);
+            immediate_inner(next_item.result());
+            if (immediate_inner.was_empty) {
+              Future<AsyncGenerator<T>> next_source = state->PullSource();
+              if (next_source.TryAddCallback([this] {
+                    return OuterCallback{state, index};
+                  })) {
+                // We hit an unfinished future so we can stop looping
+                return;
+              }
+              // The current subscription was immediately and synchronously empty
+              // and we were able to synchronously pull the next subscription so we
+              // can keep looping.
+              maybe_next = next_source.result();
+              continue;
+            }
+          }
+        }
+        return;
+      }
+    }
+    std::shared_ptr<State> state;
+    std::size_t index;
+  };
+
+  std::shared_ptr<State> state_;
+};
+
+/// \brief Create a generator that takes in a stream of generators and pulls from up to
+/// max_subscriptions at a time
+///
+/// Note: This may deliver items out of sequence. For example, items from the third
+/// AsyncGenerator generated by the source may be emitted before some items from the first
+/// AsyncGenerator generated by the source.
+///
+/// This generator will pull from source async-reentrantly unless max_subscriptions is 1
+/// This generator will not pull from the individual subscriptions reentrantly. Add
+/// readahead to the individual subscriptions if that is desired.
+/// This generator is async-reentrant
+///
+/// This generator may queue up to max_subscriptions instances of T
+template <typename T>
+AsyncGenerator<T> MakeMergedGenerator(AsyncGenerator<AsyncGenerator<T>> source,
+                                      int max_subscriptions) {
+  return MergedGenerator<T>(std::move(source), max_subscriptions);
+}
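+
+// Hypothetical sketch: merge a stream of per-file generators, scanning up to
+// 4 files at once. `ListFiles` and `ScanFile` are assumed helpers, not part
+// of this header.
+//
+//   AsyncGenerator<AsyncGenerator<std::shared_ptr<RecordBatch>>> outer =
+//       MakeMappedGenerator(ListFiles(), ScanFile);
+//   AsyncGenerator<std::shared_ptr<RecordBatch>> merged =
+//       MakeMergedGenerator(std::move(outer), /*max_subscriptions=*/4);
+//   // Batches may arrive out of file order; wrap with MakeEnumeratedGenerator
+//   // (below) if the original order must be restored later.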
+
+template <typename T>
+Result<AsyncGenerator<T>> MakeSequencedMergedGenerator(
+    AsyncGenerator<AsyncGenerator<T>> source, int max_subscriptions) {
+  if (max_subscriptions < 0) {
+    return Status::Invalid("max_subscriptions must be a positive integer");
+  }
+  if (max_subscriptions == 1) {
+    return Status::Invalid("Use MakeConcatenatedGenerator if max_subscriptions is 1");
+  }
+  AsyncGenerator<AsyncGenerator<T>> autostarting_source = MakeMappedGenerator(
+      std::move(source),
+      [](const AsyncGenerator<T>& sub) { return MakeAutoStartingGenerator(sub); });
+  AsyncGenerator<AsyncGenerator<T>> sub_readahead =
+      MakeSerialReadaheadGenerator(std::move(autostarting_source), max_subscriptions - 1);
+  return MakeConcatenatedGenerator(std::move(sub_readahead));
+}
+
+/// \brief Create a generator that takes in a stream of generators and pulls from each
+/// one in sequence.
+///
+/// This generator is async-reentrant but will never pull from source reentrantly and
+/// will never pull from any subscription reentrantly.
+///
+/// This generator may queue 1 instance of T
+///
+/// TODO: Could potentially make a bespoke implementation instead of MergedGenerator that
+/// forwards async-reentrant requests instead of buffering them (which is what
+/// MergedGenerator does)
+template <typename T>
+AsyncGenerator<T> MakeConcatenatedGenerator(AsyncGenerator<AsyncGenerator<T>> source) {
+  return MergedGenerator<T>(std::move(source), 1);
+}
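+
+// Hypothetical sketch: concatenation preserves order, pulling each inner
+// generator to exhaustion before starting the next. The two vector sources
+// are assumptions for illustration.
+//
+//   auto gens = MakeVectorGenerator<AsyncGenerator<int>>(
+//       {MakeVectorGenerator(std::vector<int>{1, 2}),
+//        MakeVectorGenerator(std::vector<int>{3, 4})});
+//   AsyncGenerator<int> concat = MakeConcatenatedGenerator(std::move(gens));
+//   // Yields 1, 2, 3, 4 in order.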
+
+template <typename T>
+struct Enumerated {
+  T value;
+  int index;
+  bool last;
+};
+
+template <typename T>
+struct IterationTraits<Enumerated<T>> {
+  static Enumerated<T> End() { return Enumerated<T>{IterationEnd<T>(), -1, false}; }
+  static bool IsEnd(const Enumerated<T>& val) { return val.index < 0; }
+};
+
+/// \see MakeEnumeratedGenerator
+template <typename T>
+class EnumeratingGenerator {
+ public:
+  EnumeratingGenerator(AsyncGenerator<T> source, T initial_value)
+      : state_(std::make_shared<State>(std::move(source), std::move(initial_value))) {}
+
+  Future<Enumerated<T>> operator()() {
+    if (state_->finished) {
+      return AsyncGeneratorEnd<Enumerated<T>>();
+    } else {
+      auto state = state_;
+      return state->source().Then([state](const T& next) {
+        auto finished = IsIterationEnd<T>(next);
+        auto prev = Enumerated<T>{state->prev_value, state->prev_index, finished};
+        state->prev_value = next;
+        state->prev_index++;
+        state->finished = finished;
+        return prev;
+      });
+    }
+  }
+
+ private:
+  struct State {
+    State(AsyncGenerator<T> source, T initial_value)
+        : source(std::move(source)), prev_value(std::move(initial_value)), prev_index(0) {
+      finished = IsIterationEnd<T>(prev_value);
+    }
+
+    AsyncGenerator<T> source;
+    T prev_value;
+    int prev_index;
+    bool finished;
+  };
+
+  std::shared_ptr<State> state_;
+};
+
+/// Wrap items from a source generator with positional information
+///
+/// When used with MakeMergedGenerator and MakeSequencingGenerator this allows items to be
+/// processed in a "first-available" fashion and later resequenced which can reduce the
+/// impact of sources with erratic performance (e.g. a filesystem where some items may
+/// take longer to read than others).
+///
+/// TODO(ARROW-12371) Would require this generator to be async-reentrant
+///
+/// \see MakeSequencingGenerator for an example of putting items back in order
+///
+/// This generator is not async-reentrant
+///
+/// This generator buffers one item (so it knows which item is the last item)
+template <typename T>
+AsyncGenerator<Enumerated<T>> MakeEnumeratedGenerator(AsyncGenerator<T> source) {
+  return FutureFirstGenerator<Enumerated<T>>(
+      source().Then([source](const T& initial_value) -> AsyncGenerator<Enumerated<T>> {
+        return EnumeratingGenerator<T>(std::move(source), initial_value);
+      }));
+}
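+
+// Hypothetical sketch: tag each item with its position so it can be
+// resequenced after an out-of-order stage. The vector source is an assumption.
+//
+//   AsyncGenerator<int> source = MakeVectorGenerator(std::vector<int>{10, 20});
+//   AsyncGenerator<Enumerated<int>> enumerated =
+//       MakeEnumeratedGenerator(std::move(source));
+//   // First item: {value=10, index=0, last=false};
+//   // second item: {value=20, index=1, last=true}.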
+
+/// \see MakeTransferredGenerator
+template <typename T>
+class TransferringGenerator {
+ public:
+  explicit TransferringGenerator(AsyncGenerator<T> source, internal::Executor* executor)
+      : source_(std::move(source)), executor_(executor) {}
+
+  Future<T> operator()() { return executor_->Transfer(source_()); }
+
+ private:
+  AsyncGenerator<T> source_;
+  internal::Executor* executor_;
+};
+
+/// \brief Transfer a future to an underlying executor.
+///
+/// Continuations run on the returned future will be run on the given executor
+/// if they cannot be run synchronously.
+///
+/// This is often needed to move computation off I/O threads or other external
+/// completion sources and back on to the CPU executor so the I/O thread can
+/// stay busy and focused on I/O.
+///
+/// Keep in mind that continuations called on an already completed future will
+/// always be run synchronously and so no transfer will happen in that case.
+///
+/// This generator is async-reentrant if the source is
+///
+/// This generator will not queue
+template <typename T>
+AsyncGenerator<T> MakeTransferredGenerator(AsyncGenerator<T> source,
+                                           internal::Executor* executor) {
+  return TransferringGenerator<T>(std::move(source), executor);
+}
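+
+// Hypothetical sketch: move continuations from an I/O-driven generator onto a
+// CPU executor. `io_generator` is an assumed source (e.g. from
+// MakeBackgroundGenerator below).
+//
+//   internal::Executor* cpu_executor = internal::GetCpuThreadPool();
+//   AsyncGenerator<std::shared_ptr<Buffer>> transferred =
+//       MakeTransferredGenerator(std::move(io_generator), cpu_executor);
+//   // Callbacks added to transferred() now run on the CPU pool whenever the
+//   // future completes asynchronously.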
+
+/// \see MakeBackgroundGenerator
+template <typename T>
+class BackgroundGenerator {
+ public:
+  explicit BackgroundGenerator(Iterator<T> it, internal::Executor* io_executor, int max_q,
+                               int q_restart)
+      : state_(std::make_shared<State>(io_executor, std::move(it), max_q, q_restart)),
+        cleanup_(std::make_shared<Cleanup>(state_.get())) {}
+
+  Future<T> operator()() {
+    auto guard = state_->mutex.Lock();
+    Future<T> waiting_future;
+    if (state_->queue.empty()) {
+      if (state_->finished) {
+        return AsyncGeneratorEnd<T>();
+      } else {
+        waiting_future = Future<T>::Make();
+        state_->waiting_future = waiting_future;
+      }
+    } else {
+      auto next = Future<T>::MakeFinished(std::move(state_->queue.front()));
+      state_->queue.pop();
+      if (state_->NeedsRestart()) {
+        return state_->RestartTask(state_, std::move(guard), std::move(next));
+      }
+      return next;
+    }
+    // This should only trigger the very first time this method is called
+    if (state_->NeedsRestart()) {
+      return state_->RestartTask(state_, std::move(guard), std::move(waiting_future));
+    }
+    return waiting_future;
+  }
+
+ protected:
+  static constexpr uint64_t kUnlikelyThreadId{std::numeric_limits<uint64_t>::max()};
+
+  struct State {
+    State(internal::Executor* io_executor, Iterator<T> it, int max_q, int q_restart)
+        : io_executor(io_executor),
+          max_q(max_q),
+          q_restart(q_restart),
+          it(std::move(it)),
+          reading(false),
+          finished(false),
+          should_shutdown(false) {}
+
+    void ClearQueue() {
+      while (!queue.empty()) {
+        queue.pop();
+      }
+    }
+
+    bool TaskIsRunning() const { return task_finished.is_valid(); }
+
+    bool NeedsRestart() const {
+      return !finished && !reading && static_cast<int>(queue.size()) <= q_restart;
+    }
+
+    void DoRestartTask(std::shared_ptr<State> state, util::Mutex::Guard guard) {
+      // If we get here we are actually going to start a new task so let's create a
+      // task_finished future for it
+      state->task_finished = Future<>::Make();
+      state->reading = true;
+      auto spawn_status = io_executor->Spawn(
+          [state]() { BackgroundGenerator::WorkerTask(std::move(state)); });
+      if (!spawn_status.ok()) {
+        // If we can't spawn a new task then send an error to the consumer (either via a
+        // waiting future or the queue) and mark ourselves finished
+        state->finished = true;
+        state->task_finished = Future<>();
+        if (waiting_future.has_value()) {
+          auto to_deliver = std::move(waiting_future.value());
+          waiting_future.reset();
+          guard.Unlock();
+          to_deliver.MarkFinished(spawn_status);
+        } else {
+          ClearQueue();
+          queue.push(spawn_status);
+        }
+      }
+    }
+
+    Future<T> RestartTask(std::shared_ptr<State> state, util::Mutex::Guard guard,
+                          Future<T> next) {
+      if (TaskIsRunning()) {
+        // If the task is still cleaning up we need to wait for it to finish before
+        // restarting. We also want to block the consumer until we've restarted the
+        // reader to avoid multiple restarts
+        return task_finished.Then([state, next]() {
+          // This may appear dangerous (recursive mutex) but we should be guaranteed the
+          // outer guard has been released by this point. We know...
+          // * task_finished is not already finished (it would be invalid in that case)
+          // * task_finished will not be marked complete until we've given up the mutex
+          auto guard_ = state->mutex.Lock();
+          state->DoRestartTask(state, std::move(guard_));
+          return next;
+        });
+      }
+      // Otherwise we can restart immediately
+      DoRestartTask(std::move(state), std::move(guard));
+      return next;
+    }
+
+    internal::Executor* io_executor;
+    const int max_q;
+    const int q_restart;
+    Iterator<T> it;
+    std::atomic<uint64_t> worker_thread_id{kUnlikelyThreadId};
+
+    // If true, the task is actively pumping items from the queue and does not need a
+    // restart
+    bool reading;
+    // Set to true when a terminal item arrives
+    bool finished;
+    // Signal to the background task to end early because consumers have given up on it
+    bool should_shutdown;
+    // If the queue is empty, the consumer will create a waiting future and wait for it
+    std::queue<Result<T>> queue;
+    std::optional<Future<T>> waiting_future;
+    // Every background task is given a future to complete when it is entirely finished
+    // processing and ready for the next task to start or for State to be destroyed
+    Future<> task_finished;
+    util::Mutex mutex;
+  };
+
+  // Cleanup task that will be run when all consumer references to the generator are lost
+  struct Cleanup {
+    explicit Cleanup(State* state) : state(state) {}
+    ~Cleanup() {
+      /// TODO: Once ARROW-13109 is available then we can force consumers to spawn and
+      /// there is no need to perform this check.
+      ///
+      /// It's a deadlock if we enter cleanup from the worker thread but it can
+      /// happen if the consumer doesn't transfer away
+      assert(state->worker_thread_id.load() != ::arrow::internal::GetThreadId());
+      Future<> finish_fut;
+      {
+        auto lock = state->mutex.Lock();
+        if (!state->TaskIsRunning()) {
+          return;
+        }
+        // Signal the current task to stop and wait for it to finish
+        state->should_shutdown = true;
+        finish_fut = state->task_finished;
+      }
+      // Using the future as a condition variable here
+      Status st = finish_fut.status();
+      ARROW_UNUSED(st);
+    }
+    State* state;
+  };
+
+  static void WorkerTask(std::shared_ptr<State> state) {
+    state->worker_thread_id.store(::arrow::internal::GetThreadId());
+    // We need to capture the state to read while outside the mutex
+    bool reading = true;
+    while (reading) {
+      auto next = state->it.Next();
+      // Need to capture state->waiting_future inside the mutex to mark finished outside
+      Future<T> waiting_future;
+      {
+        auto guard = state->mutex.Lock();
+
+        if (state->should_shutdown) {
+          state->finished = true;
+          break;
+        }
+
+        if (!next.ok() || IsIterationEnd<T>(*next)) {
+          // Terminal item. Set finished to true, send this last item, and quit
+          state->finished = true;
+          if (!next.ok()) {
+            state->ClearQueue();
+          }
+        }
+        // At this point we are going to send an item. Either we will add it to the
+        // queue or deliver it to a waiting future.
+        if (state->waiting_future.has_value()) {
+          waiting_future = std::move(state->waiting_future.value());
+          state->waiting_future.reset();
+        } else {
+          state->queue.push(std::move(next));
+          // We just filled up the queue so it is time to quit. We may need to notify
+          // a cleanup task so we transition to Quitting
+          if (static_cast<int>(state->queue.size()) >= state->max_q) {
+            state->reading = false;
+          }
+        }
+        reading = state->reading && !state->finished;
+      }
+      // This should happen outside the mutex. Presumably there is a
+      // transferring generator on the other end that will quickly transfer any
+      // callbacks off of this thread so we can continue looping. Still, best not to
+      // rely on that
+      if (waiting_future.is_valid()) {
+        waiting_future.MarkFinished(next);
+      }
+    }
+    // Once we've sent our last item we can notify any waiters that we are done and so
+    // either state can be cleaned up or a new background task can be started
+    Future<> task_finished;
+    {
+      auto guard = state->mutex.Lock();
+      // After we give up the mutex state can be safely deleted. We will no longer
+      // reference it. We can safely transition to idle now.
+      task_finished = state->task_finished;
+      state->task_finished = Future<>();
+      state->worker_thread_id.store(kUnlikelyThreadId);
+    }
+    task_finished.MarkFinished();
+  }
+
+  std::shared_ptr<State> state_;
+  // state_ is held by both the generator and the background thread so it won't be cleaned
+  // up when all consumer references are relinquished. cleanup_ is only held by the
+  // generator so it will be destructed when the last consumer reference is gone. We use
+  // this to cleanup / stop the background generator in case the consuming end stops
+  // listening (e.g. due to a downstream error)
+  std::shared_ptr<Cleanup> cleanup_;
+};
+
+constexpr int kDefaultBackgroundMaxQ = 32;
+constexpr int kDefaultBackgroundQRestart = 16;
+
+/// \brief Create an AsyncGenerator<T> by iterating over an Iterator<T> on a background
+/// thread
+///
+/// The parameters max_q and q_restart control queue size and background thread task
+/// management. If the background task is fast you typically don't want it creating a
+/// thread task for every item. Instead the background thread will run until it fills
+/// up a readahead queue.
+///
+/// Once the queue has filled up the background thread task will terminate (allowing other
+/// I/O tasks to use the thread). Once the queue has been drained enough (specified by
+/// q_restart) then the background thread task will be restarted. If q_restart is too low
+/// then you may exhaust the queue waiting for the background thread task to start running
+/// again. If it is too high then it will be constantly stopping and restarting the
+/// background queue task
+///
+/// The "background thread" is a logical thread and will run as tasks on the io_executor.
+/// This thread may stop and start when the queue fills up but there will only be one
+/// active background thread task at any given time. You MUST transfer away from this
+/// background generator. Otherwise there could be a race condition if a callback on the
+/// background thread deletes the last consumer reference to the background generator. You
+/// can transfer onto the same executor as the background thread, it is only necessary to
+/// create a new thread task, not to switch executors.
+///
+/// This generator is not async-reentrant
+///
+/// This generator will queue up to max_q blocks
+template <typename T>
+static Result<AsyncGenerator<T>> MakeBackgroundGenerator(
+    Iterator<T> iterator, internal::Executor* io_executor,
+    int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart) {
+  if (max_q < q_restart) {
+    return Status::Invalid("max_q must be >= q_restart");
+  }
+  return BackgroundGenerator<T>(std::move(iterator), io_executor, max_q, q_restart);
+}
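+
+// Hypothetical sketch: wrap a blocking Iterator in a background generator and
+// immediately transfer off the I/O thread, as the comment above requires.
+// `blocking_iter` is an assumed Iterator<T> backed by file reads.
+//
+//   ARROW_ASSIGN_OR_RAISE(
+//       auto bg_gen,
+//       MakeBackgroundGenerator(std::move(blocking_iter),
+//                               io::default_io_context().executor()));
+//   auto safe_gen = MakeTransferredGenerator(std::move(bg_gen),
+//                                            internal::GetCpuThreadPool());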
+
+/// \brief Create an AsyncGenerator<T> by iterating over an Iterator<T> synchronously
+///
+/// This should only be used if you know the source iterator does not involve any
+/// I/O (or other blocking calls). Otherwise a CPU thread will be blocked and, depending
+/// on the complexity of the iterator, it may lead to deadlock.
+///
+/// If you are not certain if there will be I/O then it is better to use
+/// MakeBackgroundGenerator. If helpful you can think of this as the AsyncGenerator
+/// equivalent of Future::MakeFinished
+///
+/// It is impossible to call this in an async-reentrant manner since the returned
+/// future will be completed by the time it is polled.
+///
+/// This generator does not queue
+template <typename T>
+static Result<AsyncGenerator<T>> MakeBlockingGenerator(
+    std::shared_ptr<Iterator<T>> iterator) {
+  return [it = std::move(iterator)]() mutable -> Future<T> {
+    return Future<T>::MakeFinished(it->Next());
+  };
+}
+
+template <typename T>
+static Result<AsyncGenerator<T>> MakeBlockingGenerator(Iterator<T> iterator) {
+  return MakeBlockingGenerator(std::make_shared<Iterator<T>>(std::move(iterator)));
+}
+
+/// \see MakeGeneratorIterator
+template <typename T>
+class GeneratorIterator {
+ public:
+  explicit GeneratorIterator(AsyncGenerator<T> source) : source_(std::move(source)) {}
+
+  Result<T> Next() { return source_().result(); }
+
+ private:
+  AsyncGenerator<T> source_;
+};
+
+/// \brief Convert an AsyncGenerator<T> to an Iterator<T> which blocks until each future
+/// is finished
+template <typename T>
+Iterator<T> MakeGeneratorIterator(AsyncGenerator<T> source) {
+  return Iterator<T>(GeneratorIterator<T>(std::move(source)));
+}
+
+/// \brief Add readahead to an iterator using a background thread.
+///
+/// Under the hood this is converting the iterator to a generator using
+/// MakeBackgroundGenerator, adding readahead to the converted generator with
+/// MakeReadaheadGenerator, and then converting back to an iterator using
+/// MakeGeneratorIterator.
+template <typename T>
+Result<Iterator<T>> MakeReadaheadIterator(Iterator<T> it, int readahead_queue_size) {
+  ARROW_ASSIGN_OR_RAISE(auto io_executor, internal::ThreadPool::Make(1));
+  auto max_q = readahead_queue_size;
+  auto q_restart = std::max(1, max_q / 2);
+  ARROW_ASSIGN_OR_RAISE(
+      auto background_generator,
+      MakeBackgroundGenerator(std::move(it), io_executor.get(), max_q, q_restart));
+  // Capture io_executor to keep it alive as long as owned_bg_generator is still
+  // referenced
+  AsyncGenerator<T> owned_bg_generator = [io_executor, background_generator]() {
+    return background_generator();
+  };
+  return MakeGeneratorIterator(std::move(owned_bg_generator));
+}
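+
+// Hypothetical sketch: a synchronous pipeline can hide I/O latency by keeping
+// up to 8 items buffered ahead of the consumer. `slow_iter` and its item type
+// are assumptions for illustration.
+//
+//   ARROW_ASSIGN_OR_RAISE(
+//       auto fast_iter,
+//       MakeReadaheadIterator(std::move(slow_iter), /*readahead_queue_size=*/8));
+//   for (auto maybe_item : fast_iter) {
+//     // each Next() is served from the readahead queue when possible
+//   }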
+
+/// \brief Make a generator that returns a single pre-generated future
+///
+/// This generator is async-reentrant.
+template <typename T>
+std::function<Future<T>()> MakeSingleFutureGenerator(Future<T> future) {
+  assert(future.is_valid());
+  auto state = std::make_shared<Future<T>>(std::move(future));
+  return [state]() -> Future<T> {
+    auto fut = std::move(*state);
+    if (fut.is_valid()) {
+      return fut;
+    } else {
+      return AsyncGeneratorEnd<T>();
+    }
+  };
+}
+
+/// \brief Make a generator that immediately ends.
+///
+/// This generator is async-reentrant.
+template <typename T>
+std::function<Future<T>()> MakeEmptyGenerator() {
+  return []() -> Future<T> { return AsyncGeneratorEnd<T>(); };
+}
+
+/// \brief Make a generator that always fails with a given error
+///
+/// This generator is async-reentrant.
+template <typename T>
+AsyncGenerator<T> MakeFailingGenerator(Status st) {
+  assert(!st.ok());
+  auto state = std::make_shared<Status>(std::move(st));
+  return [state]() -> Future<T> {
+    auto st = std::move(*state);
+    if (!st.ok()) {
+      return std::move(st);
+    } else {
+      return AsyncGeneratorEnd<T>();
+    }
+  };
+}
+
+/// \brief Make a generator that always fails with a given error
+///
+/// This overload allows inferring the return type from the argument.
+template <typename T>
+AsyncGenerator<T> MakeFailingGenerator(const Result<T>& result) {
+  return MakeFailingGenerator<T>(result.status());
+}
+
+/// \brief Prepend initial_values onto a generator
+///
+/// This generator is async-reentrant but will buffer requests and will not
+/// pull from following_values async-reentrantly.
+template <typename T>
+AsyncGenerator<T> MakeGeneratorStartsWith(std::vector<T> initial_values,
+                                          AsyncGenerator<T> following_values) {
+  auto initial_values_vec_gen = MakeVectorGenerator(std::move(initial_values));
+  auto gen_gen = MakeVectorGenerator<AsyncGenerator<T>>(
+      {std::move(initial_values_vec_gen), std::move(following_values)});
+  return MakeConcatenatedGenerator(std::move(gen_gen));
+}
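+
+// Hypothetical sketch: replay a buffered prefix before continuing with the
+// live stream; `live` is an assumed generator.
+//
+//   AsyncGenerator<int> with_prefix =
+//       MakeGeneratorStartsWith(std::vector<int>{1, 2}, std::move(live));
+//   // Yields 1, 2, then whatever `live` produces.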
+
+template <typename T>
+struct CancellableGenerator {
+  Future<T> operator()() {
+    if (stop_token.IsStopRequested()) {
+      return stop_token.Poll();
+    }
+    return source();
+  }
+
+  AsyncGenerator<T> source;
+  StopToken stop_token;
+};
+
+/// \brief Allow an async generator to be cancelled
+///
+/// This generator is async-reentrant
+template <typename T>
+AsyncGenerator<T> MakeCancellable(AsyncGenerator<T> source, StopToken stop_token) {
+  return CancellableGenerator<T>{std::move(source), std::move(stop_token)};
+}
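+
+// Hypothetical sketch: wire a StopSource into a generator pipeline so a
+// consumer can abandon it; `source_gen` is assumed.
+//
+//   StopSource stop_source;
+//   AsyncGenerator<int> cancellable =
+//       MakeCancellable(std::move(source_gen), stop_source.token());
+//   stop_source.RequestStop();  // subsequent pulls resolve to a cancellation status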
+
+template <typename T>
+class DefaultIfEmptyGenerator {
+ public:
+  DefaultIfEmptyGenerator(AsyncGenerator<T> source, T or_value)
+      : state_(std::make_shared<State>(std::move(source), std::move(or_value))) {}
+
+  Future<T> operator()() {
+    if (state_->first) {
+      state_->first = false;
+      struct {
+        T or_value;
+
+        Result<T> operator()(const T& value) {
+          if (IterationTraits<T>::IsEnd(value)) {
+            return std::move(or_value);
+          }
+          return value;
+        }
+      } Continuation;
+      Continuation.or_value = std::move(state_->or_value);
+      return state_->source().Then(std::move(Continuation));
+    }
+    return state_->source();
+  }
+
+ private:
+  struct State {
+    AsyncGenerator<T> source;
+    T or_value;
+    bool first;
+    State(AsyncGenerator<T> source_, T or_value_)
+        : source(std::move(source_)), or_value(std::move(or_value_)), first(true) {}
+  };
+  std::shared_ptr<State> state_;
+};
+
+/// \brief If the generator is empty, return the given value, else
+/// forward the values from the generator.
+///
+/// This generator is async-reentrant.
+template <typename T>
+AsyncGenerator<T> MakeDefaultIfEmptyGenerator(AsyncGenerator<T> source, T or_value) {
+  return DefaultIfEmptyGenerator<T>(std::move(source), std::move(or_value));
+}
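+
+// Hypothetical sketch: substitute a sentinel when the source yields nothing.
+//
+//   AsyncGenerator<int> empty = MakeEmptyGenerator<int>();
+//   AsyncGenerator<int> gen =
+//       MakeDefaultIfEmptyGenerator(std::move(empty), /*or_value=*/-1);
+//   // The first pull yields -1; the next pull yields IterationEnd.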
+}  // namespace arrow

env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_util.h ADDED
@@ -0,0 +1,457 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <functional>
22
+ #include <list>
23
+ #include <memory>
24
+
25
+ #include "arrow/result.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/util/cancel.h"
28
+ #include "arrow/util/functional.h"
29
+ #include "arrow/util/future.h"
30
+ #include "arrow/util/iterator.h"
31
+ #include "arrow/util/mutex.h"
32
+ #include "arrow/util/thread_pool.h"
33
+ #include "arrow/util/tracing.h"
34
+
35
+ namespace arrow {
36
+
37
+ using internal::FnOnce;
38
+
39
+ namespace util {
40
+
41
+ /// A utility which keeps tracks of, and schedules, asynchronous tasks
42
+ ///
43
+ /// An asynchronous task has a synchronous component and an asynchronous component.
44
+ /// The synchronous component typically schedules some kind of work on an external
45
+ /// resource (e.g. the I/O thread pool or some kind of kernel-based asynchronous
46
+ /// resource like io_uring). The asynchronous part represents the work
47
+ /// done on that external resource. Executing the synchronous part will be referred
48
+ /// to as "submitting the task" since this usually includes submitting the asynchronous
49
+ /// portion to the external thread pool.
50
+ ///
51
+ /// By default the scheduler will submit the task (execute the synchronous part) as
52
+ /// soon as it is added, assuming the underlying thread pool hasn't terminated or the
53
+ /// scheduler hasn't aborted. In this mode, the scheduler is simply acting as
54
+ /// a simple task group.
55
+ ///
56
+ /// A task scheduler starts with an initial task. That task, and all subsequent tasks
57
+ /// are free to add subtasks. Once all submitted tasks finish the scheduler will
58
+ /// finish. Note, it is not an error to add additional tasks after a scheduler has
59
+ /// aborted. These tasks will be ignored and never submitted. The scheduler returns a
60
+ /// future which will complete when all submitted tasks have finished executing. Once all
61
+ /// tasks have been finished the scheduler is invalid and should no longer be used.
62
+ ///
63
+ /// Task failure (either the synchronous portion or the asynchronous portion) will cause
64
+ /// the scheduler to enter an aborted state. The first such failure will be reported in
65
+ /// the final task future.
66
+ class ARROW_EXPORT AsyncTaskScheduler {
67
+ public:
68
+ /// Destructor for AsyncTaskScheduler
69
+ ///
70
+ /// The lifetime of the task scheduled is managed automatically. The scheduler
71
+ /// will remain valid while any tasks are running (and can always be safely accessed)
72
+ /// within tasks) and will be destroyed as soon as all tasks have finished.
73
+ virtual ~AsyncTaskScheduler() = default;
74
+ /// An interface for a task
75
+ ///
76
+ /// Users may want to override this, for example, to add priority
77
+ /// information for use by a queue.
78
+ class Task {
79
+ public:
80
+ virtual ~Task() = default;
81
+ /// Submit the task
82
+ ///
83
+ /// This will be called by the scheduler at most once when there
84
+ /// is space to run the task. This is expected to be a fairly quick
85
+ /// function that simply submits the actual task work to an external
86
+ /// resource (e.g. I/O thread pool).
87
+ ///
88
+ /// If this call fails then the scheduler will enter an aborted state.
89
+ virtual Result<Future<>> operator()() = 0;
90
+ /// The cost of the task
91
+ ///
92
+ /// A ThrottledAsyncTaskScheduler can be used to limit the number of concurrent tasks.
93
+ /// A custom cost may be used, for example, if you would like to limit the number of
94
+ /// tasks based on the total expected RAM usage of the tasks (this is done in the
95
+ /// scanner)
96
+ virtual int cost() const { return 1; }
97
+ /// The name of the task
98
+ ///
99
+ /// This is used for debugging and traceability. The returned view must remain
100
+ /// valid for the lifetime of the task.
101
+ virtual std::string_view name() const = 0;
102
+
103
+ /// a span tied to the lifetime of the task, for internal use only
104
+ tracing::Span span;
105
+ };
106
+
107
+ /// Add a task to the scheduler
108
+ ///
109
+ /// If the scheduler is in an aborted state this call will return false and the task
110
+ /// will never be run. This is harmless and does not need to be guarded against.
111
+ ///
112
+ /// The return value for this call can usually be ignored. There is little harm in
113
+ /// attempting to add tasks to an aborted scheduler. It is only included for callers
114
+ /// that want to avoid future task generation to save effort.
115
+ ///
116
+ /// \param task the task to submit
117
+ ///
118
+ /// A task's name must remain valid for the duration of the task. It is used for
119
+ /// debugging (e.g. when debugging a deadlock to see which tasks still remain) and for
120
+ /// traceability (the name will be used for spans assigned to the task)
121
+ ///
122
+ /// \return true if the task was submitted or queued, false if the task was ignored
123
+ virtual bool AddTask(std::unique_ptr<Task> task) = 0;
124
+
125
+ /// Adds an async generator to the scheduler
126
+ ///
127
+ /// The async generator will be visited, one item at a time. Submitting a task
128
+ /// will consist of polling the generator for the next future. The generator's future
129
+ /// will then represent the task itself.
130
+ ///
131
+ /// This visits the task serially without readahead. If readahead or parallelism
132
+ /// is desired then it should be added in the generator itself.
133
+ ///
134
+ /// The generator itself will be kept alive until all tasks have been completed.
135
+ /// However, if the scheduler is aborted, the generator will be destroyed as soon as the
136
+ /// next item would be requested.
137
+ ///
138
+ /// \param generator the generator to submit to the scheduler
139
+ /// \param visitor a function which visits each generator future as it completes
140
+ /// \param name a name which will be used for each submitted task
141
+ template <typename T>
142
+ bool AddAsyncGenerator(std::function<Future<T>()> generator,
143
+ std::function<Status(const T&)> visitor, std::string_view name);
144
+
145
+ template <typename Callable>
146
+ struct SimpleTask : public Task {
147
+ SimpleTask(Callable callable, std::string_view name)
148
+ : callable(std::move(callable)), name_(name) {}
149
+ SimpleTask(Callable callable, std::string name)
150
+ : callable(std::move(callable)), owned_name_(std::move(name)) {
151
+ name_ = *owned_name_;
152
+ }
153
+ Result<Future<>> operator()() override { return callable(); }
154
+ std::string_view name() const override { return name_; }
155
+ Callable callable;
156
+ std::string_view name_;
157
+ std::optional<std::string> owned_name_;
158
+ };
159
+
160
+ /// Add a task with cost 1 to the scheduler
161
+ ///
162
+ /// \param callable a "submit" function that should return a future
163
+ /// \param name a name for the task
164
+ ///
165
+ /// `name` must remain valid until the task has been submitted AND the returned
166
+ /// future completes. It is used for debugging and tracing.
167
+ ///
168
+ /// \see AddTask for more details
169
+ template <typename Callable>
170
+ bool AddSimpleTask(Callable callable, std::string_view name) {
171
+ return AddTask(std::make_unique<SimpleTask<Callable>>(std::move(callable), name));
172
+ }
173
+
174
+ /// Add a task with cost 1 to the scheduler
175
+ ///
176
+ /// This is an overload of \see AddSimpleTask that keeps `name` alive
177
+ /// in the task.
178
+ template <typename Callable>
179
+ bool AddSimpleTask(Callable callable, std::string name) {
180
+ return AddTask(
181
+ std::make_unique<SimpleTask<Callable>>(std::move(callable), std::move(name)));
182
+ }
183
+
184
+ /// Construct a scheduler
185
+ ///
186
+ /// \param initial_task The initial task which is responsible for adding
187
+ /// the first subtasks to the scheduler.
188
+ /// \param abort_callback A callback that will be triggered immediately after a task
189
+ /// fails while other tasks may still be running. Nothing needs to be done here,
190
+ /// when a task fails the scheduler will stop accepting new tasks and eventually
191
+ /// return the error. However, this callback can be used to more quickly end
192
+ /// long running tasks that have already been submitted. Defaults to doing
193
+ /// nothing.
194
+ /// \param stop_token An optional stop token that will allow cancellation of the
195
+ /// scheduler. This will be checked before each task is submitted and, in the
196
+ /// event of a cancellation, the scheduler will enter an aborted state. This is
197
+ /// a graceful cancellation and submitted tasks will still complete.
198
+ /// \return A future that will be completed when the initial task and all subtasks have
199
+ /// finished.
200
+ static Future<> Make(
201
+ FnOnce<Status(AsyncTaskScheduler*)> initial_task,
202
+ FnOnce<void(const Status&)> abort_callback = [](const Status&) {},
203
+ StopToken stop_token = StopToken::Unstoppable());
204
+
205
+ /// A span tracking execution of the scheduler's tasks, for internal use only
206
+ virtual const tracing::Span& span() const = 0;
207
+ };
208
+
209
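A minimal usage sketch (illustrative only, not part of the header) assuming the declarations above; `DoWorkAsync()` is a hypothetical function returning `Future<>`:

    // Run an initial task that schedules one subtask, then wait for everything.
    arrow::Future<> finished = arrow::util::AsyncTaskScheduler::Make(
        [](arrow::util::AsyncTaskScheduler* scheduler) {
          // The std::string overload keeps the name alive inside the task.
          scheduler->AddSimpleTask([] { return DoWorkAsync(); },
                                   std::string("do-work"));
          return arrow::Status::OK();
        });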
+ class ARROW_EXPORT ThrottledAsyncTaskScheduler : public AsyncTaskScheduler {
210
+ public:
211
+ /// An interface for a task queue
212
+ ///
213
+ /// A queue's methods will not be called concurrently
214
+ class Queue {
215
+ public:
216
+ virtual ~Queue() = default;
217
+ /// Push a task to the queue
218
+ ///
219
+ /// \param task the task to enqueue
220
+ virtual void Push(std::unique_ptr<Task> task) = 0;
221
+ /// Pop the next task from the queue
222
+ virtual std::unique_ptr<Task> Pop() = 0;
223
+ /// Peek the next task in the queue
224
+ virtual const Task& Peek() = 0;
225
+ /// Check if the queue is empty
226
+ virtual bool Empty() = 0;
227
+ /// Purge the queue of all items
228
+ virtual void Purge() = 0;
229
+ };
230
+
231
+ class Throttle {
232
+ public:
233
+ virtual ~Throttle() = default;
234
+ /// Acquire amt permits
235
+ ///
236
+ /// If nullopt is returned then the permits were immediately
237
+ /// acquired and the caller can proceed. If a future is returned then the caller
238
+ /// should wait for the future to complete first. When the returned future completes
239
+ /// the permits have NOT been acquired and the caller must call TryAcquire again.
240
+ ///
241
+ /// \param amt the number of permits to acquire
242
+ virtual std::optional<Future<>> TryAcquire(int amt) = 0;
243
+ /// Release amt permits
244
+ ///
245
+ /// This will possibly complete waiting futures and should probably not be
246
+ /// called while holding locks.
247
+ ///
248
+ /// \param amt the number of permits to release
249
+ virtual void Release(int amt) = 0;
250
+
251
+ /// The size of the largest task that can run
252
+ ///
253
+ /// Incoming tasks will have their cost clamped to this value to ensure
254
+ /// they can still run (although they will be the only thing allowed to
255
+ /// run at that time).
256
+ virtual int Capacity() = 0;
257
+
258
+ /// Pause the throttle
259
+ ///
260
+ /// Any tasks that have been submitted already will continue. However, no new tasks
261
+ /// will be run until the throttle is resumed.
262
+ virtual void Pause() = 0;
263
+ /// Resume the throttle
264
+ ///
265
+ /// Allows tasks to be submitted again. If there is a max_concurrent_cost limit then
266
+ /// it will still apply.
267
+ virtual void Resume() = 0;
268
+ };
269
+
270
+ /// Pause the throttle
271
+ ///
272
+ /// Any tasks that have been submitted already will continue. However, no new tasks
273
+ /// will be run until the throttle is resumed.
274
+ virtual void Pause() = 0;
275
+ /// Resume the throttle
276
+ ///
277
+ /// Allows tasks to be submitted again. If there is a max_concurrent_cost limit then
278
+ /// it will still apply.
279
+ virtual void Resume() = 0;
280
+
281
+ /// Create a throttled view of a scheduler
282
+ ///
283
+ /// Tasks added via this view will be subjected to the throttle and, if the tasks cannot
284
+ /// run immediately, will be placed into a queue.
285
+ ///
286
+ /// Although a shared_ptr is returned, it should generally be assumed that the caller
287
+ /// is being given exclusive ownership. The shared_ptr is used to share the view with
288
+ /// queued and submitted tasks and the lifetime of those is unpredictable. It is
289
+ /// important that the caller keep the returned pointer alive for as long as they plan to add
290
+ /// tasks to the view.
291
+ ///
292
+ /// \param scheduler a scheduler to submit tasks to after throttling
293
+ ///
294
+ /// This can be the root scheduler, another throttled scheduler, or a task group. These
295
+ /// are all composable.
296
+ ///
297
+ /// \param max_concurrent_cost the maximum amount of cost allowed to run at any one time
298
+ ///
299
+ /// If a task is added that has a cost greater than max_concurrent_cost then its cost
300
+ /// will be reduced to max_concurrent_cost so that it is still possible for the task to
301
+ /// run.
302
+ ///
303
+ /// \param queue the queue to use when tasks cannot be submitted
304
+ ///
305
+ /// By default a FIFO queue will be used. However, a custom queue can be provided if
306
+ /// some tasks have higher priority than other tasks.
307
+ static std::shared_ptr<ThrottledAsyncTaskScheduler> Make(
308
+ AsyncTaskScheduler* scheduler, int max_concurrent_cost,
309
+ std::unique_ptr<Queue> queue = NULLPTR);
310
+
311
+ /// @brief Create a ThrottledAsyncTaskScheduler using a custom throttle
312
+ ///
313
+ /// \see Make
314
+ static std::shared_ptr<ThrottledAsyncTaskScheduler> MakeWithCustomThrottle(
315
+ AsyncTaskScheduler* scheduler, std::unique_ptr<Throttle> throttle,
316
+ std::unique_ptr<Queue> queue = NULLPTR);
317
+ };
318
+
319
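A sketch of throttling (illustrative; `scheduler` is the pointer handed to the initial task in the Make example above, and `DoWorkAsync()` remains hypothetical):

    // At most 4 cost units run at once; excess tasks wait in the default FIFO queue.
    std::shared_ptr<arrow::util::ThrottledAsyncTaskScheduler> throttled =
        arrow::util::ThrottledAsyncTaskScheduler::Make(scheduler,
                                                       /*max_concurrent_cost=*/4);
    throttled->AddSimpleTask([] { return DoWorkAsync(); },
                             std::string("throttled-work"));
    // Keep `throttled` alive for as long as tasks may still be added to it.

A custom Throttle (e.g. one backed by a memory budget) can be supplied through MakeWithCustomThrottle instead.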
+ /// A utility to keep track of a collection of tasks
320
+ ///
321
+ /// Often it is useful to keep track of some state that only needs to stay alive
322
+ /// for some small collection of tasks, or to perform some kind of final cleanup
323
+ /// when a collection of tasks is finished.
324
+ ///
325
+ /// For example, when scanning, we need to keep the file reader alive while all scan
326
+ /// tasks run for a given file, and then we can gracefully close it when we finish the
327
+ /// file.
328
+ class ARROW_EXPORT AsyncTaskGroup : public AsyncTaskScheduler {
329
+ public:
330
+ /// Destructor for the task group
331
+ ///
332
+ /// The destructor might trigger the finish callback. If the finish callback fails
333
+ /// then the error will be reported as a task on the scheduler.
334
+ ///
335
+ /// Failure to destroy the async task group will not prevent the scheduler from
336
+ /// finishing. If the scheduler finishes before the async task group is done then
337
+ /// the finish callback will be run immediately when the async task group finishes.
338
+ ///
339
+ /// If the scheduler has aborted then the finish callback will not run.
340
+ ~AsyncTaskGroup() = default;
341
+ /// Create an async task group
342
+ ///
343
+ /// The finish callback will not run until the task group is destroyed and all
344
+ /// tasks are finished, so you will generally want to reset / destroy the returned
345
+ /// unique_ptr at some point.
346
+ ///
347
+ /// \param scheduler The underlying scheduler to submit tasks to
348
+ /// \param finish_callback A callback that will be run only after the task group has
349
+ /// been destroyed and all tasks added by the group have
350
+ /// finished.
351
+ ///
352
+ /// Note: in error scenarios the finish callback may not run. However, it will still,
353
+ /// of course, be destroyed.
354
+ static std::unique_ptr<AsyncTaskGroup> Make(AsyncTaskScheduler* scheduler,
355
+ FnOnce<Status()> finish_callback);
356
+ };
357
+
358
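A sketch of tying a resource's lifetime to a group of tasks (illustrative; `Reader`, `Close`, and `ReadNextAsync` are hypothetical names):

    auto reader = std::make_shared<Reader>();
    std::unique_ptr<arrow::util::AsyncTaskGroup> group =
        arrow::util::AsyncTaskGroup::Make(
            scheduler, [reader] { return reader->Close(); });
    group->AddSimpleTask([reader] { return reader->ReadNextAsync(); },
                         std::string("read"));
    group.reset();  // The finish callback runs once all added tasks complete.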
+ /// Create a task group that is also throttled
359
+ ///
360
+ /// This is a utility factory that creates a throttled view of a scheduler and then
361
+ /// wraps that throttled view with a task group that destroys the throttle when finished.
362
+ ///
363
+ /// \see ThrottledAsyncTaskScheduler
364
+ /// \see AsyncTaskGroup
365
+ /// \param target the underlying scheduler to submit tasks to
366
+ /// \param max_concurrent_cost the maximum amount of cost allowed to run at any one time
367
+ /// \param queue the queue to use when tasks cannot be submitted
368
+ /// \param finish_callback A callback that will be run only after the task group has
369
+ /// been destroyed and all tasks added by the group have finished
370
+ ARROW_EXPORT std::unique_ptr<ThrottledAsyncTaskScheduler> MakeThrottledAsyncTaskGroup(
371
+ AsyncTaskScheduler* target, int max_concurrent_cost,
372
+ std::unique_ptr<ThrottledAsyncTaskScheduler::Queue> queue,
373
+ FnOnce<Status()> finish_callback);
374
+
375
+ // Defined down here to avoid circular dependency between AsyncTaskScheduler and
376
+ // AsyncTaskGroup
377
+ template <typename T>
378
+ bool AsyncTaskScheduler::AddAsyncGenerator(std::function<Future<T>()> generator,
379
+ std::function<Status(const T&)> visitor,
380
+ std::string_view name) {
381
+ struct State {
382
+ State(std::function<Future<T>()> generator, std::function<Status(const T&)> visitor,
383
+ std::unique_ptr<AsyncTaskGroup> task_group, std::string_view name)
384
+ : generator(std::move(generator)),
385
+ visitor(std::move(visitor)),
386
+ task_group(std::move(task_group)),
387
+ name(name) {}
388
+ std::function<Future<T>()> generator;
389
+ std::function<Status(const T&)> visitor;
390
+ std::unique_ptr<AsyncTaskGroup> task_group;
391
+ std::string_view name;
392
+ };
393
+ struct SubmitTask : public Task {
394
+ explicit SubmitTask(std::unique_ptr<State> state_holder)
395
+ : state_holder(std::move(state_holder)) {}
396
+
397
+ struct SubmitTaskCallback {
398
+ SubmitTaskCallback(std::unique_ptr<State> state_holder, Future<> task_completion)
399
+ : state_holder(std::move(state_holder)),
400
+ task_completion(std::move(task_completion)) {}
401
+ void operator()(const Result<T>& maybe_item) {
402
+ if (!maybe_item.ok()) {
403
+ task_completion.MarkFinished(maybe_item.status());
404
+ return;
405
+ }
406
+ const auto& item = *maybe_item;
407
+ if (IsIterationEnd(item)) {
408
+ task_completion.MarkFinished();
409
+ return;
410
+ }
411
+ Status visit_st = state_holder->visitor(item);
412
+ if (!visit_st.ok()) {
413
+ task_completion.MarkFinished(std::move(visit_st));
414
+ return;
415
+ }
416
+ state_holder->task_group->AddTask(
417
+ std::make_unique<SubmitTask>(std::move(state_holder)));
418
+ task_completion.MarkFinished();
419
+ }
420
+ std::unique_ptr<State> state_holder;
421
+ Future<> task_completion;
422
+ };
423
+
424
+ Result<Future<>> operator()() override {
425
+ Future<> task = Future<>::Make();
426
+ // Consume as many items as we can (those that are already finished)
427
+ // synchronously to avoid recursion / stack overflow.
428
+ while (true) {
429
+ Future<T> next = state_holder->generator();
430
+ if (next.TryAddCallback(
431
+ [&] { return SubmitTaskCallback(std::move(state_holder), task); })) {
432
+ return task;
433
+ }
434
+ ARROW_ASSIGN_OR_RAISE(T item, next.result());
435
+ if (IsIterationEnd(item)) {
436
+ task.MarkFinished();
437
+ return task;
438
+ }
439
+ ARROW_RETURN_NOT_OK(state_holder->visitor(item));
440
+ }
441
+ }
442
+
443
+ std::string_view name() const override { return state_holder->name; }
444
+
445
+ std::unique_ptr<State> state_holder;
446
+ };
447
+ std::unique_ptr<AsyncTaskGroup> task_group =
448
+ AsyncTaskGroup::Make(this, [] { return Status::OK(); });
449
+ AsyncTaskGroup* task_group_view = task_group.get();
450
+ std::unique_ptr<State> state_holder = std::make_unique<State>(
451
+ std::move(generator), std::move(visitor), std::move(task_group), name);
452
+ task_group_view->AddTask(std::make_unique<SubmitTask>(std::move(state_holder)));
453
+ return true;
454
+ }
455
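A sketch of driving a generator through the scheduler (illustrative; `MakeBatchGenerator` and `ProcessBatch` are hypothetical, and the generator is assumed to yield an iteration-end sentinel when exhausted):

    std::function<arrow::Future<std::shared_ptr<arrow::RecordBatch>>()> gen =
        MakeBatchGenerator();
    scheduler->AddAsyncGenerator<std::shared_ptr<arrow::RecordBatch>>(
        std::move(gen),
        [](const std::shared_ptr<arrow::RecordBatch>& batch) {
          return ProcessBatch(batch);  // returns arrow::Status
        },
        "consume-batches");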
+
456
+ } // namespace util
457
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/basic_decimal.h ADDED
@@ -0,0 +1,492 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <array>
21
+ #include <cstdint>
22
+ #include <cstring>
23
+ #include <limits>
24
+ #include <string>
25
+ #include <type_traits>
26
+
27
+ #include "arrow/util/endian.h"
28
+ #include "arrow/util/macros.h"
29
+ #include "arrow/util/type_traits.h"
30
+ #include "arrow/util/visibility.h"
31
+
32
+ namespace arrow {
33
+
34
+ enum class DecimalStatus {
35
+ kSuccess,
36
+ kDivideByZero,
37
+ kOverflow,
38
+ kRescaleDataLoss,
39
+ };
40
+
41
+ template <typename Derived, int BIT_WIDTH, int NWORDS = BIT_WIDTH / 64>
42
+ class ARROW_EXPORT GenericBasicDecimal {
43
+ protected:
44
+ struct LittleEndianArrayTag {};
45
+
46
+ #if ARROW_LITTLE_ENDIAN
47
+ static constexpr int kHighWordIndex = NWORDS - 1;
48
+ static constexpr int kLowWordIndex = 0;
49
+ #else
50
+ static constexpr int kHighWordIndex = 0;
51
+ static constexpr int kLowWordIndex = NWORDS - 1;
52
+ #endif
53
+
54
+ public:
55
+ static constexpr int kBitWidth = BIT_WIDTH;
56
+ static constexpr int kByteWidth = kBitWidth / 8;
57
+ static constexpr int kNumWords = NWORDS;
58
+
59
+ // A constructor tag to introduce a little-endian encoded array
60
+ static constexpr LittleEndianArrayTag LittleEndianArray{};
61
+
62
+ using WordArray = std::array<uint64_t, NWORDS>;
63
+
64
+ /// \brief Empty constructor creates a decimal with a value of 0.
65
+ constexpr GenericBasicDecimal() noexcept : array_({0}) {}
66
+
67
+ /// \brief Create a decimal from the two's complement representation.
68
+ ///
69
+ /// Input array is assumed to be in native endianness.
70
+ explicit constexpr GenericBasicDecimal(const WordArray& array) noexcept
71
+ : array_(array) {}
72
+
73
+ /// \brief Create a decimal from the two's complement representation.
74
+ ///
75
+ /// Input array is assumed to be in little endianness, with native endian elements.
76
+ GenericBasicDecimal(LittleEndianArrayTag, const WordArray& array) noexcept
77
+ : GenericBasicDecimal(bit_util::little_endian::ToNative(array)) {}
78
+
79
+ /// \brief Create a decimal from any integer not wider than 64 bits.
80
+ template <typename T,
81
+ typename = typename std::enable_if<
82
+ std::is_integral<T>::value && (sizeof(T) <= sizeof(uint64_t)), T>::type>
83
+ constexpr GenericBasicDecimal(T value) noexcept // NOLINT(runtime/explicit)
84
+ : array_(WordsFromLowBits(value)) {}
85
+
86
+ /// \brief Create a decimal from an array of bytes.
87
+ ///
88
+ /// Bytes are assumed to be in native-endian byte order.
89
+ explicit GenericBasicDecimal(const uint8_t* bytes) {
90
+ memcpy(array_.data(), bytes, sizeof(array_));
91
+ }
92
+
93
+ /// \brief Get the bits of the two's complement representation of the number.
94
+ ///
95
+ /// The elements are in native endian order. The bits within each uint64_t element
96
+ /// are in native endian order. For example, on a little endian machine,
97
+ /// BasicDecimal128(123).native_endian_array() = {123, 0};
98
+ /// but on a big endian machine,
99
+ /// BasicDecimal128(123).native_endian_array() = {0, 123};
100
+ constexpr const WordArray& native_endian_array() const { return array_; }
101
+
102
+ /// \brief Get the bits of the two's complement representation of the number.
103
+ ///
104
+ /// The elements are in little endian order. However, the bits within each
105
+ /// uint64_t element are in native endian order.
106
+ /// For example, BasicDecimal128(123).little_endian_array() = {123, 0};
107
+ WordArray little_endian_array() const {
108
+ return bit_util::little_endian::FromNative(array_);
109
+ }
110
+
111
+ const uint8_t* native_endian_bytes() const {
112
+ return reinterpret_cast<const uint8_t*>(array_.data());
113
+ }
114
+
115
+ uint8_t* mutable_native_endian_bytes() {
116
+ return reinterpret_cast<uint8_t*>(array_.data());
117
+ }
118
+
119
+ /// \brief Return the raw bytes of the value in native-endian byte order.
120
+ std::array<uint8_t, kByteWidth> ToBytes() const {
121
+ std::array<uint8_t, kByteWidth> out{{0}};
122
+ memcpy(out.data(), array_.data(), kByteWidth);
123
+ return out;
124
+ }
125
+
126
+ /// \brief Copy the raw bytes of the value in native-endian byte order.
127
+ void ToBytes(uint8_t* out) const { memcpy(out, array_.data(), kByteWidth); }
128
+
129
+ /// Return 1 if positive or zero, -1 if strictly negative.
130
+ int64_t Sign() const {
131
+ return 1 | (static_cast<int64_t>(array_[kHighWordIndex]) >> 63);
132
+ }
133
+
134
+ bool IsNegative() const { return static_cast<int64_t>(array_[kHighWordIndex]) < 0; }
135
+
136
+ explicit operator bool() const { return array_ != WordArray{}; }
137
+
138
+ friend bool operator==(const GenericBasicDecimal& left,
139
+ const GenericBasicDecimal& right) {
140
+ return left.array_ == right.array_;
141
+ }
142
+
143
+ friend bool operator!=(const GenericBasicDecimal& left,
144
+ const GenericBasicDecimal& right) {
145
+ return left.array_ != right.array_;
146
+ }
147
+
148
+ protected:
149
+ WordArray array_;
150
+
151
+ template <typename T>
152
+ static constexpr uint64_t SignExtend(T low_bits) noexcept {
153
+ return low_bits >= T{} ? uint64_t{0} : ~uint64_t{0};
154
+ }
155
+
156
+ template <typename T>
157
+ static constexpr WordArray WordsFromLowBits(T low_bits) {
158
+ WordArray words{};
159
+ if (low_bits < T{}) {
160
+ for (auto& word : words) {
161
+ word = ~uint64_t{0};
162
+ }
163
+ }
164
+ words[kLowWordIndex] = static_cast<uint64_t>(low_bits);
165
+ return words;
166
+ }
167
+ };
168
+
169
+ /// Represents a signed 128-bit integer in two's complement.
170
+ ///
171
+ /// This class is also compiled into LLVM IR - so, it should not have cpp references like
172
+ /// streams and boost.
173
+ class ARROW_EXPORT BasicDecimal128 : public GenericBasicDecimal<BasicDecimal128, 128> {
174
+ public:
175
+ static constexpr int kMaxPrecision = 38;
176
+ static constexpr int kMaxScale = 38;
177
+
178
+ using GenericBasicDecimal::GenericBasicDecimal;
179
+
180
+ constexpr BasicDecimal128() noexcept : GenericBasicDecimal() {}
181
+
182
+ /// \brief Create a BasicDecimal128 from the two's complement representation.
183
+ #if ARROW_LITTLE_ENDIAN
184
+ constexpr BasicDecimal128(int64_t high, uint64_t low) noexcept
185
+ : BasicDecimal128(WordArray{low, static_cast<uint64_t>(high)}) {}
186
+ #else
187
+ constexpr BasicDecimal128(int64_t high, uint64_t low) noexcept
188
+ : BasicDecimal128(WordArray{static_cast<uint64_t>(high), low}) {}
189
+ #endif
190
+
191
+ /// \brief Negate the current value (in-place)
192
+ BasicDecimal128& Negate();
193
+
194
+ /// \brief Absolute value (in-place)
195
+ BasicDecimal128& Abs();
196
+
197
+ /// \brief Absolute value
198
+ static BasicDecimal128 Abs(const BasicDecimal128& left);
199
+
200
+ /// \brief Add a number to this one. The result is truncated to 128 bits.
201
+ BasicDecimal128& operator+=(const BasicDecimal128& right);
202
+
203
+ /// \brief Subtract a number from this one. The result is truncated to 128 bits.
204
+ BasicDecimal128& operator-=(const BasicDecimal128& right);
205
+
206
+ /// \brief Multiply this number by another number. The result is truncated to 128 bits.
207
+ BasicDecimal128& operator*=(const BasicDecimal128& right);
208
+
209
+ /// Divide this number by right and return the result.
210
+ ///
211
+ /// This operation is not destructive.
212
+ /// The answer rounds to zero. Signs work like:
213
+ /// 21 / 5 -> 4, 1
214
+ /// -21 / 5 -> -4, -1
215
+ /// 21 / -5 -> -4, 1
216
+ /// -21 / -5 -> 4, -1
217
+ /// \param[in] divisor the number to divide by
218
+ /// \param[out] result the quotient
219
+ /// \param[out] remainder the remainder after the division
220
+ DecimalStatus Divide(const BasicDecimal128& divisor, BasicDecimal128* result,
221
+ BasicDecimal128* remainder) const;
222
+
223
+ /// \brief In-place division.
224
+ BasicDecimal128& operator/=(const BasicDecimal128& right);
225
+
226
+ /// \brief Bitwise "or" between two BasicDecimal128.
227
+ BasicDecimal128& operator|=(const BasicDecimal128& right);
228
+
229
+ /// \brief Bitwise "and" between two BasicDecimal128.
230
+ BasicDecimal128& operator&=(const BasicDecimal128& right);
231
+
232
+ /// \brief Shift left by the given number of bits.
233
+ BasicDecimal128& operator<<=(uint32_t bits);
234
+
235
+ BasicDecimal128 operator<<(uint32_t bits) const {
236
+ auto res = *this;
237
+ res <<= bits;
238
+ return res;
239
+ }
240
+
241
+ /// \brief Shift right by the given number of bits.
242
+ ///
243
+ /// Negative values will sign-extend.
244
+ BasicDecimal128& operator>>=(uint32_t bits);
245
+
246
+ BasicDecimal128 operator>>(uint32_t bits) const {
247
+ auto res = *this;
248
+ res >>= bits;
249
+ return res;
250
+ }
251
+
252
+ /// \brief Get the high bits of the two's complement representation of the number.
253
+ constexpr int64_t high_bits() const {
254
+ #if ARROW_LITTLE_ENDIAN
255
+ return static_cast<int64_t>(array_[1]);
256
+ #else
257
+ return static_cast<int64_t>(array_[0]);
258
+ #endif
259
+ }
260
+
261
+ /// \brief Get the low bits of the two's complement representation of the number.
262
+ constexpr uint64_t low_bits() const {
263
+ #if ARROW_LITTLE_ENDIAN
264
+ return array_[0];
265
+ #else
266
+ return array_[1];
267
+ #endif
268
+ }
269
+
270
+ /// \brief separate the integer and fractional parts for the given scale.
271
+ void GetWholeAndFraction(int32_t scale, BasicDecimal128* whole,
272
+ BasicDecimal128* fraction) const;
273
+
274
+ /// \brief Scale multiplier for given scale value.
275
+ static const BasicDecimal128& GetScaleMultiplier(int32_t scale);
276
+ /// \brief Half-scale multiplier for given scale value.
277
+ static const BasicDecimal128& GetHalfScaleMultiplier(int32_t scale);
278
+
279
+ /// \brief Convert BasicDecimal128 from one scale to another
280
+ DecimalStatus Rescale(int32_t original_scale, int32_t new_scale,
281
+ BasicDecimal128* out) const;
282
+
283
+ /// \brief Scale up.
284
+ BasicDecimal128 IncreaseScaleBy(int32_t increase_by) const;
285
+
286
+ /// \brief Scale down.
287
+ /// - If 'round' is true, the right-most digits are dropped and the result value is
288
+ /// rounded up (+1 for +ve, -1 for -ve) based on the value of the dropped digits
289
+ /// (>= 10^reduce_by / 2).
290
+ /// - If 'round' is false, the right-most digits are simply dropped.
291
+ BasicDecimal128 ReduceScaleBy(int32_t reduce_by, bool round = true) const;
292
+
293
+ /// \brief Whether this number fits in the given precision
294
+ ///
295
+ /// Return true if the number of significant digits is less or equal to `precision`.
296
+ bool FitsInPrecision(int32_t precision) const;
297
+
298
+ /// \brief count the number of leading binary zeroes.
299
+ int32_t CountLeadingBinaryZeros() const;
300
+
301
+ /// \brief Get the maximum valid unscaled decimal value.
302
+ static const BasicDecimal128& GetMaxValue();
303
+
304
+ /// \brief Get the maximum valid unscaled decimal value for the given precision.
305
+ static BasicDecimal128 GetMaxValue(int32_t precision);
306
+
307
+ /// \brief Get the maximum decimal value (is not a valid value).
308
+ static constexpr BasicDecimal128 GetMaxSentinel() {
309
+ return BasicDecimal128(/*high=*/std::numeric_limits<int64_t>::max(),
310
+ /*low=*/std::numeric_limits<uint64_t>::max());
311
+ }
312
+ /// \brief Get the minimum decimal value (is not a valid value).
313
+ static constexpr BasicDecimal128 GetMinSentinel() {
314
+ return BasicDecimal128(/*high=*/std::numeric_limits<int64_t>::min(),
315
+ /*low=*/std::numeric_limits<uint64_t>::min());
316
+ }
317
+ };
318
+
319
+ ARROW_EXPORT bool operator<(const BasicDecimal128& left, const BasicDecimal128& right);
320
+ ARROW_EXPORT bool operator<=(const BasicDecimal128& left, const BasicDecimal128& right);
321
+ ARROW_EXPORT bool operator>(const BasicDecimal128& left, const BasicDecimal128& right);
322
+ ARROW_EXPORT bool operator>=(const BasicDecimal128& left, const BasicDecimal128& right);
323
+
324
+ ARROW_EXPORT BasicDecimal128 operator-(const BasicDecimal128& operand);
325
+ ARROW_EXPORT BasicDecimal128 operator~(const BasicDecimal128& operand);
326
+ ARROW_EXPORT BasicDecimal128 operator+(const BasicDecimal128& left,
327
+ const BasicDecimal128& right);
328
+ ARROW_EXPORT BasicDecimal128 operator-(const BasicDecimal128& left,
329
+ const BasicDecimal128& right);
330
+ ARROW_EXPORT BasicDecimal128 operator*(const BasicDecimal128& left,
331
+ const BasicDecimal128& right);
332
+ ARROW_EXPORT BasicDecimal128 operator/(const BasicDecimal128& left,
333
+ const BasicDecimal128& right);
334
+ ARROW_EXPORT BasicDecimal128 operator%(const BasicDecimal128& left,
335
+ const BasicDecimal128& right);
336
+
337
+ class ARROW_EXPORT BasicDecimal256 : public GenericBasicDecimal<BasicDecimal256, 256> {
338
+ public:
339
+ using GenericBasicDecimal::GenericBasicDecimal;
340
+
341
+ static constexpr int kMaxPrecision = 76;
342
+ static constexpr int kMaxScale = 76;
343
+
344
+ constexpr BasicDecimal256() noexcept : GenericBasicDecimal() {}
345
+
346
+ explicit BasicDecimal256(const BasicDecimal128& value) noexcept
347
+ : BasicDecimal256(bit_util::little_endian::ToNative<uint64_t, 4>(
348
+ {value.low_bits(), static_cast<uint64_t>(value.high_bits()),
349
+ SignExtend(value.high_bits()), SignExtend(value.high_bits())})) {}
350
+
351
+ /// \brief Negate the current value (in-place)
352
+ BasicDecimal256& Negate();
353
+
354
+ /// \brief Absolute value (in-place)
355
+ BasicDecimal256& Abs();
356
+
357
+ /// \brief Absolute value
358
+ static BasicDecimal256 Abs(const BasicDecimal256& left);
359
+
360
+ /// \brief Add a number to this one. The result is truncated to 256 bits.
361
+ BasicDecimal256& operator+=(const BasicDecimal256& right);
362
+
363
+ /// \brief Subtract a number from this one. The result is truncated to 256 bits.
364
+ BasicDecimal256& operator-=(const BasicDecimal256& right);
365
+
366
+ /// \brief Get the lowest bits of the two's complement representation of the number.
367
+ uint64_t low_bits() const { return bit_util::little_endian::Make(array_)[0]; }
368
+
369
+ /// \brief separate the integer and fractional parts for the given scale.
370
+ void GetWholeAndFraction(int32_t scale, BasicDecimal256* whole,
371
+ BasicDecimal256* fraction) const;
372
+
373
+ /// \brief Scale multiplier for given scale value.
374
+ static const BasicDecimal256& GetScaleMultiplier(int32_t scale);
375
+ /// \brief Half-scale multiplier for given scale value.
376
+ static const BasicDecimal256& GetHalfScaleMultiplier(int32_t scale);
377
+
378
+ /// \brief Convert BasicDecimal256 from one scale to another
379
+ DecimalStatus Rescale(int32_t original_scale, int32_t new_scale,
380
+ BasicDecimal256* out) const;
381
+
382
+ /// \brief Scale up.
383
+ BasicDecimal256 IncreaseScaleBy(int32_t increase_by) const;
384
+
385
+ /// \brief Scale down.
386
+ /// - If 'round' is true, the right-most digits are dropped and the result value is
387
+ /// rounded up (+1 for positive, -1 for negative) based on the value of the
388
+ /// dropped digits (>= 10^reduce_by / 2).
389
+ /// - If 'round' is false, the right-most digits are simply dropped.
390
+ BasicDecimal256 ReduceScaleBy(int32_t reduce_by, bool round = true) const;
391
+
392
+ /// \brief Whether this number fits in the given precision
393
+ ///
394
+ /// Return true if the number of significant digits is less or equal to `precision`.
395
+ bool FitsInPrecision(int32_t precision) const;
396
+
397
+ /// \brief Multiply this number by another number. The result is truncated to 256 bits.
398
+ BasicDecimal256& operator*=(const BasicDecimal256& right);
399
+
400
+ /// Divide this number by right and return the result.
401
+ ///
402
+ /// This operation is not destructive.
403
+ /// The answer rounds to zero. Signs work like:
404
+ /// 21 / 5 -> 4, 1
405
+ /// -21 / 5 -> -4, -1
406
+ /// 21 / -5 -> -4, 1
407
+ /// -21 / -5 -> 4, -1
408
+ /// \param[in] divisor the number to divide by
409
+ /// \param[out] result the quotient
410
+ /// \param[out] remainder the remainder after the division
411
+ DecimalStatus Divide(const BasicDecimal256& divisor, BasicDecimal256* result,
412
+ BasicDecimal256* remainder) const;
413
+
414
+ /// \brief Shift left by the given number of bits.
415
+ BasicDecimal256& operator<<=(uint32_t bits);
416
+
417
+ BasicDecimal256 operator<<(uint32_t bits) const {
418
+ auto res = *this;
419
+ res <<= bits;
420
+ return res;
421
+ }
422
+
423
+ /// \brief Shift right by the given number of bits.
424
+ ///
425
+ /// Negative values will sign-extend.
426
+ BasicDecimal256& operator>>=(uint32_t bits);
427
+
428
+ BasicDecimal256 operator>>(uint32_t bits) const {
429
+ auto res = *this;
430
+ res >>= bits;
431
+ return res;
432
+ }
433
+
434
+ /// \brief In-place division.
435
+ BasicDecimal256& operator/=(const BasicDecimal256& right);
436
+
437
+ /// \brief Get the maximum valid unscaled decimal value for the given precision.
438
+ static BasicDecimal256 GetMaxValue(int32_t precision);
439
+
440
+ /// \brief Get the maximum decimal value (is not a valid value).
441
+ static constexpr BasicDecimal256 GetMaxSentinel() {
442
+ #if ARROW_LITTLE_ENDIAN
443
+ return BasicDecimal256({std::numeric_limits<uint64_t>::max(),
444
+ std::numeric_limits<uint64_t>::max(),
445
+ std::numeric_limits<uint64_t>::max(),
446
+ static_cast<uint64_t>(std::numeric_limits<int64_t>::max())});
447
+ #else
448
+ return BasicDecimal256({static_cast<uint64_t>(std::numeric_limits<int64_t>::max()),
449
+ std::numeric_limits<uint64_t>::max(),
450
+ std::numeric_limits<uint64_t>::max(),
451
+ std::numeric_limits<uint64_t>::max()});
452
+ #endif
453
+ }
454
+ /// \brief Get the minimum decimal value (is not a valid value).
455
+ static constexpr BasicDecimal256 GetMinSentinel() {
456
+ #if ARROW_LITTLE_ENDIAN
457
+ return BasicDecimal256(
458
+ {0, 0, 0, static_cast<uint64_t>(std::numeric_limits<int64_t>::min())});
459
+ #else
460
+ return BasicDecimal256(
461
+ {static_cast<uint64_t>(std::numeric_limits<int64_t>::min()), 0, 0, 0});
462
+ #endif
463
+ }
464
+ };
465
+
466
+ ARROW_EXPORT bool operator<(const BasicDecimal256& left, const BasicDecimal256& right);
467
+
468
+ ARROW_EXPORT inline bool operator<=(const BasicDecimal256& left,
469
+ const BasicDecimal256& right) {
470
+ return !operator<(right, left);
471
+ }
472
+
473
+ ARROW_EXPORT inline bool operator>(const BasicDecimal256& left,
474
+ const BasicDecimal256& right) {
475
+ return operator<(right, left);
476
+ }
477
+
478
+ ARROW_EXPORT inline bool operator>=(const BasicDecimal256& left,
479
+ const BasicDecimal256& right) {
480
+ return !operator<(left, right);
481
+ }
482
+
483
+ ARROW_EXPORT BasicDecimal256 operator-(const BasicDecimal256& operand);
484
+ ARROW_EXPORT BasicDecimal256 operator~(const BasicDecimal256& operand);
485
+ ARROW_EXPORT BasicDecimal256 operator+(const BasicDecimal256& left,
486
+ const BasicDecimal256& right);
487
+ ARROW_EXPORT BasicDecimal256 operator*(const BasicDecimal256& left,
488
+ const BasicDecimal256& right);
489
+ ARROW_EXPORT BasicDecimal256 operator/(const BasicDecimal256& left,
490
+ const BasicDecimal256& right);
491
+
492
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_block_counter.h ADDED
@@ -0,0 +1,570 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <cstdint>
22
+ #include <limits>
23
+ #include <memory>
24
+
25
+ #include "arrow/buffer.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/util/bit_util.h"
28
+ #include "arrow/util/endian.h"
29
+ #include "arrow/util/macros.h"
30
+ #include "arrow/util/ubsan.h"
31
+ #include "arrow/util/visibility.h"
32
+
33
+ namespace arrow {
34
+ namespace internal {
35
+ namespace detail {
36
+
37
+ inline uint64_t LoadWord(const uint8_t* bytes) {
38
+ return bit_util::ToLittleEndian(util::SafeLoadAs<uint64_t>(bytes));
39
+ }
40
+
41
+ inline uint64_t ShiftWord(uint64_t current, uint64_t next, int64_t shift) {
42
+ if (shift == 0) {
43
+ return current;
44
+ }
45
+ return (current >> shift) | (next << (64 - shift));
46
+ }
47
+
48
+ // These templates are here to help with unit tests
49
+
50
+ template <typename T>
51
+ constexpr T BitNot(T x) {
52
+ return ~x;
53
+ }
54
+
55
+ template <>
56
+ constexpr bool BitNot(bool x) {
57
+ return !x;
58
+ }
59
+
60
+ struct BitBlockAnd {
61
+ template <typename T>
62
+ static constexpr T Call(T left, T right) {
63
+ return left & right;
64
+ }
65
+ };
66
+
67
+ struct BitBlockAndNot {
68
+ template <typename T>
69
+ static constexpr T Call(T left, T right) {
70
+ return left & BitNot(right);
71
+ }
72
+ };
73
+
74
+ struct BitBlockOr {
75
+ template <typename T>
76
+ static constexpr T Call(T left, T right) {
77
+ return left | right;
78
+ }
79
+ };
80
+
81
+ struct BitBlockOrNot {
82
+ template <typename T>
83
+ static constexpr T Call(T left, T right) {
84
+ return left | BitNot(right);
85
+ }
86
+ };
87
+
88
+ } // namespace detail
89
+
90
+ /// \brief Return value from bit block counters: the total number of bits and
91
+ /// the number of set bits.
92
+ struct BitBlockCount {
93
+ int16_t length;
94
+ int16_t popcount;
95
+
96
+ bool NoneSet() const { return this->popcount == 0; }
97
+ bool AllSet() const { return this->length == this->popcount; }
98
+ };
99
+
100
+ /// \brief A class that scans through a true/false bitmap to compute popcounts
101
+ /// 64 or 256 bits at a time. This is used to accelerate processing of
102
+ /// mostly-not-null array data.
103
+ class ARROW_EXPORT BitBlockCounter {
104
+ public:
105
+ BitBlockCounter(const uint8_t* bitmap, int64_t start_offset, int64_t length)
106
+ : bitmap_(util::MakeNonNull(bitmap) + start_offset / 8),
107
+ bits_remaining_(length),
108
+ offset_(start_offset % 8) {}
109
+
110
+ /// \brief The bit size of each word run
111
+ static constexpr int64_t kWordBits = 64;
112
+
113
+ /// \brief The bit size of four words run
114
+ static constexpr int64_t kFourWordsBits = kWordBits * 4;
115
+
116
+ /// \brief Return the next run of available bits, usually 256. The returned
117
+ /// pair contains the size of the run and the number of true values. The last
118
+ /// block will have a length less than 256 if the bitmap length is not a
119
+ /// multiple of 256, and will return 0-length blocks in subsequent
120
+ /// invocations.
121
+ BitBlockCount NextFourWords() {
122
+ using detail::LoadWord;
123
+ using detail::ShiftWord;
124
+
125
+ if (!bits_remaining_) {
126
+ return {0, 0};
127
+ }
128
+ int64_t total_popcount = 0;
129
+ if (offset_ == 0) {
130
+ if (bits_remaining_ < kFourWordsBits) {
131
+ return GetBlockSlow(kFourWordsBits);
132
+ }
133
+ total_popcount += bit_util::PopCount(LoadWord(bitmap_));
134
+ total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 8));
135
+ total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 16));
136
+ total_popcount += bit_util::PopCount(LoadWord(bitmap_ + 24));
137
+ } else {
138
+ // When the offset is > 0, we need there to be a word beyond the last
139
+ // aligned word in the bitmap for the bit shifting logic.
140
+ if (bits_remaining_ < 5 * kFourWordsBits - offset_) {
141
+ return GetBlockSlow(kFourWordsBits);
142
+ }
143
+ auto current = LoadWord(bitmap_);
144
+ auto next = LoadWord(bitmap_ + 8);
145
+ total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
146
+ current = next;
147
+ next = LoadWord(bitmap_ + 16);
148
+ total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
149
+ current = next;
150
+ next = LoadWord(bitmap_ + 24);
151
+ total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
152
+ current = next;
153
+ next = LoadWord(bitmap_ + 32);
154
+ total_popcount += bit_util::PopCount(ShiftWord(current, next, offset_));
155
+ }
156
+ bitmap_ += bit_util::BytesForBits(kFourWordsBits);
157
+ bits_remaining_ -= kFourWordsBits;
158
+ return {256, static_cast<int16_t>(total_popcount)};
159
+ }
160
+
161
+ /// \brief Return the next run of available bits, usually 64. The returned
162
+ /// pair contains the size of the run and the number of true values. The last
163
+ /// block will have a length less than 64 if the bitmap length is not a
164
+ /// multiple of 64, and will return 0-length blocks in subsequent
165
+ /// invocations.
166
+ BitBlockCount NextWord() {
167
+ using detail::LoadWord;
168
+ using detail::ShiftWord;
169
+
170
+ if (!bits_remaining_) {
171
+ return {0, 0};
172
+ }
173
+ int64_t popcount = 0;
174
+ if (offset_ == 0) {
175
+ if (bits_remaining_ < kWordBits) {
176
+ return GetBlockSlow(kWordBits);
177
+ }
178
+ popcount = bit_util::PopCount(LoadWord(bitmap_));
179
+ } else {
180
+ // When the offset is > 0, we need there to be a word beyond the last
181
+ // aligned word in the bitmap for the bit shifting logic.
182
+ if (bits_remaining_ < 2 * kWordBits - offset_) {
183
+ return GetBlockSlow(kWordBits);
184
+ }
185
+ popcount = bit_util::PopCount(
186
+ ShiftWord(LoadWord(bitmap_), LoadWord(bitmap_ + 8), offset_));
187
+ }
188
+ bitmap_ += kWordBits / 8;
189
+ bits_remaining_ -= kWordBits;
190
+ return {64, static_cast<int16_t>(popcount)};
191
+ }
192
+
193
+ private:
194
+ /// \brief Return block with the requested size when doing word-wise
195
+ /// computation is not possible due to inadequate bits remaining.
196
+ BitBlockCount GetBlockSlow(int64_t block_size) noexcept;
197
+
198
+ const uint8_t* bitmap_;
199
+ int64_t bits_remaining_;
200
+ int64_t offset_;
201
+ };
202
+
203
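A sketch of scanning a bitmap word by word with the class above (illustrative only):

    int64_t CountSetBits(const uint8_t* bitmap, int64_t offset, int64_t length) {
      arrow::internal::BitBlockCounter counter(bitmap, offset, length);
      int64_t total = 0;
      while (true) {
        arrow::internal::BitBlockCount block = counter.NextWord();
        if (block.length == 0) break;  // all bits consumed
        total += block.popcount;
      }
      return total;
    }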
+ /// \brief A tool to iterate through a possibly nonexistent validity bitmap,
204
+ /// to allow us to write one code path for both the with-nulls and no-nulls
205
+ /// cases without giving up a lot of performance.
206
+ class ARROW_EXPORT OptionalBitBlockCounter {
207
+ public:
208
+ // validity_bitmap may be NULLPTR
209
+ OptionalBitBlockCounter(const uint8_t* validity_bitmap, int64_t offset, int64_t length);
210
+
211
+ // validity_bitmap may be null
212
+ OptionalBitBlockCounter(const std::shared_ptr<Buffer>& validity_bitmap, int64_t offset,
213
+ int64_t length);
214
+
215
+ /// Return the block count for the next word when the bitmap is available; otherwise
216
+ /// return a block with length up to INT16_MAX when there is no validity
217
+ /// bitmap (so all the referenced values are non-null).
218
+ BitBlockCount NextBlock() {
219
+ static constexpr int64_t kMaxBlockSize = std::numeric_limits<int16_t>::max();
220
+ if (has_bitmap_) {
221
+ BitBlockCount block = counter_.NextWord();
222
+ position_ += block.length;
223
+ return block;
224
+ } else {
225
+ int16_t block_size =
226
+ static_cast<int16_t>(std::min(kMaxBlockSize, length_ - position_));
227
+ position_ += block_size;
228
+ // All values are non-null
229
+ return {block_size, block_size};
230
+ }
231
+ }
232
+
233
+ // Like NextBlock, but returns a word-sized block even when there is no
234
+ // validity bitmap
235
+ BitBlockCount NextWord() {
236
+ static constexpr int64_t kWordSize = 64;
237
+ if (has_bitmap_) {
238
+ BitBlockCount block = counter_.NextWord();
239
+ position_ += block.length;
240
+ return block;
241
+ } else {
242
+ int16_t block_size = static_cast<int16_t>(std::min(kWordSize, length_ - position_));
243
+ position_ += block_size;
244
+ // All values are non-null
245
+ return {block_size, block_size};
246
+ }
247
+ }
248
+
249
+ private:
250
+ const bool has_bitmap_;
251
+ int64_t position_;
252
+ int64_t length_;
253
+ BitBlockCounter counter_;
254
+ };
255
+
256
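A sketch of the single code path this class enables (illustrative; `validity_bitmap` may be NULLPTR):

    arrow::internal::OptionalBitBlockCounter counter(validity_bitmap, offset, length);
    int64_t position = 0;
    while (position < length) {
      arrow::internal::BitBlockCount block = counter.NextBlock();
      if (block.AllSet()) {
        // fast path: every value in this run of block.length entries is valid
      } else {
        // slow path: consult the bitmap per element (or skip entirely if NoneSet())
      }
      position += block.length;
    }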
+ /// \brief A class that computes popcounts on the result of bitwise operations
257
+ /// between two bitmaps, 64 bits at a time. A 64-bit word is loaded from each
258
+ /// bitmap, then the popcount is computed on e.g. the bitwise-and of the two
259
+ /// words.
260
+ class ARROW_EXPORT BinaryBitBlockCounter {
261
+ public:
262
+ BinaryBitBlockCounter(const uint8_t* left_bitmap, int64_t left_offset,
263
+ const uint8_t* right_bitmap, int64_t right_offset, int64_t length)
264
+ : left_bitmap_(util::MakeNonNull(left_bitmap) + left_offset / 8),
265
+ left_offset_(left_offset % 8),
266
+ right_bitmap_(util::MakeNonNull(right_bitmap) + right_offset / 8),
267
+ right_offset_(right_offset % 8),
268
+ bits_remaining_(length) {}
269
+
270
+ /// \brief Return the popcount of the bitwise-and of the next run of
271
+ /// available bits, up to 64. The returned pair contains the size of the run and
272
+ /// the number of true values. The last block will have a length less than 64
273
+ /// if the bitmap length is not a multiple of 64, and will return 0-length
274
+ /// blocks in subsequent invocations.
275
+ BitBlockCount NextAndWord() { return NextWord<detail::BitBlockAnd>(); }
276
+
277
+ /// \brief Computes "x & ~y" block for each available run of bits.
278
+ BitBlockCount NextAndNotWord() { return NextWord<detail::BitBlockAndNot>(); }
279
+
280
+ /// \brief Computes "x | y" block for each available run of bits.
281
+ BitBlockCount NextOrWord() { return NextWord<detail::BitBlockOr>(); }
282
+
283
+ /// \brief Computes "x | ~y" block for each available run of bits.
284
+ BitBlockCount NextOrNotWord() { return NextWord<detail::BitBlockOrNot>(); }
285
+
286
+ private:
287
+ template <class Op>
288
+ BitBlockCount NextWord() {
289
+ using detail::LoadWord;
290
+ using detail::ShiftWord;
291
+
292
+ if (!bits_remaining_) {
293
+ return {0, 0};
294
+ }
295
+ // When the offset is > 0, we need there to be a word beyond the last aligned
296
+ // word in the bitmap for the bit shifting logic.
297
+ constexpr int64_t kWordBits = BitBlockCounter::kWordBits;
298
+ const int64_t bits_required_to_use_words =
299
+ std::max(left_offset_ == 0 ? 64 : 64 + (64 - left_offset_),
300
+ right_offset_ == 0 ? 64 : 64 + (64 - right_offset_));
301
+ if (bits_remaining_ < bits_required_to_use_words) {
302
+ const int16_t run_length =
303
+ static_cast<int16_t>(std::min(bits_remaining_, kWordBits));
304
+ int16_t popcount = 0;
305
+ for (int64_t i = 0; i < run_length; ++i) {
306
+ if (Op::Call(bit_util::GetBit(left_bitmap_, left_offset_ + i),
307
+ bit_util::GetBit(right_bitmap_, right_offset_ + i))) {
308
+ ++popcount;
309
+ }
310
+ }
311
+ // This code path should trigger _at most_ 2 times. In the "two times"
312
+ // case, the first time the run length will be a multiple of 8.
313
+ left_bitmap_ += run_length / 8;
314
+ right_bitmap_ += run_length / 8;
315
+ bits_remaining_ -= run_length;
316
+ return {run_length, popcount};
317
+ }
318
+
319
+ int64_t popcount = 0;
320
+ if (left_offset_ == 0 && right_offset_ == 0) {
321
+ popcount =
322
+ bit_util::PopCount(Op::Call(LoadWord(left_bitmap_), LoadWord(right_bitmap_)));
323
+ } else {
324
+ auto left_word =
325
+ ShiftWord(LoadWord(left_bitmap_), LoadWord(left_bitmap_ + 8), left_offset_);
326
+ auto right_word =
327
+ ShiftWord(LoadWord(right_bitmap_), LoadWord(right_bitmap_ + 8), right_offset_);
328
+ popcount = bit_util::PopCount(Op::Call(left_word, right_word));
329
+ }
330
+ left_bitmap_ += kWordBits / 8;
331
+ right_bitmap_ += kWordBits / 8;
332
+ bits_remaining_ -= kWordBits;
333
+ return {64, static_cast<int16_t>(popcount)};
334
+ }
335
+
336
+ const uint8_t* left_bitmap_;
337
+ int64_t left_offset_;
338
+ const uint8_t* right_bitmap_;
339
+ int64_t right_offset_;
340
+ int64_t bits_remaining_;
341
+ };
342
+
343
+ class ARROW_EXPORT OptionalBinaryBitBlockCounter {
344
+ public:
345
+ // Any bitmap may be NULLPTR
346
+ OptionalBinaryBitBlockCounter(const uint8_t* left_bitmap, int64_t left_offset,
347
+ const uint8_t* right_bitmap, int64_t right_offset,
348
+ int64_t length);
349
+
350
+ // Any bitmap may be null
351
+ OptionalBinaryBitBlockCounter(const std::shared_ptr<Buffer>& left_bitmap,
352
+ int64_t left_offset,
353
+ const std::shared_ptr<Buffer>& right_bitmap,
354
+ int64_t right_offset, int64_t length);
355
+
356
+ BitBlockCount NextAndBlock() {
357
+ static constexpr int64_t kMaxBlockSize = std::numeric_limits<int16_t>::max();
358
+ switch (has_bitmap_) {
359
+ case HasBitmap::BOTH: {
360
+ BitBlockCount block = binary_counter_.NextAndWord();
361
+ position_ += block.length;
362
+ return block;
363
+ }
364
+ case HasBitmap::ONE: {
365
+ BitBlockCount block = unary_counter_.NextWord();
366
+ position_ += block.length;
367
+ return block;
368
+ }
369
+ case HasBitmap::NONE:
370
+ default: {
371
+ const int16_t block_size =
372
+ static_cast<int16_t>(std::min(kMaxBlockSize, length_ - position_));
373
+ position_ += block_size;
374
+ // All values are non-null
375
+ return {block_size, block_size};
376
+ }
377
+ }
378
+ }
379
+
380
+ BitBlockCount NextOrNotBlock() {
381
+ static constexpr int64_t kMaxBlockSize = std::numeric_limits<int16_t>::max();
382
+ switch (has_bitmap_) {
383
+ case HasBitmap::BOTH: {
384
+ BitBlockCount block = binary_counter_.NextOrNotWord();
385
+ position_ += block.length;
386
+ return block;
387
+ }
388
+ case HasBitmap::ONE: {
389
+ BitBlockCount block = unary_counter_.NextWord();
390
+ position_ += block.length;
391
+ return block;
392
+ }
393
+ case HasBitmap::NONE:
394
+ default: {
395
+ const int16_t block_size =
396
+ static_cast<int16_t>(std::min(kMaxBlockSize, length_ - position_));
397
+ position_ += block_size;
398
+ // All values are non-null
399
+ return {block_size, block_size};
400
+ }
401
+ }
402
+ }
403
+
404
+ private:
405
+ enum class HasBitmap : int { BOTH, ONE, NONE };
406
+
407
+ const HasBitmap has_bitmap_;
408
+ int64_t position_;
409
+ int64_t length_;
410
+ BitBlockCounter unary_counter_;
411
+ BinaryBitBlockCounter binary_counter_;
412
+
413
+ static HasBitmap HasBitmapFromBitmaps(bool has_left, bool has_right) {
414
+ switch (static_cast<int>(has_left) + static_cast<int>(has_right)) {
415
+ case 0:
416
+ return HasBitmap::NONE;
417
+ case 1:
418
+ return HasBitmap::ONE;
419
+ default: // 2
420
+ return HasBitmap::BOTH;
421
+ }
422
+ }
423
+ };
424
+
425
+ // Functional-style bit block visitors.
426
+
427
+ template <typename VisitNotNull, typename VisitNull>
428
+ static Status VisitBitBlocks(const uint8_t* bitmap, int64_t offset, int64_t length,
429
+ VisitNotNull&& visit_not_null, VisitNull&& visit_null) {
430
+ internal::OptionalBitBlockCounter bit_counter(bitmap, offset, length);
431
+ int64_t position = 0;
432
+ while (position < length) {
433
+ internal::BitBlockCount block = bit_counter.NextBlock();
434
+ if (block.AllSet()) {
435
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
436
+ ARROW_RETURN_NOT_OK(visit_not_null(position));
437
+ }
438
+ } else if (block.NoneSet()) {
439
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
440
+ ARROW_RETURN_NOT_OK(visit_null());
441
+ }
442
+ } else {
443
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
444
+ if (bit_util::GetBit(bitmap, offset + position)) {
445
+ ARROW_RETURN_NOT_OK(visit_not_null(position));
446
+ } else {
447
+ ARROW_RETURN_NOT_OK(visit_null());
448
+ }
449
+ }
450
+ }
451
+ }
452
+ return Status::OK();
453
+ }
454
+
455
+ template <typename VisitNotNull, typename VisitNull>
456
+ static void VisitBitBlocksVoid(const uint8_t* bitmap, int64_t offset, int64_t length,
457
+ VisitNotNull&& visit_not_null, VisitNull&& visit_null) {
458
+ internal::OptionalBitBlockCounter bit_counter(bitmap, offset, length);
459
+ int64_t position = 0;
460
+ while (position < length) {
461
+ internal::BitBlockCount block = bit_counter.NextBlock();
462
+ if (block.AllSet()) {
463
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
464
+ visit_not_null(position);
465
+ }
466
+ } else if (block.NoneSet()) {
467
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
468
+ visit_null();
469
+ }
470
+ } else {
471
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
472
+ if (bit_util::GetBit(bitmap, offset + position)) {
473
+ visit_not_null(position);
474
+ } else {
475
+ visit_null();
476
+ }
477
+ }
478
+ }
479
+ }
480
+ }
481
+
482
+ template <typename VisitNotNull, typename VisitNull>
483
+ static Status VisitTwoBitBlocks(const uint8_t* left_bitmap, int64_t left_offset,
484
+ const uint8_t* right_bitmap, int64_t right_offset,
485
+ int64_t length, VisitNotNull&& visit_not_null,
486
+ VisitNull&& visit_null) {
487
+ if (left_bitmap == NULLPTR || right_bitmap == NULLPTR) {
488
+ // At most one bitmap is present
489
+ if (left_bitmap == NULLPTR) {
490
+ return VisitBitBlocks(right_bitmap, right_offset, length,
491
+ std::forward<VisitNotNull>(visit_not_null),
492
+ std::forward<VisitNull>(visit_null));
493
+ } else {
494
+ return VisitBitBlocks(left_bitmap, left_offset, length,
495
+ std::forward<VisitNotNull>(visit_not_null),
496
+ std::forward<VisitNull>(visit_null));
497
+ }
498
+ }
499
+ BinaryBitBlockCounter bit_counter(left_bitmap, left_offset, right_bitmap, right_offset,
500
+ length);
501
+ int64_t position = 0;
502
+ while (position < length) {
503
+ BitBlockCount block = bit_counter.NextAndWord();
504
+ if (block.AllSet()) {
505
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
506
+ ARROW_RETURN_NOT_OK(visit_not_null(position));
507
+ }
508
+ } else if (block.NoneSet()) {
509
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
510
+ ARROW_RETURN_NOT_OK(visit_null());
511
+ }
512
+ } else {
513
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
514
+ if (bit_util::GetBit(left_bitmap, left_offset + position) &&
515
+ bit_util::GetBit(right_bitmap, right_offset + position)) {
516
+ ARROW_RETURN_NOT_OK(visit_not_null(position));
517
+ } else {
518
+ ARROW_RETURN_NOT_OK(visit_null());
519
+ }
520
+ }
521
+ }
522
+ }
523
+ return Status::OK();
524
+ }
525
+
526
+ template <typename VisitNotNull, typename VisitNull>
527
+ static void VisitTwoBitBlocksVoid(const uint8_t* left_bitmap, int64_t left_offset,
528
+ const uint8_t* right_bitmap, int64_t right_offset,
529
+ int64_t length, VisitNotNull&& visit_not_null,
530
+ VisitNull&& visit_null) {
531
+ if (left_bitmap == NULLPTR || right_bitmap == NULLPTR) {
532
+ // At most one bitmap is present
533
+ if (left_bitmap == NULLPTR) {
534
+ return VisitBitBlocksVoid(right_bitmap, right_offset, length,
535
+ std::forward<VisitNotNull>(visit_not_null),
536
+ std::forward<VisitNull>(visit_null));
537
+ } else {
538
+ return VisitBitBlocksVoid(left_bitmap, left_offset, length,
539
+ std::forward<VisitNotNull>(visit_not_null),
540
+ std::forward<VisitNull>(visit_null));
541
+ }
542
+ }
543
+ BinaryBitBlockCounter bit_counter(left_bitmap, left_offset, right_bitmap, right_offset,
544
+ length);
545
+ int64_t position = 0;
546
+ while (position < length) {
547
+ BitBlockCount block = bit_counter.NextAndWord();
548
+ if (block.AllSet()) {
549
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
550
+ visit_not_null(position);
551
+ }
552
+ } else if (block.NoneSet()) {
553
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
554
+ visit_null();
555
+ }
556
+ } else {
557
+ for (int64_t i = 0; i < block.length; ++i, ++position) {
558
+ if (bit_util::GetBit(left_bitmap, left_offset + position) &&
559
+ bit_util::GetBit(right_bitmap, right_offset + position)) {
560
+ visit_not_null(position);
561
+ } else {
562
+ visit_null();
563
+ }
564
+ }
565
+ }
566
+ }
567
+ }
568
+
569
+ } // namespace internal
570
+ } // namespace arrow
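A sketch of the functional-style visitors defined in this header (illustrative; `bitmap`, `offset`, and `length` as in the examples above):

    int64_t valid = 0, nulls = 0;
    arrow::internal::VisitBitBlocksVoid(
        bitmap, offset, length,
        [&](int64_t position) { ++valid; },  // invoked with each set bit's index
        [&]() { ++nulls; });                 // invoked once per unset bit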
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_stream_utils.h ADDED
@@ -0,0 +1,529 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // From Apache Impala (incubating) as of 2016-01-29
19
+
20
+ #pragma once
21
+
22
+ #include <algorithm>
23
+ #include <cstdint>
24
+ #include <cstring>
25
+
26
+ #include "arrow/util/bit_util.h"
27
+ #include "arrow/util/bpacking.h"
28
+ #include "arrow/util/logging.h"
29
+ #include "arrow/util/macros.h"
30
+ #include "arrow/util/ubsan.h"
31
+
32
+ namespace arrow {
33
+ namespace bit_util {
34
+
35
+ /// Utility class to write bit/byte streams. This class can write data to either be
36
+ /// bit packed or byte aligned (and a single stream that has a mix of both).
37
+ /// This class does not allocate memory.
38
+ class BitWriter {
39
+ public:
40
+ /// buffer: buffer to write bits to. Buffer should be preallocated with
41
+ /// 'buffer_len' bytes.
42
+ BitWriter(uint8_t* buffer, int buffer_len) : buffer_(buffer), max_bytes_(buffer_len) {
43
+ Clear();
44
+ }
45
+
46
+ void Clear() {
47
+ buffered_values_ = 0;
48
+ byte_offset_ = 0;
49
+ bit_offset_ = 0;
50
+ }
51
+
52
+ /// The number of current bytes written, including the current byte (i.e. may include a
53
+ /// fraction of a byte). Includes buffered values.
54
+ int bytes_written() const {
55
+ return byte_offset_ + static_cast<int>(bit_util::BytesForBits(bit_offset_));
56
+ }
57
+ uint8_t* buffer() const { return buffer_; }
58
+ int buffer_len() const { return max_bytes_; }
59
+
60
+ /// Writes a value to buffered_values_, flushing to buffer_ if necessary. This is bit
61
+ /// packed. Returns false if there was not enough space. num_bits must be <= 32.
62
+ bool PutValue(uint64_t v, int num_bits);
63
+
64
+ /// Writes v to the next aligned byte using num_bytes. If T is larger than
65
+ /// num_bytes, the extra high-order bytes will be ignored. Returns false if
66
+ /// there was not enough space.
67
+ /// Assume the v is stored in buffer_ as a little-endian format
68
+ template <typename T>
69
+ bool PutAligned(T v, int num_bytes);
70
+
71
+ /// Write a Vlq encoded int to the buffer. Returns false if there was not enough
72
+ /// room. The value is written byte aligned.
73
+ /// For more details on vlq:
74
+ /// en.wikipedia.org/wiki/Variable-length_quantity
75
+ bool PutVlqInt(uint32_t v);
76
+
77
+ // Writes an int zigzag encoded.
78
+ bool PutZigZagVlqInt(int32_t v);
79
+
80
+ /// Write a Vlq encoded int64 to the buffer. Returns false if there was not enough
81
+ /// room. The value is written byte aligned.
82
+ /// For more details on vlq:
83
+ /// en.wikipedia.org/wiki/Variable-length_quantity
84
+ bool PutVlqInt(uint64_t v);
85
+
86
+ // Writes an int64 zigzag encoded.
87
+ bool PutZigZagVlqInt(int64_t v);
88
+
89
+ /// Get a pointer to the next aligned byte and advance the underlying buffer
90
+ /// by num_bytes.
91
+ /// Returns NULL if there was not enough space.
92
+ uint8_t* GetNextBytePtr(int num_bytes = 1);
93
+
94
+ /// Flushes all buffered values to the buffer. Call this when done writing to
95
+ /// the buffer. If 'align' is true, buffered_values_ is reset and any future
96
+ /// writes will be written to the next byte boundary.
97
+ void Flush(bool align = false);
98
+
99
+ private:
100
+ uint8_t* buffer_;
101
+ int max_bytes_;
102
+
103
+ /// Bit-packed values are initially written to this variable before being memcpy'd to
104
+ /// buffer_. This is faster than writing values byte by byte directly to buffer_.
105
+ uint64_t buffered_values_;
106
+
107
+ int byte_offset_; // Offset in buffer_
108
+ int bit_offset_; // Offset in buffered_values_
109
+ };
110
+
111
+ namespace detail {
112
+
113
+ inline uint64_t ReadLittleEndianWord(const uint8_t* buffer, int bytes_remaining) {
114
+ uint64_t le_value = 0;
115
+ if (ARROW_PREDICT_TRUE(bytes_remaining >= 8)) {
116
+ memcpy(&le_value, buffer, 8);
117
+ } else {
118
+ memcpy(&le_value, buffer, bytes_remaining);
119
+ }
120
+ return arrow::bit_util::FromLittleEndian(le_value);
121
+ }
122
+
123
+ } // namespace detail
124
+
125
+ /// Utility class to read bit/byte stream. This class can read bits or bytes
126
+ /// that are either byte aligned or not. It also has utilities to read multiple
127
+ /// bytes in one read (e.g. encoded int).
128
+ class BitReader {
129
+ public:
130
+ BitReader() = default;
131
+
132
+ /// 'buffer' is the buffer to read from. The buffer's length is 'buffer_len'.
133
+ BitReader(const uint8_t* buffer, int buffer_len) : BitReader() {
134
+ Reset(buffer, buffer_len);
135
+ }
136
+
137
+ void Reset(const uint8_t* buffer, int buffer_len) {
138
+ buffer_ = buffer;
139
+ max_bytes_ = buffer_len;
140
+ byte_offset_ = 0;
141
+ bit_offset_ = 0;
142
+ buffered_values_ =
143
+ detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_);
144
+ }
145
+
146
+ /// Gets the next value from the buffer. Returns true if 'v' could be read or false if
147
+ /// there are not enough bytes left.
148
+ template <typename T>
149
+ bool GetValue(int num_bits, T* v);
150
+
151
+ /// Get a number of values from the buffer. Return the number of values actually read.
152
+ template <typename T>
153
+ int GetBatch(int num_bits, T* v, int batch_size);
154
+
155
+ /// Reads a 'num_bytes'-sized value from the buffer and stores it in 'v'. T
156
+ /// needs to be a little-endian native type and big enough to store
157
+ /// 'num_bytes'. The value is assumed to be byte-aligned so the stream will
158
+ /// be advanced to the start of the next byte before 'v' is read. Returns
159
+ /// false if there are not enough bytes left.
160
+ /// Assume the v was stored in buffer_ as a little-endian format
161
+ template <typename T>
162
+ bool GetAligned(int num_bytes, T* v);
163
+
164
+ /// Advances the stream by a number of bits. Returns true if succeed or false if there
165
+ /// are not enough bits left.
166
+ bool Advance(int64_t num_bits);
167
+
168
+ /// Reads a vlq encoded int from the stream. The encoded int must start at
169
+ /// the beginning of a byte. Return false if there were not enough bytes in
170
+ /// the buffer.
171
+ bool GetVlqInt(uint32_t* v);
172
+
173
+ // Reads a zigzag encoded int `into` v.
174
+ bool GetZigZagVlqInt(int32_t* v);
175
+
176
+ /// Reads a vlq encoded int64 from the stream. The encoded int must start at
177
+ /// the beginning of a byte. Return false if there were not enough bytes in
178
+ /// the buffer.
179
+ bool GetVlqInt(uint64_t* v);
180
+
181
+ // Reads a zigzag encoded int64 `into` v.
182
+ bool GetZigZagVlqInt(int64_t* v);
183
+
184
+ /// Returns the number of bytes left in the stream, not including the current
185
+ /// byte (i.e., there may be an additional fraction of a byte).
186
+ int bytes_left() {
187
+ return max_bytes_ -
188
+ (byte_offset_ + static_cast<int>(bit_util::BytesForBits(bit_offset_)));
189
+ }
190
+
191
+ /// Maximum byte length of a vlq encoded int
192
+ static constexpr int kMaxVlqByteLength = 5;
193
+
194
+ /// Maximum byte length of a vlq encoded int64
195
+ static constexpr int kMaxVlqByteLengthForInt64 = 10;
196
+
197
+ private:
198
+ const uint8_t* buffer_;
199
+ int max_bytes_;
200
+
201
+ /// Bytes are memcpy'd from buffer_ and values are read from this variable. This is
202
+ /// faster than reading values byte by byte directly from buffer_.
203
+ uint64_t buffered_values_;
204
+
205
+ int byte_offset_; // Offset in buffer_
206
+ int bit_offset_; // Offset in buffered_values_
207
+ };
208
+
209
+ inline bool BitWriter::PutValue(uint64_t v, int num_bits) {
210
+ DCHECK_LE(num_bits, 64);
211
+ if (num_bits < 64) {
212
+ DCHECK_EQ(v >> num_bits, 0) << "v = " << v << ", num_bits = " << num_bits;
213
+ }
214
+
215
+ if (ARROW_PREDICT_FALSE(byte_offset_ * 8 + bit_offset_ + num_bits > max_bytes_ * 8))
216
+ return false;
217
+
218
+ buffered_values_ |= v << bit_offset_;
219
+ bit_offset_ += num_bits;
220
+
221
+ if (ARROW_PREDICT_FALSE(bit_offset_ >= 64)) {
222
+ // Flush buffered_values_ and write out bits of v that did not fit
223
+ buffered_values_ = arrow::bit_util::ToLittleEndian(buffered_values_);
224
+ memcpy(buffer_ + byte_offset_, &buffered_values_, 8);
225
+ buffered_values_ = 0;
226
+ byte_offset_ += 8;
227
+ bit_offset_ -= 64;
228
+ buffered_values_ =
229
+ (num_bits - bit_offset_ == 64) ? 0 : (v >> (num_bits - bit_offset_));
230
+ }
231
+ DCHECK_LT(bit_offset_, 64);
232
+ return true;
233
+ }
234
+
235
+ inline void BitWriter::Flush(bool align) {
236
+ int num_bytes = static_cast<int>(bit_util::BytesForBits(bit_offset_));
237
+ DCHECK_LE(byte_offset_ + num_bytes, max_bytes_);
238
+ auto buffered_values = arrow::bit_util::ToLittleEndian(buffered_values_);
239
+ memcpy(buffer_ + byte_offset_, &buffered_values, num_bytes);
240
+
241
+ if (align) {
242
+ buffered_values_ = 0;
243
+ byte_offset_ += num_bytes;
244
+ bit_offset_ = 0;
245
+ }
246
+ }
247
+
248
+ inline uint8_t* BitWriter::GetNextBytePtr(int num_bytes) {
249
+ Flush(/* align */ true);
250
+ DCHECK_LE(byte_offset_, max_bytes_);
251
+ if (byte_offset_ + num_bytes > max_bytes_) return NULL;
252
+ uint8_t* ptr = buffer_ + byte_offset_;
253
+ byte_offset_ += num_bytes;
254
+ return ptr;
255
+ }
256
+
257
+ template <typename T>
258
+ inline bool BitWriter::PutAligned(T val, int num_bytes) {
259
+ uint8_t* ptr = GetNextBytePtr(num_bytes);
260
+ if (ptr == NULL) return false;
261
+ val = arrow::bit_util::ToLittleEndian(val);
262
+ memcpy(ptr, &val, num_bytes);
263
+ return true;
264
+ }
265
+
266
+ namespace detail {
267
+
268
+ template <typename T>
269
+ inline void GetValue_(int num_bits, T* v, int max_bytes, const uint8_t* buffer,
270
+ int* bit_offset, int* byte_offset, uint64_t* buffered_values) {
271
+ #ifdef _MSC_VER
272
+ #pragma warning(push)
273
+ #pragma warning(disable : 4800)
274
+ #endif
275
+ *v = static_cast<T>(bit_util::TrailingBits(*buffered_values, *bit_offset + num_bits) >>
276
+ *bit_offset);
277
+ #ifdef _MSC_VER
278
+ #pragma warning(pop)
279
+ #endif
280
+ *bit_offset += num_bits;
281
+ if (*bit_offset >= 64) {
282
+ *byte_offset += 8;
283
+ *bit_offset -= 64;
284
+
285
+ *buffered_values =
286
+ detail::ReadLittleEndianWord(buffer + *byte_offset, max_bytes - *byte_offset);
287
+ #ifdef _MSC_VER
288
+ #pragma warning(push)
289
+ #pragma warning(disable : 4800 4805)
290
+ #endif
291
+ // Read bits of v that crossed into new buffered_values_
292
+ if (ARROW_PREDICT_TRUE(num_bits - *bit_offset < static_cast<int>(8 * sizeof(T)))) {
293
+ // if shift exponent(num_bits - *bit_offset) is not less than sizeof(T), *v will not
294
+ // change and the following code may cause a runtime error that the shift exponent
295
+ // is too large
296
+ *v = *v | static_cast<T>(bit_util::TrailingBits(*buffered_values, *bit_offset)
297
+ << (num_bits - *bit_offset));
298
+ }
299
+ #ifdef _MSC_VER
300
+ #pragma warning(pop)
301
+ #endif
302
+ DCHECK_LE(*bit_offset, 64);
303
+ }
304
+ }
305
+
306
+ } // namespace detail
307
+
308
+ template <typename T>
309
+ inline bool BitReader::GetValue(int num_bits, T* v) {
310
+ return GetBatch(num_bits, v, 1) == 1;
311
+ }
312
+
313
+ template <typename T>
314
+ inline int BitReader::GetBatch(int num_bits, T* v, int batch_size) {
315
+ DCHECK(buffer_ != NULL);
316
+ DCHECK_LE(num_bits, static_cast<int>(sizeof(T) * 8)) << "num_bits: " << num_bits;
317
+
318
+ int bit_offset = bit_offset_;
319
+ int byte_offset = byte_offset_;
320
+ uint64_t buffered_values = buffered_values_;
321
+ int max_bytes = max_bytes_;
322
+ const uint8_t* buffer = buffer_;
323
+
324
+ const int64_t needed_bits = num_bits * static_cast<int64_t>(batch_size);
325
+ constexpr uint64_t kBitsPerByte = 8;
326
+ const int64_t remaining_bits =
327
+ static_cast<int64_t>(max_bytes - byte_offset) * kBitsPerByte - bit_offset;
328
+ if (remaining_bits < needed_bits) {
329
+ batch_size = static_cast<int>(remaining_bits / num_bits);
330
+ }
331
+
332
+ int i = 0;
333
+ if (ARROW_PREDICT_FALSE(bit_offset != 0)) {
334
+ for (; i < batch_size && bit_offset != 0; ++i) {
335
+ detail::GetValue_(num_bits, &v[i], max_bytes, buffer, &bit_offset, &byte_offset,
336
+ &buffered_values);
337
+ }
338
+ }
339
+
340
+ if (sizeof(T) == 4) {
341
+ int num_unpacked =
342
+ internal::unpack32(reinterpret_cast<const uint32_t*>(buffer + byte_offset),
343
+ reinterpret_cast<uint32_t*>(v + i), batch_size - i, num_bits);
344
+ i += num_unpacked;
345
+ byte_offset += num_unpacked * num_bits / 8;
346
+ } else if (sizeof(T) == 8 && num_bits > 32) {
347
+ // Use unpack64 only if num_bits is larger than 32
348
+ // TODO (ARROW-13677): improve the performance of internal::unpack64
349
+ // and remove the restriction of num_bits
350
+ int num_unpacked =
351
+ internal::unpack64(buffer + byte_offset, reinterpret_cast<uint64_t*>(v + i),
352
+ batch_size - i, num_bits);
353
+ i += num_unpacked;
354
+ byte_offset += num_unpacked * num_bits / 8;
355
+ } else {
356
+ // TODO: revisit this limit if necessary
357
+ DCHECK_LE(num_bits, 32);
358
+ const int buffer_size = 1024;
359
+ uint32_t unpack_buffer[buffer_size];
360
+ while (i < batch_size) {
361
+ int unpack_size = std::min(buffer_size, batch_size - i);
362
+ int num_unpacked =
363
+ internal::unpack32(reinterpret_cast<const uint32_t*>(buffer + byte_offset),
364
+ unpack_buffer, unpack_size, num_bits);
365
+ if (num_unpacked == 0) {
366
+ break;
367
+ }
368
+ for (int k = 0; k < num_unpacked; ++k) {
369
+ #ifdef _MSC_VER
370
+ #pragma warning(push)
371
+ #pragma warning(disable : 4800)
372
+ #endif
373
+ v[i + k] = static_cast<T>(unpack_buffer[k]);
374
+ #ifdef _MSC_VER
375
+ #pragma warning(pop)
376
+ #endif
377
+ }
378
+ i += num_unpacked;
379
+ byte_offset += num_unpacked * num_bits / 8;
380
+ }
381
+ }
382
+
383
+ buffered_values =
384
+ detail::ReadLittleEndianWord(buffer + byte_offset, max_bytes - byte_offset);
385
+
386
+ for (; i < batch_size; ++i) {
387
+ detail::GetValue_(num_bits, &v[i], max_bytes, buffer, &bit_offset, &byte_offset,
388
+ &buffered_values);
389
+ }
390
+
391
+ bit_offset_ = bit_offset;
392
+ byte_offset_ = byte_offset;
393
+ buffered_values_ = buffered_values;
394
+
395
+ return batch_size;
396
+ }
397
+
398
+ template <typename T>
399
+ inline bool BitReader::GetAligned(int num_bytes, T* v) {
400
+ if (ARROW_PREDICT_FALSE(num_bytes > static_cast<int>(sizeof(T)))) {
401
+ return false;
402
+ }
403
+
404
+ int bytes_read = static_cast<int>(bit_util::BytesForBits(bit_offset_));
405
+ if (ARROW_PREDICT_FALSE(byte_offset_ + bytes_read + num_bytes > max_bytes_)) {
406
+ return false;
407
+ }
408
+
409
+ // Advance byte_offset to next unread byte and read num_bytes
410
+ byte_offset_ += bytes_read;
411
+ if constexpr (std::is_same_v<T, bool>) {
412
+ // ARROW-18031: if we're trying to get an aligned bool, just check
413
+ // the LSB of the next byte and move on. If we memcpy + FromLittleEndian
414
+ // as usual, we have potential undefined behavior for bools if the value
415
+ // isn't 0 or 1
416
+ *v = *(buffer_ + byte_offset_) & 1;
417
+ } else {
418
+ memcpy(v, buffer_ + byte_offset_, num_bytes);
419
+ *v = arrow::bit_util::FromLittleEndian(*v);
420
+ }
421
+ byte_offset_ += num_bytes;
422
+
423
+ bit_offset_ = 0;
424
+ buffered_values_ =
425
+ detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_);
426
+ return true;
427
+ }
428
+
429
+ inline bool BitReader::Advance(int64_t num_bits) {
430
+ int64_t bits_required = bit_offset_ + num_bits;
431
+ int64_t bytes_required = bit_util::BytesForBits(bits_required);
432
+ if (ARROW_PREDICT_FALSE(bytes_required > max_bytes_ - byte_offset_)) {
433
+ return false;
434
+ }
435
+ byte_offset_ += static_cast<int>(bits_required >> 3);
436
+ bit_offset_ = static_cast<int>(bits_required & 7);
437
+ buffered_values_ =
438
+ detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_);
439
+ return true;
440
+ }
441
+
442
+ inline bool BitWriter::PutVlqInt(uint32_t v) {
443
+ bool result = true;
444
+ while ((v & 0xFFFFFF80UL) != 0UL) {
445
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>((v & 0x7F) | 0x80), 1);
446
+ v >>= 7;
447
+ }
448
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>(v & 0x7F), 1);
449
+ return result;
450
+ }
451
+
452
+ inline bool BitReader::GetVlqInt(uint32_t* v) {
453
+ uint32_t tmp = 0;
454
+
455
+ for (int i = 0; i < kMaxVlqByteLength; i++) {
456
+ uint8_t byte = 0;
457
+ if (ARROW_PREDICT_FALSE(!GetAligned<uint8_t>(1, &byte))) {
458
+ return false;
459
+ }
460
+ tmp |= static_cast<uint32_t>(byte & 0x7F) << (7 * i);
461
+
462
+ if ((byte & 0x80) == 0) {
463
+ *v = tmp;
464
+ return true;
465
+ }
466
+ }
467
+
468
+ return false;
469
+ }
470
+
471
+ inline bool BitWriter::PutZigZagVlqInt(int32_t v) {
472
+ uint32_t u_v = ::arrow::util::SafeCopy<uint32_t>(v);
473
+ u_v = (u_v << 1) ^ static_cast<uint32_t>(v >> 31);
474
+ return PutVlqInt(u_v);
475
+ }
476
+
477
+ inline bool BitReader::GetZigZagVlqInt(int32_t* v) {
478
+ uint32_t u;
479
+ if (!GetVlqInt(&u)) return false;
480
+ u = (u >> 1) ^ (~(u & 1) + 1);
481
+ *v = ::arrow::util::SafeCopy<int32_t>(u);
482
+ return true;
483
+ }
484
+
485
+ inline bool BitWriter::PutVlqInt(uint64_t v) {
486
+ bool result = true;
487
+ while ((v & 0xFFFFFFFFFFFFFF80ULL) != 0ULL) {
488
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>((v & 0x7F) | 0x80), 1);
489
+ v >>= 7;
490
+ }
491
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>(v & 0x7F), 1);
492
+ return result;
493
+ }
494
+
495
+ inline bool BitReader::GetVlqInt(uint64_t* v) {
496
+ uint64_t tmp = 0;
497
+
498
+ for (int i = 0; i < kMaxVlqByteLengthForInt64; i++) {
499
+ uint8_t byte = 0;
500
+ if (ARROW_PREDICT_FALSE(!GetAligned<uint8_t>(1, &byte))) {
501
+ return false;
502
+ }
503
+ tmp |= static_cast<uint64_t>(byte & 0x7F) << (7 * i);
504
+
505
+ if ((byte & 0x80) == 0) {
506
+ *v = tmp;
507
+ return true;
508
+ }
509
+ }
510
+
511
+ return false;
512
+ }
513
+
514
+ inline bool BitWriter::PutZigZagVlqInt(int64_t v) {
515
+ uint64_t u_v = ::arrow::util::SafeCopy<uint64_t>(v);
516
+ u_v = (u_v << 1) ^ static_cast<uint64_t>(v >> 63);
517
+ return PutVlqInt(u_v);
518
+ }
519
+
520
+ inline bool BitReader::GetZigZagVlqInt(int64_t* v) {
521
+ uint64_t u;
522
+ if (!GetVlqInt(&u)) return false;
523
+ u = (u >> 1) ^ (~(u & 1) + 1);
524
+ *v = ::arrow::util::SafeCopy<int64_t>(u);
525
+ return true;
526
+ }
527
+
528
+ } // namespace bit_util
529
+ } // namespace arrow
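A minimal round-trip sketch (caller-side, not part of this commit) for the classes above: bit-pack three 5-bit values, append a byte-aligned VLQ int, then read everything back.

    // Hypothetical caller; assumes arrow/util/bit_stream_utils.h is included.
    uint8_t buf[16] = {0};
    arrow::bit_util::BitWriter writer(buf, static_cast<int>(sizeof(buf)));
    for (uint64_t v : {7u, 21u, 30u}) {
      writer.PutValue(v, /*num_bits=*/5);  // returns false if out of space
    }
    writer.PutVlqInt(uint32_t{300});       // byte-aligned VLQ after the packed bits
    writer.Flush();

    arrow::bit_util::BitReader reader(buf, static_cast<int>(sizeof(buf)));
    uint32_t unpacked[3];
    int n = reader.GetBatch(/*num_bits=*/5, unpacked, /*batch_size=*/3);  // n == 3
    uint32_t vlq = 0;
    reader.GetVlqInt(&vlq);  // GetAligned() skips to the byte boundary; vlq == 300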
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_reader.h ADDED
@@ -0,0 +1,273 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cassert>
+ #include <cstdint>
+ #include <cstring>
+
+ #include "arrow/buffer.h"
+ #include "arrow/util/bit_util.h"
+ #include "arrow/util/endian.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace internal {
+
+ class BitmapReader {
+  public:
+   BitmapReader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+       : bitmap_(bitmap), position_(0), length_(length) {
+     current_byte_ = 0;
+     byte_offset_ = start_offset / 8;
+     bit_offset_ = start_offset % 8;
+     if (length > 0) {
+       current_byte_ = bitmap[byte_offset_];
+     }
+   }
+
+   bool IsSet() const { return (current_byte_ & (1 << bit_offset_)) != 0; }
+
+   bool IsNotSet() const { return (current_byte_ & (1 << bit_offset_)) == 0; }
+
+   void Next() {
+     ++bit_offset_;
+     ++position_;
+     if (ARROW_PREDICT_FALSE(bit_offset_ == 8)) {
+       bit_offset_ = 0;
+       ++byte_offset_;
+       if (ARROW_PREDICT_TRUE(position_ < length_)) {
+         current_byte_ = bitmap_[byte_offset_];
+       }
+     }
+   }
+
+   int64_t position() const { return position_; }
+
+   int64_t length() const { return length_; }
+
+  private:
+   const uint8_t* bitmap_;
+   int64_t position_;
+   int64_t length_;
+
+   uint8_t current_byte_;
+   int64_t byte_offset_;
+   int64_t bit_offset_;
+ };
+
+ // XXX Cannot name it BitmapWordReader because the name is already used
+ // in bitmap_ops.cc
+
+ class BitmapUInt64Reader {
+  public:
+   BitmapUInt64Reader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
+       : bitmap_(util::MakeNonNull(bitmap) + start_offset / 8),
+         num_carry_bits_(8 - start_offset % 8),
+         length_(length),
+         remaining_length_(length_),
+         carry_bits_(0) {
+     if (length_ > 0) {
+       // Load carry bits from the first byte's MSBs
+       if (length_ >= num_carry_bits_) {
+         carry_bits_ =
+             LoadPartialWord(static_cast<int8_t>(8 - num_carry_bits_), num_carry_bits_);
+       } else {
+         carry_bits_ = LoadPartialWord(static_cast<int8_t>(8 - num_carry_bits_), length_);
+       }
+     }
+   }
+
+   uint64_t NextWord() {
+     if (ARROW_PREDICT_TRUE(remaining_length_ >= 64 + num_carry_bits_)) {
+       // We can load a full word
+       uint64_t next_word = LoadFullWord();
+       // Carry bits come first, then the (64 - num_carry_bits_) LSBs from next_word
+       uint64_t word = carry_bits_ | (next_word << num_carry_bits_);
+       carry_bits_ = next_word >> (64 - num_carry_bits_);
+       remaining_length_ -= 64;
+       return word;
+     } else if (remaining_length_ > num_carry_bits_) {
+       // We can load a partial word
+       uint64_t next_word =
+           LoadPartialWord(/*bit_offset=*/0, remaining_length_ - num_carry_bits_);
+       uint64_t word = carry_bits_ | (next_word << num_carry_bits_);
+       carry_bits_ = next_word >> (64 - num_carry_bits_);
+       remaining_length_ = std::max<int64_t>(remaining_length_ - 64, 0);
+       return word;
+     } else {
+       remaining_length_ = 0;
+       return carry_bits_;
+     }
+   }
+
+   int64_t position() const { return length_ - remaining_length_; }
+
+   int64_t length() const { return length_; }
+
+  private:
+   uint64_t LoadFullWord() {
+     uint64_t word;
+     memcpy(&word, bitmap_, 8);
+     bitmap_ += 8;
+     return bit_util::ToLittleEndian(word);
+   }
+
+   uint64_t LoadPartialWord(int8_t bit_offset, int64_t num_bits) {
+     uint64_t word = 0;
+     const int64_t num_bytes = bit_util::BytesForBits(num_bits);
+     memcpy(&word, bitmap_, num_bytes);
+     bitmap_ += num_bytes;
+     return (bit_util::ToLittleEndian(word) >> bit_offset) &
+            bit_util::LeastSignificantBitMask(num_bits);
+   }
+
+   const uint8_t* bitmap_;
+   const int64_t num_carry_bits_;  // in [1, 8]
+   const int64_t length_;
+   int64_t remaining_length_;
+   uint64_t carry_bits_;
+ };
+
+ // BitmapWordReader here is faster than BitmapUInt64Reader (in bitmap_reader.h)
+ // on sufficiently large inputs. However, it has a larger prolog / epilog overhead
+ // and should probably not be used for small bitmaps.
+
+ template <typename Word, bool may_have_byte_offset = true>
+ class BitmapWordReader {
+  public:
+   BitmapWordReader() = default;
+   BitmapWordReader(const uint8_t* bitmap, int64_t offset, int64_t length)
+       : offset_(static_cast<int64_t>(may_have_byte_offset) * (offset % 8)),
+         bitmap_(bitmap + offset / 8),
+         bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)) {
+     // decrement word count by one as we may touch two adjacent words in one iteration
+     nwords_ = length / (sizeof(Word) * 8) - 1;
+     if (nwords_ < 0) {
+       nwords_ = 0;
+     }
+     trailing_bits_ = static_cast<int>(length - nwords_ * sizeof(Word) * 8);
+     trailing_bytes_ = static_cast<int>(bit_util::BytesForBits(trailing_bits_));
+
+     if (nwords_ > 0) {
+       current_data.word_ = load<Word>(bitmap_);
+     } else if (length > 0) {
+       current_data.epi.byte_ = load<uint8_t>(bitmap_);
+     }
+   }
+
+   Word NextWord() {
+     bitmap_ += sizeof(Word);
+     const Word next_word = load<Word>(bitmap_);
+     Word word = current_data.word_;
+     if (may_have_byte_offset && offset_) {
+       // combine two adjacent words into one word
+       // |<------ next ----->|<---- current ---->|
+       // +-------------+-----+-------------+-----+
+       // |     ---     |  A  |      B      | --- |
+       // +-------------+-----+-------------+-----+
+       //               |     |     offset
+       //               v     v
+       //               +-----+-------------+
+       //               |  A  |      B      |
+       //               +-----+-------------+
+       //               |<------ word ----->|
+       word >>= offset_;
+       word |= next_word << (sizeof(Word) * 8 - offset_);
+     }
+     current_data.word_ = next_word;
+     return word;
+   }
+
+   uint8_t NextTrailingByte(int& valid_bits) {
+     uint8_t byte;
+     assert(trailing_bits_ > 0);
+
+     if (trailing_bits_ <= 8) {
+       // last byte
+       valid_bits = trailing_bits_;
+       trailing_bits_ = 0;
+       byte = 0;
+       internal::BitmapReader reader(bitmap_, offset_, valid_bits);
+       for (int i = 0; i < valid_bits; ++i) {
+         byte >>= 1;
+         if (reader.IsSet()) {
+           byte |= 0x80;
+         }
+         reader.Next();
+       }
+       byte >>= (8 - valid_bits);
+     } else {
+       ++bitmap_;
+       const uint8_t next_byte = load<uint8_t>(bitmap_);
+       byte = current_data.epi.byte_;
+       if (may_have_byte_offset && offset_) {
+         byte >>= offset_;
+         byte |= next_byte << (8 - offset_);
+       }
+       current_data.epi.byte_ = next_byte;
+       trailing_bits_ -= 8;
+       trailing_bytes_--;
+       valid_bits = 8;
+     }
+     return byte;
+   }
+
+   int64_t words() const { return nwords_; }
+   int trailing_bytes() const { return trailing_bytes_; }
+
+  private:
+   int64_t offset_;
+   const uint8_t* bitmap_;
+
+   const uint8_t* bitmap_end_;
+   int64_t nwords_;
+   int trailing_bits_;
+   int trailing_bytes_;
+   union {
+     Word word_;
+     struct {
+ #if ARROW_LITTLE_ENDIAN == 0
+       uint8_t padding_bytes_[sizeof(Word) - 1];
+ #endif
+       uint8_t byte_;
+     } epi;
+   } current_data;
+
+   template <typename DType>
+   DType load(const uint8_t* bitmap) {
+     assert(bitmap + sizeof(DType) <= bitmap_end_);
+     return bit_util::ToLittleEndian(util::SafeLoadAs<DType>(bitmap));
+   }
+ };
+
+ /// \brief Index into a possibly nonexistent bitmap
+ struct OptionalBitIndexer {
+   const uint8_t* bitmap;
+   const int64_t offset;
+
+   explicit OptionalBitIndexer(const uint8_t* buffer = NULLPTR, int64_t offset = 0)
+       : bitmap(buffer), offset(offset) {}
+
+   bool operator[](int64_t i) const {
+     return bitmap == NULLPTR || bit_util::GetBit(bitmap, offset + i);
+   }
+ };
+
+ }  // namespace internal
+ }  // namespace arrow
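A minimal caller-side sketch (not part of this commit): count the set bits in a 100-bit slice of a bitmap starting at bit 3, using the byte-at-a-time BitmapReader above; `bitmap` is an assumed `const uint8_t*`.

    // Hypothetical caller; assumes arrow/util/bitmap_reader.h is included.
    int64_t popcount = 0;
    arrow::internal::BitmapReader reader(bitmap, /*start_offset=*/3, /*length=*/100);
    for (int64_t i = 0; i < reader.length(); ++i) {
      popcount += reader.IsSet() ? 1 : 0;
      reader.Next();
    }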
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h ADDED
@@ -0,0 +1,88 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+
+ #include "arrow/util/bit_util.h"
+ #include "arrow/util/bitmap_reader.h"
+
+ namespace arrow {
+ namespace internal {
+
+ // A function that visits each bit in a bitmap and calls a visitor function with a
+ // boolean representation of that bit. This is intended to be analogous to
+ // GenerateBits.
+ template <class Visitor>
+ void VisitBits(const uint8_t* bitmap, int64_t start_offset, int64_t length,
+                Visitor&& visit) {
+   BitmapReader reader(bitmap, start_offset, length);
+   for (int64_t index = 0; index < length; ++index) {
+     visit(reader.IsSet());
+     reader.Next();
+   }
+ }
+
+ // Like VisitBits(), but unrolls its main loop for better performance.
+ template <class Visitor>
+ void VisitBitsUnrolled(const uint8_t* bitmap, int64_t start_offset, int64_t length,
+                        Visitor&& visit) {
+   if (length == 0) {
+     return;
+   }
+
+   // Start by visiting any bits preceding the first full byte.
+   int64_t num_bits_before_full_bytes =
+       bit_util::RoundUpToMultipleOf8(start_offset) - start_offset;
+   // Truncate num_bits_before_full_bytes if it is greater than length.
+   if (num_bits_before_full_bytes > length) {
+     num_bits_before_full_bytes = length;
+   }
+   // Use the non loop-unrolled VisitBits since we don't want to add branches
+   VisitBits<Visitor>(bitmap, start_offset, num_bits_before_full_bytes, visit);
+
+   // Shift the start pointer to the first full byte and compute the
+   // number of full bytes to be read.
+   const uint8_t* first_full_byte = bitmap + bit_util::CeilDiv(start_offset, 8);
+   const int64_t num_full_bytes = (length - num_bits_before_full_bytes) / 8;
+
+   // Iterate over each full byte of the input bitmap and call the visitor in
+   // a loop-unrolled manner.
+   for (int64_t byte_index = 0; byte_index < num_full_bytes; ++byte_index) {
+     // Get the current bit-packed byte value from the bitmap.
+     const uint8_t byte = *(first_full_byte + byte_index);
+
+     // Execute the visitor function on each bit of the current byte.
+     visit(bit_util::GetBitFromByte(byte, 0));
+     visit(bit_util::GetBitFromByte(byte, 1));
+     visit(bit_util::GetBitFromByte(byte, 2));
+     visit(bit_util::GetBitFromByte(byte, 3));
+     visit(bit_util::GetBitFromByte(byte, 4));
+     visit(bit_util::GetBitFromByte(byte, 5));
+     visit(bit_util::GetBitFromByte(byte, 6));
+     visit(bit_util::GetBitFromByte(byte, 7));
+   }
+
+   // Write any leftover bits in the last byte.
+   const int64_t num_bits_after_full_bytes = (length - num_bits_before_full_bytes) % 8;
+   VisitBits<Visitor>(first_full_byte + num_full_bytes, 0, num_bits_after_full_bytes,
+                      visit);
+ }
+
+ }  // namespace internal
+ }  // namespace arrow
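A minimal caller-side sketch (not part of this commit): materialize a bitmap slice into a std::vector<bool> with the unrolled visitor; `bitmap`, `start_offset`, and `length` are assumed in scope.

    // Hypothetical caller; assumes arrow/util/bitmap_visit.h and <vector> are included.
    std::vector<bool> bits;
    bits.reserve(length);
    arrow::internal::VisitBitsUnrolled(bitmap, start_offset, length,
                                       [&](bool bit) { bits.push_back(bit); });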
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h ADDED
@@ -0,0 +1,89 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <algorithm>
+ #include <array>
+ #include <bitset>
+ #include <cassert>
+ #include <cstdint>
+ #include <cstring>
+ #include <memory>
+ #include <string>
+ #include <string_view>
+ #include <type_traits>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/buffer.h"
+ #include "arrow/memory_pool.h"
+ #include "arrow/result.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/bit_util.h"
+ #include "arrow/util/compare.h"
+ #include "arrow/util/functional.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/string_builder.h"
+ #include "arrow/util/type_traits.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+ namespace internal {
+
+ /// \brief Store a stack of bitsets efficiently. The top bitset may be
+ /// accessed and its bits may be modified, but it may not be resized.
+ class BitsetStack {
+  public:
+   using reference = typename std::vector<bool>::reference;
+
+   /// \brief push a bitset onto the stack
+   /// \param size number of bits in the next bitset
+   /// \param value initial value for bits in the pushed bitset
+   void Push(int size, bool value) {
+     offsets_.push_back(bit_count());
+     bits_.resize(bit_count() + size, value);
+   }
+
+   /// \brief number of bits in the bitset at the top of the stack
+   int TopSize() const {
+     if (offsets_.size() == 0) return 0;
+     return bit_count() - offsets_.back();
+   }
+
+   /// \brief pop a bitset off the stack
+   void Pop() {
+     bits_.resize(offsets_.back());
+     offsets_.pop_back();
+   }
+
+   /// \brief get the value of a bit in the top bitset
+   /// \param i index of the bit to access
+   bool operator[](int i) const { return bits_[offsets_.back() + i]; }
+
+   /// \brief get a mutable reference to a bit in the top bitset
+   /// \param i index of the bit to access
+   reference operator[](int i) { return bits_[offsets_.back() + i]; }
+
+  private:
+   int bit_count() const { return static_cast<int>(bits_.size()); }
+   std::vector<bool> bits_;
+   std::vector<int> offsets_;
+ };
+
+ }  // namespace internal
+ }  // namespace arrow
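A minimal caller-side sketch (not part of this commit): use BitsetStack to track per-field flags while recursing into a nested type.

    // Hypothetical caller; assumes arrow/util/bitset_stack.h is included.
    arrow::internal::BitsetStack seen;
    seen.Push(/*size=*/3, /*value=*/false);  // enter a struct with 3 fields
    seen[1] = true;                          // mark field 1 as seen
    bool field1_seen = seen[1];              // query the top bitset
    seen.Pop();                              // leave the struct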
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx2.h ADDED
@@ -0,0 +1,28 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <stdint.h>
+
+ namespace arrow {
+ namespace internal {
+
+ int unpack32_avx2(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
+
+ }  // namespace internal
+ }  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_avx512.h ADDED
@@ -0,0 +1,28 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <stdint.h>
+
+ namespace arrow {
+ namespace internal {
+
+ int unpack32_avx512(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
+
+ }  // namespace internal
+ }  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_default.h ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking_neon.h ADDED
@@ -0,0 +1,28 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <stdint.h>
+
+ namespace arrow {
+ namespace internal {
+
+ int unpack32_neon(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
+
+ }  // namespace internal
+ }  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h ADDED
@@ -0,0 +1,88 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+
+ #include "arrow/type_fwd.h"
+
+ namespace arrow {
+
+ namespace util {
+
+ /// \brief The sum of bytes in each buffer referenced by the array
+ ///
+ /// Note: An array may only reference a portion of a buffer.
+ /// This method will overestimate in this case and return the
+ /// byte size of the entire buffer.
+ /// Note: If a buffer is referenced multiple times then it will
+ /// only be counted once.
+ ARROW_EXPORT int64_t TotalBufferSize(const ArrayData& array_data);
+ /// \brief The sum of bytes in each buffer referenced by the array
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT int64_t TotalBufferSize(const Array& array);
+ /// \brief The sum of bytes in each buffer referenced by the array
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT int64_t TotalBufferSize(const ChunkedArray& chunked_array);
+ /// \brief The sum of bytes in each buffer referenced by the batch
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT int64_t TotalBufferSize(const RecordBatch& record_batch);
+ /// \brief The sum of bytes in each buffer referenced by the table
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT int64_t TotalBufferSize(const Table& table);
+
+ /// \brief Calculate the buffer ranges referenced by the array
+ ///
+ /// These ranges will take into account array offsets
+ ///
+ /// The ranges may contain duplicates
+ ///
+ /// Dictionary arrays will ignore the offset of their containing array
+ ///
+ /// The return value will be a struct array corresponding to the schema:
+ /// schema({field("start", uint64()), field("offset", uint64()),
+ ///         field("length", uint64())})
+ ARROW_EXPORT Result<std::shared_ptr<Array>> ReferencedRanges(const ArrayData& array_data);
+
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
+ ///
+ /// Unlike TotalBufferSize this method will account for array
+ /// offsets.
+ ///
+ /// If buffers are shared between arrays then the shared
+ /// portion will be counted multiple times.
+ ///
+ /// Dictionary arrays will always be counted in their entirety
+ /// even if the array only references a portion of the dictionary.
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const ArrayData& array_data);
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const Array& array_data);
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const ChunkedArray& array_data);
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const RecordBatch& array_data);
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const Table& array_data);
+
+ }  // namespace util
+
+ }  // namespace arrow
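A minimal caller-side sketch (not part of this commit) contrasting the two size notions above for a sliced array; `array` is an assumed std::shared_ptr<arrow::Array>.

    // Hypothetical caller; assumes arrow/util/byte_size.h is included.
    std::shared_ptr<arrow::Array> sliced = array->Slice(0, array->length() / 2);
    int64_t total = arrow::util::TotalBufferSize(*sliced);  // counts whole buffers
    arrow::Result<int64_t> referenced = arrow::util::ReferencedBufferSize(*sliced);
    // *referenced accounts for the slice offset, so *referenced <= total.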
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/cancel.h ADDED
@@ -0,0 +1,118 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <functional>
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ #include "arrow/status.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+
+ class StopToken;
+
+ struct StopSourceImpl;
+
+ /// EXPERIMENTAL
+ class ARROW_EXPORT StopSource {
+  public:
+   StopSource();
+   ~StopSource();
+
+   // Consumer API (the side that stops)
+   void RequestStop();
+   void RequestStop(Status error);
+   // Async-signal-safe. TODO Deprecate this?
+   void RequestStopFromSignal(int signum);
+
+   StopToken token();
+
+   // For internal use only
+   void Reset();
+
+  protected:
+   std::shared_ptr<StopSourceImpl> impl_;
+ };
+
+ /// EXPERIMENTAL
+ class ARROW_EXPORT StopToken {
+  public:
+   // Public for Cython
+   StopToken() {}
+
+   explicit StopToken(std::shared_ptr<StopSourceImpl> impl) : impl_(std::move(impl)) {}
+
+   // A trivial token that never propagates any stop request
+   static StopToken Unstoppable() { return StopToken(); }
+
+   /// \brief Check if the stop source has been cancelled.
+   ///
+   /// Producers should call this method, whenever convenient, to check and
+   /// see if they should stop producing early (i.e. have been cancelled).
+   /// Failure to call this method often enough will lead to an unresponsive
+   /// cancellation.
+   ///
+   /// This is part of the producer API (the side that gets asked to stop).
+   /// This method is thread-safe.
+   ///
+   /// \return An OK status if the stop source has not been cancelled or a
+   /// cancel error if the source has been cancelled.
+   Status Poll() const;
+   bool IsStopRequested() const;
+
+  protected:
+   std::shared_ptr<StopSourceImpl> impl_;
+ };
+
+ /// EXPERIMENTAL: Set a global StopSource that can receive signals
+ ///
+ /// The only allowed order of calls is the following:
+ /// - SetSignalStopSource()
+ /// - any number of pairs of (RegisterCancellingSignalHandler,
+ ///   UnregisterCancellingSignalHandler) calls
+ /// - ResetSignalStopSource()
+ ///
+ /// Beware that these settings are process-wide. Typically, only one
+ /// thread should call these APIs, even in a multithreaded setting.
+ ARROW_EXPORT
+ Result<StopSource*> SetSignalStopSource();
+
+ /// EXPERIMENTAL: Reset the global signal-receiving StopSource
+ ///
+ /// This will invalidate the pointer returned by SetSignalStopSource.
+ ARROW_EXPORT
+ void ResetSignalStopSource();
+
+ /// EXPERIMENTAL: Register signal handler triggering the signal-receiving StopSource
+ ///
+ /// Note that those handlers are automatically un-registered in a fork()ed process,
+ /// therefore the child process will need to call RegisterCancellingSignalHandler()
+ /// if desired.
+ ARROW_EXPORT
+ Status RegisterCancellingSignalHandler(const std::vector<int>& signals);
+
+ /// EXPERIMENTAL: Unregister signal handler set up by RegisterCancellingSignalHandler
+ ARROW_EXPORT
+ void UnregisterCancellingSignalHandler();
+
+ }  // namespace arrow
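A minimal caller-side sketch (not part of this commit): the consumer requests a stop and the producer polls the token.

    // Hypothetical caller; assumes arrow/util/cancel.h is included.
    arrow::StopSource source;
    arrow::StopToken token = source.token();
    // Consumer side:
    source.RequestStop();
    // Producer side, called periodically:
    if (token.IsStopRequested()) {
      // wind down early; token.Poll() would return a Cancelled status here
    }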
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/compare.h ADDED
@@ -0,0 +1,62 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+ #include <type_traits>
+ #include <utility>
+
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace util {
+
+ /// CRTP helper for declaring equality comparison. Defines operator== and operator!=
+ template <typename T>
+ class EqualityComparable {
+  public:
+   ~EqualityComparable() {
+     static_assert(
+         std::is_same<decltype(std::declval<const T>().Equals(std::declval<const T>())),
+                      bool>::value,
+         "EqualityComparable depends on the method T::Equals(const T&) const");
+   }
+
+   template <typename... Extra>
+   bool Equals(const std::shared_ptr<T>& other, Extra&&... extra) const {
+     if (other == NULLPTR) {
+       return false;
+     }
+     return cast().Equals(*other, std::forward<Extra>(extra)...);
+   }
+
+   struct PtrsEqual {
+     bool operator()(const std::shared_ptr<T>& l, const std::shared_ptr<T>& r) const {
+       return l->Equals(*r);
+     }
+   };
+
+   friend bool operator==(T const& a, T const& b) { return a.Equals(b); }
+   friend bool operator!=(T const& a, T const& b) { return !(a == b); }
+
+  private:
+   const T& cast() const { return static_cast<const T&>(*this); }
+ };
+
+ }  // namespace util
+ }  // namespace arrow
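A minimal sketch (not part of this commit) of the CRTP helper above; `MyMeta` is a hypothetical type that only has to provide Equals.

    // Hypothetical caller; assumes arrow/util/compare.h is included.
    class MyMeta : public arrow::util::EqualityComparable<MyMeta> {
     public:
      explicit MyMeta(int id) : id_(id) {}
      bool Equals(const MyMeta& other) const { return id_ == other.id_; }

     private:
      int id_;
    };
    // MyMeta{1} == MyMeta{1} and MyMeta{1} != MyMeta{2} now both compile.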
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/concurrent_map.h ADDED
@@ -0,0 +1,68 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <unordered_map>
+ #include <utility>
+
+ #include "arrow/util/mutex.h"
+
+ namespace arrow {
+ namespace util {
+
+ template <typename K, typename V>
+ class ConcurrentMap {
+  public:
+   void Insert(const K& key, const V& value) {
+     auto lock = mutex_.Lock();
+     map_.insert({key, value});
+   }
+
+   template <typename ValueFunc>
+   V GetOrInsert(const K& key, ValueFunc&& compute_value_func) {
+     auto lock = mutex_.Lock();
+     auto it = map_.find(key);
+     if (it == map_.end()) {
+       auto pair = map_.emplace(key, compute_value_func());
+       it = pair.first;
+     }
+     return it->second;
+   }
+
+   void Erase(const K& key) {
+     auto lock = mutex_.Lock();
+     map_.erase(key);
+   }
+
+   void Clear() {
+     auto lock = mutex_.Lock();
+     map_.clear();
+   }
+
+   size_t size() const {
+     auto lock = mutex_.Lock();
+     return map_.size();
+   }
+
+  private:
+   std::unordered_map<K, V> map_;
+   mutable arrow::util::Mutex mutex_;
+ };
+
+ }  // namespace util
+ }  // namespace arrow
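A minimal caller-side sketch (not part of this commit): memoize a computed value across threads; `ExpensiveLookup` is a hypothetical function.

    // Hypothetical caller; assumes arrow/util/concurrent_map.h and <string> are included.
    arrow::util::ConcurrentMap<std::string, int> cache;
    int value = cache.GetOrInsert("key", [] { return ExpensiveLookup("key"); });
    // A second call with the same key returns the cached value without
    // invoking the lambda again.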
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/config.h ADDED
@@ -0,0 +1,61 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #define ARROW_VERSION_MAJOR 15
+ #define ARROW_VERSION_MINOR 0
+ #define ARROW_VERSION_PATCH 2
+ #define ARROW_VERSION ((ARROW_VERSION_MAJOR * 1000) + ARROW_VERSION_MINOR) * 1000 + ARROW_VERSION_PATCH
+
+ #define ARROW_VERSION_STRING "15.0.2"
+
+ #define ARROW_SO_VERSION "1500"
+ #define ARROW_FULL_SO_VERSION "1500.2.0"
+
+ #define ARROW_CXX_COMPILER_ID "GNU"
+ #define ARROW_CXX_COMPILER_VERSION "12.2.1"
+ #define ARROW_CXX_COMPILER_FLAGS " -fdiagnostics-color=always"
+
+ #define ARROW_BUILD_TYPE "RELEASE"
+
+ #define ARROW_GIT_ID ""
+ #define ARROW_GIT_DESCRIPTION ""
+
+ #define ARROW_PACKAGE_KIND "python-wheel-manylinux228"
+
+ #define ARROW_COMPUTE
+ #define ARROW_CSV
+ /* #undef ARROW_CUDA */
+ #define ARROW_DATASET
+ #define ARROW_FILESYSTEM
+ #define ARROW_FLIGHT
+ /* #undef ARROW_FLIGHT_SQL */
+ #define ARROW_IPC
+ #define ARROW_JEMALLOC
+ #define ARROW_JEMALLOC_VENDORED
+ #define ARROW_JSON
+ #define ARROW_ORC
+ #define ARROW_PARQUET
+ #define ARROW_SUBSTRAIT
+
+ #define ARROW_ENABLE_THREADING
+ #define ARROW_GCS
+ #define ARROW_S3
+ #define ARROW_USE_NATIVE_INT128
+ /* #undef ARROW_WITH_MUSL */
+ /* #undef ARROW_WITH_OPENTELEMETRY */
+ /* #undef ARROW_WITH_UCX */
+ #define PARQUET_REQUIRE_ENCRYPTION
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef ARROW_COUNTING_SEMAPHORE_H
+#define ARROW_COUNTING_SEMAPHORE_H
+
+#include <memory>
+
+#include "arrow/status.h"
+
+namespace arrow {
+namespace util {
+
+/// \brief Simple mutex-based counting semaphore with timeout
+class ARROW_EXPORT CountingSemaphore {
+ public:
+  /// \brief Create an instance with initial_avail starting permits
+  ///
+  /// \param[in] initial_avail The semaphore will start with this many permits available
+  /// \param[in] timeout_seconds A timeout to be applied to all operations. Operations
+  ///            will return Status::Invalid if this timeout elapses
+  explicit CountingSemaphore(uint32_t initial_avail = 0, double timeout_seconds = 10);
+  ~CountingSemaphore();
+  /// \brief Block until num_permits permits are available
+  Status Acquire(uint32_t num_permits);
+  /// \brief Make num_permits permits available
+  Status Release(uint32_t num_permits);
+  /// \brief Wait until num_waiters are waiting on permits
+  ///
+  /// This method is non-standard but useful in unit tests to ensure sequencing
+  Status WaitForWaiters(uint32_t num_waiters);
+  /// \brief Immediately time out any waiters
+  ///
+  /// This method will return Status::OK only if there were no waiters to time out.
+  /// Once closed any operation on this instance will return an invalid status.
+  Status Close();
+
+ private:
+  class Impl;
+  std::unique_ptr<Impl> impl_;
+};
+
+}  // namespace util
+}  // namespace arrow
+
+#endif  // ARROW_COUNTING_SEMAPHORE_H
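The semaphore above is mutex-based rather than built on std::counting_semaphore, and every blocking call is bounded by the constructor's timeout. A minimal sketch of the Acquire/Release handshake, using WaitForWaiters to sequence the two threads the way the header suggests for tests:

#include <iostream>
#include <thread>

#include "arrow/util/counting_semaphore.h"

int main() {
  // Start with zero permits and a 5 second safety timeout.
  arrow::util::CountingSemaphore sem(/*initial_avail=*/0, /*timeout_seconds=*/5.0);
  std::thread releaser([&sem] {
    // Block until one thread is parked in Acquire(), then grant a permit.
    arrow::Status st = sem.WaitForWaiters(1);
    if (st.ok()) {
      st = sem.Release(1);
    }
  });
  arrow::Status st = sem.Acquire(1);  // blocks until releaser grants a permit
  std::cout << (st.ok() ? "acquired" : st.ToString()) << std::endl;
  releaser.join();
  // Close() returns OK here because no waiters remain.
  return sem.Close().ok() ? 0 : 1;
}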
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h ADDED
@@ -0,0 +1,114 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// From Apache Impala (incubating) as of 2016-01-29. Pared down to a minimal
+// set of functions needed for Apache Arrow / Apache parquet-cpp
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace internal {
+
+/// CpuInfo is an interface to query for cpu information at runtime. The caller can
+/// ask for the sizes of the caches and what hardware features are supported.
+/// On Linux, this information is pulled from a couple of sys files (/proc/cpuinfo and
+/// /sys/devices)
+class ARROW_EXPORT CpuInfo {
+ public:
+  ~CpuInfo();
+
+  /// x86 features
+  static constexpr int64_t SSSE3 = (1LL << 0);
+  static constexpr int64_t SSE4_1 = (1LL << 1);
+  static constexpr int64_t SSE4_2 = (1LL << 2);
+  static constexpr int64_t POPCNT = (1LL << 3);
+  static constexpr int64_t AVX = (1LL << 4);
+  static constexpr int64_t AVX2 = (1LL << 5);
+  static constexpr int64_t AVX512F = (1LL << 6);
+  static constexpr int64_t AVX512CD = (1LL << 7);
+  static constexpr int64_t AVX512VL = (1LL << 8);
+  static constexpr int64_t AVX512DQ = (1LL << 9);
+  static constexpr int64_t AVX512BW = (1LL << 10);
+  static constexpr int64_t AVX512 = AVX512F | AVX512CD | AVX512VL | AVX512DQ | AVX512BW;
+  static constexpr int64_t BMI1 = (1LL << 11);
+  static constexpr int64_t BMI2 = (1LL << 12);
+
+  /// Arm features
+  static constexpr int64_t ASIMD = (1LL << 32);
+
+  /// Cache enums for L1 (data), L2 and L3
+  enum class CacheLevel { L1 = 0, L2, L3, Last = L3 };
+
+  /// CPU vendors
+  enum class Vendor { Unknown, Intel, AMD };
+
+  static const CpuInfo* GetInstance();
+
+  /// Returns all the flags for this cpu
+  int64_t hardware_flags() const;
+
+  /// Returns the number of cores (including hyper-threaded) on this machine.
+  int num_cores() const;
+
+  /// Returns the vendor of the cpu.
+  Vendor vendor() const;
+
+  /// Returns the model name of the cpu (e.g. Intel i7-2600)
+  const std::string& model_name() const;
+
+  /// Returns the size of the cache in KB at this cache level
+  int64_t CacheSize(CacheLevel level) const;
+
+  /// \brief Returns whether or not the given feature is enabled.
+  ///
+  /// IsSupported() is true iff IsDetected() is also true and the feature
+  /// wasn't disabled by the user (for example by setting the ARROW_USER_SIMD_LEVEL
+  /// environment variable).
+  bool IsSupported(int64_t flags) const;
+
+  /// Returns whether or not the given feature is available on the CPU.
+  bool IsDetected(int64_t flags) const;
+
+  /// Determine if the CPU meets the minimum CPU requirements and if not, issue an error
+  /// and terminate.
+  void VerifyCpuRequirements() const;
+
+  /// Toggle a hardware feature on and off. It is not valid to turn on a feature
+  /// that the underlying hardware cannot support. This is useful for testing.
+  void EnableFeature(int64_t flag, bool enable);
+
+  bool HasEfficientBmi2() const {
+    // BMI2 (pext, pdep) is only efficient on Intel X86 processors.
+    return vendor() == Vendor::Intel && IsSupported(BMI2);
+  }
+
+ private:
+  CpuInfo();
+
+  struct Impl;
+  std::unique_ptr<Impl> impl_;
+};
+
+}  // namespace internal
+}  // namespace arrow
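CpuInfo is exposed as a read-only singleton (GetInstance returns a const pointer), so typical callers only query it; the distinction between IsDetected (hardware capability) and IsSupported (capability not disabled by the user) is what compute kernels dispatch on. A short sketch of runtime queries against this header; note the class lives in arrow::internal, so it is an implementation detail rather than a stable public API:

#include <iostream>

#include "arrow/util/cpu_info.h"

int main() {
  using arrow::internal::CpuInfo;
  const CpuInfo* info = CpuInfo::GetInstance();
  std::cout << "model: " << info->model_name() << "\n"
            << "cores: " << info->num_cores() << "\n"
            // Per the comment above, CacheSize reports KB at the given level.
            << "L1 cache: " << info->CacheSize(CpuInfo::CacheLevel::L1) << "\n"
            << "AVX2 usable: " << info->IsSupported(CpuInfo::AVX2) << "\n"
            << "AVX2 present: " << info->IsDetected(CpuInfo::AVX2) << std::endl;
  return 0;
}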