applied-ai-018 committed on
Commit
66f948c
·
verified ·
1 Parent(s): ab01b71

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h +323 -0
  2. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h +120 -0
  3. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h +128 -0
  4. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_adaptive.h +215 -0
  5. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_nested.h +838 -0
  6. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/api.h +39 -0
  7. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset.h +481 -0
  8. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset_writer.h +103 -0
  9. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h +275 -0
  10. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_base.h +495 -0
  11. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_csv.h +144 -0
  12. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_ipc.h +123 -0
  13. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_json.h +98 -0
  14. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_orc.h +75 -0
  15. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_parquet.h +404 -0
  16. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/parquet_encryption_config.h +75 -0
  17. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/partition.h +432 -0
  18. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/pch.h +27 -0
  19. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/plan.h +33 -0
  20. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/projector.h +32 -0
  21. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/scanner.h +578 -0
  22. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/type_fwd.h +113 -0
  23. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/visibility.h +50 -0
  24. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/api.h +30 -0
  25. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client.h +436 -0
  26. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_auth.h +62 -0
  27. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_tracing_middleware.h +34 -0
  28. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/middleware.h +75 -0
  29. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/platform.h +31 -0
  30. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_auth.h +125 -0
  31. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport.h +302 -0
  32. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport_server.h +133 -0
  33. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/type_fwd.h +65 -0
  34. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types_async.h +80 -0
  35. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h +33 -0
  36. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h +221 -0
  37. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h +71 -0
  38. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_util.h +460 -0
  39. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h +35 -0
  40. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h +95 -0
  41. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h +515 -0
  42. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_stream_utils.h +529 -0
  43. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h +88 -0
  44. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h +286 -0
  45. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h +89 -0
  46. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h +34 -0
  47. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h +88 -0
  48. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h +61 -0
  49. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h +114 -0
  50. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h +36 -0
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/adapters/orc/options.h"
25
+ #include "arrow/io/interfaces.h"
26
+ #include "arrow/memory_pool.h"
27
+ #include "arrow/record_batch.h"
28
+ #include "arrow/status.h"
29
+ #include "arrow/type.h"
30
+ #include "arrow/type_fwd.h"
31
+ #include "arrow/util/macros.h"
32
+ #include "arrow/util/visibility.h"
33
+
34
+ namespace arrow {
35
+ namespace adapters {
36
+ namespace orc {
37
+
38
/// \brief Metadata describing one stripe within an ORC file.
struct StripeInformation {
  /// Byte offset of the stripe, measured from the start of the file.
  int64_t offset;
  /// Total size of the stripe in bytes.
  int64_t length;
  /// Row count contained in this stripe.
  int64_t num_rows;
  /// Global index (within the whole file) of the stripe's first row.
  int64_t first_row_id;
};
49
+
50
+ /// \class ORCFileReader
51
+ /// \brief Read an Arrow Table or RecordBatch from an ORC file.
52
+ class ARROW_EXPORT ORCFileReader {
53
+ public:
54
+ ~ORCFileReader();
55
+
56
+ /// \brief Creates a new ORC reader
57
+ ///
58
+ /// \param[in] file the data source
59
+ /// \param[in] pool a MemoryPool to use for buffer allocations
60
+ /// \return the returned reader object
61
+ static Result<std::unique_ptr<ORCFileReader>> Open(
62
+ const std::shared_ptr<io::RandomAccessFile>& file, MemoryPool* pool);
63
+
64
+ /// \brief Return the schema read from the ORC file
65
+ ///
66
+ /// \return the returned Schema object
67
+ Result<std::shared_ptr<Schema>> ReadSchema();
68
+
69
+ /// \brief Read the file as a Table
70
+ ///
71
+ /// The table will be composed of one record batch per stripe.
72
+ ///
73
+ /// \return the returned Table
74
+ Result<std::shared_ptr<Table>> Read();
75
+
76
+ /// \brief Read the file as a Table
77
+ ///
78
+ /// The table will be composed of one record batch per stripe.
79
+ ///
80
+ /// \param[in] schema the Table schema
81
+ /// \return the returned Table
82
+ Result<std::shared_ptr<Table>> Read(const std::shared_ptr<Schema>& schema);
83
+
84
+ /// \brief Read the file as a Table
85
+ ///
86
+ /// The table will be composed of one record batch per stripe.
87
+ ///
88
+ /// \param[in] include_indices the selected field indices to read
89
+ /// \return the returned Table
90
+ Result<std::shared_ptr<Table>> Read(const std::vector<int>& include_indices);
91
+
92
+ /// \brief Read the file as a Table
93
+ ///
94
+ /// The table will be composed of one record batch per stripe.
95
+ ///
96
+ /// \param[in] include_names the selected field names to read
97
+ /// \return the returned Table
98
+ Result<std::shared_ptr<Table>> Read(const std::vector<std::string>& include_names);
99
+
100
+ /// \brief Read the file as a Table
101
+ ///
102
+ /// The table will be composed of one record batch per stripe.
103
+ ///
104
+ /// \param[in] schema the Table schema
105
+ /// \param[in] include_indices the selected field indices to read
106
+ /// \return the returned Table
107
+ Result<std::shared_ptr<Table>> Read(const std::shared_ptr<Schema>& schema,
108
+ const std::vector<int>& include_indices);
109
+
110
+ /// \brief Read a single stripe as a RecordBatch
111
+ ///
112
+ /// \param[in] stripe the stripe index
113
+ /// \return the returned RecordBatch
114
+ Result<std::shared_ptr<RecordBatch>> ReadStripe(int64_t stripe);
115
+
116
+ /// \brief Read a single stripe as a RecordBatch
117
+ ///
118
+ /// \param[in] stripe the stripe index
119
+ /// \param[in] include_indices the selected field indices to read
120
+ /// \return the returned RecordBatch
121
+ Result<std::shared_ptr<RecordBatch>> ReadStripe(
122
+ int64_t stripe, const std::vector<int>& include_indices);
123
+
124
+ /// \brief Read a single stripe as a RecordBatch
125
+ ///
126
+ /// \param[in] stripe the stripe index
127
+ /// \param[in] include_names the selected field names to read
128
+ /// \return the returned RecordBatch
129
+ Result<std::shared_ptr<RecordBatch>> ReadStripe(
130
+ int64_t stripe, const std::vector<std::string>& include_names);
131
+
132
+ /// \brief Seek to designated row. Invoke NextStripeReader() after seek
133
+ /// will return stripe reader starting from designated row.
134
+ ///
135
+ /// \param[in] row_number the rows number to seek
136
+ Status Seek(int64_t row_number);
137
+
138
+ /// \brief Get a stripe level record batch iterator.
139
+ ///
140
+ /// Each record batch will have up to `batch_size` rows.
141
+ /// NextStripeReader serves as a fine-grained alternative to ReadStripe
142
+ /// which may cause OOM issues by loading the whole stripe into memory.
143
+ ///
144
+ /// Note this will only read rows for the current stripe, not the entire
145
+ /// file.
146
+ ///
147
+ /// \param[in] batch_size the maximum number of rows in each record batch
148
+ /// \return the returned stripe reader
149
+ Result<std::shared_ptr<RecordBatchReader>> NextStripeReader(int64_t batch_size);
150
+
151
+ /// \brief Get a stripe level record batch iterator.
152
+ ///
153
+ /// Each record batch will have up to `batch_size` rows.
154
+ /// NextStripeReader serves as a fine-grained alternative to ReadStripe
155
+ /// which may cause OOM issues by loading the whole stripe into memory.
156
+ ///
157
+ /// Note this will only read rows for the current stripe, not the entire
158
+ /// file.
159
+ ///
160
+ /// \param[in] batch_size the maximum number of rows in each record batch
161
+ /// \param[in] include_indices the selected field indices to read
162
+ /// \return the stripe reader
163
+ Result<std::shared_ptr<RecordBatchReader>> NextStripeReader(
164
+ int64_t batch_size, const std::vector<int>& include_indices);
165
+
166
+ /// \brief Get a record batch iterator for the entire file.
167
+ ///
168
+ /// Each record batch will have up to `batch_size` rows.
169
+ ///
170
+ /// \param[in] batch_size the maximum number of rows in each record batch
171
+ /// \param[in] include_names the selected field names to read, if not empty
172
+ /// (otherwise all fields are read)
173
+ /// \return the record batch iterator
174
+ Result<std::shared_ptr<RecordBatchReader>> GetRecordBatchReader(
175
+ int64_t batch_size, const std::vector<std::string>& include_names);
176
+
177
+ /// \brief The number of stripes in the file
178
+ int64_t NumberOfStripes();
179
+
180
+ /// \brief The number of rows in the file
181
+ int64_t NumberOfRows();
182
+
183
+ /// \brief StripeInformation for each stripe.
184
+ StripeInformation GetStripeInformation(int64_t stripe);
185
+
186
+ /// \brief Get the format version of the file.
187
+ /// Currently known values are 0.11 and 0.12.
188
+ ///
189
+ /// \return The FileVersion of the ORC file.
190
+ FileVersion GetFileVersion();
191
+
192
+ /// \brief Get the software instance and version that wrote this file.
193
+ ///
194
+ /// \return a user-facing string that specifies the software version
195
+ std::string GetSoftwareVersion();
196
+
197
+ /// \brief Get the compression kind of the file.
198
+ ///
199
+ /// \return The kind of compression in the ORC file.
200
+ Result<Compression::type> GetCompression();
201
+
202
+ /// \brief Get the buffer size for the compression.
203
+ ///
204
+ /// \return Number of bytes to buffer for the compression codec.
205
+ int64_t GetCompressionSize();
206
+
207
+ /// \brief Get the number of rows per an entry in the row index.
208
+ /// \return the number of rows per an entry in the row index or 0 if there
209
+ /// is no row index.
210
+ int64_t GetRowIndexStride();
211
+
212
+ /// \brief Get ID of writer that generated the file.
213
+ ///
214
+ /// \return UNKNOWN_WRITER if the writer ID is undefined
215
+ WriterId GetWriterId();
216
+
217
+ /// \brief Get the writer id value when getWriterId() returns an unknown writer.
218
+ ///
219
+ /// \return the integer value of the writer ID.
220
+ int32_t GetWriterIdValue();
221
+
222
+ /// \brief Get the version of the writer.
223
+ ///
224
+ /// \return the version of the writer.
225
+
226
+ WriterVersion GetWriterVersion();
227
+
228
+ /// \brief Get the number of stripe statistics in the file.
229
+ ///
230
+ /// \return the number of stripe statistics
231
+ int64_t GetNumberOfStripeStatistics();
232
+
233
+ /// \brief Get the length of the data stripes in the file.
234
+ ///
235
+ /// \return return the number of bytes in stripes
236
+ int64_t GetContentLength();
237
+
238
+ /// \brief Get the length of the file stripe statistics.
239
+ ///
240
+ /// \return the number of compressed bytes in the file stripe statistics
241
+ int64_t GetStripeStatisticsLength();
242
+
243
+ /// \brief Get the length of the file footer.
244
+ ///
245
+ /// \return the number of compressed bytes in the file footer
246
+ int64_t GetFileFooterLength();
247
+
248
+ /// \brief Get the length of the file postscript.
249
+ ///
250
+ /// \return the number of bytes in the file postscript
251
+ int64_t GetFilePostscriptLength();
252
+
253
+ /// \brief Get the total length of the file.
254
+ ///
255
+ /// \return the number of bytes in the file
256
+ int64_t GetFileLength();
257
+
258
+ /// \brief Get the serialized file tail.
259
+ /// Useful if another reader of the same file wants to avoid re-reading
260
+ /// the file tail. See ReadOptions.SetSerializedFileTail().
261
+ ///
262
+ /// \return a string of bytes with the file tail
263
+ std::string GetSerializedFileTail();
264
+
265
+ /// \brief Return the metadata read from the ORC file
266
+ ///
267
+ /// \return A KeyValueMetadata object containing the ORC metadata
268
+ Result<std::shared_ptr<const KeyValueMetadata>> ReadMetadata();
269
+
270
+ private:
271
+ class Impl;
272
+ std::unique_ptr<Impl> impl_;
273
+ ORCFileReader();
274
+ };
275
+
276
+ /// \class ORCFileWriter
277
+ /// \brief Write an Arrow Table or RecordBatch to an ORC file.
278
+ class ARROW_EXPORT ORCFileWriter {
279
+ public:
280
+ ~ORCFileWriter();
281
+ /// \brief Creates a new ORC writer.
282
+ ///
283
+ /// \param[in] output_stream a pointer to the io::OutputStream to write into
284
+ /// \param[in] write_options the ORC writer options for Arrow
285
+ /// \return the returned writer object
286
+ static Result<std::unique_ptr<ORCFileWriter>> Open(
287
+ io::OutputStream* output_stream,
288
+ const WriteOptions& write_options = WriteOptions());
289
+
290
+ /// \brief Write a table. This can be called multiple times.
291
+ ///
292
+ /// Tables passed in subsequent calls must match the schema of the table that was
293
+ /// written first.
294
+ ///
295
+ /// \param[in] table the Arrow table from which data is extracted.
296
+ /// \return Status
297
+ Status Write(const Table& table);
298
+
299
+ /// \brief Write a RecordBatch. This can be called multiple times.
300
+ ///
301
+ /// RecordBatches passed in subsequent calls must match the schema of the
302
+ /// RecordBatch that was written first.
303
+ ///
304
+ /// \param[in] record_batch the Arrow RecordBatch from which data is extracted.
305
+ /// \return Status
306
+ Status Write(const RecordBatch& record_batch);
307
+
308
+ /// \brief Close an ORC writer (orc::Writer)
309
+ ///
310
+ /// \return Status
311
+ Status Close();
312
+
313
+ private:
314
+ class Impl;
315
+ std::unique_ptr<Impl> impl_;
316
+
317
+ private:
318
+ ORCFileWriter();
319
+ };
320
+
321
+ } // namespace orc
322
+ } // namespace adapters
323
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <vector>
21
+
22
+ #include "arrow/io/interfaces.h"
23
+ #include "arrow/status.h"
24
+ #include "arrow/util/type_fwd.h"
25
+ #include "arrow/util/visibility.h"
26
+
27
+ namespace arrow {
28
+
29
+ namespace adapters {
30
+
31
+ namespace orc {
32
+
33
/// Identifies the writer implementation that produced an ORC file.
enum class WriterId : int32_t {
  kOrcJava = 0,
  kOrcCpp = 1,
  kPresto = 2,
  kScritchleyGo = 3,
  kTrino = 4,
  kUnknown = INT32_MAX
};
41
+
42
/// Version of the writer software that produced an ORC file; newer versions
/// imply specific upstream bug fixes (the kHive*/kOrc* issue numbers).
enum class WriterVersion : int32_t {
  kOriginal = 0,
  kHive8732 = 1,
  kHive4243 = 2,
  kHive12055 = 3,
  kHive13083 = 4,
  kOrc101 = 5,
  kOrc135 = 6,
  kOrc517 = 7,
  kOrc203 = 8,
  kOrc14 = 9,
  kMax = INT32_MAX
};
55
+
56
/// Trade-off hint for the ORC compressor: favor speed or size reduction.
enum class CompressionStrategy : int32_t { kSpeed = 0, kCompression };
57
+
58
+ class ARROW_EXPORT FileVersion {
59
+ private:
60
+ int32_t major_version_;
61
+ int32_t minor_version_;
62
+
63
+ public:
64
+ static const FileVersion& v_0_11();
65
+ static const FileVersion& v_0_12();
66
+
67
+ FileVersion(int32_t major, int32_t minor)
68
+ : major_version_(major), minor_version_(minor) {}
69
+
70
+ /**
71
+ * Get major version
72
+ */
73
+ int32_t major_version() const { return this->major_version_; }
74
+
75
+ /**
76
+ * Get minor version
77
+ */
78
+ int32_t minor_version() const { return this->minor_version_; }
79
+
80
+ bool operator==(const FileVersion& right) const {
81
+ return this->major_version() == right.major_version() &&
82
+ this->minor_version() == right.minor_version();
83
+ }
84
+
85
+ bool operator!=(const FileVersion& right) const { return !(*this == right); }
86
+
87
+ std::string ToString() const;
88
+ };
89
+
90
+ /// Options for the ORC Writer
91
+ struct ARROW_EXPORT WriteOptions {
92
+ /// Number of rows the ORC writer writes at a time, default 1024
93
+ int64_t batch_size = 1024;
94
+ /// Which ORC file version to use, default FileVersion(0, 12)
95
+ FileVersion file_version = FileVersion(0, 12);
96
+ /// Size of each ORC stripe in bytes, default 64 MiB
97
+ int64_t stripe_size = 64 * 1024 * 1024;
98
+ /// The compression codec of the ORC file, there is no compression by default
99
+ Compression::type compression = Compression::UNCOMPRESSED;
100
+ /// The size of each compression block in bytes, default 64 KiB
101
+ int64_t compression_block_size = 64 * 1024;
102
+ /// The compression strategy i.e. speed vs size reduction, default
103
+ /// CompressionStrategy::kSpeed
104
+ CompressionStrategy compression_strategy = CompressionStrategy::kSpeed;
105
+ /// The number of rows per an entry in the row index, default 10000
106
+ int64_t row_index_stride = 10000;
107
+ /// The padding tolerance, default 0.0
108
+ double padding_tolerance = 0.0;
109
+ /// The dictionary key size threshold. 0 to disable dictionary encoding.
110
+ /// 1 to always enable dictionary encoding, default 0.0
111
+ double dictionary_key_size_threshold = 0.0;
112
+ /// The array of columns that use the bloom filter, default empty
113
+ std::vector<int64_t> bloom_filter_columns;
114
+ /// The upper limit of the false-positive rate of the bloom filter, default 0.05
115
+ double bloom_filter_fpp = 0.05;
116
+ };
117
+
118
+ } // namespace orc
119
+ } // namespace adapters
120
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+
22
+ #include "tensorflow/core/framework/op.h"
23
+
24
+ #include "arrow/type.h"
25
+
26
+ // These utilities are supposed to be included in TensorFlow operators
27
+ // that need to be compiled separately from Arrow because of ABI issues.
28
+ // They therefore need to be header-only.
29
+
30
+ namespace arrow {
31
+
32
+ namespace adapters {
33
+
34
+ namespace tensorflow {
35
+
36
+ Status GetArrowType(::tensorflow::DataType dtype, std::shared_ptr<DataType>* out) {
37
+ switch (dtype) {
38
+ case ::tensorflow::DT_BOOL:
39
+ *out = arrow::boolean();
40
+ break;
41
+ case ::tensorflow::DT_FLOAT:
42
+ *out = arrow::float32();
43
+ break;
44
+ case ::tensorflow::DT_DOUBLE:
45
+ *out = arrow::float64();
46
+ break;
47
+ case ::tensorflow::DT_HALF:
48
+ *out = arrow::float16();
49
+ break;
50
+ case ::tensorflow::DT_INT8:
51
+ *out = arrow::int8();
52
+ break;
53
+ case ::tensorflow::DT_INT16:
54
+ *out = arrow::int16();
55
+ break;
56
+ case ::tensorflow::DT_INT32:
57
+ *out = arrow::int32();
58
+ break;
59
+ case ::tensorflow::DT_INT64:
60
+ *out = arrow::int64();
61
+ break;
62
+ case ::tensorflow::DT_UINT8:
63
+ *out = arrow::uint8();
64
+ break;
65
+ case ::tensorflow::DT_UINT16:
66
+ *out = arrow::uint16();
67
+ break;
68
+ case ::tensorflow::DT_UINT32:
69
+ *out = arrow::uint32();
70
+ break;
71
+ case ::tensorflow::DT_UINT64:
72
+ *out = arrow::uint64();
73
+ break;
74
+ default:
75
+ return Status::TypeError("TensorFlow data type is not supported");
76
+ }
77
+ return Status::OK();
78
+ }
79
+
80
+ Status GetTensorFlowType(std::shared_ptr<DataType> dtype, ::tensorflow::DataType* out) {
81
+ switch (dtype->id()) {
82
+ case Type::BOOL:
83
+ *out = ::tensorflow::DT_BOOL;
84
+ break;
85
+ case Type::UINT8:
86
+ *out = ::tensorflow::DT_UINT8;
87
+ break;
88
+ case Type::INT8:
89
+ *out = ::tensorflow::DT_INT8;
90
+ break;
91
+ case Type::UINT16:
92
+ *out = ::tensorflow::DT_UINT16;
93
+ break;
94
+ case Type::INT16:
95
+ *out = ::tensorflow::DT_INT16;
96
+ break;
97
+ case Type::UINT32:
98
+ *out = ::tensorflow::DT_UINT32;
99
+ break;
100
+ case Type::INT32:
101
+ *out = ::tensorflow::DT_INT32;
102
+ break;
103
+ case Type::UINT64:
104
+ *out = ::tensorflow::DT_UINT64;
105
+ break;
106
+ case Type::INT64:
107
+ *out = ::tensorflow::DT_INT64;
108
+ break;
109
+ case Type::HALF_FLOAT:
110
+ *out = ::tensorflow::DT_HALF;
111
+ break;
112
+ case Type::FLOAT:
113
+ *out = ::tensorflow::DT_FLOAT;
114
+ break;
115
+ case Type::DOUBLE:
116
+ *out = ::tensorflow::DT_DOUBLE;
117
+ break;
118
+ default:
119
+ return Status::TypeError("Arrow data type is not supported");
120
+ }
121
+ return arrow::Status::OK();
122
+ }
123
+
124
+ } // namespace tensorflow
125
+
126
+ } // namespace adapters
127
+
128
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_adaptive.h ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <cstring>
22
+ #include <memory>
23
+ #include <type_traits>
24
+
25
+ #include "arrow/array/builder_base.h"
26
+ #include "arrow/buffer.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type.h"
29
+ #include "arrow/util/macros.h"
30
+ #include "arrow/util/visibility.h"
31
+
32
+ namespace arrow {
33
+
34
+ /// \addtogroup numeric-builders
35
+ ///
36
+ /// @{
37
+
38
+ namespace internal {
39
+
40
// Shared machinery for builders that start with a narrow physical integer
// width (int_size_) and widen it lazily as larger values arrive. Appends are
// staged in fixed-size pending_* arrays and flushed to the real buffer by the
// subclass's CommitPendingData(), which decides whether widening is needed.
class ARROW_EXPORT AdaptiveIntBuilderBase : public ArrayBuilder {
 public:
  AdaptiveIntBuilderBase(uint8_t start_int_size, MemoryPool* pool,
                         int64_t alignment = kDefaultBufferAlignment);

  // Convenience overload: start at the narrowest width (1 byte).
  explicit AdaptiveIntBuilderBase(MemoryPool* pool,
                                  int64_t alignment = kDefaultBufferAlignment)
      : AdaptiveIntBuilderBase(sizeof(uint8_t), pool, alignment) {}

  /// \brief Append multiple nulls
  /// \param[in] length the number of nulls to append
  Status AppendNulls(int64_t length) final {
    // Flush staged values first so length_ reflects all committed rows and
    // the memset below targets the correct offset in the data buffer.
    ARROW_RETURN_NOT_OK(CommitPendingData());
    if (ARROW_PREDICT_TRUE(length > 0)) {
      ARROW_RETURN_NOT_OK(Reserve(length));
      // Zero the slots backing the nulls so the buffer holds no garbage.
      memset(data_->mutable_data() + length_ * int_size_, 0, int_size_ * length);
      UnsafeSetNull(length);
    }
    return Status::OK();
  }

  // Append a single null via the pending buffer (flushed when it fills up).
  Status AppendNull() final {
    pending_data_[pending_pos_] = 0;
    pending_valid_[pending_pos_] = 0;
    pending_has_nulls_ = true;
    ++pending_pos_;
    ++length_;
    ++null_count_;

    if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) {
      return CommitPendingData();
    }
    return Status::OK();
  }

  // Append `length` zero-valued, non-null entries in one shot.
  Status AppendEmptyValues(int64_t length) final {
    // Flush staged values first; see AppendNulls for why.
    ARROW_RETURN_NOT_OK(CommitPendingData());
    if (ARROW_PREDICT_TRUE(length > 0)) {
      ARROW_RETURN_NOT_OK(Reserve(length));
      memset(data_->mutable_data() + length_ * int_size_, 0, int_size_ * length);
      UnsafeSetNotNull(length);
    }
    return Status::OK();
  }

  // Append a single zero-valued, non-null entry via the pending buffer.
  Status AppendEmptyValue() final {
    pending_data_[pending_pos_] = 0;
    pending_valid_[pending_pos_] = 1;
    ++pending_pos_;
    ++length_;

    if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) {
      return CommitPendingData();
    }
    return Status::OK();
  }

  void Reset() override;
  Status Resize(int64_t capacity) override;

 protected:
  // Stage one valid value; values are stored as uint64_t until commit time
  // (signed builders bit-cast on the way in/out).
  Status AppendInternal(const uint64_t val) {
    pending_data_[pending_pos_] = val;
    pending_valid_[pending_pos_] = 1;
    ++pending_pos_;
    ++length_;

    if (ARROW_PREDICT_FALSE(pending_pos_ >= pending_size_)) {
      return CommitPendingData();
    }
    return Status::OK();
  }

  // Subclass hook: move pending_* contents into data_, widening int_size_
  // if any staged value does not fit the current width.
  virtual Status CommitPendingData() = 0;

  // Widen (or keep) the physical storage; the two SFINAE overloads separate
  // the no-op shrink/equal case from the actual widening copy.
  template <typename new_type, typename old_type>
  typename std::enable_if<sizeof(old_type) >= sizeof(new_type), Status>::type
  ExpandIntSizeInternal();
  template <typename new_type, typename old_type>
  typename std::enable_if<(sizeof(old_type) < sizeof(new_type)), Status>::type
  ExpandIntSizeInternal();

  std::shared_ptr<ResizableBuffer> data_;
  uint8_t* raw_data_ = NULLPTR;

  // Width requested at construction; int_size_ is the current (>=) width.
  const uint8_t start_int_size_;
  uint8_t int_size_;

  // Fixed-capacity staging area for appends awaiting CommitPendingData().
  static constexpr int32_t pending_size_ = 1024;
  uint8_t pending_valid_[pending_size_];
  uint64_t pending_data_[pending_size_];
  int32_t pending_pos_ = 0;
  bool pending_has_nulls_ = false;
};
134
+
135
+ } // namespace internal
136
+
137
+ class ARROW_EXPORT AdaptiveUIntBuilder : public internal::AdaptiveIntBuilderBase {
138
+ public:
139
+ explicit AdaptiveUIntBuilder(uint8_t start_int_size,
140
+ MemoryPool* pool = default_memory_pool());
141
+
142
+ explicit AdaptiveUIntBuilder(MemoryPool* pool = default_memory_pool())
143
+ : AdaptiveUIntBuilder(sizeof(uint8_t), pool) {}
144
+
145
+ using internal::AdaptiveIntBuilderBase::Reset;
146
+
147
+ /// Scalar append
148
+ Status Append(const uint64_t val) { return AppendInternal(val); }
149
+
150
+ /// \brief Append a sequence of elements in one shot
151
+ /// \param[in] values a contiguous C array of values
152
+ /// \param[in] length the number of values to append
153
+ /// \param[in] valid_bytes an optional sequence of bytes where non-zero
154
+ /// indicates a valid (non-null) value
155
+ /// \return Status
156
+ Status AppendValues(const uint64_t* values, int64_t length,
157
+ const uint8_t* valid_bytes = NULLPTR);
158
+
159
+ Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
160
+
161
+ std::shared_ptr<DataType> type() const override;
162
+
163
+ protected:
164
+ Status CommitPendingData() override;
165
+ Status ExpandIntSize(uint8_t new_int_size);
166
+
167
+ Status AppendValuesInternal(const uint64_t* values, int64_t length,
168
+ const uint8_t* valid_bytes);
169
+
170
+ template <typename new_type>
171
+ Status ExpandIntSizeN();
172
+ };
173
+
174
+ class ARROW_EXPORT AdaptiveIntBuilder : public internal::AdaptiveIntBuilderBase {
175
+ public:
176
+ explicit AdaptiveIntBuilder(uint8_t start_int_size,
177
+ MemoryPool* pool = default_memory_pool(),
178
+ int64_t alignment = kDefaultBufferAlignment);
179
+
180
+ explicit AdaptiveIntBuilder(MemoryPool* pool = default_memory_pool(),
181
+ int64_t alignment = kDefaultBufferAlignment)
182
+ : AdaptiveIntBuilder(sizeof(uint8_t), pool, alignment) {}
183
+
184
+ using internal::AdaptiveIntBuilderBase::Reset;
185
+
186
+ /// Scalar append
187
+ Status Append(const int64_t val) { return AppendInternal(static_cast<uint64_t>(val)); }
188
+
189
+ /// \brief Append a sequence of elements in one shot
190
+ /// \param[in] values a contiguous C array of values
191
+ /// \param[in] length the number of values to append
192
+ /// \param[in] valid_bytes an optional sequence of bytes where non-zero
193
+ /// indicates a valid (non-null) value
194
+ /// \return Status
195
+ Status AppendValues(const int64_t* values, int64_t length,
196
+ const uint8_t* valid_bytes = NULLPTR);
197
+
198
+ Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
199
+
200
+ std::shared_ptr<DataType> type() const override;
201
+
202
+ protected:
203
+ Status CommitPendingData() override;
204
+ Status ExpandIntSize(uint8_t new_int_size);
205
+
206
+ Status AppendValuesInternal(const int64_t* values, int64_t length,
207
+ const uint8_t* valid_bytes);
208
+
209
+ template <typename new_type>
210
+ Status ExpandIntSizeN();
211
+ };
212
+
213
+ /// @}
214
+
215
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/array/builder_nested.h ADDED
@@ -0,0 +1,838 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <limits>
22
+ #include <memory>
23
+ #include <utility>
24
+ #include <vector>
25
+
26
+ #include "arrow/array/array_nested.h"
27
+ #include "arrow/array/builder_base.h"
28
+ #include "arrow/array/data.h"
29
+ #include "arrow/buffer.h"
30
+ #include "arrow/buffer_builder.h"
31
+ #include "arrow/status.h"
32
+ #include "arrow/type.h"
33
+ #include "arrow/util/macros.h"
34
+ #include "arrow/util/visibility.h"
35
+
36
+ namespace arrow {
37
+
38
+ /// \addtogroup nested-builders
39
+ ///
40
+ /// @{
41
+
42
+ // ----------------------------------------------------------------------
43
+ // VarLengthListLikeBuilder
44
+
45
+ template <typename TYPE>
46
+ class ARROW_EXPORT VarLengthListLikeBuilder : public ArrayBuilder {
47
+ public:
48
+ using TypeClass = TYPE;
49
+ using offset_type = typename TypeClass::offset_type;
50
+
51
+ /// Use this constructor to incrementally build the value array along with offsets and
52
+ /// null bitmap.
53
+ VarLengthListLikeBuilder(MemoryPool* pool,
54
+ std::shared_ptr<ArrayBuilder> const& value_builder,
55
+ const std::shared_ptr<DataType>& type,
56
+ int64_t alignment = kDefaultBufferAlignment)
57
+ : ArrayBuilder(pool, alignment),
58
+ offsets_builder_(pool, alignment),
59
+ value_builder_(value_builder),
60
+ value_field_(type->field(0)->WithType(NULLPTR)) {}
61
+
62
+ VarLengthListLikeBuilder(MemoryPool* pool,
63
+ std::shared_ptr<ArrayBuilder> const& value_builder,
64
+ int64_t alignment = kDefaultBufferAlignment)
65
+ : VarLengthListLikeBuilder(pool, value_builder,
66
+ std::make_shared<TYPE>(value_builder->type()),
67
+ alignment) {}
68
+
69
+ ~VarLengthListLikeBuilder() override = default;
70
+
71
+ Status Resize(int64_t capacity) override {
72
+ if (ARROW_PREDICT_FALSE(capacity > maximum_elements())) {
73
+ return Status::CapacityError(type_name(),
74
+ " array cannot reserve space for more than ",
75
+ maximum_elements(), " got ", capacity);
76
+ }
77
+ ARROW_RETURN_NOT_OK(CheckCapacity(capacity));
78
+
79
+ // One more than requested for list offsets
80
+ const int64_t offsets_capacity =
81
+ is_list_view(TYPE::type_id) ? capacity : capacity + 1;
82
+ ARROW_RETURN_NOT_OK(offsets_builder_.Resize(offsets_capacity));
83
+ return ArrayBuilder::Resize(capacity);
84
+ }
85
+
86
+ void Reset() override {
87
+ ArrayBuilder::Reset();
88
+ offsets_builder_.Reset();
89
+ value_builder_->Reset();
90
+ }
91
+
92
+ /// \brief Start a new variable-length list slot
93
+ ///
94
+ /// This function should be called before appending elements to the
95
+ /// value builder. Elements appended to the value builder before this function
96
+ /// is called for the first time, will not be members of any list value.
97
+ ///
98
+ /// After this function is called, list_length elements SHOULD be appended to
99
+ /// the values builder. If this contract is violated, the behavior is defined by
100
+ /// the concrete builder implementation and SHOULD NOT be relied upon unless
101
+ /// the caller is specifically building a [Large]List or [Large]ListView array.
102
+ ///
103
+ /// For [Large]List arrays, the list slot length will be the number of elements
104
+ /// appended to the values builder before the next call to Append* or Finish. For
105
+ /// [Large]ListView arrays, the list slot length will be exactly list_length, but if
106
+ /// Append* is called before at least list_length elements are appended to the values
107
+ /// builder, the current list slot will share elements with the next list
108
+ /// slots or an invalid [Large]ListView array will be generated because there
109
+ /// aren't enough elements in the values builder to fill the list slots.
110
+ ///
111
+ /// If you're building a [Large]List and don't need to be compatible
112
+ /// with [Large]ListView, then `BaseListBuilder::Append(bool is_valid)`
113
+ /// is a simpler API.
114
+ ///
115
+ /// \pre if is_valid is false, list_length MUST be 0
116
+ /// \param is_valid Whether the new list slot is valid
117
+ /// \param list_length The number of elements in the list
118
+ Status Append(bool is_valid, int64_t list_length) {
119
+ ARROW_RETURN_NOT_OK(Reserve(1));
120
+ assert(is_valid || list_length == 0);
121
+ UnsafeAppendToBitmap(is_valid);
122
+ UnsafeAppendDimensions(/*offset=*/value_builder_->length(), /*size=*/list_length);
123
+ return Status::OK();
124
+ }
125
+
126
+ Status AppendNull() final {
127
+ // Append() a null list slot with list_length=0.
128
+ //
129
+ // When building [Large]List arrays, elements being appended to the values builder
130
+ // before the next call to Append* or Finish will extend the list slot length, but
131
+ // that is totally fine because list arrays admit non-empty null list slots.
132
+ //
133
+ // In the case of [Large]ListViews that's not a problem either because the
134
+ // list slot length remains zero.
135
+ return Append(false, 0);
136
+ }
137
+
138
+ Status AppendNulls(int64_t length) final {
139
+ ARROW_RETURN_NOT_OK(Reserve(length));
140
+ UnsafeAppendToBitmap(length, false);
141
+ UnsafeAppendEmptyDimensions(/*num_values=*/length);
142
+ return Status::OK();
143
+ }
144
+
145
+ /// \brief Append an empty list slot
146
+ ///
147
+ /// \post Another call to Append* or Finish should be made before appending to
148
+ /// the values builder to ensure list slot remains empty
149
+ Status AppendEmptyValue() final { return Append(true, 0); }
150
+
151
+ /// \brief Append an empty list slot
152
+ ///
153
+ /// \post Another call to Append* or Finish should be made before appending to
154
+ /// the values builder to ensure the last list slot remains empty
155
+ Status AppendEmptyValues(int64_t length) final {
156
+ ARROW_RETURN_NOT_OK(Reserve(length));
157
+ UnsafeAppendToBitmap(length, true);
158
+ UnsafeAppendEmptyDimensions(/*num_values=*/length);
159
+ return Status::OK();
160
+ }
161
+
162
+ /// \brief Vector append
163
+ ///
164
+ /// For list-array builders, the sizes are inferred from the offsets.
165
+ /// BaseListBuilder<T> provides an implementation that doesn't take sizes, but
166
+ /// this virtual function allows dispatching calls to both list-array and
167
+ /// list-view-array builders (which need the sizes)
168
+ ///
169
+ /// \param offsets The offsets of the variable-length lists
170
+ /// \param sizes The sizes of the variable-length lists
171
+ /// \param length The number of offsets, sizes, and validity bits to append
172
+ /// \param valid_bytes If passed, valid_bytes is of equal length to values,
173
+ /// and any zero byte will be considered as a null for that slot
174
+ virtual Status AppendValues(const offset_type* offsets, const offset_type* sizes,
175
+ int64_t length, const uint8_t* valid_bytes) = 0;
176
+
177
+ Status AppendArraySlice(const ArraySpan& array, int64_t offset,
178
+ int64_t length) override {
179
+ const offset_type* offsets = array.GetValues<offset_type>(1);
180
+ [[maybe_unused]] const offset_type* sizes = NULLPTR;
181
+ if constexpr (is_list_view(TYPE::type_id)) {
182
+ sizes = array.GetValues<offset_type>(2);
183
+ }
184
+ const bool all_valid = !array.MayHaveLogicalNulls();
185
+ const uint8_t* validity = array.HasValidityBitmap() ? array.buffers[0].data : NULLPTR;
186
+ ARROW_RETURN_NOT_OK(Reserve(length));
187
+ for (int64_t row = offset; row < offset + length; row++) {
188
+ const bool is_valid =
189
+ all_valid || (validity && bit_util::GetBit(validity, array.offset + row)) ||
190
+ array.IsValid(row);
191
+ int64_t size = 0;
192
+ if (is_valid) {
193
+ if constexpr (is_list_view(TYPE::type_id)) {
194
+ size = sizes[row];
195
+ } else {
196
+ size = offsets[row + 1] - offsets[row];
197
+ }
198
+ }
199
+ UnsafeAppendToBitmap(is_valid);
200
+ UnsafeAppendDimensions(/*offset=*/value_builder_->length(), size);
201
+ if (is_valid) {
202
+ ARROW_RETURN_NOT_OK(
203
+ value_builder_->AppendArraySlice(array.child_data[0], offsets[row], size));
204
+ }
205
+ }
206
+ return Status::OK();
207
+ }
208
+
209
+ Status ValidateOverflow(int64_t new_elements) const {
210
+ auto new_length = value_builder_->length() + new_elements;
211
+ if (ARROW_PREDICT_FALSE(new_length > maximum_elements())) {
212
+ return Status::CapacityError(type_name(), " array cannot contain more than ",
213
+ maximum_elements(), " elements, have ", new_elements);
214
+ } else {
215
+ return Status::OK();
216
+ }
217
+ }
218
+
219
+ ArrayBuilder* value_builder() const { return value_builder_.get(); }
220
+
221
+ // Cannot make this a static attribute because of linking issues
222
+ static constexpr int64_t maximum_elements() {
223
+ return std::numeric_limits<offset_type>::max() - 1;
224
+ }
225
+
226
+ std::shared_ptr<DataType> type() const override {
227
+ return std::make_shared<TYPE>(value_field_->WithType(value_builder_->type()));
228
+ }
229
+
230
+ private:
231
+ static constexpr const char* type_name() {
232
+ if constexpr (is_list_view(TYPE::type_id)) {
233
+ return "ListView";
234
+ } else {
235
+ return "List";
236
+ }
237
+ }
238
+
239
+ protected:
240
+ /// \brief Append dimensions for num_values empty list slots.
241
+ ///
242
+ /// ListViewBuilder overrides this to also append the sizes.
243
+ virtual void UnsafeAppendEmptyDimensions(int64_t num_values) {
244
+ const int64_t offset = value_builder_->length();
245
+ for (int64_t i = 0; i < num_values; ++i) {
246
+ offsets_builder_.UnsafeAppend(static_cast<offset_type>(offset));
247
+ }
248
+ }
249
+
250
+ /// \brief Append dimensions for a single list slot.
251
+ ///
252
+ /// ListViewBuilder overrides this to also append the size.
253
+ virtual void UnsafeAppendDimensions(int64_t offset, int64_t size) {
254
+ offsets_builder_.UnsafeAppend(static_cast<offset_type>(offset));
255
+ }
256
+
257
+ TypedBufferBuilder<offset_type> offsets_builder_;
258
+ std::shared_ptr<ArrayBuilder> value_builder_;
259
+ std::shared_ptr<Field> value_field_;
260
+ };
261
+
262
+ // ----------------------------------------------------------------------
263
+ // ListBuilder / LargeListBuilder
264
+
265
+ template <typename TYPE>
266
+ class ARROW_EXPORT BaseListBuilder : public VarLengthListLikeBuilder<TYPE> {
267
+ private:
268
+ using BASE = VarLengthListLikeBuilder<TYPE>;
269
+
270
+ public:
271
+ using TypeClass = TYPE;
272
+ using offset_type = typename BASE::offset_type;
273
+
274
+ using BASE::BASE;
275
+
276
+ using BASE::Append;
277
+
278
+ ~BaseListBuilder() override = default;
279
+
280
+ /// \brief Start a new variable-length list slot
281
+ ///
282
+ /// This function should be called before beginning to append elements to the
283
+ /// value builder
284
+ Status Append(bool is_valid = true) {
285
+ // The value_length parameter to BASE::Append(bool, int64_t) is ignored when
286
+ // building a list array, so we can pass 0 here.
287
+ return BASE::Append(is_valid, 0);
288
+ }
289
+
290
+ /// \brief Vector append
291
+ ///
292
+ /// If passed, valid_bytes is of equal length to values, and any zero byte
293
+ /// will be considered as a null for that slot
294
+ Status AppendValues(const offset_type* offsets, int64_t length,
295
+ const uint8_t* valid_bytes = NULLPTR) {
296
+ ARROW_RETURN_NOT_OK(this->Reserve(length));
297
+ this->UnsafeAppendToBitmap(valid_bytes, length);
298
+ this->offsets_builder_.UnsafeAppend(offsets, length);
299
+ return Status::OK();
300
+ }
301
+
302
+ Status AppendValues(const offset_type* offsets, const offset_type* sizes,
303
+ int64_t length, const uint8_t* valid_bytes) final {
304
+ // Offsets are assumed to be valid, but the first length-1 sizes have to be
305
+ // consistent with the offsets to partially rule out the possibility that the
306
+ // caller is passing sizes that could work if building a list-view, but don't
307
+ // work on building a list that requires offsets to be non-decreasing.
308
+ //
309
+ // CAUTION: the last size element (`sizes[length - 1]`) is not
310
+ // validated and could be inconsistent with the offsets given in a
311
+ // subsequent call to AppendValues.
312
+ #ifndef NDEBUG
313
+ if (sizes) {
314
+ for (int64_t i = 0; i < length - 1; ++i) {
315
+ if (ARROW_PREDICT_FALSE(offsets[i] != offsets[i + 1] - sizes[i])) {
316
+ if (!valid_bytes || valid_bytes[i]) {
317
+ return Status::Invalid(
318
+ "BaseListBuilder: sizes are inconsistent with offsets provided");
319
+ }
320
+ }
321
+ }
322
+ }
323
+ #endif
324
+ return AppendValues(offsets, length, valid_bytes);
325
+ }
326
+
327
+ Status AppendValues(const offset_type* offsets, const offset_type* sizes,
328
+ int64_t length) {
329
+ return AppendValues(offsets, sizes, length, /*valid_bytes=*/NULLPTR);
330
+ }
331
+
332
+ Status AppendNextOffset() {
333
+ ARROW_RETURN_NOT_OK(this->ValidateOverflow(0));
334
+ const int64_t num_values = this->value_builder_->length();
335
+ return this->offsets_builder_.Append(static_cast<offset_type>(num_values));
336
+ }
337
+
338
+ Status FinishInternal(std::shared_ptr<ArrayData>* out) override {
339
+ ARROW_RETURN_NOT_OK(AppendNextOffset());
340
+
341
+ // Offset padding zeroed by BufferBuilder
342
+ std::shared_ptr<Buffer> offsets;
343
+ std::shared_ptr<Buffer> null_bitmap;
344
+ ARROW_RETURN_NOT_OK(this->offsets_builder_.Finish(&offsets));
345
+ ARROW_RETURN_NOT_OK(this->null_bitmap_builder_.Finish(&null_bitmap));
346
+
347
+ if (this->value_builder_->length() == 0) {
348
+ // Try to make sure we get a non-null values buffer (ARROW-2744)
349
+ ARROW_RETURN_NOT_OK(this->value_builder_->Resize(0));
350
+ }
351
+
352
+ std::shared_ptr<ArrayData> items;
353
+ ARROW_RETURN_NOT_OK(this->value_builder_->FinishInternal(&items));
354
+
355
+ *out = ArrayData::Make(this->type(), this->length_,
356
+ {std::move(null_bitmap), std::move(offsets)},
357
+ {std::move(items)}, this->null_count_);
358
+ this->Reset();
359
+ return Status::OK();
360
+ }
361
+ };
362
+
363
+ /// \class ListBuilder
364
+ /// \brief Builder class for variable-length list array value types
365
+ ///
366
+ /// To use this class, you must append values to the child array builder and use
367
+ /// the Append function to delimit each distinct list value (once the values
368
+ /// have been appended to the child array) or use the bulk API to append
369
+ /// a sequence of offsets and null values.
370
+ ///
371
+ /// A note on types. Per arrow/type.h all types in the c++ implementation are
372
+ /// logical so even though this class always builds list array, this can
373
+ /// represent multiple different logical types. If no logical type is provided
374
+ /// at construction time, the class defaults to List<T> where t is taken from the
375
+ /// value_builder/values that the object is constructed with.
376
+ class ARROW_EXPORT ListBuilder : public BaseListBuilder<ListType> {
377
+ public:
378
+ using BaseListBuilder::BaseListBuilder;
379
+
380
+ /// \cond FALSE
381
+ using ArrayBuilder::Finish;
382
+ /// \endcond
383
+
384
+ Status Finish(std::shared_ptr<ListArray>* out) { return FinishTyped(out); }
385
+ };
386
+
387
+ /// \class LargeListBuilder
388
+ /// \brief Builder class for large variable-length list array value types
389
+ ///
390
+ /// Like ListBuilder, but to create large list arrays (with 64-bit offsets).
391
+ class ARROW_EXPORT LargeListBuilder : public BaseListBuilder<LargeListType> {
392
+ public:
393
+ using BaseListBuilder::BaseListBuilder;
394
+
395
+ /// \cond FALSE
396
+ using ArrayBuilder::Finish;
397
+ /// \endcond
398
+
399
+ Status Finish(std::shared_ptr<LargeListArray>* out) { return FinishTyped(out); }
400
+ };
401
+
402
+ // ----------------------------------------------------------------------
403
+ // ListViewBuilder / LargeListViewBuilder
404
+
405
+ template <typename TYPE>
406
+ class ARROW_EXPORT BaseListViewBuilder : public VarLengthListLikeBuilder<TYPE> {
407
+ private:
408
+ using BASE = VarLengthListLikeBuilder<TYPE>;
409
+
410
+ public:
411
+ using TypeClass = TYPE;
412
+ using offset_type = typename BASE::offset_type;
413
+
414
+ using BASE::BASE;
415
+
416
+ ~BaseListViewBuilder() override = default;
417
+
418
+ Status Resize(int64_t capacity) override {
419
+ ARROW_RETURN_NOT_OK(BASE::Resize(capacity));
420
+ return sizes_builder_.Resize(capacity);
421
+ }
422
+
423
+ void Reset() override {
424
+ BASE::Reset();
425
+ sizes_builder_.Reset();
426
+ }
427
+
428
+ /// \brief Vector append
429
+ ///
430
+ /// If passed, valid_bytes is of equal length to values, and any zero byte
431
+ /// will be considered as a null for that slot
432
+ Status AppendValues(const offset_type* offsets, const offset_type* sizes,
433
+ int64_t length, const uint8_t* valid_bytes) final {
434
+ ARROW_RETURN_NOT_OK(this->Reserve(length));
435
+ this->UnsafeAppendToBitmap(valid_bytes, length);
436
+ this->offsets_builder_.UnsafeAppend(offsets, length);
437
+ this->sizes_builder_.UnsafeAppend(sizes, length);
438
+ return Status::OK();
439
+ }
440
+
441
+ Status AppendValues(const offset_type* offsets, const offset_type* sizes,
442
+ int64_t length) {
443
+ return AppendValues(offsets, sizes, length, /*valid_bytes=*/NULLPTR);
444
+ }
445
+
446
+ Status FinishInternal(std::shared_ptr<ArrayData>* out) override {
447
+ // Offset and sizes padding zeroed by BufferBuilder
448
+ std::shared_ptr<Buffer> null_bitmap;
449
+ std::shared_ptr<Buffer> offsets;
450
+ std::shared_ptr<Buffer> sizes;
451
+ ARROW_RETURN_NOT_OK(this->null_bitmap_builder_.Finish(&null_bitmap));
452
+ ARROW_RETURN_NOT_OK(this->offsets_builder_.Finish(&offsets));
453
+ ARROW_RETURN_NOT_OK(this->sizes_builder_.Finish(&sizes));
454
+
455
+ if (this->value_builder_->length() == 0) {
456
+ // Try to make sure we get a non-null values buffer (ARROW-2744)
457
+ ARROW_RETURN_NOT_OK(this->value_builder_->Resize(0));
458
+ }
459
+
460
+ std::shared_ptr<ArrayData> items;
461
+ ARROW_RETURN_NOT_OK(this->value_builder_->FinishInternal(&items));
462
+
463
+ *out = ArrayData::Make(this->type(), this->length_,
464
+ {std::move(null_bitmap), std::move(offsets), std::move(sizes)},
465
+ {std::move(items)}, this->null_count_);
466
+ this->Reset();
467
+ return Status::OK();
468
+ }
469
+
470
+ protected:
471
+ void UnsafeAppendEmptyDimensions(int64_t num_values) override {
472
+ for (int64_t i = 0; i < num_values; ++i) {
473
+ this->offsets_builder_.UnsafeAppend(0);
474
+ }
475
+ for (int64_t i = 0; i < num_values; ++i) {
476
+ this->sizes_builder_.UnsafeAppend(0);
477
+ }
478
+ }
479
+
480
+ void UnsafeAppendDimensions(int64_t offset, int64_t size) override {
481
+ this->offsets_builder_.UnsafeAppend(static_cast<offset_type>(offset));
482
+ this->sizes_builder_.UnsafeAppend(static_cast<offset_type>(size));
483
+ }
484
+
485
+ private:
486
+ TypedBufferBuilder<offset_type> sizes_builder_;
487
+ };
488
+
489
+ class ARROW_EXPORT ListViewBuilder final : public BaseListViewBuilder<ListViewType> {
490
+ public:
491
+ using BaseListViewBuilder::BaseListViewBuilder;
492
+
493
+ /// \cond FALSE
494
+ using ArrayBuilder::Finish;
495
+ /// \endcond
496
+
497
+ Status Finish(std::shared_ptr<ListViewArray>* out) { return FinishTyped(out); }
498
+ };
499
+
500
+ class ARROW_EXPORT LargeListViewBuilder final
501
+ : public BaseListViewBuilder<LargeListViewType> {
502
+ public:
503
+ using BaseListViewBuilder::BaseListViewBuilder;
504
+
505
+ /// \cond FALSE
506
+ using ArrayBuilder::Finish;
507
+ /// \endcond
508
+
509
+ Status Finish(std::shared_ptr<LargeListViewArray>* out) { return FinishTyped(out); }
510
+ };
511
+
512
+ // ----------------------------------------------------------------------
513
+ // Map builder
514
+
515
+ /// \class MapBuilder
516
+ /// \brief Builder class for arrays of variable-size maps
517
+ ///
518
+ /// To use this class, you must use the Append function to delimit each distinct
519
+ /// map before appending values to the key and item array builders, or use the
520
+ /// bulk API to append a sequence of offsets and null maps.
521
+ ///
522
+ /// Key uniqueness and ordering are not validated.
523
+ class ARROW_EXPORT MapBuilder : public ArrayBuilder {
524
+ public:
525
+ /// Use this constructor to define the built array's type explicitly. If key_builder
526
+ /// or item_builder has indeterminate type, this builder will also.
527
+ MapBuilder(MemoryPool* pool, const std::shared_ptr<ArrayBuilder>& key_builder,
528
+ const std::shared_ptr<ArrayBuilder>& item_builder,
529
+ const std::shared_ptr<DataType>& type);
530
+
531
+ /// Use this constructor to infer the built array's type. If key_builder or
532
+ /// item_builder has indeterminate type, this builder will also.
533
+ MapBuilder(MemoryPool* pool, const std::shared_ptr<ArrayBuilder>& key_builder,
534
+ const std::shared_ptr<ArrayBuilder>& item_builder, bool keys_sorted = false);
535
+
536
+ MapBuilder(MemoryPool* pool, const std::shared_ptr<ArrayBuilder>& item_builder,
537
+ const std::shared_ptr<DataType>& type);
538
+
539
+ Status Resize(int64_t capacity) override;
540
+ void Reset() override;
541
+ Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
542
+
543
+ /// \cond FALSE
544
+ using ArrayBuilder::Finish;
545
+ /// \endcond
546
+
547
+ Status Finish(std::shared_ptr<MapArray>* out) { return FinishTyped(out); }
548
+
549
+ /// \brief Vector append
550
+ ///
551
+ /// If passed, valid_bytes is of equal length to values, and any zero byte
552
+ /// will be considered as a null for that slot
553
+ Status AppendValues(const int32_t* offsets, int64_t length,
554
+ const uint8_t* valid_bytes = NULLPTR);
555
+
556
+ /// \brief Start a new variable-length map slot
557
+ ///
558
+ /// This function should be called before beginning to append elements to the
559
+ /// key and item builders
560
+ Status Append();
561
+
562
+ Status AppendNull() final;
563
+
564
+ Status AppendNulls(int64_t length) final;
565
+
566
+ Status AppendEmptyValue() final;
567
+
568
+ Status AppendEmptyValues(int64_t length) final;
569
+
570
+ Status AppendArraySlice(const ArraySpan& array, int64_t offset,
571
+ int64_t length) override {
572
+ const int32_t* offsets = array.GetValues<int32_t>(1);
573
+ const bool all_valid = !array.MayHaveLogicalNulls();
574
+ const uint8_t* validity = array.HasValidityBitmap() ? array.buffers[0].data : NULLPTR;
575
+ for (int64_t row = offset; row < offset + length; row++) {
576
+ const bool is_valid =
577
+ all_valid || (validity && bit_util::GetBit(validity, array.offset + row)) ||
578
+ array.IsValid(row);
579
+ if (is_valid) {
580
+ ARROW_RETURN_NOT_OK(Append());
581
+ const int64_t slot_length = offsets[row + 1] - offsets[row];
582
+ // Add together the inner StructArray offset to the Map/List offset
583
+ int64_t key_value_offset = array.child_data[0].offset + offsets[row];
584
+ ARROW_RETURN_NOT_OK(key_builder_->AppendArraySlice(
585
+ array.child_data[0].child_data[0], key_value_offset, slot_length));
586
+ ARROW_RETURN_NOT_OK(item_builder_->AppendArraySlice(
587
+ array.child_data[0].child_data[1], key_value_offset, slot_length));
588
+ } else {
589
+ ARROW_RETURN_NOT_OK(AppendNull());
590
+ }
591
+ }
592
+ return Status::OK();
593
+ }
594
+
595
+ /// \brief Get builder to append keys.
596
+ ///
597
+ /// Append a key with this builder should be followed by appending
598
+ /// an item or null value with item_builder().
599
+ ArrayBuilder* key_builder() const { return key_builder_.get(); }
600
+
601
+ /// \brief Get builder to append items
602
+ ///
603
+ /// Appending an item with this builder should have been preceded
604
+ /// by appending a key with key_builder().
605
+ ArrayBuilder* item_builder() const { return item_builder_.get(); }
606
+
607
+ /// \brief Get builder to add Map entries as struct values.
608
+ ///
609
+ /// This is used instead of key_builder()/item_builder() and allows
610
+ /// the Map to be built as a list of struct values.
611
+ ArrayBuilder* value_builder() const { return list_builder_->value_builder(); }
612
+
613
+ std::shared_ptr<DataType> type() const override {
614
+ // Key and Item builder may update types, but they don't contain the field names,
615
+ // so we need to reconstruct the type. (See ARROW-13735.)
616
+ return std::make_shared<MapType>(
617
+ field(entries_name_,
618
+ struct_({field(key_name_, key_builder_->type(), false),
619
+ field(item_name_, item_builder_->type(), item_nullable_)}),
620
+ false),
621
+ keys_sorted_);
622
+ }
623
+
624
+ Status ValidateOverflow(int64_t new_elements) {
625
+ return list_builder_->ValidateOverflow(new_elements);
626
+ }
627
+
628
+ protected:
629
+ inline Status AdjustStructBuilderLength();
630
+
631
+ protected:
632
+ bool keys_sorted_ = false;
633
+ bool item_nullable_ = false;
634
+ std::string entries_name_;
635
+ std::string key_name_;
636
+ std::string item_name_;
637
+ std::shared_ptr<ListBuilder> list_builder_;
638
+ std::shared_ptr<ArrayBuilder> key_builder_;
639
+ std::shared_ptr<ArrayBuilder> item_builder_;
640
+ };
641
+
642
+ // ----------------------------------------------------------------------
643
+ // FixedSizeList builder
644
+
645
+ /// \class FixedSizeListBuilder
646
+ /// \brief Builder class for fixed-length list array value types
647
+ class ARROW_EXPORT FixedSizeListBuilder : public ArrayBuilder {
648
+ public:
649
+ /// Use this constructor to define the built array's type explicitly. If value_builder
650
+ /// has indeterminate type, this builder will also.
651
+ FixedSizeListBuilder(MemoryPool* pool,
652
+ std::shared_ptr<ArrayBuilder> const& value_builder,
653
+ int32_t list_size);
654
+
655
+ /// Use this constructor to infer the built array's type. If value_builder has
656
+ /// indeterminate type, this builder will also.
657
+ FixedSizeListBuilder(MemoryPool* pool,
658
+ std::shared_ptr<ArrayBuilder> const& value_builder,
659
+ const std::shared_ptr<DataType>& type);
660
+
661
+ Status Resize(int64_t capacity) override;
662
+ void Reset() override;
663
+ Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
664
+
665
+ /// \cond FALSE
666
+ using ArrayBuilder::Finish;
667
+ /// \endcond
668
+
669
+ Status Finish(std::shared_ptr<FixedSizeListArray>* out) { return FinishTyped(out); }
670
+
671
+ /// \brief Append a valid fixed length list.
672
+ ///
673
+ /// This function affects only the validity bitmap; the child values must be appended
674
+ /// using the child array builder.
675
+ Status Append();
676
+
677
+ /// \brief Vector append
678
+ ///
679
+ /// If passed, valid_bytes will be read and any zero byte
680
+ /// will cause the corresponding slot to be null
681
+ ///
682
+ /// This function affects only the validity bitmap; the child values must be appended
683
+ /// using the child array builder. This includes appending nulls for null lists.
684
+ /// XXX this restriction is confusing, should this method be omitted?
685
+ Status AppendValues(int64_t length, const uint8_t* valid_bytes = NULLPTR);
686
+
687
+ /// \brief Append a null fixed length list.
688
+ ///
689
+ /// The child array builder will have the appropriate number of nulls appended
690
+ /// automatically.
691
+ Status AppendNull() final;
692
+
693
+ /// \brief Append length null fixed length lists.
694
+ ///
695
+ /// The child array builder will have the appropriate number of nulls appended
696
+ /// automatically.
697
+ Status AppendNulls(int64_t length) final;
698
+
699
+ Status ValidateOverflow(int64_t new_elements);
700
+
701
+ Status AppendEmptyValue() final;
702
+
703
+ Status AppendEmptyValues(int64_t length) final;
704
+
705
+ Status AppendArraySlice(const ArraySpan& array, int64_t offset, int64_t length) final {
706
+ const uint8_t* validity = array.MayHaveNulls() ? array.buffers[0].data : NULLPTR;
707
+ for (int64_t row = offset; row < offset + length; row++) {
708
+ if (!validity || bit_util::GetBit(validity, array.offset + row)) {
709
+ ARROW_RETURN_NOT_OK(value_builder_->AppendArraySlice(
710
+ array.child_data[0], list_size_ * (array.offset + row), list_size_));
711
+ ARROW_RETURN_NOT_OK(Append());
712
+ } else {
713
+ ARROW_RETURN_NOT_OK(AppendNull());
714
+ }
715
+ }
716
+ return Status::OK();
717
+ }
718
+
719
+ ArrayBuilder* value_builder() const { return value_builder_.get(); }
720
+
721
+ std::shared_ptr<DataType> type() const override {
722
+ return fixed_size_list(value_field_->WithType(value_builder_->type()), list_size_);
723
+ }
724
+
725
+ // Cannot make this a static attribute because of linking issues
726
+ static constexpr int64_t maximum_elements() {
727
+ return std::numeric_limits<FixedSizeListType::offset_type>::max() - 1;
728
+ }
729
+
730
+ protected:
731
+ std::shared_ptr<Field> value_field_;
732
+ const int32_t list_size_;
733
+ std::shared_ptr<ArrayBuilder> value_builder_;
734
+ };
735
+
736
+ // ----------------------------------------------------------------------
737
+ // Struct
738
+
739
+ // ---------------------------------------------------------------------------------
740
+ // StructArray builder
741
+ /// Append, Resize and Reserve methods are acting on StructBuilder.
742
+ /// Please make sure all these methods of all child-builders' are consistently
743
+ /// called to maintain data-structure consistency.
744
+ class ARROW_EXPORT StructBuilder : public ArrayBuilder {
745
+ public:
746
+ /// If any of field_builders has indeterminate type, this builder will also
747
+ StructBuilder(const std::shared_ptr<DataType>& type, MemoryPool* pool,
748
+ std::vector<std::shared_ptr<ArrayBuilder>> field_builders);
749
+
750
+ Status FinishInternal(std::shared_ptr<ArrayData>* out) override;
751
+
752
+ /// \cond FALSE
753
+ using ArrayBuilder::Finish;
754
+ /// \endcond
755
+
756
+ Status Finish(std::shared_ptr<StructArray>* out) { return FinishTyped(out); }
757
+
758
+ /// Null bitmap is of equal length to every child field, and any zero byte
759
+ /// will be considered as a null for that field, but users must using app-
760
+ /// end methods or advance methods of the child builders' independently to
761
+ /// insert data.
762
+ Status AppendValues(int64_t length, const uint8_t* valid_bytes) {
763
+ ARROW_RETURN_NOT_OK(Reserve(length));
764
+ UnsafeAppendToBitmap(valid_bytes, length);
765
+ return Status::OK();
766
+ }
767
+
768
+ /// Append an element to the Struct. All child-builders' Append method must
769
+ /// be called independently to maintain data-structure consistency.
770
+ Status Append(bool is_valid = true) {
771
+ ARROW_RETURN_NOT_OK(Reserve(1));
772
+ UnsafeAppendToBitmap(is_valid);
773
+ return Status::OK();
774
+ }
775
+
776
+ /// \brief Append a null value. Automatically appends an empty value to each child
777
+ /// builder.
778
+ Status AppendNull() final {
779
+ for (const auto& field : children_) {
780
+ ARROW_RETURN_NOT_OK(field->AppendEmptyValue());
781
+ }
782
+ return Append(false);
783
+ }
784
+
785
+ /// \brief Append multiple null values. Automatically appends empty values to each
786
+ /// child builder.
787
+ Status AppendNulls(int64_t length) final {
788
+ for (const auto& field : children_) {
789
+ ARROW_RETURN_NOT_OK(field->AppendEmptyValues(length));
790
+ }
791
+ ARROW_RETURN_NOT_OK(Reserve(length));
792
+ UnsafeAppendToBitmap(length, false);
793
+ return Status::OK();
794
+ }
795
+
796
+ Status AppendEmptyValue() final {
797
+ for (const auto& field : children_) {
798
+ ARROW_RETURN_NOT_OK(field->AppendEmptyValue());
799
+ }
800
+ return Append(true);
801
+ }
802
+
803
+ Status AppendEmptyValues(int64_t length) final {
804
+ for (const auto& field : children_) {
805
+ ARROW_RETURN_NOT_OK(field->AppendEmptyValues(length));
806
+ }
807
+ ARROW_RETURN_NOT_OK(Reserve(length));
808
+ UnsafeAppendToBitmap(length, true);
809
+ return Status::OK();
810
+ }
811
+
812
+ Status AppendArraySlice(const ArraySpan& array, int64_t offset,
813
+ int64_t length) override {
814
+ for (int i = 0; static_cast<size_t>(i) < children_.size(); i++) {
815
+ ARROW_RETURN_NOT_OK(children_[i]->AppendArraySlice(array.child_data[i],
816
+ array.offset + offset, length));
817
+ }
818
+ const uint8_t* validity = array.MayHaveNulls() ? array.buffers[0].data : NULLPTR;
819
+ ARROW_RETURN_NOT_OK(Reserve(length));
820
+ UnsafeAppendToBitmap(validity, array.offset + offset, length);
821
+ return Status::OK();
822
+ }
823
+
824
+ void Reset() override;
825
+
826
+ ArrayBuilder* field_builder(int i) const { return children_[i].get(); }
827
+
828
+ int num_fields() const { return static_cast<int>(children_.size()); }
829
+
830
+ std::shared_ptr<DataType> type() const override;
831
+
832
+ private:
833
+ std::shared_ptr<DataType> type_;
834
+ };
835
+
836
+ /// @}
837
+
838
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/api.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include "arrow/compute/expression.h"
23
+ #include "arrow/dataset/dataset.h"
24
+ #include "arrow/dataset/discovery.h"
25
+ #include "arrow/dataset/file_base.h"
26
+ #ifdef ARROW_CSV
27
+ #include "arrow/dataset/file_csv.h"
28
+ #endif
29
+ #ifdef ARROW_JSON
30
+ #include "arrow/dataset/file_json.h"
31
+ #endif
32
+ #include "arrow/dataset/file_ipc.h"
33
+ #ifdef ARROW_ORC
34
+ #include "arrow/dataset/file_orc.h"
35
+ #endif
36
+ #ifdef ARROW_PARQUET
37
+ #include "arrow/dataset/file_parquet.h"
38
+ #endif
39
+ #include "arrow/dataset/scanner.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset.h ADDED
@@ -0,0 +1,481 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <optional>
25
+ #include <string>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/compute/expression.h"
30
+ #include "arrow/dataset/type_fwd.h"
31
+ #include "arrow/dataset/visibility.h"
32
+ #include "arrow/util/async_generator_fwd.h"
33
+ #include "arrow/util/future.h"
34
+ #include "arrow/util/macros.h"
35
+ #include "arrow/util/mutex.h"
36
+
37
+ namespace arrow {
38
+
39
+ namespace internal {
40
+ class Executor;
41
+ } // namespace internal
42
+
43
+ namespace dataset {
44
+
45
+ using RecordBatchGenerator = std::function<Future<std::shared_ptr<RecordBatch>>()>;
46
+
47
/// \brief Description of a column to scan
struct ARROW_DS_EXPORT FragmentSelectionColumn {
  /// \brief The path to the column to load
  FieldPath path;
  /// \brief The type of the column in the dataset schema
  ///
  /// A format may choose to ignore this field completely. For example, when
  /// reading from IPC the reader can just return the column in the data type
  /// that is stored on disk. There is no point in doing anything special.
  ///
  /// However, some formats may be capable of casting on the fly. For example,
  /// when reading from CSV, if we know the target type of the column, we can
  /// convert from string to the target type as we read.
  ///
  /// NOTE(review): non-owning pointer — presumably the dataset schema outlives
  /// the selection; confirm against callers.
  DataType* requested_type;
};
62
+
63
/// \brief A list of columns that should be loaded from a fragment
///
/// The paths in this selection should be referring to the fragment schema. This class
/// contains a virtual destructor as it is expected evolution strategies will need to
/// extend this to add any information needed to later evolve the batches.
///
/// For example, in the basic evolution strategy, we keep track of which columns
/// were missing from the file so that we can fill those in with null when evolving.
class ARROW_DS_EXPORT FragmentSelection {
 public:
  explicit FragmentSelection(std::vector<FragmentSelectionColumn> columns)
      : columns_(std::move(columns)) {}
  virtual ~FragmentSelection() = default;
  /// The columns that should be loaded from the fragment
  const std::vector<FragmentSelectionColumn>& columns() const { return columns_; }

 private:
  std::vector<FragmentSelectionColumn> columns_;
};
82
+
83
/// \brief Instructions for scanning a particular fragment
///
/// The fragment scan request is derived from ScanV2Options. The main
/// difference is that the scan options are based on the dataset schema
/// while the fragment request is based on the fragment schema.
struct ARROW_DS_EXPORT FragmentScanRequest {
  /// \brief A row filter
  ///
  /// The filter expression should be written against the fragment schema.
  ///
  /// \see ScanV2Options for details on how this filter should be applied
  compute::Expression filter = compute::literal(true);

  /// \brief The columns to scan
  ///
  /// These indices refer to the fragment schema
  ///
  /// Note: This is NOT a simple list of top-level column indices.
  /// For more details \see ScanV2Options
  ///
  /// If possible a fragment should only read from disk the data needed
  /// to satisfy these columns. If a format cannot partially read a nested
  /// column (e.g. JSON) then it must apply the column selection (in memory)
  /// before returning the scanned batch.
  std::shared_ptr<FragmentSelection> fragment_selection;
  /// \brief Options specific to the format being scanned
  ///
  /// NOTE(review): non-owning pointer — caller must keep the options alive for
  /// the duration of the scan; confirm against the scanner implementation.
  const FragmentScanOptions* format_scan_options;
};
111
+
112
/// \brief An iterator-like object that can yield batches created from a fragment
class ARROW_DS_EXPORT FragmentScanner {
 public:
  /// This instance will only be destroyed after all ongoing scan futures
  /// have been completed.
  ///
  /// This means any callbacks created as part of the scan can safely
  /// capture `this`
  virtual ~FragmentScanner() = default;
  /// \brief Scan a batch of data from the file
  /// \param batch_number The index of the batch to read
  virtual Future<std::shared_ptr<RecordBatch>> ScanBatch(int batch_number) = 0;
  /// \brief Calculate an estimate of how many data bytes the given batch will represent
  ///
  /// "Data bytes" should be the total size of all the buffers once the data has been
  /// decoded into the Arrow format.
  virtual int64_t EstimatedDataBytes(int batch_number) = 0;
  /// \brief The number of batches in the fragment to scan
  virtual int NumBatches() = 0;
};
132
+
133
/// \brief Information learned about a fragment through inspection
///
/// This information can be used to figure out which fields need
/// to be read from a file and how the data read in should be evolved
/// to match the dataset schema.
///
/// For example, from a CSV file we can inspect and learn the column
/// names and use those column names to determine which columns to load
/// from the CSV file.
struct ARROW_DS_EXPORT InspectedFragment {
  explicit InspectedFragment(std::vector<std::string> column_names)
      : column_names(std::move(column_names)) {}
  /// Top-level column names discovered in the fragment, in fragment order.
  std::vector<std::string> column_names;
};
147
+
148
/// \brief A granular piece of a Dataset, such as an individual file.
///
/// A Fragment can be read/scanned separately from other fragments. It yields a
/// collection of RecordBatches when scanned
///
/// Note that Fragments have well defined physical schemas which are reconciled by
/// the Datasets which contain them; these physical schemas may differ from a parent
/// Dataset's schema and the physical schemas of sibling Fragments.
class ARROW_DS_EXPORT Fragment : public std::enable_shared_from_this<Fragment> {
 public:
  /// \brief An expression that represents no known partition information
  static const compute::Expression kNoPartitionInformation;

  /// \brief Return the physical schema of the Fragment.
  ///
  /// The physical schema is also called the writer schema.
  /// This method is blocking and may suffer from high latency filesystem.
  /// The schema is cached after being read once, or may be specified at construction.
  Result<std::shared_ptr<Schema>> ReadPhysicalSchema();

  /// An asynchronous version of Scan
  virtual Result<RecordBatchGenerator> ScanBatchesAsync(
      const std::shared_ptr<ScanOptions>& options) = 0;

  /// \brief Inspect a fragment to learn basic information
  ///
  /// This will be called before a scan and a fragment should attach whatever
  /// information will be needed to figure out an evolution strategy. This information
  /// will then be passed to the call to BeginScan
  virtual Future<std::shared_ptr<InspectedFragment>> InspectFragment(
      const FragmentScanOptions* format_options, compute::ExecContext* exec_context);

  /// \brief Start a scan operation
  virtual Future<std::shared_ptr<FragmentScanner>> BeginScan(
      const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
      const FragmentScanOptions* format_options, compute::ExecContext* exec_context);

  /// \brief Count the number of rows in this fragment matching the filter using metadata
  /// only. That is, this method may perform I/O, but will not load data.
  ///
  /// If this is not possible, resolve with an empty optional. The fragment can perform
  /// I/O (e.g. to read metadata) before it deciding whether it can satisfy the request.
  virtual Future<std::optional<int64_t>> CountRows(
      compute::Expression predicate, const std::shared_ptr<ScanOptions>& options);

  virtual std::string type_name() const = 0;
  virtual std::string ToString() const { return type_name(); }

  /// \brief An expression which evaluates to true for all data viewed by this
  /// Fragment.
  const compute::Expression& partition_expression() const {
    return partition_expression_;
  }

  virtual ~Fragment() = default;

 protected:
  Fragment() = default;
  explicit Fragment(compute::Expression partition_expression,
                    std::shared_ptr<Schema> physical_schema);

  virtual Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() = 0;

  // Guards lazy initialization of physical_schema_ in ReadPhysicalSchema.
  util::Mutex physical_schema_mutex_;
  compute::Expression partition_expression_ = compute::literal(true);
  // Cached physical (writer) schema; may be set at construction or read lazily.
  std::shared_ptr<Schema> physical_schema_;
};
215
+
216
/// \brief Per-scan options for fragment(s) in a dataset.
///
/// These options are not intrinsic to the format or fragment itself, but do affect
/// the results of a scan. These are options which make sense to change between
/// repeated reads of the same dataset, such as format-specific conversion options
/// (that do not affect the schema).
///
/// \ingroup dataset-scanning
class ARROW_DS_EXPORT FragmentScanOptions {
 public:
  /// \brief The name identifying the format these options apply to.
  virtual std::string type_name() const = 0;
  virtual std::string ToString() const { return type_name(); }
  virtual ~FragmentScanOptions() = default;
};
230
+
231
+ /// \defgroup dataset-implementations Concrete implementations
232
+ ///
233
+ /// @{
234
+
235
/// \brief A trivial Fragment that yields ScanTask out of a fixed set of
/// RecordBatch.
class ARROW_DS_EXPORT InMemoryFragment : public Fragment {
 public:
  class Scanner;
  InMemoryFragment(std::shared_ptr<Schema> schema, RecordBatchVector record_batches,
                   compute::Expression = compute::literal(true));
  explicit InMemoryFragment(RecordBatchVector record_batches,
                            compute::Expression = compute::literal(true));

  Result<RecordBatchGenerator> ScanBatchesAsync(
      const std::shared_ptr<ScanOptions>& options) override;
  Future<std::optional<int64_t>> CountRows(
      compute::Expression predicate,
      const std::shared_ptr<ScanOptions>& options) override;

  Future<std::shared_ptr<InspectedFragment>> InspectFragment(
      const FragmentScanOptions* format_options,
      compute::ExecContext* exec_context) override;
  Future<std::shared_ptr<FragmentScanner>> BeginScan(
      const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
      const FragmentScanOptions* format_options,
      compute::ExecContext* exec_context) override;

  std::string type_name() const override { return "in-memory"; }

 protected:
  Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() override;

  // The fixed set of batches this fragment yields when scanned.
  RecordBatchVector record_batches_;
};
266
+
267
+ /// @}
268
+
269
+ using FragmentGenerator = AsyncGenerator<std::shared_ptr<Fragment>>;
270
+
271
/// \brief Rules for converting the dataset schema to and from fragment schemas
class ARROW_DS_EXPORT FragmentEvolutionStrategy {
 public:
  /// This instance will only be destroyed when all scan operations for the
  /// fragment have completed.
  virtual ~FragmentEvolutionStrategy() = default;
  /// \brief A guarantee that applies to all batches of this fragment
  ///
  /// For example, if a fragment is missing one of the fields in the dataset
  /// schema then a typical evolution strategy is to set that field to null.
  ///
  /// So if the column at index 3 is missing then the guarantee is
  /// FieldRef(3) == null
  ///
  /// Individual field guarantees should be AND'd together and returned
  /// as a single expression.
  virtual Result<compute::Expression> GetGuarantee(
      const std::vector<FieldPath>& dataset_schema_selection) const = 0;

  /// \brief Return a fragment schema selection given a dataset schema selection
  ///
  /// For example, if the user wants fields 2 & 4 of the dataset schema and
  /// in this fragment the field 2 is missing and the field 4 is at index 1 then
  /// this should return {1}
  virtual Result<std::unique_ptr<FragmentSelection>> DevolveSelection(
      const std::vector<FieldPath>& dataset_schema_selection) const = 0;

  /// \brief Return a filter expression bound to the fragment schema given
  /// a filter expression bound to the dataset schema
  ///
  /// The dataset scan filter will first be simplified by the guarantee returned
  /// by GetGuarantee. This means an evolution that only handles dropping or casting
  /// fields doesn't need to do anything here except return the given filter.
  ///
  /// On the other hand, an evolution that is doing some kind of aliasing will likely
  /// need to convert field references in the filter to the aliased field references
  /// where appropriate.
  virtual Result<compute::Expression> DevolveFilter(
      const compute::Expression& filter) const = 0;

  /// \brief Convert a batch from the fragment schema to the dataset schema
  ///
  /// Typically this involves casting columns from the data type stored on disk
  /// to the data type of the dataset schema. For example, this fragment might
  /// have columns stored as int32 and the dataset schema might have int64 for
  /// the column. In this case we should cast the column from int32 to int64.
  ///
  /// Note: A fragment may perform this cast as the data is read from disk. In
  /// that case a cast might not be needed.
  virtual Result<compute::ExecBatch> EvolveBatch(
      const std::shared_ptr<RecordBatch>& batch,
      const std::vector<FieldPath>& dataset_selection,
      const FragmentSelection& selection) const = 0;

  /// \brief Return a string description of this strategy
  virtual std::string ToString() const = 0;
};
328
+
329
/// \brief Lookup to create a FragmentEvolutionStrategy for a given fragment
class ARROW_DS_EXPORT DatasetEvolutionStrategy {
 public:
  virtual ~DatasetEvolutionStrategy() = default;
  /// \brief Create a strategy for evolving from the given fragment
  /// to the schema of the given dataset
  virtual std::unique_ptr<FragmentEvolutionStrategy> GetStrategy(
      const Dataset& dataset, const Fragment& fragment,
      const InspectedFragment& inspected_fragment) = 0;

  /// \brief Return a string description of this strategy
  virtual std::string ToString() const = 0;
};

/// \brief Create the default evolution strategy (handles missing columns by
/// filling with null; see FragmentEvolutionStrategy for the contract).
ARROW_DS_EXPORT std::unique_ptr<DatasetEvolutionStrategy>
MakeBasicDatasetEvolutionStrategy();
345
+
346
/// \brief A container of zero or more Fragments.
///
/// A Dataset acts as a union of Fragments, e.g. files deeply nested in a
/// directory. A Dataset has a schema to which Fragments must align during a
/// scan operation. This is analogous to Avro's reader and writer schema.
class ARROW_DS_EXPORT Dataset : public std::enable_shared_from_this<Dataset> {
 public:
  /// \brief Begin to build a new Scan operation against this Dataset
  Result<std::shared_ptr<ScannerBuilder>> NewScan();

  /// \brief GetFragments returns an iterator of Fragments given a predicate.
  Result<FragmentIterator> GetFragments(compute::Expression predicate);
  Result<FragmentIterator> GetFragments();

  /// \brief Async versions of `GetFragments`.
  Result<FragmentGenerator> GetFragmentsAsync(compute::Expression predicate);
  Result<FragmentGenerator> GetFragmentsAsync();

  const std::shared_ptr<Schema>& schema() const { return schema_; }

  /// \brief An expression which evaluates to true for all data viewed by this Dataset.
  /// May be null, which indicates no information is available.
  const compute::Expression& partition_expression() const {
    return partition_expression_;
  }

  /// \brief The name identifying the kind of Dataset
  virtual std::string type_name() const = 0;

  /// \brief Return a copy of this Dataset with a different schema.
  ///
  /// The copy will view the same Fragments. If the new schema is not compatible with the
  /// original dataset's schema then an error will be raised.
  virtual Result<std::shared_ptr<Dataset>> ReplaceSchema(
      std::shared_ptr<Schema> schema) const = 0;

  /// \brief Rules used by this dataset to handle schema evolution
  DatasetEvolutionStrategy* evolution_strategy() { return evolution_strategy_.get(); }

  virtual ~Dataset() = default;

 protected:
  explicit Dataset(std::shared_ptr<Schema> schema) : schema_(std::move(schema)) {}

  Dataset(std::shared_ptr<Schema> schema, compute::Expression partition_expression);

  virtual Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) = 0;
  /// \brief Default non-virtual implementation method for the base
  /// `GetFragmentsAsyncImpl` method, which creates a fragment generator for
  /// the dataset, possibly filtering results with a predicate (forwarding to
  /// the synchronous `GetFragmentsImpl` method and moving the computations
  /// to the background, using the IO thread pool).
  ///
  /// Currently, `executor` is always the same as `internal::GetCPUThreadPool()`,
  /// which means the results from the underlying fragment generator will be
  /// transferred to the default CPU thread pool. The generator itself is
  /// offloaded to run on the default IO thread pool.
  virtual Result<FragmentGenerator> GetFragmentsAsyncImpl(
      compute::Expression predicate, arrow::internal::Executor* executor);

  std::shared_ptr<Schema> schema_;
  compute::Expression partition_expression_ = compute::literal(true);
  std::unique_ptr<DatasetEvolutionStrategy> evolution_strategy_ =
      MakeBasicDatasetEvolutionStrategy();
};
411
+
412
+ /// \addtogroup dataset-implementations
413
+ ///
414
+ /// @{
415
+
416
/// \brief A Source which yields fragments wrapping a stream of record batches.
///
/// The record batches must match the schema provided to the source at construction.
class ARROW_DS_EXPORT InMemoryDataset : public Dataset {
 public:
  /// \brief Factory interface producing the batch iterators this dataset scans.
  class RecordBatchGenerator {
   public:
    virtual ~RecordBatchGenerator() = default;
    virtual RecordBatchIterator Get() const = 0;
  };

  /// Construct a dataset from a schema and a factory of record batch iterators.
  InMemoryDataset(std::shared_ptr<Schema> schema,
                  std::shared_ptr<RecordBatchGenerator> get_batches)
      : Dataset(std::move(schema)), get_batches_(std::move(get_batches)) {}

  /// Convenience constructor taking a fixed list of batches
  InMemoryDataset(std::shared_ptr<Schema> schema, RecordBatchVector batches);

  /// Convenience constructor taking a Table
  explicit InMemoryDataset(std::shared_ptr<Table> table);

  std::string type_name() const override { return "in-memory"; }

  Result<std::shared_ptr<Dataset>> ReplaceSchema(
      std::shared_ptr<Schema> schema) const override;

 protected:
  Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;

  std::shared_ptr<RecordBatchGenerator> get_batches_;
};
448
+
449
/// \brief A Dataset wrapping child Datasets.
class ARROW_DS_EXPORT UnionDataset : public Dataset {
 public:
  /// \brief Construct a UnionDataset wrapping child Datasets.
  ///
  /// \param[in] schema the schema of the resulting dataset.
  /// \param[in] children one or more child Datasets. Their schemas must be identical to
  /// schema.
  static Result<std::shared_ptr<UnionDataset>> Make(std::shared_ptr<Schema> schema,
                                                    DatasetVector children);

  const DatasetVector& children() const { return children_; }

  std::string type_name() const override { return "union"; }

  Result<std::shared_ptr<Dataset>> ReplaceSchema(
      std::shared_ptr<Schema> schema) const override;

 protected:
  Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;

  // Construction goes through Make() so schema compatibility can be validated.
  explicit UnionDataset(std::shared_ptr<Schema> schema, DatasetVector children)
      : Dataset(std::move(schema)), children_(std::move(children)) {}

  DatasetVector children_;

  friend class UnionDatasetFactory;
};
477
+
478
+ /// @}
479
+
480
+ } // namespace dataset
481
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset_writer.h ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+
22
+ #include "arrow/dataset/file_base.h"
23
+ #include "arrow/record_batch.h"
24
+ #include "arrow/status.h"
25
+ #include "arrow/util/async_util.h"
26
+ #include "arrow/util/future.h"
27
+
28
+ namespace arrow {
29
+ namespace dataset {
30
+ namespace internal {
31
+
32
// This lines up with our other defaults in the scanner and execution plan
constexpr uint64_t kDefaultDatasetWriterMaxRowsQueued = 8 * 1024 * 1024;

/// \brief Utility class that manages a set of writers to different paths
///
/// Writers may be closed and reopened (and a new file created) based on the dataset
/// write options (for example, max_rows_per_file or max_open_files)
///
/// The dataset writer enforces its own back pressure based on the # of rows (as opposed
/// to # of batches which is how it is typically enforced elsewhere) and # of files.
class ARROW_DS_EXPORT DatasetWriter {
 public:
  /// \brief Create a dataset writer
  ///
  /// Will fail if basename_template is invalid or if there is existing data and
  /// existing_data_behavior is kError
  ///
  /// \param write_options options to control how the data should be written
  /// \param max_rows_queued max # of rows allowed to be queued before the dataset_writer
  ///                        will ask for backpressure
  static Result<std::unique_ptr<DatasetWriter>> Make(
      FileSystemDatasetWriteOptions write_options, util::AsyncTaskScheduler* scheduler,
      std::function<void()> pause_callback, std::function<void()> resume_callback,
      std::function<void()> finish_callback,
      uint64_t max_rows_queued = kDefaultDatasetWriterMaxRowsQueued);

  ~DatasetWriter();

  /// \brief Write a batch to the dataset
  /// \param[in] batch The batch to write
  /// \param[in] directory The directory to write to
  ///
  /// Note: The written filename will be {directory}/{filename_factory(i)} where i is a
  /// counter controlled by `max_open_files` and `max_rows_per_file`
  ///
  /// If multiple WriteRecordBatch calls arrive with the same `directory` then the batches
  /// may be written to the same file.
  ///
  /// The returned future will be marked finished when the record batch has been queued
  /// to be written. If the returned future is unfinished then this indicates the dataset
  /// writer's queue is full and the data provider should pause.
  ///
  /// This method is NOT async reentrant. The returned future will only be unfinished
  /// if back pressure needs to be applied. Async reentrancy is not necessary for
  /// concurrent writes to happen. Calling this method again before the previous future
  /// completes will not just violate max_rows_queued but likely lead to race conditions.
  ///
  /// One thing to note is that the ordering of your data can affect your maximum
  /// potential parallelism. If this seems odd then consider a dataset where the first
  /// 1000 batches go to the same directory and then the 1001st batch goes to a different
  /// directory. The only way to get two parallel writes immediately would be to queue
  /// all 1000 pending writes to the first directory.
  void WriteRecordBatch(std::shared_ptr<RecordBatch> batch, const std::string& directory,
                        const std::string& prefix = "");

  /// Finish all pending writes and close any open files
  void Finish();

 protected:
  DatasetWriter(FileSystemDatasetWriteOptions write_options,
                util::AsyncTaskScheduler* scheduler, std::function<void()> pause_callback,
                std::function<void()> resume_callback,
                std::function<void()> finish_callback,
                uint64_t max_rows_queued = kDefaultDatasetWriterMaxRowsQueued);

  // PIMPL: implementation details live in the .cc file.
  class DatasetWriterImpl;
  std::unique_ptr<DatasetWriterImpl> impl_;
};
100
+
101
+ } // namespace internal
102
+ } // namespace dataset
103
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ /// Logic for automatically determining the structure of multi-file
19
+ /// dataset with possible partitioning according to available
20
+ /// partitioning
21
+
22
+ // This API is EXPERIMENTAL.
23
+
24
+ #pragma once
25
+
26
+ #include <memory>
27
+ #include <string>
28
+ #include <variant>
29
+ #include <vector>
30
+
31
+ #include "arrow/dataset/partition.h"
32
+ #include "arrow/dataset/type_fwd.h"
33
+ #include "arrow/dataset/visibility.h"
34
+ #include "arrow/filesystem/type_fwd.h"
35
+ #include "arrow/result.h"
36
+ #include "arrow/util/macros.h"
37
+
38
+ namespace arrow {
39
+ namespace dataset {
40
+
41
+ /// \defgroup dataset-discovery Discovery API
42
+ ///
43
+ /// @{
44
+
45
/// \brief Options controlling how many fragments are inspected (and how their
/// schemas are merged) during dataset discovery.
struct InspectOptions {
  /// See `fragments` property.
  static constexpr int kInspectAllFragments = -1;

  /// Indicate how many fragments should be inspected to infer the unified dataset
  /// schema. Limiting the number of fragments accessed improves the latency of
  /// the discovery process when dealing with a high number of fragments and/or
  /// high latency file systems.
  ///
  /// The default value of `1` inspects the schema of the first (in no particular
  /// order) fragment only. If the dataset has a uniform schema for all fragments,
  /// this default is the optimal value. In order to inspect all fragments and
  /// robustly unify their potentially varying schemas, set this option to
  /// `kInspectAllFragments`. A value of `0` disables inspection of fragments
  /// altogether so only the partitioning schema will be inspected.
  int fragments = 1;

  /// Control how to unify types. By default, types are merged strictly (the
  /// type must match exactly, except nulls can be merged with other types).
  Field::MergeOptions field_merge_options = Field::MergeOptions::Defaults();
};
66
+
67
/// \brief Options for DatasetFactory::Finish, controlling how the final Dataset
/// schema is obtained and (optionally) validated.
struct FinishOptions {
  /// Finalize the dataset with this given schema. If the schema is not
  /// provided, infer the schema via the Inspect, see the `inspect_options`
  /// property.
  std::shared_ptr<Schema> schema = NULLPTR;

  /// If the schema is not provided, it will be discovered by passing the
  /// following options to `DatasetDiscovery::Inspect`.
  InspectOptions inspect_options{};

  /// Indicate if the given Schema (when specified), should be validated against
  /// the fragments' schemas. `inspect_options` will control how many fragments
  /// are checked.
  bool validate_fragments = false;
};
+ };
82
+
83
/// \brief DatasetFactory provides a way to inspect/discover a Dataset's expected
/// schema before materializing said Dataset.
class ARROW_DS_EXPORT DatasetFactory {
 public:
  /// \brief Get the schemas of the Fragments and Partitioning.
  virtual Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
      InspectOptions options) = 0;

  /// \brief Get unified schema for the resulting Dataset.
  Result<std::shared_ptr<Schema>> Inspect(InspectOptions options = {});

  /// \brief Create a Dataset
  Result<std::shared_ptr<Dataset>> Finish();
  /// \brief Create a Dataset with the given schema (see \a FinishOptions::schema)
  Result<std::shared_ptr<Dataset>> Finish(std::shared_ptr<Schema> schema);
  /// \brief Create a Dataset with the given options
  virtual Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) = 0;

  /// \brief Optional root partition for the resulting Dataset.
  const compute::Expression& root_partition() const { return root_partition_; }
  /// \brief Set the root partition for the resulting Dataset.
  Status SetRootPartition(compute::Expression partition) {
    root_partition_ = std::move(partition);
    return Status::OK();
  }

  virtual ~DatasetFactory() = default;

 protected:
  DatasetFactory();

  // Partition expression applied to the resulting Dataset as a whole;
  // exposed through root_partition()/SetRootPartition() above.
  compute::Expression root_partition_;
};
116
+
117
+ /// @}
118
+
119
/// \brief A DatasetFactory composed of several child factories, allowing the
/// combined Dataset's expected schema to be inspected/discovered before
/// materialization.
/// \ingroup dataset-implementations
class ARROW_DS_EXPORT UnionDatasetFactory : public DatasetFactory {
 public:
  /// \brief Construct a UnionDatasetFactory from child factories.
  static Result<std::shared_ptr<DatasetFactory>> Make(
      std::vector<std::shared_ptr<DatasetFactory>> factories);

  /// \brief Return the list of child DatasetFactory
  const std::vector<std::shared_ptr<DatasetFactory>>& factories() const {
    return factories_;
  }

  /// \brief Get the schemas of the Datasets.
  ///
  /// Instead of applying options globally, it applies at each child factory.
  /// This will not respect `options.fragments` exactly, but will respect the
  /// spirit of peeking the first fragments or all of them.
  Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
      InspectOptions options) override;

  /// \brief Create a Dataset.
  Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) override;

 protected:
  explicit UnionDatasetFactory(std::vector<std::shared_ptr<DatasetFactory>> factories);

  // Child factories, in the order given to Make().
  std::vector<std::shared_ptr<DatasetFactory>> factories_;
};
148
+
149
/// \brief Options governing file discovery and partitioning for
/// FileSystemDatasetFactory.
/// \ingroup dataset-filesystem
struct FileSystemFactoryOptions {
  /// Either an explicit Partitioning or a PartitioningFactory to discover one.
  ///
  /// If a factory is provided, it will be used to infer a schema for partition fields
  /// based on file and directory paths then construct a Partitioning. The default
  /// is a Partitioning which will yield no partition information.
  ///
  /// The (explicit or discovered) partitioning will be applied to discovered files
  /// and the resulting partition information embedded in the Dataset.
  PartitioningOrFactory partitioning{Partitioning::Default()};

  /// For the purposes of applying the partitioning, paths will be stripped
  /// of the partition_base_dir. Files not matching the partition_base_dir
  /// prefix will be skipped for partition discovery. The ignored files will still
  /// be part of the Dataset, but will not have partition information.
  ///
  /// Example:
  /// partition_base_dir = "/dataset";
  ///
  /// - "/dataset/US/sales.csv" -> "US/sales.csv" will be given to the partitioning
  ///
  /// - "/home/john/late_sales.csv" -> Will be ignored for partition discovery.
  ///
  /// This is useful for partitioning which parses directory when ordering
  /// is important, e.g. DirectoryPartitioning.
  std::string partition_base_dir;

  /// Invalid files (via selector or explicitly) will be excluded by checking
  /// with the FileFormat::IsSupported method. This will incur IO for each files
  /// in a serial and single threaded fashion. Disabling this feature will skip the
  /// IO, but unsupported files may be present in the Dataset
  /// (resulting in an error at scan time).
  bool exclude_invalid_files = false;

  /// When discovering from a Selector (and not from an explicit file list), ignore
  /// files and directories matching any of these prefixes.
  ///
  /// Example (with selector = "/dataset/**"):
  /// selector_ignore_prefixes = {"_", ".DS_STORE" };
  ///
  /// - "/dataset/data.csv" -> not ignored
  /// - "/dataset/_metadata" -> ignored
  /// - "/dataset/.DS_STORE" -> ignored
  /// - "/dataset/_hidden/dat" -> ignored
  /// - "/dataset/nested/.DS_STORE" -> ignored
  std::vector<std::string> selector_ignore_prefixes = {
      ".",
      "_",
  };
};
200
+
201
/// \brief FileSystemDatasetFactory creates a Dataset from a vector of
/// fs::FileInfo or a fs::FileSelector.
/// \ingroup dataset-filesystem
class ARROW_DS_EXPORT FileSystemDatasetFactory : public DatasetFactory {
 public:
  /// \brief Build a FileSystemDatasetFactory from an explicit list of
  /// paths.
  ///
  /// \param[in] filesystem passed to FileSystemDataset
  /// \param[in] paths passed to FileSystemDataset
  /// \param[in] format passed to FileSystemDataset
  /// \param[in] options see FileSystemFactoryOptions for more information.
  static Result<std::shared_ptr<DatasetFactory>> Make(
      std::shared_ptr<fs::FileSystem> filesystem, const std::vector<std::string>& paths,
      std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);

  /// \brief Build a FileSystemDatasetFactory from a fs::FileSelector.
  ///
  /// The selector will expand to a vector of FileInfo. The expansion/crawling
  /// is performed in this function call. Thus, the finalized Dataset is
  /// working with a snapshot of the filesystem.
  ///
  /// If options.partition_base_dir is not provided, it will be overwritten
  /// with selector.base_dir.
  ///
  /// \param[in] filesystem passed to FileSystemDataset
  /// \param[in] selector used to crawl and search files
  /// \param[in] format passed to FileSystemDataset
  /// \param[in] options see FileSystemFactoryOptions for more information.
  static Result<std::shared_ptr<DatasetFactory>> Make(
      std::shared_ptr<fs::FileSystem> filesystem, fs::FileSelector selector,
      std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);

  /// \brief Build a FileSystemDatasetFactory from an uri including filesystem
  /// information.
  ///
  /// \param[in] uri passed to FileSystemDataset
  /// \param[in] format passed to FileSystemDataset
  /// \param[in] options see FileSystemFactoryOptions for more information.
  static Result<std::shared_ptr<DatasetFactory>> Make(std::string uri,
                                                      std::shared_ptr<FileFormat> format,
                                                      FileSystemFactoryOptions options);

  /// \brief Build a FileSystemDatasetFactory from an explicit list of
  /// file information.
  ///
  /// \param[in] filesystem passed to FileSystemDataset
  /// \param[in] files passed to FileSystemDataset
  /// \param[in] format passed to FileSystemDataset
  /// \param[in] options see FileSystemFactoryOptions for more information.
  static Result<std::shared_ptr<DatasetFactory>> Make(
      std::shared_ptr<fs::FileSystem> filesystem, const std::vector<fs::FileInfo>& files,
      std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);

  Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
      InspectOptions options) override;

  Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) override;

 protected:
  FileSystemDatasetFactory(std::vector<fs::FileInfo> files,
                           std::shared_ptr<fs::FileSystem> filesystem,
                           std::shared_ptr<FileFormat> format,
                           FileSystemFactoryOptions options);

  // Schema contributed by the (explicit or discovered) partitioning.
  Result<std::shared_ptr<Schema>> PartitionSchema();

  // Snapshot of the files this factory was constructed from.
  std::vector<fs::FileInfo> files_;
  std::shared_ptr<fs::FileSystem> fs_;
  std::shared_ptr<FileFormat> format_;
  FileSystemFactoryOptions options_;
};
273
+
274
+ } // namespace dataset
275
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_base.h ADDED
@@ -0,0 +1,495 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+ #include <vector>
27
+
28
+ #include "arrow/buffer.h"
29
+ #include "arrow/dataset/dataset.h"
30
+ #include "arrow/dataset/partition.h"
31
+ #include "arrow/dataset/scanner.h"
32
+ #include "arrow/dataset/type_fwd.h"
33
+ #include "arrow/dataset/visibility.h"
34
+ #include "arrow/filesystem/filesystem.h"
35
+ #include "arrow/io/file.h"
36
+ #include "arrow/type_fwd.h"
37
+ #include "arrow/util/compression.h"
38
+
39
+ namespace arrow {
40
+
41
+ namespace dataset {
42
+
43
+ /// \defgroup dataset-file-formats File formats for reading and writing datasets
44
+ /// \defgroup dataset-filesystem File system datasets
45
+ ///
46
+ /// @{
47
+
48
/// \brief The path and filesystem where an actual file is located or a buffer which can
/// be read like a file
class ARROW_DS_EXPORT FileSource : public util::EqualityComparable<FileSource> {
 public:
  /// \brief Construct from a path within a filesystem.
  FileSource(std::string path, std::shared_ptr<fs::FileSystem> filesystem,
             Compression::type compression = Compression::UNCOMPRESSED)
      : file_info_(std::move(path)),
        filesystem_(std::move(filesystem)),
        compression_(compression) {}

  /// \brief Construct from a FileInfo within a filesystem.
  FileSource(fs::FileInfo info, std::shared_ptr<fs::FileSystem> filesystem,
             Compression::type compression = Compression::UNCOMPRESSED)
      : file_info_(std::move(info)),
        filesystem_(std::move(filesystem)),
        compression_(compression) {}

  /// \brief Construct from an in-memory buffer readable like a file.
  explicit FileSource(std::shared_ptr<Buffer> buffer,
                      Compression::type compression = Compression::UNCOMPRESSED)
      : buffer_(std::move(buffer)), compression_(compression) {}

  /// Callback producing a RandomAccessFile on demand.
  using CustomOpen = std::function<Result<std::shared_ptr<io::RandomAccessFile>>()>;
  /// \brief Construct from a custom open callback and the known size (in bytes).
  FileSource(CustomOpen open, int64_t size)
      : custom_open_(std::move(open)), custom_size_(size) {}

  /// Callback producing a RandomAccessFile for a given compression type.
  using CustomOpenWithCompression =
      std::function<Result<std::shared_ptr<io::RandomAccessFile>>(Compression::type)>;
  /// \brief Construct from a compression-aware open callback; `compression` is
  /// bound into the callback at construction time.
  FileSource(CustomOpenWithCompression open_with_compression, int64_t size,
             Compression::type compression = Compression::UNCOMPRESSED)
      : custom_open_(std::bind(std::move(open_with_compression), compression)),
        custom_size_(size),
        compression_(compression) {}

  /// \brief Construct from an already-open RandomAccessFile of known size.
  /// Note: the file handle is captured by copy and returned by every Open() call.
  FileSource(std::shared_ptr<io::RandomAccessFile> file, int64_t size,
             Compression::type compression = Compression::UNCOMPRESSED)
      : custom_open_([=] { return ToResult(file); }),
        custom_size_(size),
        compression_(compression) {}

  /// \brief Construct from an already-open RandomAccessFile; the size is
  /// determined by the implementation (defined out of line).
  explicit FileSource(std::shared_ptr<io::RandomAccessFile> file,
                      Compression::type compression = Compression::UNCOMPRESSED);

  /// \brief Construct an uninitialized source; calling Open() yields an error.
  FileSource() : custom_open_(CustomOpen{&InvalidOpen}) {}

  /// \brief Convenience helper turning a list of paths into FileSources sharing
  /// one filesystem.
  static std::vector<FileSource> FromPaths(const std::shared_ptr<fs::FileSystem>& fs,
                                           std::vector<std::string> paths) {
    std::vector<FileSource> sources;
    for (auto&& path : paths) {
      sources.emplace_back(std::move(path), fs);
    }
    return sources;
  }

  /// \brief Return the type of raw compression on the file, if any.
  Compression::type compression() const { return compression_; }

  /// \brief Return the file path, if any. Only valid when file source wraps a path.
  const std::string& path() const {
    // Note: both buffer-backed and custom-open sources report "<Buffer>".
    static std::string buffer_path = "<Buffer>";
    static std::string custom_open_path = "<Buffer>";
    return filesystem_ ? file_info_.path() : buffer_ ? buffer_path : custom_open_path;
  }

  /// \brief Return the filesystem, if any. Otherwise returns nullptr
  const std::shared_ptr<fs::FileSystem>& filesystem() const { return filesystem_; }

  /// \brief Return the buffer containing the file, if any. Otherwise returns nullptr
  const std::shared_ptr<Buffer>& buffer() const { return buffer_; }

  /// \brief Get a RandomAccessFile which views this file source
  Result<std::shared_ptr<io::RandomAccessFile>> Open() const;
  Future<std::shared_ptr<io::RandomAccessFile>> OpenAsync() const;

  /// \brief Get the size (in bytes) of the file or buffer
  /// If the file is compressed this should be the compressed (on-disk) size.
  int64_t Size() const;

  /// \brief Get an InputStream which views this file source (and decompresses if needed)
  /// \param[in] compression If nullopt, guess the compression scheme from the
  /// filename, else decompress with the given codec
  Result<std::shared_ptr<io::InputStream>> OpenCompressed(
      std::optional<Compression::type> compression = std::nullopt) const;

  /// \brief equality comparison with another FileSource
  bool Equals(const FileSource& other) const;

 private:
  // Used by the default constructor so Open() fails loudly instead of crashing.
  static Result<std::shared_ptr<io::RandomAccessFile>> InvalidOpen() {
    return Status::Invalid("Called Open() on an uninitialized FileSource");
  }

  fs::FileInfo file_info_;
  std::shared_ptr<fs::FileSystem> filesystem_;
  std::shared_ptr<Buffer> buffer_;
  CustomOpen custom_open_;
  // Size reported for custom-open sources (bytes); unused otherwise.
  int64_t custom_size_ = 0;
  Compression::type compression_ = Compression::UNCOMPRESSED;
};
145
+
146
/// \brief Base class for file format implementation
class ARROW_DS_EXPORT FileFormat : public std::enable_shared_from_this<FileFormat> {
 public:
  /// Options affecting how this format is scanned.
  ///
  /// The options here can be overridden at scan time.
  std::shared_ptr<FragmentScanOptions> default_fragment_scan_options;

  virtual ~FileFormat() = default;

  /// \brief The name identifying the kind of file format
  virtual std::string type_name() const = 0;

  /// \brief Compare with another format instance for equality.
  virtual bool Equals(const FileFormat& other) const = 0;

  /// \brief Indicate if the FileSource is supported/readable by this format.
  virtual Result<bool> IsSupported(const FileSource& source) const = 0;

  /// \brief Return the schema of the file if possible.
  virtual Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const = 0;

  /// \brief Learn what we need about the file before we start scanning it
  virtual Future<std::shared_ptr<InspectedFragment>> InspectFragment(
      const FileSource& source, const FragmentScanOptions* format_options,
      compute::ExecContext* exec_context) const;

  /// \brief Asynchronously scan the given fragment, yielding record batches.
  virtual Result<RecordBatchGenerator> ScanBatchesAsync(
      const std::shared_ptr<ScanOptions>& options,
      const std::shared_ptr<FileFragment>& file) const = 0;

  /// \brief Count rows in `file` matching `predicate`.
  /// NOTE(review): presumably resolves to std::nullopt when the count cannot be
  /// computed without a full scan — confirm against implementations.
  virtual Future<std::optional<int64_t>> CountRows(
      const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
      const std::shared_ptr<ScanOptions>& options);

  /// \brief Start a scan of a fragment previously examined via InspectFragment.
  virtual Future<std::shared_ptr<FragmentScanner>> BeginScan(
      const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
      const FragmentScanOptions* format_options,
      compute::ExecContext* exec_context) const;

  /// \brief Open a fragment
  virtual Result<std::shared_ptr<FileFragment>> MakeFragment(
      FileSource source, compute::Expression partition_expression,
      std::shared_ptr<Schema> physical_schema);

  /// \brief Create a FileFragment for a FileSource.
  Result<std::shared_ptr<FileFragment>> MakeFragment(
      FileSource source, compute::Expression partition_expression);

  /// \brief Create a FileFragment for a FileSource.
  Result<std::shared_ptr<FileFragment>> MakeFragment(
      FileSource source, std::shared_ptr<Schema> physical_schema = NULLPTR);

  /// \brief Create a writer for this format.
  virtual Result<std::shared_ptr<FileWriter>> MakeWriter(
      std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
      std::shared_ptr<FileWriteOptions> options,
      fs::FileLocator destination_locator) const = 0;

  /// \brief Get default write options for this format.
  ///
  /// May return null shared_ptr if this file format does not yet support
  /// writing datasets.
  virtual std::shared_ptr<FileWriteOptions> DefaultWriteOptions() = 0;

 protected:
  explicit FileFormat(std::shared_ptr<FragmentScanOptions> default_fragment_scan_options)
      : default_fragment_scan_options(std::move(default_fragment_scan_options)) {}
};
214
+
215
+ /// \brief A Fragment that is stored in a file with a known format
216
+ class ARROW_DS_EXPORT FileFragment : public Fragment,
217
+ public util::EqualityComparable<FileFragment> {
218
+ public:
219
+ Result<RecordBatchGenerator> ScanBatchesAsync(
220
+ const std::shared_ptr<ScanOptions>& options) override;
221
+ Future<std::optional<int64_t>> CountRows(
222
+ compute::Expression predicate,
223
+ const std::shared_ptr<ScanOptions>& options) override;
224
+ Future<std::shared_ptr<FragmentScanner>> BeginScan(
225
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
226
+ const FragmentScanOptions* format_options,
227
+ compute::ExecContext* exec_context) override;
228
+ Future<std::shared_ptr<InspectedFragment>> InspectFragment(
229
+ const FragmentScanOptions* format_options,
230
+ compute::ExecContext* exec_context) override;
231
+
232
+ std::string type_name() const override { return format_->type_name(); }
233
+ std::string ToString() const override { return source_.path(); };
234
+
235
+ const FileSource& source() const { return source_; }
236
+ const std::shared_ptr<FileFormat>& format() const { return format_; }
237
+
238
+ bool Equals(const FileFragment& other) const;
239
+
240
+ protected:
241
+ FileFragment(FileSource source, std::shared_ptr<FileFormat> format,
242
+ compute::Expression partition_expression,
243
+ std::shared_ptr<Schema> physical_schema)
244
+ : Fragment(std::move(partition_expression), std::move(physical_schema)),
245
+ source_(std::move(source)),
246
+ format_(std::move(format)) {}
247
+
248
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() override;
249
+
250
+ FileSource source_;
251
+ std::shared_ptr<FileFormat> format_;
252
+
253
+ friend class FileFormat;
254
+ };
255
+
256
/// \brief A Dataset of FileFragments.
///
/// A FileSystemDataset is composed of one or more FileFragment. The fragments
/// are independent and don't need to share the same format and/or filesystem.
class ARROW_DS_EXPORT FileSystemDataset : public Dataset {
 public:
  /// \brief Create a FileSystemDataset.
  ///
  /// \param[in] schema the schema of the dataset
  /// \param[in] root_partition the partition expression of the dataset
  /// \param[in] format the format of each FileFragment.
  /// \param[in] filesystem the filesystem of each FileFragment, or nullptr if the
  ///            fragments wrap buffers.
  /// \param[in] fragments list of fragments to create the dataset from.
  /// \param[in] partitioning the Partitioning object in case the dataset is created
  ///            with a known partitioning (e.g. from a discovered partitioning
  ///            through a DatasetFactory), or nullptr if not known.
  ///
  /// Note that fragments wrapping files resident in differing filesystems are not
  /// permitted; to work with multiple filesystems use a UnionDataset.
  ///
  /// \return A constructed dataset.
  static Result<std::shared_ptr<FileSystemDataset>> Make(
      std::shared_ptr<Schema> schema, compute::Expression root_partition,
      std::shared_ptr<FileFormat> format, std::shared_ptr<fs::FileSystem> filesystem,
      std::vector<std::shared_ptr<FileFragment>> fragments,
      std::shared_ptr<Partitioning> partitioning = NULLPTR);

  /// \brief Write a dataset.
  static Status Write(const FileSystemDatasetWriteOptions& write_options,
                      std::shared_ptr<Scanner> scanner);

  /// \brief Return the type name of the dataset.
  std::string type_name() const override { return "filesystem"; }

  /// \brief Replace the schema of the dataset.
  Result<std::shared_ptr<Dataset>> ReplaceSchema(
      std::shared_ptr<Schema> schema) const override;

  /// \brief Return the path of files.
  std::vector<std::string> files() const;

  /// \brief Return the format.
  const std::shared_ptr<FileFormat>& format() const { return format_; }

  /// \brief Return the filesystem. May be nullptr if the fragments wrap buffers.
  const std::shared_ptr<fs::FileSystem>& filesystem() const { return filesystem_; }

  /// \brief Return the partitioning. May be nullptr if the dataset was not constructed
  /// with a partitioning.
  const std::shared_ptr<Partitioning>& partitioning() const { return partitioning_; }

  /// \brief Human-readable description of the dataset.
  std::string ToString() const;

 protected:
  // Opaque helper for subtree-based fragment pruning; see SetupSubtreePruning().
  struct FragmentSubtrees;

  explicit FileSystemDataset(std::shared_ptr<Schema> schema)
      : Dataset(std::move(schema)) {}

  FileSystemDataset(std::shared_ptr<Schema> schema,
                    compute::Expression partition_expression)
      : Dataset(std::move(schema), partition_expression) {}

  Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;

  // Builds subtrees_ so GetFragmentsImpl can prune fragments by partition
  // expression without testing each fragment individually.
  void SetupSubtreePruning();

  std::shared_ptr<FileFormat> format_;
  std::shared_ptr<fs::FileSystem> filesystem_;
  std::vector<std::shared_ptr<FileFragment>> fragments_;
  std::shared_ptr<Partitioning> partitioning_;

  std::shared_ptr<FragmentSubtrees> subtrees_;
};
331
+
332
/// \brief Options for writing a file of this format.
class ARROW_DS_EXPORT FileWriteOptions {
 public:
  virtual ~FileWriteOptions() = default;

  /// \brief The format these options apply to.
  const std::shared_ptr<FileFormat>& format() const { return format_; }

  /// \brief Shorthand for format()->type_name().
  std::string type_name() const { return format_->type_name(); }

 protected:
  explicit FileWriteOptions(std::shared_ptr<FileFormat> format)
      : format_(std::move(format)) {}

  std::shared_ptr<FileFormat> format_;
};
347
+
348
/// \brief A writer for this format.
class ARROW_DS_EXPORT FileWriter {
 public:
  virtual ~FileWriter() = default;

  /// \brief Write the given batch.
  virtual Status Write(const std::shared_ptr<RecordBatch>& batch) = 0;

  /// \brief Write all batches from the reader.
  Status Write(RecordBatchReader* batches);

  /// \brief Indicate that writing is done.
  virtual Future<> Finish();

  const std::shared_ptr<FileFormat>& format() const { return options_->format(); }
  const std::shared_ptr<Schema>& schema() const { return schema_; }
  const std::shared_ptr<FileWriteOptions>& options() const { return options_; }
  const fs::FileLocator& destination() const { return destination_locator_; }

  /// \brief After Finish() is called, provides number of bytes written to file.
  Result<int64_t> GetBytesWritten() const;

 protected:
  FileWriter(std::shared_ptr<Schema> schema, std::shared_ptr<FileWriteOptions> options,
             std::shared_ptr<io::OutputStream> destination,
             fs::FileLocator destination_locator)
      : schema_(std::move(schema)),
        options_(std::move(options)),
        destination_(std::move(destination)),
        destination_locator_(std::move(destination_locator)) {}

  // Format-specific finalization. NOTE(review): presumably invoked by Finish()
  // before bytes_written_ is recorded — confirm against the implementation.
  virtual Future<> FinishInternal() = 0;

  std::shared_ptr<Schema> schema_;
  std::shared_ptr<FileWriteOptions> options_;
  std::shared_ptr<io::OutputStream> destination_;
  fs::FileLocator destination_locator_;
  // Populated once writing completes; see GetBytesWritten().
  std::optional<int64_t> bytes_written_;
};
387
+
388
/// \brief Options for writing a dataset.
struct ARROW_DS_EXPORT FileSystemDatasetWriteOptions {
  /// Options for individual fragment writing.
  std::shared_ptr<FileWriteOptions> file_write_options;

  /// FileSystem into which a dataset will be written.
  std::shared_ptr<fs::FileSystem> filesystem;

  /// Root directory into which the dataset will be written.
  std::string base_dir;

  /// Partitioning used to generate fragment paths.
  std::shared_ptr<Partitioning> partitioning;

  /// Maximum number of partitions any batch may be written into, default is 1K.
  int max_partitions = 1024;

  /// Template string used to generate fragment basenames.
  /// {i} will be replaced by an auto incremented integer.
  std::string basename_template;

  /// A functor which will be applied on an incremented counter. The result will be
  /// inserted into the basename_template in place of {i}.
  ///
  /// This can be used, for example, to left-pad the file counter.
  std::function<std::string(int)> basename_template_functor;

  /// If greater than 0 then this will limit the maximum number of files that can be left
  /// open. If an attempt is made to open too many files then the least recently used file
  /// will be closed. If this setting is set too low you may end up fragmenting your data
  /// into many small files.
  ///
  /// The default is 900 which also allows some # of files to be open by the scanner
  /// before hitting the default Linux limit of 1024
  uint32_t max_open_files = 900;

  /// If greater than 0 then this will limit how many rows are placed in any single file.
  /// Otherwise there will be no limit and one file will be created in each output
  /// directory unless files need to be closed to respect max_open_files
  uint64_t max_rows_per_file = 0;

  /// If greater than 0 then this will cause the dataset writer to batch incoming data
  /// and only write the row groups to the disk when sufficient rows have accumulated.
  /// The final row group size may be less than this value and other options such as
  /// `max_open_files` or `max_rows_per_file` lead to smaller row group sizes.
  uint64_t min_rows_per_group = 0;

  /// If greater than 0 then the dataset writer may split up large incoming batches into
  /// multiple row groups. If this value is set then min_rows_per_group should also be
  /// set or else you may end up with very small row groups (e.g. if the incoming row
  /// group size is just barely larger than this value).
  uint64_t max_rows_per_group = 1 << 20;

  /// Controls what happens if an output directory already exists.
  ExistingDataBehavior existing_data_behavior = ExistingDataBehavior::kError;

  /// \brief If false the dataset writer will not create directories
  /// This is mainly intended for filesystems that do not require directories such as S3.
  bool create_dir = true;

  /// Callback to be invoked against all FileWriters before
  /// they are finalized with FileWriter::Finish().
  std::function<Status(FileWriter*)> writer_pre_finish = [](FileWriter*) {
    return Status::OK();
  };

  /// Callback to be invoked against all FileWriters after they have
  /// called FileWriter::Finish().
  std::function<Status(FileWriter*)> writer_post_finish = [](FileWriter*) {
    return Status::OK();
  };

  /// \brief Convenience accessor for the format of `file_write_options`.
  /// Precondition: file_write_options is non-null.
  const std::shared_ptr<FileFormat>& format() const {
    return file_write_options->format();
  }
};
464
+
465
/// \brief Wraps FileSystemDatasetWriteOptions for consumption as compute::ExecNodeOptions
class ARROW_DS_EXPORT WriteNodeOptions : public acero::ExecNodeOptions {
 public:
  explicit WriteNodeOptions(
      FileSystemDatasetWriteOptions options,
      std::shared_ptr<const KeyValueMetadata> custom_metadata = NULLPTR)
      : write_options(std::move(options)), custom_metadata(std::move(custom_metadata)) {}

  /// \brief Options to control how to write the dataset
  FileSystemDatasetWriteOptions write_options;
  /// \brief Optional schema to attach to all written batches
  ///
  /// By default, we will use the output schema of the input.
  ///
  /// This can be used to alter schema metadata, field nullability, or field metadata.
  /// However, this cannot be used to change the type of data. If the custom schema does
  /// not have the same number of fields and the same data types as the input then the
  /// plan will fail.
  std::shared_ptr<Schema> custom_schema;
  /// \brief Optional metadata to attach to written batches
  std::shared_ptr<const KeyValueMetadata> custom_metadata;
};
487
+
488
+ /// @}
489
+
490
+ namespace internal {
491
+ ARROW_DS_EXPORT void InitializeDatasetWriter(arrow::acero::ExecFactoryRegistry* registry);
492
+ }
493
+
494
+ } // namespace dataset
495
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_csv.h ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+
23
+ #include "arrow/csv/options.h"
24
+ #include "arrow/dataset/dataset.h"
25
+ #include "arrow/dataset/file_base.h"
26
+ #include "arrow/dataset/type_fwd.h"
27
+ #include "arrow/dataset/visibility.h"
28
+ #include "arrow/ipc/type_fwd.h"
29
+ #include "arrow/status.h"
30
+ #include "arrow/util/compression.h"
31
+
32
+ namespace arrow {
33
+ namespace dataset {
34
+
35
+ constexpr char kCsvTypeName[] = "csv";
36
+
37
+ /// \addtogroup dataset-file-formats
38
+ ///
39
+ /// @{
40
+
41
+ /// \brief A FileFormat implementation that reads from and writes to Csv files
42
+ class ARROW_DS_EXPORT CsvFileFormat : public FileFormat {
43
+ public:
44
+ // TODO(ARROW-18328) Remove this, moved to CsvFragmentScanOptions
45
+ /// Options affecting the parsing of CSV files
46
+ csv::ParseOptions parse_options = csv::ParseOptions::Defaults();
47
+
48
+ CsvFileFormat();
49
+
50
+ std::string type_name() const override { return kCsvTypeName; }
51
+
52
+ bool Equals(const FileFormat& other) const override;
53
+
54
+ Result<bool> IsSupported(const FileSource& source) const override;
55
+
56
+ /// \brief Return the schema of the file if possible.
57
+ Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
58
+
59
+ Future<std::shared_ptr<FragmentScanner>> BeginScan(
60
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
61
+ const FragmentScanOptions* format_options,
62
+ compute::ExecContext* exec_context) const override;
63
+
64
+ Result<RecordBatchGenerator> ScanBatchesAsync(
65
+ const std::shared_ptr<ScanOptions>& scan_options,
66
+ const std::shared_ptr<FileFragment>& file) const override;
67
+
68
+ Future<std::shared_ptr<InspectedFragment>> InspectFragment(
69
+ const FileSource& source, const FragmentScanOptions* format_options,
70
+ compute::ExecContext* exec_context) const override;
71
+
72
+ Future<std::optional<int64_t>> CountRows(
73
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
74
+ const std::shared_ptr<ScanOptions>& options) override;
75
+
76
+ Result<std::shared_ptr<FileWriter>> MakeWriter(
77
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
78
+ std::shared_ptr<FileWriteOptions> options,
79
+ fs::FileLocator destination_locator) const override;
80
+
81
+ std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
82
+ };
83
+
84
+ /// \brief Per-scan options for CSV fragments
85
+ struct ARROW_DS_EXPORT CsvFragmentScanOptions : public FragmentScanOptions {
86
+ std::string type_name() const override { return kCsvTypeName; }
87
+
88
+ using StreamWrapFunc = std::function<Result<std::shared_ptr<io::InputStream>>(
89
+ std::shared_ptr<io::InputStream>)>;
90
+
91
+ /// CSV conversion options
92
+ csv::ConvertOptions convert_options = csv::ConvertOptions::Defaults();
93
+
94
+ /// CSV reading options
95
+ ///
96
+ /// Note that use_threads is always ignored.
97
+ csv::ReadOptions read_options = csv::ReadOptions::Defaults();
98
+
99
+ /// CSV parse options
100
+ csv::ParseOptions parse_options = csv::ParseOptions::Defaults();
101
+
102
+ /// Optional stream wrapping function
103
+ ///
104
+ /// If defined, all open dataset file fragments will be passed
105
+ /// through this function. One possible use case is to transparently
106
+ /// transcode all input files from a given character set to utf8.
107
+ StreamWrapFunc stream_transform_func{};
108
+ };
109
+
110
+ class ARROW_DS_EXPORT CsvFileWriteOptions : public FileWriteOptions {
111
+ public:
112
+ /// Options passed to csv::MakeCSVWriter.
113
+ std::shared_ptr<csv::WriteOptions> write_options;
114
+
115
+ protected:
116
+ explicit CsvFileWriteOptions(std::shared_ptr<FileFormat> format)
117
+ : FileWriteOptions(std::move(format)) {}
118
+
119
+ friend class CsvFileFormat;
120
+ };
121
+
122
+ class ARROW_DS_EXPORT CsvFileWriter : public FileWriter {
123
+ public:
124
+ Status Write(const std::shared_ptr<RecordBatch>& batch) override;
125
+
126
+ private:
127
+ CsvFileWriter(std::shared_ptr<io::OutputStream> destination,
128
+ std::shared_ptr<ipc::RecordBatchWriter> writer,
129
+ std::shared_ptr<Schema> schema,
130
+ std::shared_ptr<CsvFileWriteOptions> options,
131
+ fs::FileLocator destination_locator);
132
+
133
+ Future<> FinishInternal() override;
134
+
135
+ std::shared_ptr<io::OutputStream> destination_;
136
+ std::shared_ptr<ipc::RecordBatchWriter> batch_writer_;
137
+
138
+ friend class CsvFileFormat;
139
+ };
140
+
141
+ /// @}
142
+
143
+ } // namespace dataset
144
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_ipc.h ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <string>
24
+
25
+ #include "arrow/dataset/file_base.h"
26
+ #include "arrow/dataset/type_fwd.h"
27
+ #include "arrow/dataset/visibility.h"
28
+ #include "arrow/io/type_fwd.h"
29
+ #include "arrow/ipc/type_fwd.h"
30
+ #include "arrow/result.h"
31
+
32
+ namespace arrow {
33
+ namespace dataset {
34
+
35
+ /// \addtogroup dataset-file-formats
36
+ ///
37
+ /// @{
38
+
39
+ constexpr char kIpcTypeName[] = "ipc";
40
+
41
+ /// \brief A FileFormat implementation that reads from and writes to Ipc files
42
+ class ARROW_DS_EXPORT IpcFileFormat : public FileFormat {
43
+ public:
44
+ std::string type_name() const override { return kIpcTypeName; }
45
+
46
+ IpcFileFormat();
47
+
48
+ bool Equals(const FileFormat& other) const override {
49
+ return type_name() == other.type_name();
50
+ }
51
+
52
+ Result<bool> IsSupported(const FileSource& source) const override;
53
+
54
+ /// \brief Return the schema of the file if possible.
55
+ Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
56
+
57
+ Result<RecordBatchGenerator> ScanBatchesAsync(
58
+ const std::shared_ptr<ScanOptions>& options,
59
+ const std::shared_ptr<FileFragment>& file) const override;
60
+
61
+ Future<std::optional<int64_t>> CountRows(
62
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
63
+ const std::shared_ptr<ScanOptions>& options) override;
64
+
65
+ Result<std::shared_ptr<FileWriter>> MakeWriter(
66
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
67
+ std::shared_ptr<FileWriteOptions> options,
68
+ fs::FileLocator destination_locator) const override;
69
+
70
+ std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
71
+ };
72
+
73
+ /// \brief Per-scan options for IPC fragments
74
+ class ARROW_DS_EXPORT IpcFragmentScanOptions : public FragmentScanOptions {
75
+ public:
76
+ std::string type_name() const override { return kIpcTypeName; }
77
+
78
+ /// Options passed to the IPC file reader.
79
+ /// included_fields, memory_pool, and use_threads are ignored.
80
+ std::shared_ptr<ipc::IpcReadOptions> options;
81
+ /// If present, the async scanner will enable I/O coalescing.
82
+ /// This is ignored by the sync scanner.
83
+ std::shared_ptr<io::CacheOptions> cache_options;
84
+ };
85
+
86
+ class ARROW_DS_EXPORT IpcFileWriteOptions : public FileWriteOptions {
87
+ public:
88
+ /// Options passed to ipc::MakeFileWriter. use_threads is ignored
89
+ std::shared_ptr<ipc::IpcWriteOptions> options;
90
+
91
+ /// custom_metadata written to the file's footer
92
+ std::shared_ptr<const KeyValueMetadata> metadata;
93
+
94
+ protected:
95
+ explicit IpcFileWriteOptions(std::shared_ptr<FileFormat> format)
96
+ : FileWriteOptions(std::move(format)) {}
97
+
98
+ friend class IpcFileFormat;
99
+ };
100
+
101
+ class ARROW_DS_EXPORT IpcFileWriter : public FileWriter {
102
+ public:
103
+ Status Write(const std::shared_ptr<RecordBatch>& batch) override;
104
+
105
+ private:
106
+ IpcFileWriter(std::shared_ptr<io::OutputStream> destination,
107
+ std::shared_ptr<ipc::RecordBatchWriter> writer,
108
+ std::shared_ptr<Schema> schema,
109
+ std::shared_ptr<IpcFileWriteOptions> options,
110
+ fs::FileLocator destination_locator);
111
+
112
+ Future<> FinishInternal() override;
113
+
114
+ std::shared_ptr<io::OutputStream> destination_;
115
+ std::shared_ptr<ipc::RecordBatchWriter> batch_writer_;
116
+
117
+ friend class IpcFileFormat;
118
+ };
119
+
120
+ /// @}
121
+
122
+ } // namespace dataset
123
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_json.h ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <optional>
22
+ #include <string>
23
+
24
+ #include "arrow/dataset/dataset.h"
25
+ #include "arrow/dataset/file_base.h"
26
+ #include "arrow/dataset/type_fwd.h"
27
+ #include "arrow/dataset/visibility.h"
28
+ #include "arrow/ipc/type_fwd.h"
29
+ #include "arrow/json/options.h"
30
+ #include "arrow/result.h"
31
+ #include "arrow/status.h"
32
+ #include "arrow/util/future.h"
33
+ #include "arrow/util/macros.h"
34
+
35
+ namespace arrow::dataset {
36
+
37
+ /// \addtogroup dataset-file-formats
38
+ ///
39
+ /// @{
40
+
41
+ constexpr char kJsonTypeName[] = "json";
42
+
43
+ /// \brief A FileFormat implementation that reads from JSON files
44
+ class ARROW_DS_EXPORT JsonFileFormat : public FileFormat {
45
+ public:
46
+ JsonFileFormat();
47
+
48
+ std::string type_name() const override { return kJsonTypeName; }
49
+
50
+ bool Equals(const FileFormat& other) const override;
51
+
52
+ Result<bool> IsSupported(const FileSource& source) const override;
53
+
54
+ Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
55
+
56
+ Future<std::shared_ptr<InspectedFragment>> InspectFragment(
57
+ const FileSource& source, const FragmentScanOptions* format_options,
58
+ compute::ExecContext* exec_context) const override;
59
+
60
+ Future<std::shared_ptr<FragmentScanner>> BeginScan(
61
+ const FragmentScanRequest& scan_request, const InspectedFragment& inspected,
62
+ const FragmentScanOptions* format_options,
63
+ compute::ExecContext* exec_context) const override;
64
+
65
+ Result<RecordBatchGenerator> ScanBatchesAsync(
66
+ const std::shared_ptr<ScanOptions>& scan_options,
67
+ const std::shared_ptr<FileFragment>& file) const override;
68
+
69
+ Future<std::optional<int64_t>> CountRows(
70
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
71
+ const std::shared_ptr<ScanOptions>& scan_options) override;
72
+
73
+ Result<std::shared_ptr<FileWriter>> MakeWriter(
74
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
75
+ std::shared_ptr<FileWriteOptions> options,
76
+ fs::FileLocator destination_locator) const override {
77
+ return Status::NotImplemented("Writing JSON files is not currently supported");
78
+ }
79
+
80
+ std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override { return NULLPTR; }
81
+ };
82
+
83
+ /// \brief Per-scan options for JSON fragments
84
+ struct ARROW_DS_EXPORT JsonFragmentScanOptions : public FragmentScanOptions {
85
+ std::string type_name() const override { return kJsonTypeName; }
86
+
87
+ /// @brief Options that affect JSON parsing
88
+ ///
89
+ /// Note: `explicit_schema` and `unexpected_field_behavior` are ignored.
90
+ json::ParseOptions parse_options = json::ParseOptions::Defaults();
91
+
92
+ /// @brief Options that affect JSON reading
93
+ json::ReadOptions read_options = json::ReadOptions::Defaults();
94
+ };
95
+
96
+ /// @}
97
+
98
+ } // namespace arrow::dataset
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_orc.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <string>
24
+
25
+ #include "arrow/dataset/file_base.h"
26
+ #include "arrow/dataset/type_fwd.h"
27
+ #include "arrow/dataset/visibility.h"
28
+ #include "arrow/io/type_fwd.h"
29
+ #include "arrow/result.h"
30
+
31
+ namespace arrow {
32
+ namespace dataset {
33
+
34
+ /// \addtogroup dataset-file-formats
35
+ ///
36
+ /// @{
37
+
38
+ constexpr char kOrcTypeName[] = "orc";
39
+
40
+ /// \brief A FileFormat implementation that reads from and writes to ORC files
41
+ class ARROW_DS_EXPORT OrcFileFormat : public FileFormat {
42
+ public:
43
+ OrcFileFormat();
44
+
45
+ std::string type_name() const override { return kOrcTypeName; }
46
+
47
+ bool Equals(const FileFormat& other) const override {
48
+ return type_name() == other.type_name();
49
+ }
50
+
51
+ Result<bool> IsSupported(const FileSource& source) const override;
52
+
53
+ /// \brief Return the schema of the file if possible.
54
+ Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
55
+
56
+ Result<RecordBatchGenerator> ScanBatchesAsync(
57
+ const std::shared_ptr<ScanOptions>& options,
58
+ const std::shared_ptr<FileFragment>& file) const override;
59
+
60
+ Future<std::optional<int64_t>> CountRows(
61
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
62
+ const std::shared_ptr<ScanOptions>& options) override;
63
+
64
+ Result<std::shared_ptr<FileWriter>> MakeWriter(
65
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
66
+ std::shared_ptr<FileWriteOptions> options,
67
+ fs::FileLocator destination_locator) const override;
68
+
69
+ std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
70
+ };
71
+
72
+ /// @}
73
+
74
+ } // namespace dataset
75
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_parquet.h ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <optional>
24
+ #include <string>
25
+ #include <unordered_set>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/dataset/discovery.h"
30
+ #include "arrow/dataset/file_base.h"
31
+ #include "arrow/dataset/type_fwd.h"
32
+ #include "arrow/dataset/visibility.h"
33
+ #include "arrow/io/caching.h"
34
+
35
+ namespace parquet {
36
+ class ParquetFileReader;
37
+ class Statistics;
38
+ class ColumnChunkMetaData;
39
+ class RowGroupMetaData;
40
+ class FileMetaData;
41
+ class FileDecryptionProperties;
42
+ class FileEncryptionProperties;
43
+
44
+ class ReaderProperties;
45
+ class ArrowReaderProperties;
46
+
47
+ class WriterProperties;
48
+ class ArrowWriterProperties;
49
+
50
+ namespace arrow {
51
+ class FileReader;
52
+ class FileWriter;
53
+ struct SchemaManifest;
54
+ } // namespace arrow
55
+ } // namespace parquet
56
+
57
+ namespace arrow {
58
+ namespace dataset {
59
+
60
+ struct ParquetDecryptionConfig;
61
+ struct ParquetEncryptionConfig;
62
+
63
+ /// \addtogroup dataset-file-formats
64
+ ///
65
+ /// @{
66
+
67
+ constexpr char kParquetTypeName[] = "parquet";
68
+
69
+ /// \brief A FileFormat implementation that reads from Parquet files
70
+ class ARROW_DS_EXPORT ParquetFileFormat : public FileFormat {
71
+ public:
72
+ ParquetFileFormat();
73
+
74
+ /// Convenience constructor which copies properties from a parquet::ReaderProperties.
75
+ /// memory_pool will be ignored.
76
+ explicit ParquetFileFormat(const parquet::ReaderProperties& reader_properties);
77
+
78
+ std::string type_name() const override { return kParquetTypeName; }
79
+
80
+ bool Equals(const FileFormat& other) const override;
81
+
82
+ struct ReaderOptions {
83
+ /// \defgroup parquet-file-format-arrow-reader-properties properties which correspond
84
+ /// to members of parquet::ArrowReaderProperties.
85
+ ///
86
+ /// We don't embed parquet::ReaderProperties directly because column names (rather
87
+ /// than indices) are used to indicate dictionary columns, and other options are
88
+ /// deferred to scan time.
89
+ ///
90
+ /// @{
91
+ std::unordered_set<std::string> dict_columns;
92
+ arrow::TimeUnit::type coerce_int96_timestamp_unit = arrow::TimeUnit::NANO;
93
+ /// @}
94
+ } reader_options;
95
+
96
+ Result<bool> IsSupported(const FileSource& source) const override;
97
+
98
+ /// \brief Return the schema of the file if possible.
99
+ Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
100
+
101
+ Result<RecordBatchGenerator> ScanBatchesAsync(
102
+ const std::shared_ptr<ScanOptions>& options,
103
+ const std::shared_ptr<FileFragment>& file) const override;
104
+
105
+ Future<std::optional<int64_t>> CountRows(
106
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
107
+ const std::shared_ptr<ScanOptions>& options) override;
108
+
109
+ using FileFormat::MakeFragment;
110
+
111
+ /// \brief Create a Fragment targeting all RowGroups.
112
+ Result<std::shared_ptr<FileFragment>> MakeFragment(
113
+ FileSource source, compute::Expression partition_expression,
114
+ std::shared_ptr<Schema> physical_schema) override;
115
+
116
+ /// \brief Create a Fragment, restricted to the specified row groups.
117
+ Result<std::shared_ptr<ParquetFileFragment>> MakeFragment(
118
+ FileSource source, compute::Expression partition_expression,
119
+ std::shared_ptr<Schema> physical_schema, std::vector<int> row_groups);
120
+
121
+ /// \brief Return a FileReader on the given source.
122
+ Result<std::shared_ptr<parquet::arrow::FileReader>> GetReader(
123
+ const FileSource& source, const std::shared_ptr<ScanOptions>& options) const;
124
+
125
+ Result<std::shared_ptr<parquet::arrow::FileReader>> GetReader(
126
+ const FileSource& source, const std::shared_ptr<ScanOptions>& options,
127
+ const std::shared_ptr<parquet::FileMetaData>& metadata) const;
128
+
129
+ Future<std::shared_ptr<parquet::arrow::FileReader>> GetReaderAsync(
130
+ const FileSource& source, const std::shared_ptr<ScanOptions>& options) const;
131
+
132
+ Future<std::shared_ptr<parquet::arrow::FileReader>> GetReaderAsync(
133
+ const FileSource& source, const std::shared_ptr<ScanOptions>& options,
134
+ const std::shared_ptr<parquet::FileMetaData>& metadata) const;
135
+
136
+ Result<std::shared_ptr<FileWriter>> MakeWriter(
137
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
138
+ std::shared_ptr<FileWriteOptions> options,
139
+ fs::FileLocator destination_locator) const override;
140
+
141
+ std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
142
+ };
143
+
144
+ /// \brief A FileFragment with parquet logic.
145
+ ///
146
+ /// ParquetFileFragment provides a lazy (with respect to IO) interface to
147
+ /// scan parquet files. Any heavy IO calls are deferred to the Scan() method.
148
+ ///
149
+ /// The caller can provide an optional list of selected RowGroups to limit the
150
+ /// number of scanned RowGroups, or to partition the scans across multiple
151
+ /// threads.
152
+ ///
153
+ /// Metadata can be explicitly provided, enabling pushdown predicate benefits without
154
+ /// the potentially heavy IO of loading Metadata from the file system. This can induce
155
+ /// significant performance boost when scanning high latency file systems.
156
+ class ARROW_DS_EXPORT ParquetFileFragment : public FileFragment {
157
+ public:
158
+ Result<FragmentVector> SplitByRowGroup(compute::Expression predicate);
159
+
160
+ /// \brief Return the RowGroups selected by this fragment.
161
+ const std::vector<int>& row_groups() const {
162
+ if (row_groups_) return *row_groups_;
163
+ static std::vector<int> empty;
164
+ return empty;
165
+ }
166
+
167
+ /// \brief Return the FileMetaData associated with this fragment.
168
+ std::shared_ptr<parquet::FileMetaData> metadata();
169
+
170
+ /// \brief Ensure this fragment's FileMetaData is in memory.
171
+ Status EnsureCompleteMetadata(parquet::arrow::FileReader* reader = NULLPTR);
172
+
173
+ /// \brief Return fragment which selects a filtered subset of this fragment's RowGroups.
174
+ Result<std::shared_ptr<Fragment>> Subset(compute::Expression predicate);
175
+ Result<std::shared_ptr<Fragment>> Subset(std::vector<int> row_group_ids);
176
+
177
+ static std::optional<compute::Expression> EvaluateStatisticsAsExpression(
178
+ const Field& field, const parquet::Statistics& statistics);
179
+
180
+ static std::optional<compute::Expression> EvaluateStatisticsAsExpression(
181
+ const Field& field, const FieldRef& field_ref,
182
+ const parquet::Statistics& statistics);
183
+
184
+ private:
185
+ ParquetFileFragment(FileSource source, std::shared_ptr<FileFormat> format,
186
+ compute::Expression partition_expression,
187
+ std::shared_ptr<Schema> physical_schema,
188
+ std::optional<std::vector<int>> row_groups);
189
+
190
+ Status SetMetadata(std::shared_ptr<parquet::FileMetaData> metadata,
191
+ std::shared_ptr<parquet::arrow::SchemaManifest> manifest,
192
+ std::shared_ptr<parquet::FileMetaData> original_metadata = {});
193
+
194
+ // Overridden to opportunistically set metadata since a reader must be opened anyway.
195
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() override {
196
+ ARROW_RETURN_NOT_OK(EnsureCompleteMetadata());
197
+ return physical_schema_;
198
+ }
199
+
200
+ /// Return a filtered subset of row group indices.
201
+ Result<std::vector<int>> FilterRowGroups(compute::Expression predicate);
202
+ /// Simplify the predicate against the statistics of each row group.
203
+ Result<std::vector<compute::Expression>> TestRowGroups(compute::Expression predicate);
204
+ /// Try to count rows matching the predicate using metadata. Expects
205
+ /// metadata to be present, and expects the predicate to have been
206
+ /// simplified against the partition expression already.
207
+ Result<std::optional<int64_t>> TryCountRows(compute::Expression predicate);
208
+
209
+ ParquetFileFormat& parquet_format_;
210
+
211
+ /// Indices of row groups selected by this fragment,
212
+ /// or std::nullopt if all row groups are selected.
213
+ std::optional<std::vector<int>> row_groups_;
214
+
215
+ // the expressions (combined for all columns for which statistics have been
216
+ // processed) are stored per column group
217
+ std::vector<compute::Expression> statistics_expressions_;
218
+ // statistics status are kept track of by Parquet Schema column indices
219
+ // (i.e. not Arrow schema field index)
220
+ std::vector<bool> statistics_expressions_complete_;
221
+ std::shared_ptr<parquet::FileMetaData> metadata_;
222
+ std::shared_ptr<parquet::arrow::SchemaManifest> manifest_;
223
+ // The FileMetaData that owns the SchemaDescriptor pointed by SchemaManifest.
224
+ std::shared_ptr<parquet::FileMetaData> original_metadata_;
225
+
226
+ friend class ParquetFileFormat;
227
+ friend class ParquetDatasetFactory;
228
+ };
229
+
230
+ /// \brief Per-scan options for Parquet fragments
231
+ class ARROW_DS_EXPORT ParquetFragmentScanOptions : public FragmentScanOptions {
232
+ public:
233
+ ParquetFragmentScanOptions();
234
+ std::string type_name() const override { return kParquetTypeName; }
235
+
236
+ /// Reader properties. Not all properties are respected: memory_pool comes from
237
+ /// ScanOptions.
238
+ std::shared_ptr<parquet::ReaderProperties> reader_properties;
239
+ /// Arrow reader properties. Not all properties are respected: batch_size comes from
240
+ /// ScanOptions. Additionally, dictionary columns come from
241
+ /// ParquetFileFormat::ReaderOptions::dict_columns.
242
+ std::shared_ptr<parquet::ArrowReaderProperties> arrow_reader_properties;
243
+ /// A configuration structure that provides decryption properties for a dataset
244
+ std::shared_ptr<ParquetDecryptionConfig> parquet_decryption_config = NULLPTR;
245
+ };
246
+
247
+ class ARROW_DS_EXPORT ParquetFileWriteOptions : public FileWriteOptions {
248
+ public:
249
+ /// \brief Parquet writer properties.
250
+ std::shared_ptr<parquet::WriterProperties> writer_properties;
251
+
252
+ /// \brief Parquet Arrow writer properties.
253
+ std::shared_ptr<parquet::ArrowWriterProperties> arrow_writer_properties;
254
+
255
+ // A configuration structure that provides encryption properties for a dataset
256
+ std::shared_ptr<ParquetEncryptionConfig> parquet_encryption_config = NULLPTR;
257
+
258
+ protected:
259
+ explicit ParquetFileWriteOptions(std::shared_ptr<FileFormat> format)
260
+ : FileWriteOptions(std::move(format)) {}
261
+
262
+ friend class ParquetFileFormat;
263
+ };
264
+
265
+ class ARROW_DS_EXPORT ParquetFileWriter : public FileWriter {
266
+ public:
267
+ const std::shared_ptr<parquet::arrow::FileWriter>& parquet_writer() const {
268
+ return parquet_writer_;
269
+ }
270
+
271
+ Status Write(const std::shared_ptr<RecordBatch>& batch) override;
272
+
273
+ private:
274
+ ParquetFileWriter(std::shared_ptr<io::OutputStream> destination,
275
+ std::shared_ptr<parquet::arrow::FileWriter> writer,
276
+ std::shared_ptr<ParquetFileWriteOptions> options,
277
+ fs::FileLocator destination_locator);
278
+
279
+ Future<> FinishInternal() override;
280
+
281
+ std::shared_ptr<parquet::arrow::FileWriter> parquet_writer_;
282
+
283
+ friend class ParquetFileFormat;
284
+ };
285
+
286
/// \brief Options for making a FileSystemDataset from a Parquet _metadata file.
struct ParquetFactoryOptions {
  /// Either an explicit Partitioning or a PartitioningFactory to discover one.
  ///
  /// If a factory is provided, it will be used to infer a schema for partition fields
  /// based on file and directory paths then construct a Partitioning. The default
  /// is a Partitioning which will yield no partition information.
  ///
  /// The (explicit or discovered) partitioning will be applied to discovered files
  /// and the resulting partition information embedded in the Dataset.
  PartitioningOrFactory partitioning{Partitioning::Default()};

  /// For the purposes of applying the partitioning, paths will be stripped
  /// of the partition_base_dir. Files not matching the partition_base_dir
  /// prefix will be skipped for partition discovery. The ignored files will still
  /// be part of the Dataset, but will not have partition information.
  ///
  /// Example:
  /// partition_base_dir = "/dataset";
  ///
  /// - "/dataset/US/sales.csv" -> "US/sales.csv" will be given to the partitioning
  ///
  /// - "/home/john/late_sales.csv" -> Will be ignored for partition discovery.
  ///
  /// This is useful for partitioning which parses directory when ordering
  /// is important, e.g. DirectoryPartitioning.
  std::string partition_base_dir;

  /// Assert that all ColumnChunk paths are consistent. The parquet spec allows for
  /// ColumnChunk data to be stored in multiple files, but ParquetDatasetFactory
  /// supports only a single file with all ColumnChunk data. If this flag is set
  /// construction of a ParquetDatasetFactory will raise an error if ColumnChunk
  /// data is not resident in a single file.
  bool validate_column_chunk_paths = false;
};
321
+
322
/// \brief Create FileSystemDataset from custom `_metadata` cache file.
///
/// Dask and other systems will generate a cache metadata file by concatenating
/// the RowGroupMetaData of multiple parquet files into a single parquet file
/// that only contains metadata and no ColumnChunk data.
///
/// ParquetDatasetFactory creates a FileSystemDataset composed of
/// ParquetFileFragment where each fragment is pre-populated with the exact
/// number of row groups and statistics for each column.
class ARROW_DS_EXPORT ParquetDatasetFactory : public DatasetFactory {
 public:
  /// \brief Create a ParquetDatasetFactory from a metadata path.
  ///
  /// The `metadata_path` will be read from `filesystem`. Each RowGroup
  /// contained in the metadata file will be relative to `dirname(metadata_path)`.
  ///
  /// \param[in] metadata_path path of the metadata parquet file
  /// \param[in] filesystem from which to open/read the path
  /// \param[in] format to read the file with.
  /// \param[in] options see ParquetFactoryOptions
  static Result<std::shared_ptr<DatasetFactory>> Make(
      const std::string& metadata_path, std::shared_ptr<fs::FileSystem> filesystem,
      std::shared_ptr<ParquetFileFormat> format, ParquetFactoryOptions options);

  /// \brief Create a ParquetDatasetFactory from a metadata source.
  ///
  /// Similar to the previous Make definition, but the metadata can be a Buffer
  /// and the base_path is explicit instead of inferred from the metadata
  /// path.
  ///
  /// \param[in] metadata source to open the metadata parquet file from
  /// \param[in] base_path used as the prefix of every parquet files referenced
  /// \param[in] filesystem from which to read the files referenced.
  /// \param[in] format to read the file with.
  /// \param[in] options see ParquetFactoryOptions
  static Result<std::shared_ptr<DatasetFactory>> Make(
      const FileSource& metadata, const std::string& base_path,
      std::shared_ptr<fs::FileSystem> filesystem,
      std::shared_ptr<ParquetFileFormat> format, ParquetFactoryOptions options);

  /// \brief Inspect the schemas contributing to this factory (physical + partition).
  Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
      InspectOptions options) override;

  /// \brief Build the FileSystemDataset described by the cached metadata.
  Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) override;

 protected:
  // Protected: construction goes through the static Make() overloads above,
  // which pre-compute the metadata, manifest and per-file row-group mapping.
  ParquetDatasetFactory(
      std::shared_ptr<fs::FileSystem> filesystem,
      std::shared_ptr<ParquetFileFormat> format,
      std::shared_ptr<parquet::FileMetaData> metadata,
      std::shared_ptr<parquet::arrow::SchemaManifest> manifest,
      std::shared_ptr<Schema> physical_schema, std::string base_path,
      ParquetFactoryOptions options,
      std::vector<std::pair<std::string, std::vector<int>>> paths_with_row_group_ids)
      : filesystem_(std::move(filesystem)),
        format_(std::move(format)),
        metadata_(std::move(metadata)),
        manifest_(std::move(manifest)),
        physical_schema_(std::move(physical_schema)),
        base_path_(std::move(base_path)),
        options_(std::move(options)),
        paths_with_row_group_ids_(std::move(paths_with_row_group_ids)) {}

  std::shared_ptr<fs::FileSystem> filesystem_;
  std::shared_ptr<ParquetFileFormat> format_;
  // Combined metadata of the whole dataset, as read from the `_metadata` file.
  std::shared_ptr<parquet::FileMetaData> metadata_;
  std::shared_ptr<parquet::arrow::SchemaManifest> manifest_;
  std::shared_ptr<Schema> physical_schema_;
  // Prefix applied to every file path referenced by the metadata.
  std::string base_path_;
  ParquetFactoryOptions options_;
  // For each referenced file: its path and the row-group indices it contains.
  std::vector<std::pair<std::string, std::vector<int>>> paths_with_row_group_ids_;

 private:
  Result<std::vector<std::shared_ptr<FileFragment>>> CollectParquetFragments(
      const Partitioning& partitioning);

  Result<std::shared_ptr<Schema>> PartitionSchema();
};
400
+
401
+ /// @}
402
+
403
+ } // namespace dataset
404
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/parquet_encryption_config.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/dataset/type_fwd.h"
21
+
22
+ namespace parquet::encryption {
23
+ class CryptoFactory;
24
+ struct KmsConnectionConfig;
25
+ struct EncryptionConfiguration;
26
+ struct DecryptionConfiguration;
27
+ } // namespace parquet::encryption
28
+
29
+ namespace arrow {
30
+ namespace dataset {
31
+
32
/// \brief Core configuration class encapsulating parameters for high-level encryption
/// within Parquet framework.
///
/// ParquetEncryptionConfig serves as a bridge, passing encryption-related
/// parameters to appropriate components within the Parquet library. It holds references
/// to objects defining encryption strategy, Key Management Service (KMS) configuration,
/// and specific encryption configurations for Parquet data.
struct ARROW_DS_EXPORT ParquetEncryptionConfig {
  /// Shared pointer to CryptoFactory object, responsible for creating cryptographic
  /// components like encryptors and decryptors.
  std::shared_ptr<parquet::encryption::CryptoFactory> crypto_factory;

  /// Shared pointer to KmsConnectionConfig object, holding configuration parameters for
  /// connecting to a Key Management Service (KMS).
  std::shared_ptr<parquet::encryption::KmsConnectionConfig> kms_connection_config;

  /// Shared pointer to EncryptionConfiguration object, defining specific encryption
  /// settings for Parquet data, like keys for different columns.
  std::shared_ptr<parquet::encryption::EncryptionConfiguration> encryption_config;
};
52
+
53
/// \brief Core configuration class encapsulating parameters for high-level decryption
/// within Parquet framework.
///
/// ParquetDecryptionConfig is designed to pass decryption-related parameters to
/// appropriate decryption components within Parquet library. It holds references to
/// objects defining decryption strategy, Key Management Service (KMS) configuration,
/// and specific decryption configurations for reading encrypted Parquet data.
struct ARROW_DS_EXPORT ParquetDecryptionConfig {
  /// Shared pointer to CryptoFactory object, pivotal in creating cryptographic
  /// components for decryption process.
  std::shared_ptr<parquet::encryption::CryptoFactory> crypto_factory;

  /// Shared pointer to KmsConnectionConfig object, containing parameters for connecting
  /// to a Key Management Service (KMS) during decryption.
  std::shared_ptr<parquet::encryption::KmsConnectionConfig> kms_connection_config;

  /// Shared pointer to DecryptionConfiguration object, specifying decryption settings
  /// for reading encrypted Parquet data.
  std::shared_ptr<parquet::encryption::DecryptionConfiguration> decryption_config;
};
73
+
74
+ } // namespace dataset
75
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/partition.h ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <iosfwd>
24
+ #include <memory>
25
+ #include <optional>
26
+ #include <string>
27
+ #include <unordered_map>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ #include "arrow/compute/expression.h"
32
+ #include "arrow/dataset/type_fwd.h"
33
+ #include "arrow/dataset/visibility.h"
34
+ #include "arrow/util/compare.h"
35
+
36
+ namespace arrow {
37
+
38
+ namespace dataset {
39
+
40
+ constexpr char kFilenamePartitionSep = '_';
41
+
42
+ struct ARROW_DS_EXPORT PartitionPathFormat {
43
+ std::string directory, filename;
44
+ };
45
+
46
+ // ----------------------------------------------------------------------
47
+ // Partitioning
48
+
49
+ /// \defgroup dataset-partitioning Partitioning API
50
+ ///
51
+ /// @{
52
+
53
/// \brief Interface for parsing partition expressions from string partition
/// identifiers.
///
/// For example, the identifier "foo=5" might be parsed to an equality expression
/// between the "foo" field and the value 5.
///
/// Some partitionings may store the field names in a metadata
/// store instead of in file paths, for example
/// dataset_root/2009/11/... could be used when the partition fields
/// are "year" and "month"
///
/// Paths are consumed from left to right. Paths must be relative to
/// the root of a partition; path prefixes must be removed before passing
/// the path to a partitioning for parsing.
class ARROW_DS_EXPORT Partitioning : public util::EqualityComparable<Partitioning> {
 public:
  virtual ~Partitioning() = default;

  /// \brief The name identifying the kind of partitioning
  virtual std::string type_name() const = 0;

  /// \brief Return whether the partitionings are equal
  ///
  /// The base implementation only compares schemas (ignoring metadata);
  /// subclasses refine this.
  virtual bool Equals(const Partitioning& other) const {
    return schema_->Equals(other.schema_, /*check_metadata=*/false);
  }

  /// \brief If the input batch shares any fields with this partitioning,
  /// produce sub-batches which satisfy mutually exclusive Expressions.
  struct PartitionedBatches {
    // batches[i] is the slice of rows matched by expressions[i].
    RecordBatchVector batches;
    std::vector<compute::Expression> expressions;
  };
  virtual Result<PartitionedBatches> Partition(
      const std::shared_ptr<RecordBatch>& batch) const = 0;

  /// \brief Parse a path into a partition expression
  virtual Result<compute::Expression> Parse(const std::string& path) const = 0;

  /// \brief Format a partition expression as a directory/filename pair.
  virtual Result<PartitionPathFormat> Format(const compute::Expression& expr) const = 0;

  /// \brief A default Partitioning which is a DirectoryPartitioning
  /// with an empty schema.
  static std::shared_ptr<Partitioning> Default();

  /// \brief The partition schema.
  const std::shared_ptr<Schema>& schema() const { return schema_; }

 protected:
  explicit Partitioning(std::shared_ptr<Schema> schema) : schema_(std::move(schema)) {}

  std::shared_ptr<Schema> schema_;
};
105
+
106
/// \brief The encoding of partition segments.
enum class SegmentEncoding : int8_t {
  /// No encoding.
  None = 0,
  /// Segment values are URL-encoded.
  Uri = 1,
};

/// \brief Print a human-readable name for the segment encoding.
ARROW_DS_EXPORT
std::ostream& operator<<(std::ostream& os, SegmentEncoding segment_encoding);
116
+
117
/// \brief Options for key-value based partitioning (hive/directory).
struct ARROW_DS_EXPORT KeyValuePartitioningOptions {
  /// After splitting a path into components, decode the path components
  /// before parsing according to this scheme.
  SegmentEncoding segment_encoding = SegmentEncoding::Uri;
};
123
+
124
/// \brief Options for inferring a partitioning.
struct ARROW_DS_EXPORT PartitioningFactoryOptions {
  /// When inferring a schema for partition fields, yield dictionary encoded types
  /// instead of plain. This can be more efficient when materializing virtual
  /// columns, and Expressions parsed by the finished Partitioning will include
  /// dictionaries of all unique inspected values for each field.
  bool infer_dictionary = false;
  /// Optionally, an expected schema can be provided, in which case inference
  /// will only check discovered fields against the schema and update internal
  /// state (such as dictionaries).
  std::shared_ptr<Schema> schema;
  /// After splitting a path into components, decode the path components
  /// before parsing according to this scheme.
  SegmentEncoding segment_encoding = SegmentEncoding::Uri;

  /// \brief Convert these factory options to the subset used by a finished
  /// KeyValuePartitioning.
  KeyValuePartitioningOptions AsPartitioningOptions() const;
};
141
+
142
/// \brief Options for inferring a hive-style partitioning.
struct ARROW_DS_EXPORT HivePartitioningFactoryOptions : PartitioningFactoryOptions {
  /// The hive partitioning scheme maps null to a hard coded fallback string.
  std::string null_fallback;

  /// \brief Convert these factory options to the subset used by a finished
  /// HivePartitioning.
  HivePartitioningOptions AsHivePartitioningOptions() const;
};
149
+
150
/// \brief PartitioningFactory provides creation of a partitioning when the
/// specific schema must be inferred from available paths (no explicit schema is known).
class ARROW_DS_EXPORT PartitioningFactory {
 public:
  virtual ~PartitioningFactory() = default;

  /// \brief The name identifying the kind of partitioning
  virtual std::string type_name() const = 0;

  /// Get the schema for the resulting Partitioning.
  /// This may reset internal state, for example dictionaries of unique representations.
  virtual Result<std::shared_ptr<Schema>> Inspect(
      const std::vector<std::string>& paths) = 0;

  /// Create a partitioning using the provided schema
  /// (fields may be dropped).
  virtual Result<std::shared_ptr<Partitioning>> Finish(
      const std::shared_ptr<Schema>& schema) const = 0;
};
169
+
170
/// \brief Subclass for the common case of a partitioning which yields an equality
/// expression for each segment
class ARROW_DS_EXPORT KeyValuePartitioning : public Partitioning {
 public:
  /// An unconverted equality expression consisting of a field name and the representation
  /// of a scalar value
  struct Key {
    std::string name;
    // std::nullopt represents a null value (see HivePartitioningOptions::null_fallback).
    std::optional<std::string> value;
  };

  Result<PartitionedBatches> Partition(
      const std::shared_ptr<RecordBatch>& batch) const override;

  Result<compute::Expression> Parse(const std::string& path) const override;

  Result<PartitionPathFormat> Format(const compute::Expression& expr) const override;

  /// \brief Per-field dictionaries for dictionary-typed partition fields
  /// (indexed parallel to schema()->fields()).
  const ArrayVector& dictionaries() const { return dictionaries_; }

  SegmentEncoding segment_encoding() const { return options_.segment_encoding; }

  bool Equals(const Partitioning& other) const override;

 protected:
  KeyValuePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries,
                       KeyValuePartitioningOptions options)
      : Partitioning(std::move(schema)),
        dictionaries_(std::move(dictionaries)),
        options_(options) {
    // Invariant: dictionaries_ always has one (possibly null) entry per schema field.
    if (dictionaries_.empty()) {
      dictionaries_.resize(schema_->num_fields());
    }
  }

  /// Split a path into unconverted (name, value) Keys; scheme-specific.
  virtual Result<std::vector<Key>> ParseKeys(const std::string& path) const = 0;

  /// Render scalar partition values back into a path; scheme-specific.
  virtual Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const = 0;

  /// Convert a Key to a full expression.
  Result<compute::Expression> ConvertKey(const Key& key) const;

  Result<std::vector<std::string>> FormatPartitionSegments(
      const ScalarVector& values) const;
  Result<std::vector<Key>> ParsePartitionSegments(
      const std::vector<std::string>& segments) const;

  ArrayVector dictionaries_;
  KeyValuePartitioningOptions options_;
};
220
+
221
/// \brief DirectoryPartitioning parses one segment of a path for each field in its
/// schema. All fields are required, so paths passed to DirectoryPartitioning::Parse
/// must contain segments for each field.
///
/// For example given schema<year:int16, month:int8> the path "/2009/11" would be
/// parsed to ("year"_ == 2009 and "month"_ == 11)
class ARROW_DS_EXPORT DirectoryPartitioning : public KeyValuePartitioning {
 public:
  /// If a field in schema is of dictionary type, the corresponding element of
  /// dictionaries must contain the dictionary of values for that field.
  explicit DirectoryPartitioning(std::shared_ptr<Schema> schema,
                                 ArrayVector dictionaries = {},
                                 KeyValuePartitioningOptions options = {});

  std::string type_name() const override { return "directory"; }

  bool Equals(const Partitioning& other) const override;

  /// \brief Create a factory for a directory partitioning.
  ///
  /// \param[in] field_names The names for the partition fields. Types will be
  /// inferred.
  static std::shared_ptr<PartitioningFactory> MakeFactory(
      std::vector<std::string> field_names, PartitioningFactoryOptions = {});

 private:
  Result<std::vector<Key>> ParseKeys(const std::string& path) const override;

  Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
};
251
+
252
/// \brief The default fallback used for null values in a Hive-style partitioning.
// NOTE(review): declared `static` at namespace scope in a header, so each
// translation unit gets its own internal-linkage copy — harmless for a
// constant, but `inline constexpr` would be the modern equivalent.
static constexpr char kDefaultHiveNullFallback[] = "__HIVE_DEFAULT_PARTITION__";

/// \brief Options for a hive-style partitioning, extending key-value options
/// with a fallback string used to represent null partition values.
struct ARROW_DS_EXPORT HivePartitioningOptions : public KeyValuePartitioningOptions {
  std::string null_fallback = kDefaultHiveNullFallback;

  /// \brief Default options with only the null fallback string overridden.
  static HivePartitioningOptions DefaultsWithNullFallback(std::string fallback) {
    HivePartitioningOptions options;
    options.null_fallback = std::move(fallback);
    return options;
  }
};
264
+
265
/// \brief Multi-level, directory based partitioning
/// originating from Apache Hive with all data files stored in the
/// leaf directories. Data is partitioned by static values of a
/// particular column in the schema. Partition keys are represented in
/// the form $key=$value in directory names.
/// Field order is ignored, as are missing or unrecognized field names.
///
/// For example given schema<year:int16, month:int8, day:int8> the path
/// "/day=321/ignored=3.4/year=2009" parses to ("year"_ == 2009 and "day"_ == 321)
class ARROW_DS_EXPORT HivePartitioning : public KeyValuePartitioning {
 public:
  /// If a field in schema is of dictionary type, the corresponding element of
  /// dictionaries must contain the dictionary of values for that field.
  explicit HivePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries = {},
                            std::string null_fallback = kDefaultHiveNullFallback)
      : KeyValuePartitioning(std::move(schema), std::move(dictionaries),
                             KeyValuePartitioningOptions()),
        hive_options_(
            HivePartitioningOptions::DefaultsWithNullFallback(std::move(null_fallback))) {
  }

  /// Construct with full HivePartitioningOptions; `options` is passed (sliced
  /// to its KeyValuePartitioningOptions base) to the base class and stored
  /// whole in hive_options_.
  explicit HivePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries,
                            HivePartitioningOptions options)
      : KeyValuePartitioning(std::move(schema), std::move(dictionaries), options),
        hive_options_(options) {}

  std::string type_name() const override { return "hive"; }
  std::string null_fallback() const { return hive_options_.null_fallback; }
  const HivePartitioningOptions& options() const { return hive_options_; }

  /// \brief Parse a single "$key=$value" segment; returns std::nullopt for
  /// segments that are not in key=value form.
  static Result<std::optional<Key>> ParseKey(const std::string& segment,
                                             const HivePartitioningOptions& options);

  bool Equals(const Partitioning& other) const override;

  /// \brief Create a factory for a hive partitioning.
  static std::shared_ptr<PartitioningFactory> MakeFactory(
      HivePartitioningFactoryOptions = {});

 private:
  const HivePartitioningOptions hive_options_;
  Result<std::vector<Key>> ParseKeys(const std::string& path) const override;

  Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
};
310
+
311
/// \brief Implementation provided by lambda or other callable
class ARROW_DS_EXPORT FunctionPartitioning : public Partitioning {
 public:
  /// Callable mapping a path to a partition expression.
  using ParseImpl = std::function<Result<compute::Expression>(const std::string&)>;

  /// Callable mapping a partition expression back to a path (optional).
  using FormatImpl =
      std::function<Result<PartitionPathFormat>(const compute::Expression&)>;

  FunctionPartitioning(std::shared_ptr<Schema> schema, ParseImpl parse_impl,
                       FormatImpl format_impl = NULLPTR, std::string name = "function")
      : Partitioning(std::move(schema)),
        parse_impl_(std::move(parse_impl)),
        format_impl_(std::move(format_impl)),
        name_(std::move(name)) {}

  std::string type_name() const override { return name_; }

  /// Callables cannot be compared, so function partitionings always compare
  /// unequal — even against themselves.
  bool Equals(const Partitioning& other) const override { return false; }

  Result<compute::Expression> Parse(const std::string& path) const override {
    return parse_impl_(path);
  }

  /// Formatting is only supported when a FormatImpl was supplied.
  Result<PartitionPathFormat> Format(const compute::Expression& expr) const override {
    if (format_impl_) {
      return format_impl_(expr);
    }
    return Status::NotImplemented("formatting paths from ", type_name(), " Partitioning");
  }

  /// Batch partitioning is not supported by function-based partitionings.
  Result<PartitionedBatches> Partition(
      const std::shared_ptr<RecordBatch>& batch) const override {
    return Status::NotImplemented("partitioning batches from ", type_name(),
                                  " Partitioning");
  }

 private:
  ParseImpl parse_impl_;
  FormatImpl format_impl_;
  std::string name_;
};
352
+
353
/// \brief A key-value partitioning whose segments are embedded in file names
/// (separated by kFilenamePartitionSep) rather than in directory names.
class ARROW_DS_EXPORT FilenamePartitioning : public KeyValuePartitioning {
 public:
  /// \brief Construct a FilenamePartitioning from its components.
  ///
  /// If a field in schema is of dictionary type, the corresponding element of
  /// dictionaries must contain the dictionary of values for that field.
  explicit FilenamePartitioning(std::shared_ptr<Schema> schema,
                                ArrayVector dictionaries = {},
                                KeyValuePartitioningOptions options = {});

  std::string type_name() const override { return "filename"; }

  /// \brief Create a factory for a filename partitioning.
  ///
  /// \param[in] field_names The names for the partition fields. Types will be
  /// inferred.
  static std::shared_ptr<PartitioningFactory> MakeFactory(
      std::vector<std::string> field_names, PartitioningFactoryOptions = {});

  bool Equals(const Partitioning& other) const override;

 private:
  Result<std::vector<Key>> ParseKeys(const std::string& path) const override;

  Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
};
379
+
380
/// \brief Remove a leading prefix from a path.
ARROW_DS_EXPORT std::string StripPrefix(const std::string& path,
                                        const std::string& prefix);

/// \brief Extracts the directory and filename and removes the prefix of a path
///
/// e.g., `StripPrefixAndFilename("/data/year=2019/c.txt", "/data") ->
/// {"year=2019","c.txt"}`
ARROW_DS_EXPORT std::string StripPrefixAndFilename(const std::string& path,
                                                   const std::string& prefix);

/// \brief Vector version of StripPrefixAndFilename.
ARROW_DS_EXPORT std::vector<std::string> StripPrefixAndFilename(
    const std::vector<std::string>& paths, const std::string& prefix);

/// \brief Vector version of StripPrefixAndFilename.
ARROW_DS_EXPORT std::vector<std::string> StripPrefixAndFilename(
    const std::vector<fs::FileInfo>& files, const std::string& prefix);
397
+
398
/// \brief Either a Partitioning or a PartitioningFactory
///
/// Exactly one of the two members is set; assigning one kind resets the other
/// (assignment replaces *this with a freshly-constructed instance).
class ARROW_DS_EXPORT PartitioningOrFactory {
 public:
  explicit PartitioningOrFactory(std::shared_ptr<Partitioning> partitioning)
      : partitioning_(std::move(partitioning)) {}

  explicit PartitioningOrFactory(std::shared_ptr<PartitioningFactory> factory)
      : factory_(std::move(factory)) {}

  PartitioningOrFactory& operator=(std::shared_ptr<Partitioning> partitioning) {
    return *this = PartitioningOrFactory(std::move(partitioning));
  }

  PartitioningOrFactory& operator=(std::shared_ptr<PartitioningFactory> factory) {
    return *this = PartitioningOrFactory(std::move(factory));
  }

  /// \brief The partitioning (if given).
  const std::shared_ptr<Partitioning>& partitioning() const { return partitioning_; }

  /// \brief The partition factory (if given).
  const std::shared_ptr<PartitioningFactory>& factory() const { return factory_; }

  /// \brief Get the partition schema, inferring it with the given factory if needed.
  Result<std::shared_ptr<Schema>> GetOrInferSchema(const std::vector<std::string>& paths);

 private:
  std::shared_ptr<PartitioningFactory> factory_;
  std::shared_ptr<Partitioning> partitioning_;
};
428
+
429
+ /// @}
430
+
431
+ } // namespace dataset
432
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/pch.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Often-used headers, for precompiling.
19
+ // If updating this header, please make sure you check compilation speed
20
+ // before checking in. Adding headers which are not used extremely often
21
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
22
+
23
+ // This API is EXPERIMENTAL.
24
+
25
+ #include "arrow/dataset/dataset.h"
26
+ #include "arrow/dataset/scanner.h"
27
+ #include "arrow/pch.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/plan.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #include "arrow/dataset/visibility.h"
21
+
22
+ namespace arrow {
23
+ namespace dataset {
24
+ namespace internal {
25
+
26
/// Register dataset-based exec nodes with the exec node registry
///
/// This function must be called before using dataset ExecNode factories
ARROW_DS_EXPORT void Initialize();
30
+
31
+ } // namespace internal
32
+ } // namespace dataset
33
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/projector.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include "arrow/dataset/visibility.h"
23
+ #include "arrow/type_fwd.h"
24
+
25
+ namespace arrow {
26
+ namespace dataset {
27
+
28
+ // FIXME this is superceded by compute::Expression::Bind
29
+ ARROW_DS_EXPORT Status CheckProjectable(const Schema& from, const Schema& to);
30
+
31
+ } // namespace dataset
32
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/scanner.h ADDED
@@ -0,0 +1,578 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+ #include <vector>
27
+
28
+ #include "arrow/acero/options.h"
29
+ #include "arrow/compute/expression.h"
30
+ #include "arrow/compute/type_fwd.h"
31
+ #include "arrow/dataset/dataset.h"
32
+ #include "arrow/dataset/projector.h"
33
+ #include "arrow/dataset/type_fwd.h"
34
+ #include "arrow/dataset/visibility.h"
35
+ #include "arrow/io/interfaces.h"
36
+ #include "arrow/memory_pool.h"
37
+ #include "arrow/type_fwd.h"
38
+ #include "arrow/util/async_generator.h"
39
+ #include "arrow/util/iterator.h"
40
+ #include "arrow/util/thread_pool.h"
41
+ #include "arrow/util/type_fwd.h"
42
+
43
+ namespace arrow {
44
+
45
+ using RecordBatchGenerator = std::function<Future<std::shared_ptr<RecordBatch>>()>;
46
+
47
+ namespace dataset {
48
+
49
+ /// \defgroup dataset-scanning Scanning API
50
+ ///
51
+ /// @{
52
+
53
+ constexpr int64_t kDefaultBatchSize = 1 << 17; // 128Ki rows
54
+ // This will yield 64 batches ~ 8Mi rows
55
+ constexpr int32_t kDefaultBatchReadahead = 16;
56
+ constexpr int32_t kDefaultFragmentReadahead = 4;
57
+ constexpr int32_t kDefaultBytesReadahead = 1 << 25; // 32MiB
58
+
59
+ /// Scan-specific options, which can be changed between scans of the same dataset.
60
+ struct ARROW_DS_EXPORT ScanOptions {
61
+ /// A row filter (which will be pushed down to partitioning/reading if supported).
62
+ compute::Expression filter = compute::literal(true);
63
+ /// A projection expression (which can add/remove/rename columns).
64
+ compute::Expression projection;
65
+
66
+ /// Schema with which batches will be read from fragments. This is also known as the
67
+ /// "reader schema" it will be used (for example) in constructing CSV file readers to
68
+ /// identify column types for parsing. Usually only a subset of its fields (see
69
+ /// MaterializedFields) will be materialized during a scan.
70
+ std::shared_ptr<Schema> dataset_schema;
71
+
72
+ /// Schema of projected record batches. This is independent of dataset_schema as its
73
+ /// fields are derived from the projection. For example, let
74
+ ///
75
+ /// dataset_schema = {"a": int32, "b": int32, "id": utf8}
76
+ /// projection = project({equal(field_ref("a"), field_ref("b"))}, {"a_plus_b"})
77
+ ///
78
+ /// (no filter specified). In this case, the projected_schema would be
79
+ ///
80
+ /// {"a_plus_b": int32}
81
+ std::shared_ptr<Schema> projected_schema;
82
+
83
+ /// Maximum row count for scanned batches.
84
+ int64_t batch_size = kDefaultBatchSize;
85
+
86
+ /// How many batches to read ahead within a fragment.
87
+ ///
88
+ /// Set to 0 to disable batch readahead
89
+ ///
90
+ /// Note: May not be supported by all formats
91
+ /// Note: Will be ignored if use_threads is set to false
92
+ int32_t batch_readahead = kDefaultBatchReadahead;
93
+
94
+ /// How many files to read ahead
95
+ ///
96
+ /// Set to 0 to disable fragment readahead
97
+ ///
98
+ /// Note: May not be enforced by all scanners
99
+ /// Note: Will be ignored if use_threads is set to false
100
+ int32_t fragment_readahead = kDefaultFragmentReadahead;
101
+
102
+ /// A pool from which materialized and scanned arrays will be allocated.
103
+ MemoryPool* pool = arrow::default_memory_pool();
104
+
105
+ /// IOContext for any IO tasks
106
+ ///
107
+ /// Note: The IOContext executor will be ignored if use_threads is set to false
108
+ io::IOContext io_context;
109
+
110
+ /// If true the scanner will scan in parallel
111
+ ///
112
+ /// Note: If true, this will use threads from both the cpu_executor and the
113
+ /// io_context.executor
114
+ /// Note: This must be true in order for any readahead to happen
115
+ bool use_threads = false;
116
+
117
+ /// Fragment-specific scan options.
118
+ std::shared_ptr<FragmentScanOptions> fragment_scan_options;
119
+
120
+ /// Return a vector of FieldRefs that require materialization.
121
+ ///
122
+ /// This is usually the union of the fields referenced in the projection and the
123
+ /// filter expression. Examples:
124
+ ///
125
+ /// - `SELECT a, b WHERE a < 2 && c > 1` => ["a", "b", "a", "c"]
126
+ /// - `SELECT a + b < 3 WHERE a > 1` => ["a", "b", "a"]
127
+ ///
128
+ /// This is needed for expression where a field may not be directly
129
+ /// used in the final projection but is still required to evaluate the
130
+ /// expression.
131
+ ///
132
+ /// This is used by Fragment implementations to apply the column
133
+ /// sub-selection optimization.
134
+ std::vector<FieldRef> MaterializedFields() const;
135
+
136
+ /// Parameters which control when the plan should pause for a slow consumer
137
+ acero::BackpressureOptions backpressure =
138
+ acero::BackpressureOptions::DefaultBackpressure();
139
+ };
140
+
141
+ /// Scan-specific options, which can be changed between scans of the same dataset.
142
+ ///
143
+ /// A dataset consists of one or more individual fragments. A fragment is anything
144
+ /// that is independently scannable, often a file.
145
+ ///
146
+ /// Batches from all fragments will be converted to a single schema. This unified
147
+ /// schema is referred to as the "dataset schema" and is the output schema for
148
+ /// this node.
149
+ ///
150
+ /// Individual fragments may have schemas that are different from the dataset
151
+ /// schema. This is sometimes referred to as the physical or fragment schema.
152
+ /// Conversion from the fragment schema to the dataset schema is a process
153
+ /// known as evolution.
154
+ struct ARROW_DS_EXPORT ScanV2Options : public acero::ExecNodeOptions {
155
+ explicit ScanV2Options(std::shared_ptr<Dataset> dataset)
156
+ : dataset(std::move(dataset)) {}
157
+
158
+ /// \brief The dataset to scan
159
+ std::shared_ptr<Dataset> dataset;
160
+ /// \brief A row filter
161
+ ///
162
+ /// The filter expression should be written against the dataset schema.
163
+ /// The filter must be unbound.
164
+ ///
165
+ /// This is an opportunistic pushdown filter. Filtering capabilities will
166
+ /// vary between formats. If a format is not capable of applying the filter
167
+ /// then it will ignore it.
168
+ ///
169
+ /// Each fragment will do its best to filter the data based on the information
170
+ /// (partitioning guarantees, statistics) available to it. If it is able to
171
+ /// apply some filtering then it will indicate what filtering it was able to
172
+ /// apply by attaching a guarantee to the batch.
173
+ ///
174
+ /// For example, if a filter is x < 50 && y > 40 then a batch may be able to
175
+ /// apply a guarantee x < 50. Post-scan filtering would then only need to
176
+ /// consider y > 40 (for this specific batch). The next batch may not be able
177
+ /// to attach any guarantee and both clauses would need to be applied to that batch.
178
+ ///
179
+ /// A single guarantee-aware filtering operation should generally be applied to all
180
+ /// resulting batches. The scan node is not responsible for this.
181
+ ///
182
+ /// Fields that are referenced by the filter should be included in the `columns` vector.
183
+ /// The scan node will not automatically fetch fields referenced by the filter
184
+ /// expression. \see AddFieldsNeededForFilter
185
+ ///
186
+ /// If the filter references fields that are not included in `columns` this may or may
187
+ /// not be an error, depending on the format.
188
+ compute::Expression filter = compute::literal(true);
189
+
190
+ /// \brief The columns to scan
191
+ ///
192
+ /// This is not a simple list of top-level column indices but instead a set of paths
193
+ /// allowing for partial selection of columns
194
+ ///
195
+ /// These paths refer to the dataset schema
196
+ ///
197
+ /// For example, consider the following dataset schema:
198
+ /// schema({
199
+ /// field("score", int32()),
200
+ /// "marker", struct_({
201
+ /// field("color", utf8()),
202
+ /// field("location", struct_({
203
+ /// field("x", float64()),
204
+ /// field("y", float64())
205
+ /// })
206
+ /// })
207
+ /// })
208
+ ///
209
+ /// If `columns` is {{0}, {1,1,0}} then the output schema is:
210
+ /// schema({field("score", int32()), field("x", float64())})
211
+ ///
212
+ /// If `columns` is {{1,1,1}, {1,1}} then the output schema is:
213
+ /// schema({
214
+ /// field("y", float64()),
215
+ /// field("location", struct_({
216
+ /// field("x", float64()),
217
+ /// field("y", float64())
218
+ /// })
219
+ /// })
220
+ std::vector<FieldPath> columns;
221
+
222
+ /// \brief Target number of bytes to read ahead in a fragment
223
+ ///
224
+ /// This limit involves some amount of estimation. Formats typically only know
225
+ /// batch boundaries in terms of rows (not decoded bytes) and so an estimation
226
+ /// must be done to guess the average row size. Other formats like CSV and JSON
227
+ /// must make even more generalized guesses.
228
+ ///
229
+ /// This is a best-effort guide. Some formats may need to read ahead further,
230
+ /// for example, if scanning a parquet file that has batches with 100MiB of data
231
+ /// then the actual readahead will be at least 100MiB
232
+ ///
233
+ /// Set to 0 to disable readahead. When disabled, the scanner will read the
234
+ /// dataset one batch at a time
235
+ ///
236
+ /// This limit applies across all fragments. If the limit is 32MiB and the
237
+ /// fragment readahead allows for 20 fragments to be read at once then the
238
+ /// total readahead will still be 32MiB and NOT 20 * 32MiB.
239
+ int32_t target_bytes_readahead = kDefaultBytesReadahead;
240
+
241
+ /// \brief Number of fragments to read ahead
242
+ ///
243
+ /// Higher readahead will potentially lead to more efficient I/O but will lead
244
+ /// to the scan operation using more RAM. The default is fairly conservative
245
+ /// and designed for fast local disks (or slow local spinning disks which cannot
246
+ /// handle much parallelism anyways). When using a highly parallel remote filesystem
247
+ /// you will likely want to increase these values.
248
+ ///
249
+ /// Set to 0 to disable fragment readahead. When disabled the dataset will be scanned
250
+ /// one fragment at a time.
251
+ int32_t fragment_readahead = kDefaultFragmentReadahead;
252
+ /// \brief Options specific to the file format
253
+ const FragmentScanOptions* format_options = NULLPTR;
254
+
255
+ /// \brief Utility method to get a selection representing all columns in a dataset
256
+ static std::vector<FieldPath> AllColumns(const Schema& dataset_schema);
257
+
258
+ /// \brief Utility method to add fields needed for the current filter
259
+ ///
260
+ /// This method adds any fields that are needed by `filter` which are not already
261
+ /// included in the list of columns. Any new fields added will be added to the end
262
+ /// in no particular order.
263
+ static Status AddFieldsNeededForFilter(ScanV2Options* options);
264
+ };
265
+
266
+ /// \brief Describes a projection
267
+ struct ARROW_DS_EXPORT ProjectionDescr {
268
+ /// \brief The projection expression itself
269
+ /// This expression must be a call to make_struct
270
+ compute::Expression expression;
271
+ /// \brief The output schema of the projection.
272
+
273
+ /// This can be calculated from the input schema and the expression but it
274
+ /// is cached here for convenience.
275
+ std::shared_ptr<Schema> schema;
276
+
277
+ /// \brief Create a ProjectionDescr by binding an expression to the dataset schema
278
+ ///
279
+ /// expression must return a struct type
280
+ static Result<ProjectionDescr> FromStructExpression(
281
+ const compute::Expression& expression, const Schema& dataset_schema);
282
+
283
+ /// \brief Create a ProjectionDescr from expressions/names for each field
284
+ static Result<ProjectionDescr> FromExpressions(std::vector<compute::Expression> exprs,
285
+ std::vector<std::string> names,
286
+ const Schema& dataset_schema);
287
+
288
+ /// \brief Create a default projection referencing fields in the dataset schema
289
+ static Result<ProjectionDescr> FromNames(std::vector<std::string> names,
290
+ const Schema& dataset_schema);
291
+
292
+ /// \brief Make a projection that projects every field in the dataset schema
293
+ static Result<ProjectionDescr> Default(const Schema& dataset_schema);
294
+ };
295
+
296
+ /// \brief Utility method to set the projection expression and schema
297
+ ARROW_DS_EXPORT void SetProjection(ScanOptions* options, ProjectionDescr projection);
298
+
299
+ /// \brief Combines a record batch with the fragment that the record batch originated
300
+ /// from
301
+ ///
302
+ /// Knowing the source fragment can be useful for debugging & understanding loaded
303
+ /// data
304
+ struct TaggedRecordBatch {
305
+ std::shared_ptr<RecordBatch> record_batch;
306
+ std::shared_ptr<Fragment> fragment;
307
+ };
308
+ using TaggedRecordBatchGenerator = std::function<Future<TaggedRecordBatch>()>;
309
+ using TaggedRecordBatchIterator = Iterator<TaggedRecordBatch>;
310
+
311
+ /// \brief Combines a tagged batch with positional information
312
+ ///
313
+ /// This is returned when scanning batches in an unordered fashion. This information is
314
+ /// needed if you ever want to reassemble the batches in order
315
+ struct EnumeratedRecordBatch {
316
+ Enumerated<std::shared_ptr<RecordBatch>> record_batch;
317
+ Enumerated<std::shared_ptr<Fragment>> fragment;
318
+ };
319
+ using EnumeratedRecordBatchGenerator = std::function<Future<EnumeratedRecordBatch>()>;
320
+ using EnumeratedRecordBatchIterator = Iterator<EnumeratedRecordBatch>;
321
+
322
+ /// @}
323
+
324
+ } // namespace dataset
325
+
326
+ template <>
327
+ struct IterationTraits<dataset::TaggedRecordBatch> {
328
+ static dataset::TaggedRecordBatch End() {
329
+ return dataset::TaggedRecordBatch{NULLPTR, NULLPTR};
330
+ }
331
+ static bool IsEnd(const dataset::TaggedRecordBatch& val) {
332
+ return val.record_batch == NULLPTR;
333
+ }
334
+ };
335
+
336
+ template <>
337
+ struct IterationTraits<dataset::EnumeratedRecordBatch> {
338
+ static dataset::EnumeratedRecordBatch End() {
339
+ return dataset::EnumeratedRecordBatch{
340
+ IterationEnd<Enumerated<std::shared_ptr<RecordBatch>>>(),
341
+ IterationEnd<Enumerated<std::shared_ptr<dataset::Fragment>>>()};
342
+ }
343
+ static bool IsEnd(const dataset::EnumeratedRecordBatch& val) {
344
+ return IsIterationEnd(val.fragment);
345
+ }
346
+ };
347
+
348
+ namespace dataset {
349
+
350
+ /// \defgroup dataset-scanning Scanning API
351
+ ///
352
+ /// @{
353
+
354
+ /// \brief A scanner glues together several dataset classes to load in data.
355
+ /// The dataset contains a collection of fragments and partitioning rules.
356
+ ///
357
+ /// The fragments identify independently loadable units of data (i.e. each fragment has
358
+ /// a potentially unique schema and possibly even format. It should be possible to read
359
+ /// fragments in parallel if desired).
360
+ ///
361
+ /// The fragment's format contains the logic necessary to actually create a task to load
362
+ /// the fragment into memory. That task may or may not support parallel execution of
363
+ /// its own.
364
+ ///
365
+ /// The scanner is then responsible for creating scan tasks from every fragment in the
366
+ /// dataset and (potentially) sequencing the loaded record batches together.
367
+ ///
368
+ /// The scanner should not buffer the entire dataset in memory (unless asked) instead
369
+ /// yielding record batches as soon as they are ready to scan. Various readahead
370
+ /// properties control how much data is allowed to be scanned before pausing to let a
371
+ /// slow consumer catchup.
372
+ ///
373
+ /// Today the scanner also handles projection & filtering although that may change in
374
+ /// the future.
375
+ class ARROW_DS_EXPORT Scanner {
376
+ public:
377
+ virtual ~Scanner() = default;
378
+
379
+ /// \brief Apply a visitor to each RecordBatch as it is scanned. If multiple threads
380
+ /// are used (via use_threads), the visitor will be invoked from those threads and is
381
+ /// responsible for any synchronization.
382
+ virtual Status Scan(std::function<Status(TaggedRecordBatch)> visitor) = 0;
383
+ /// \brief Convert a Scanner into a Table.
384
+ ///
385
+ /// Use this convenience utility with care. This will serially materialize the
386
+ /// Scan result in memory before creating the Table.
387
+ virtual Result<std::shared_ptr<Table>> ToTable() = 0;
388
+ /// \brief Scan the dataset into a stream of record batches. Each batch is tagged
389
+ /// with the fragment it originated from. The batches will arrive in order. The
390
+ /// order of fragments is determined by the dataset.
391
+ ///
392
+ /// Note: The scanner will perform some readahead but will avoid materializing too
393
+ /// much in memory (this is goverended by the readahead options and use_threads option).
394
+ /// If the readahead queue fills up then I/O will pause until the calling thread catches
395
+ /// up.
396
+ virtual Result<TaggedRecordBatchIterator> ScanBatches() = 0;
397
+ virtual Result<TaggedRecordBatchGenerator> ScanBatchesAsync() = 0;
398
+ virtual Result<TaggedRecordBatchGenerator> ScanBatchesAsync(
399
+ ::arrow::internal::Executor* cpu_thread_pool) = 0;
400
+ /// \brief Scan the dataset into a stream of record batches. Unlike ScanBatches this
401
+ /// method may allow record batches to be returned out of order. This allows for more
402
+ /// efficient scanning: some fragments may be accessed more quickly than others (e.g.
403
+ /// may be cached in RAM or just happen to get scheduled earlier by the I/O)
404
+ ///
405
+ /// To make up for the out-of-order iteration each batch is further tagged with
406
+ /// positional information.
407
+ virtual Result<EnumeratedRecordBatchIterator> ScanBatchesUnordered() = 0;
408
+ virtual Result<EnumeratedRecordBatchGenerator> ScanBatchesUnorderedAsync() = 0;
409
+ virtual Result<EnumeratedRecordBatchGenerator> ScanBatchesUnorderedAsync(
410
+ ::arrow::internal::Executor* cpu_thread_pool) = 0;
411
+ /// \brief A convenience to synchronously load the given rows by index.
412
+ ///
413
+ /// Will only consume as many batches as needed from ScanBatches().
414
+ virtual Result<std::shared_ptr<Table>> TakeRows(const Array& indices) = 0;
415
+ /// \brief Get the first N rows.
416
+ virtual Result<std::shared_ptr<Table>> Head(int64_t num_rows) = 0;
417
+ /// \brief Count rows matching a predicate.
418
+ ///
419
+ /// This method will push down the predicate and compute the result based on fragment
420
+ /// metadata if possible.
421
+ virtual Result<int64_t> CountRows() = 0;
422
+ virtual Future<int64_t> CountRowsAsync() = 0;
423
+ /// \brief Convert the Scanner to a RecordBatchReader so it can be
424
+ /// easily used with APIs that expect a reader.
425
+ virtual Result<std::shared_ptr<RecordBatchReader>> ToRecordBatchReader() = 0;
426
+
427
+ /// \brief Get the options for this scan.
428
+ const std::shared_ptr<ScanOptions>& options() const { return scan_options_; }
429
+ /// \brief Get the dataset that this scanner will scan
430
+ virtual const std::shared_ptr<Dataset>& dataset() const = 0;
431
+
432
+ protected:
433
+ explicit Scanner(std::shared_ptr<ScanOptions> scan_options)
434
+ : scan_options_(std::move(scan_options)) {}
435
+
436
+ Result<EnumeratedRecordBatchIterator> AddPositioningToInOrderScan(
437
+ TaggedRecordBatchIterator scan);
438
+
439
+ const std::shared_ptr<ScanOptions> scan_options_;
440
+ };
441
+
442
+ /// \brief ScannerBuilder is a factory class to construct a Scanner. It is used
443
+ /// to pass information, notably a potential filter expression and a subset of
444
+ /// columns to materialize.
445
+ class ARROW_DS_EXPORT ScannerBuilder {
446
+ public:
447
+ explicit ScannerBuilder(std::shared_ptr<Dataset> dataset);
448
+
449
+ ScannerBuilder(std::shared_ptr<Dataset> dataset,
450
+ std::shared_ptr<ScanOptions> scan_options);
451
+
452
+ ScannerBuilder(std::shared_ptr<Schema> schema, std::shared_ptr<Fragment> fragment,
453
+ std::shared_ptr<ScanOptions> scan_options);
454
+
455
+ /// \brief Make a scanner from a record batch reader.
456
+ ///
457
+ /// The resulting scanner can be scanned only once. This is intended
458
+ /// to support writing data from streaming sources or other sources
459
+ /// that can be iterated only once.
460
+ static std::shared_ptr<ScannerBuilder> FromRecordBatchReader(
461
+ std::shared_ptr<RecordBatchReader> reader);
462
+
463
+ /// \brief Set the subset of columns to materialize.
464
+ ///
465
+ /// Columns which are not referenced may not be read from fragments.
466
+ ///
467
+ /// \param[in] columns list of columns to project. Order and duplicates will
468
+ /// be preserved.
469
+ ///
470
+ /// \return Failure if any column name does not exists in the dataset's
471
+ /// Schema.
472
+ Status Project(std::vector<std::string> columns);
473
+
474
+ /// \brief Set expressions which will be evaluated to produce the materialized
475
+ /// columns.
476
+ ///
477
+ /// Columns which are not referenced may not be read from fragments.
478
+ ///
479
+ /// \param[in] exprs expressions to evaluate to produce columns.
480
+ /// \param[in] names list of names for the resulting columns.
481
+ ///
482
+ /// \return Failure if any referenced column does not exists in the dataset's
483
+ /// Schema.
484
+ Status Project(std::vector<compute::Expression> exprs, std::vector<std::string> names);
485
+
486
+ /// \brief Set the filter expression to return only rows matching the filter.
487
+ ///
488
+ /// The predicate will be passed down to Sources and corresponding
489
+ /// Fragments to exploit predicate pushdown if possible using
490
+ /// partition information or Fragment internal metadata, e.g. Parquet statistics.
491
+ /// Columns which are not referenced may not be read from fragments.
492
+ ///
493
+ /// \param[in] filter expression to filter rows with.
494
+ ///
495
+ /// \return Failure if any referenced columns does not exist in the dataset's
496
+ /// Schema.
497
+ Status Filter(const compute::Expression& filter);
498
+
499
+ /// \brief Indicate if the Scanner should make use of the available
500
+ /// ThreadPool found in ScanOptions;
501
+ Status UseThreads(bool use_threads = true);
502
+
503
+ /// \brief Set the maximum number of rows per RecordBatch.
504
+ ///
505
+ /// \param[in] batch_size the maximum number of rows.
506
+ /// \returns An error if the number for batch is not greater than 0.
507
+ ///
508
+ /// This option provides a control limiting the memory owned by any RecordBatch.
509
+ Status BatchSize(int64_t batch_size);
510
+
511
+ /// \brief Set the number of batches to read ahead within a fragment.
512
+ ///
513
+ /// \param[in] batch_readahead How many batches to read ahead within a fragment
514
+ /// \returns an error if this number is less than 0.
515
+ ///
516
+ /// This option provides a control on the RAM vs I/O tradeoff.
517
+ /// It might not be supported by all file formats, in which case it will
518
+ /// simply be ignored.
519
+ Status BatchReadahead(int32_t batch_readahead);
520
+
521
+ /// \brief Set the number of fragments to read ahead
522
+ ///
523
+ /// \param[in] fragment_readahead How many fragments to read ahead
524
+ /// \returns an error if this number is less than 0.
525
+ ///
526
+ /// This option provides a control on the RAM vs I/O tradeoff.
527
+ Status FragmentReadahead(int32_t fragment_readahead);
528
+
529
+ /// \brief Set the pool from which materialized and scanned arrays will be allocated.
530
+ Status Pool(MemoryPool* pool);
531
+
532
+ /// \brief Set fragment-specific scan options.
533
+ Status FragmentScanOptions(std::shared_ptr<FragmentScanOptions> fragment_scan_options);
534
+
535
+ /// \brief Override default backpressure configuration
536
+ Status Backpressure(acero::BackpressureOptions backpressure);
537
+
538
+ /// \brief Return the current scan options for the builder.
539
+ Result<std::shared_ptr<ScanOptions>> GetScanOptions();
540
+
541
+ /// \brief Return the constructed now-immutable Scanner object
542
+ Result<std::shared_ptr<Scanner>> Finish();
543
+
544
+ const std::shared_ptr<Schema>& schema() const;
545
+ const std::shared_ptr<Schema>& projected_schema() const;
546
+
547
+ private:
548
+ std::shared_ptr<Dataset> dataset_;
549
+ std::shared_ptr<ScanOptions> scan_options_ = std::make_shared<ScanOptions>();
550
+ };
551
+
552
+ /// \brief Construct a source ExecNode which yields batches from a dataset scan.
553
+ ///
554
+ /// Does not construct associated filter or project nodes.
555
+ /// Yielded batches will be augmented with fragment/batch indices to enable stable
556
+ /// ordering for simple ExecPlans.
557
+ class ARROW_DS_EXPORT ScanNodeOptions : public acero::ExecNodeOptions {
558
+ public:
559
+ explicit ScanNodeOptions(std::shared_ptr<Dataset> dataset,
560
+ std::shared_ptr<ScanOptions> scan_options,
561
+ bool require_sequenced_output = false)
562
+ : dataset(std::move(dataset)),
563
+ scan_options(std::move(scan_options)),
564
+ require_sequenced_output(require_sequenced_output) {}
565
+
566
+ std::shared_ptr<Dataset> dataset;
567
+ std::shared_ptr<ScanOptions> scan_options;
568
+ bool require_sequenced_output;
569
+ };
570
+
571
+ /// @}
572
+
573
+ namespace internal {
574
+ ARROW_DS_EXPORT void InitializeScanner(arrow::acero::ExecFactoryRegistry* registry);
575
+ ARROW_DS_EXPORT void InitializeScannerV2(arrow::acero::ExecFactoryRegistry* registry);
576
+ } // namespace internal
577
+ } // namespace dataset
578
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/type_fwd.h ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <vector>
24
+
25
+ #include "arrow/compute/type_fwd.h" // IWYU pragma: export
26
+ #include "arrow/dataset/visibility.h"
27
+ #include "arrow/filesystem/type_fwd.h" // IWYU pragma: export
28
+ #include "arrow/type_fwd.h" // IWYU pragma: export
29
+
30
+ namespace arrow {
31
+ namespace dataset {
32
+
33
+ class Dataset;
34
+ class DatasetFactory;
35
+ using DatasetVector = std::vector<std::shared_ptr<Dataset>>;
36
+
37
+ class UnionDataset;
38
+ class UnionDatasetFactory;
39
+
40
+ class Fragment;
41
+ using FragmentIterator = Iterator<std::shared_ptr<Fragment>>;
42
+ using FragmentVector = std::vector<std::shared_ptr<Fragment>>;
43
+
44
+ class FragmentScanOptions;
45
+
46
+ class FileSource;
47
+ class FileFormat;
48
+ class FileFragment;
49
+ class FileWriter;
50
+ class FileWriteOptions;
51
+ class FileSystemDataset;
52
+ class FileSystemDatasetFactory;
53
+ struct FileSystemDatasetWriteOptions;
54
+ class WriteNodeOptions;
55
+
56
+ /// \brief Controls what happens if files exist in an output directory during a dataset
57
+ /// write
58
+ enum class ExistingDataBehavior : int8_t {
59
+ /// Deletes all files in a directory the first time that directory is encountered
60
+ kDeleteMatchingPartitions,
61
+ /// Ignores existing files, overwriting any that happen to have the same name as an
62
+ /// output file
63
+ kOverwriteOrIgnore,
64
+ /// Returns an error if there are any files or subdirectories in the output directory
65
+ kError,
66
+ };
67
+
68
+ class InMemoryDataset;
69
+
70
+ class CsvFileFormat;
71
+ class CsvFileWriter;
72
+ class CsvFileWriteOptions;
73
+ struct CsvFragmentScanOptions;
74
+
75
+ class JsonFileFormat;
76
+ class JsonFileWriter;
77
+ class JsonFileWriteOptions;
78
+ struct JsonFragmentScanOptions;
79
+
80
+ class IpcFileFormat;
81
+ class IpcFileWriter;
82
+ class IpcFileWriteOptions;
83
+ class IpcFragmentScanOptions;
84
+
85
+ class ParquetFileFormat;
86
+ class ParquetFileFragment;
87
+ class ParquetFragmentScanOptions;
88
+ class ParquetFileWriter;
89
+ class ParquetFileWriteOptions;
90
+
91
+ class Partitioning;
92
+ class PartitioningFactory;
93
+ class PartitioningOrFactory;
94
+ struct KeyValuePartitioningOptions;
95
+ class DirectoryPartitioning;
96
+ class HivePartitioning;
97
+ struct HivePartitioningOptions;
98
+ class FilenamePartitioning;
99
+ struct FilenamePartitioningOptions;
100
+
101
+ class ScanNodeOptions;
102
+ struct ScanOptions;
103
+
104
+ class Scanner;
105
+
106
+ class ScannerBuilder;
107
+
108
+ class ScanTask;
109
+ using ScanTaskVector = std::vector<std::shared_ptr<ScanTask>>;
110
+ using ScanTaskIterator = Iterator<std::shared_ptr<ScanTask>>;
111
+
112
+ } // namespace dataset
113
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/visibility.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #if defined(_WIN32) || defined(__CYGWIN__)
23
+ #if defined(_MSC_VER)
24
+ #pragma warning(push)
25
+ #pragma warning(disable : 4251)
26
+ #else
27
+ #pragma GCC diagnostic ignored "-Wattributes"
28
+ #endif
29
+
30
+ #ifdef ARROW_DS_STATIC
31
+ #define ARROW_DS_EXPORT
32
+ #elif defined(ARROW_DS_EXPORTING)
33
+ #define ARROW_DS_EXPORT __declspec(dllexport)
34
+ #else
35
+ #define ARROW_DS_EXPORT __declspec(dllimport)
36
+ #endif
37
+
38
+ #define ARROW_DS_NO_EXPORT
39
+ #else // Not Windows
40
+ #ifndef ARROW_DS_EXPORT
41
+ #define ARROW_DS_EXPORT __attribute__((visibility("default")))
42
+ #endif
43
+ #ifndef ARROW_DS_NO_EXPORT
44
+ #define ARROW_DS_NO_EXPORT __attribute__((visibility("hidden")))
45
+ #endif
46
+ #endif // Non-Windows
47
+
48
+ #if defined(_MSC_VER)
49
+ #pragma warning(pop)
50
+ #endif
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/api.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/flight/client.h"
21
+ #include "arrow/flight/client_auth.h"
22
+ #include "arrow/flight/client_middleware.h"
23
+ #include "arrow/flight/client_tracing_middleware.h"
24
+ #include "arrow/flight/middleware.h"
25
+ #include "arrow/flight/server.h"
26
+ #include "arrow/flight/server_auth.h"
27
+ #include "arrow/flight/server_middleware.h"
28
+ #include "arrow/flight/server_tracing_middleware.h"
29
+ #include "arrow/flight/types.h"
30
+ #include "arrow/flight/types_async.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client.h ADDED
@@ -0,0 +1,436 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ /// \brief Implementation of Flight RPC client. API should be
19
+ /// considered experimental for now
20
+
21
+ #pragma once
22
+
23
+ #include <chrono>
24
+ #include <memory>
25
+ #include <string>
26
+ #include <utility>
27
+ #include <variant>
28
+ #include <vector>
29
+
30
+ #include "arrow/ipc/options.h"
31
+ #include "arrow/ipc/reader.h"
32
+ #include "arrow/ipc/writer.h"
33
+ #include "arrow/result.h"
34
+ #include "arrow/status.h"
35
+ #include "arrow/util/cancel.h"
36
+
37
+ #include "arrow/flight/type_fwd.h"
38
+ #include "arrow/flight/types.h" // IWYU pragma: keep
39
+ #include "arrow/flight/visibility.h"
40
+
41
+ namespace arrow {
42
+
43
+ class RecordBatch;
44
+ class Schema;
45
+
46
+ namespace flight {
47
+
48
+ /// \brief A duration type for Flight call timeouts.
49
+ typedef std::chrono::duration<double, std::chrono::seconds::period> TimeoutDuration;
50
+
51
+ /// \brief Hints to the underlying RPC layer for Arrow Flight calls.
52
+ class ARROW_FLIGHT_EXPORT FlightCallOptions {
53
+ public:
54
+ /// Create a default set of call options.
55
+ FlightCallOptions();
56
+
57
+ /// \brief An optional timeout for this call. Negative durations
58
+ /// mean an implementation-defined default behavior will be used
59
+ /// instead. This is the default value.
60
+ TimeoutDuration timeout;
61
+
62
+ /// \brief IPC reader options, if applicable for the call.
63
+ ipc::IpcReadOptions read_options;
64
+
65
+ /// \brief IPC writer options, if applicable for the call.
66
+ ipc::IpcWriteOptions write_options;
67
+
68
+ /// \brief Headers for client to add to context.
69
+ std::vector<std::pair<std::string, std::string>> headers;
70
+
71
+ /// \brief A token to enable interactive user cancellation of long-running requests.
72
+ StopToken stop_token;
73
+
74
+ /// \brief An optional memory manager to control where to allocate incoming data.
75
+ std::shared_ptr<MemoryManager> memory_manager;
76
+ };
77
+
78
+ /// \brief Indicate that the client attempted to write a message
79
+ /// larger than the soft limit set via write_size_limit_bytes.
80
+ class ARROW_FLIGHT_EXPORT FlightWriteSizeStatusDetail : public arrow::StatusDetail {
81
+ public:
82
+ explicit FlightWriteSizeStatusDetail(int64_t limit, int64_t actual)
83
+ : limit_(limit), actual_(actual) {}
84
+ const char* type_id() const override;
85
+ std::string ToString() const override;
86
+ int64_t limit() const { return limit_; }
87
+ int64_t actual() const { return actual_; }
88
+
89
+ /// \brief Extract this status detail from a status, or return
90
+ /// nullptr if the status doesn't contain this status detail.
91
+ static std::shared_ptr<FlightWriteSizeStatusDetail> UnwrapStatus(
92
+ const arrow::Status& status);
93
+
94
+ private:
95
+ int64_t limit_;
96
+ int64_t actual_;
97
+ };
98
+
99
+ struct ARROW_FLIGHT_EXPORT FlightClientOptions {
100
+ /// \brief Root certificates to use for validating server
101
+ /// certificates.
102
+ std::string tls_root_certs;
103
+ /// \brief Override the hostname checked by TLS. Use with caution.
104
+ std::string override_hostname;
105
+ /// \brief The client certificate to use if using Mutual TLS
106
+ std::string cert_chain;
107
+ /// \brief The private key associated with the client certificate for Mutual TLS
108
+ std::string private_key;
109
+ /// \brief A list of client middleware to apply.
110
+ std::vector<std::shared_ptr<ClientMiddlewareFactory>> middleware;
111
+ /// \brief A soft limit on the number of bytes to write in a single
112
+ /// batch when sending Arrow data to a server.
113
+ ///
114
+ /// Used to help limit server memory consumption. Only enabled if
115
+ /// positive. When enabled, FlightStreamWriter.Write* may yield a
116
+ /// IOError with error detail FlightWriteSizeStatusDetail.
117
+ int64_t write_size_limit_bytes = 0;
118
+
119
+ /// \brief Generic connection options, passed to the underlying
120
+ /// transport; interpretation is implementation-dependent.
121
+ std::vector<std::pair<std::string, std::variant<int, std::string>>> generic_options;
122
+
123
+ /// \brief Use TLS without validating the server certificate. Use with caution.
124
+ bool disable_server_verification = false;
125
+
126
+ /// \brief Get default options.
127
+ static FlightClientOptions Defaults();
128
+ };
129
+
130
+ /// \brief A RecordBatchReader exposing Flight metadata and cancel
131
+ /// operations.
132
+ class ARROW_FLIGHT_EXPORT FlightStreamReader : public MetadataRecordBatchReader {
133
+ public:
134
+ /// \brief Try to cancel the call.
135
+ virtual void Cancel() = 0;
136
+
137
+ using MetadataRecordBatchReader::ToRecordBatches;
138
+ /// \brief Consume entire stream as a vector of record batches
139
+ virtual arrow::Result<std::vector<std::shared_ptr<RecordBatch>>> ToRecordBatches(
140
+ const StopToken& stop_token) = 0;
141
+
142
+ using MetadataRecordBatchReader::ToTable;
143
+ /// \brief Consume entire stream as a Table
144
+ arrow::Result<std::shared_ptr<Table>> ToTable(const StopToken& stop_token);
145
+ };
146
+
147
+ // Silence warning
148
+ // "non dll-interface class RecordBatchReader used as base for dll-interface class"
149
+ #ifdef _MSC_VER
150
+ #pragma warning(push)
151
+ #pragma warning(disable : 4275)
152
+ #endif
153
+
154
+ /// \brief A RecordBatchWriter that also allows sending
155
+ /// application-defined metadata via the Flight protocol.
156
+ class ARROW_FLIGHT_EXPORT FlightStreamWriter : public MetadataRecordBatchWriter {
157
+ public:
158
+ /// \brief Indicate that the application is done writing to this stream.
159
+ ///
160
+ /// The application may not write to this stream after calling
161
+ /// this. This differs from closing the stream because this writer
162
+ /// may represent only one half of a readable and writable stream.
163
+ virtual Status DoneWriting() = 0;
164
+ };
165
+
166
+ #ifdef _MSC_VER
167
+ #pragma warning(pop)
168
+ #endif
169
+
170
+ /// \brief A reader for application-specific metadata sent back to the
171
+ /// client during an upload.
172
+ class ARROW_FLIGHT_EXPORT FlightMetadataReader {
173
+ public:
174
+ virtual ~FlightMetadataReader();
175
+ /// \brief Read a message from the server.
176
+ virtual Status ReadMetadata(std::shared_ptr<Buffer>* out) = 0;
177
+ };
178
+
179
+ /// \brief Client class for Arrow Flight RPC services.
180
+ /// API experimental for now
181
+ class ARROW_FLIGHT_EXPORT FlightClient {
182
+ public:
183
+ ~FlightClient();
184
+
185
+ /// \brief Connect to an unauthenticated flight service
186
+ /// \param[in] location the URI
187
+ /// \return Arrow result with the created FlightClient, OK status may not indicate that
188
+ /// the connection was successful
189
+ static arrow::Result<std::unique_ptr<FlightClient>> Connect(const Location& location);
190
+
191
+ /// \brief Connect to an unauthenticated flight service
192
+ /// \param[in] location the URI
193
+ /// \param[in] options Other options for setting up the client
194
+ /// \return Arrow result with the created FlightClient, OK status may not indicate that
195
+ /// the connection was successful
196
+ static arrow::Result<std::unique_ptr<FlightClient>> Connect(
197
+ const Location& location, const FlightClientOptions& options);
198
+
199
+ /// \brief Authenticate to the server using the given handler.
200
+ /// \param[in] options Per-RPC options
201
+ /// \param[in] auth_handler The authentication mechanism to use
202
+ /// \return Status OK if the client authenticated successfully
203
+ Status Authenticate(const FlightCallOptions& options,
204
+ std::unique_ptr<ClientAuthHandler> auth_handler);
205
+
206
+ /// \brief Authenticate to the server using basic HTTP style authentication.
207
+ /// \param[in] options Per-RPC options
208
+ /// \param[in] username Username to use
209
+ /// \param[in] password Password to use
210
+ /// \return Arrow result with bearer token and status OK if client authenticated
211
+ /// successfully
212
+ arrow::Result<std::pair<std::string, std::string>> AuthenticateBasicToken(
213
+ const FlightCallOptions& options, const std::string& username,
214
+ const std::string& password);
215
+
216
+ /// \brief Perform the indicated action, returning an iterator to the stream
217
+ /// of results, if any
218
+ /// \param[in] options Per-RPC options
219
+ /// \param[in] action the action to be performed
220
+ /// \return Arrow result with an iterator object for reading the returned results
221
+ arrow::Result<std::unique_ptr<ResultStream>> DoAction(const FlightCallOptions& options,
222
+ const Action& action);
223
+ arrow::Result<std::unique_ptr<ResultStream>> DoAction(const Action& action) {
224
+ return DoAction({}, action);
225
+ }
226
+
227
+ /// \brief Perform the CancelFlightInfo action, returning a
228
+ /// CancelFlightInfoResult
229
+ ///
230
+ /// \param[in] options Per-RPC options
231
+ /// \param[in] request The CancelFlightInfoRequest
232
+ /// \return Arrow result with a CancelFlightInfoResult
233
+ arrow::Result<CancelFlightInfoResult> CancelFlightInfo(
234
+ const FlightCallOptions& options, const CancelFlightInfoRequest& request);
235
+ arrow::Result<CancelFlightInfoResult> CancelFlightInfo(
236
+ const CancelFlightInfoRequest& request) {
237
+ return CancelFlightInfo({}, request);
238
+ }
239
+
240
+ /// \brief Perform the RenewFlightEndpoint action, returning a renewed
241
+ /// FlightEndpoint
242
+ ///
243
+ /// \param[in] options Per-RPC options
244
+ /// \param[in] request The RenewFlightEndpointRequest
245
+ /// \return Arrow result with a renewed FlightEndpoint
246
+ arrow::Result<FlightEndpoint> RenewFlightEndpoint(
247
+ const FlightCallOptions& options, const RenewFlightEndpointRequest& request);
248
+ arrow::Result<FlightEndpoint> RenewFlightEndpoint(
249
+ const RenewFlightEndpointRequest& request) {
250
+ return RenewFlightEndpoint({}, request);
251
+ }
252
+
253
+ /// \brief Retrieve a list of available Action types
254
+ /// \param[in] options Per-RPC options
255
+ /// \return Arrow result with the available actions
256
+ arrow::Result<std::vector<ActionType>> ListActions(const FlightCallOptions& options);
257
+ arrow::Result<std::vector<ActionType>> ListActions() {
258
+ return ListActions(FlightCallOptions());
259
+ }
260
+
261
+ /// \brief Request access plan for a single flight, which may be an existing
262
+ /// dataset or a command to be executed
263
+ /// \param[in] options Per-RPC options
264
+ /// \param[in] descriptor the dataset request, whether a named dataset or
265
+ /// command
266
+ /// \return Arrow result with the FlightInfo describing where to access the dataset
267
+ arrow::Result<std::unique_ptr<FlightInfo>> GetFlightInfo(
268
+ const FlightCallOptions& options, const FlightDescriptor& descriptor);
269
+ arrow::Result<std::unique_ptr<FlightInfo>> GetFlightInfo(
270
+ const FlightDescriptor& descriptor) {
271
+ return GetFlightInfo({}, descriptor);
272
+ }
273
+
274
+ /// \brief Asynchronous GetFlightInfo.
275
+ /// \param[in] options Per-RPC options
276
+ /// \param[in] descriptor the dataset request
277
+ /// \param[in] listener Callbacks for response and RPC completion
278
+ ///
279
+ /// This API is EXPERIMENTAL.
280
+ void GetFlightInfoAsync(const FlightCallOptions& options,
281
+ const FlightDescriptor& descriptor,
282
+ std::shared_ptr<AsyncListener<FlightInfo>> listener);
283
+ void GetFlightInfoAsync(const FlightDescriptor& descriptor,
284
+ std::shared_ptr<AsyncListener<FlightInfo>> listener) {
285
+ return GetFlightInfoAsync({}, descriptor, std::move(listener));
286
+ }
287
+
288
+ /// \brief Asynchronous GetFlightInfo returning a Future.
289
+ /// \param[in] options Per-RPC options
290
+ /// \param[in] descriptor the dataset request
291
+ ///
292
+ /// This API is EXPERIMENTAL.
293
+ arrow::Future<FlightInfo> GetFlightInfoAsync(const FlightCallOptions& options,
294
+ const FlightDescriptor& descriptor);
295
+ arrow::Future<FlightInfo> GetFlightInfoAsync(const FlightDescriptor& descriptor) {
296
+ return GetFlightInfoAsync({}, descriptor);
297
+ }
298
+
299
+ /// \brief Request and poll a long running query
300
+ /// \param[in] options Per-RPC options
301
+ /// \param[in] descriptor the dataset request or a descriptor returned by a
302
+ /// prior PollFlightInfo call
303
+ /// \return Arrow result with the PollInfo describing the status of
304
+ /// the requested query
305
+ arrow::Result<std::unique_ptr<PollInfo>> PollFlightInfo(
306
+ const FlightCallOptions& options, const FlightDescriptor& descriptor);
307
+ arrow::Result<std::unique_ptr<PollInfo>> PollFlightInfo(
308
+ const FlightDescriptor& descriptor) {
309
+ return PollFlightInfo({}, descriptor);
310
+ }
311
+
312
+ /// \brief Request schema for a single flight, which may be an existing
313
+ /// dataset or a command to be executed
314
+ /// \param[in] options Per-RPC options
315
+ /// \param[in] descriptor the dataset request, whether a named dataset or
316
+ /// command
317
+ /// \return Arrow result with the SchemaResult describing the dataset schema
318
+ arrow::Result<std::unique_ptr<SchemaResult>> GetSchema(
319
+ const FlightCallOptions& options, const FlightDescriptor& descriptor);
320
+
321
+ arrow::Result<std::unique_ptr<SchemaResult>> GetSchema(
322
+ const FlightDescriptor& descriptor) {
323
+ return GetSchema({}, descriptor);
324
+ }
325
+
326
+ /// \brief List all available flights known to the server
327
+ /// \return Arrow result with an iterator that returns a FlightInfo for each flight
328
+ arrow::Result<std::unique_ptr<FlightListing>> ListFlights();
329
+
330
+ /// \brief List available flights given indicated filter criteria
331
+ /// \param[in] options Per-RPC options
332
+ /// \param[in] criteria the filter criteria (opaque)
333
+ /// \return Arrow result with an iterator that returns a FlightInfo for each flight
334
+ arrow::Result<std::unique_ptr<FlightListing>> ListFlights(
335
+ const FlightCallOptions& options, const Criteria& criteria);
336
+
337
+ /// \brief Given a flight ticket and schema, request to be sent the
338
+ /// stream. Returns record batch stream reader
339
+ /// \param[in] options Per-RPC options
340
+ /// \param[in] ticket The flight ticket to use
341
+ /// \return Arrow result with the returned RecordBatchReader
342
+ arrow::Result<std::unique_ptr<FlightStreamReader>> DoGet(
343
+ const FlightCallOptions& options, const Ticket& ticket);
344
+ arrow::Result<std::unique_ptr<FlightStreamReader>> DoGet(const Ticket& ticket) {
345
+ return DoGet({}, ticket);
346
+ }
347
+
348
+ /// \brief DoPut return value
349
+ struct DoPutResult {
350
+ /// \brief a writer to write record batches to
351
+ std::unique_ptr<FlightStreamWriter> writer;
352
+ /// \brief a reader for application metadata from the server
353
+ std::unique_ptr<FlightMetadataReader> reader;
354
+ };
355
+ /// \brief Upload data to a Flight described by the given
356
+ /// descriptor. The caller must call Close() on the returned stream
357
+ /// once they are done writing.
358
+ ///
359
+ /// The reader and writer are linked; closing the writer will also
360
+ /// close the reader. Use \a DoneWriting to only close the write
361
+ /// side of the channel.
362
+ ///
363
+ /// \param[in] options Per-RPC options
364
+ /// \param[in] descriptor the descriptor of the stream
365
+ /// \param[in] schema the schema for the data to upload
366
+ /// \return Arrow result with a DoPutResult struct holding a reader and a writer
367
+ arrow::Result<DoPutResult> DoPut(const FlightCallOptions& options,
368
+ const FlightDescriptor& descriptor,
369
+ const std::shared_ptr<Schema>& schema);
370
+
371
+ arrow::Result<DoPutResult> DoPut(const FlightDescriptor& descriptor,
372
+ const std::shared_ptr<Schema>& schema) {
373
+ return DoPut({}, descriptor, schema);
374
+ }
375
+
376
+ struct DoExchangeResult {
377
+ std::unique_ptr<FlightStreamWriter> writer;
378
+ std::unique_ptr<FlightStreamReader> reader;
379
+ };
380
+ arrow::Result<DoExchangeResult> DoExchange(const FlightCallOptions& options,
381
+ const FlightDescriptor& descriptor);
382
+ arrow::Result<DoExchangeResult> DoExchange(const FlightDescriptor& descriptor) {
383
+ return DoExchange({}, descriptor);
384
+ }
385
+
386
+ /// \brief Set server session option(s) by name/value. Sessions are generally
387
+ /// persisted via HTTP cookies.
388
+ /// \param[in] options Per-RPC options
389
+ /// \param[in] request The server session options to set
390
+ ::arrow::Result<SetSessionOptionsResult> SetSessionOptions(
391
+ const FlightCallOptions& options, const SetSessionOptionsRequest& request);
392
+
393
+ /// \brief Get the current server session options. The session is generally
394
+ /// accessed via an HTTP cookie.
395
+ /// \param[in] options Per-RPC options
396
+ /// \param[in] request The (empty) GetSessionOptions request object.
397
+ ::arrow::Result<GetSessionOptionsResult> GetSessionOptions(
398
+ const FlightCallOptions& options, const GetSessionOptionsRequest& request);
399
+
400
+ /// \brief Close/invalidate the current server session. The session is generally
401
+ /// accessed via an HTTP cookie.
402
+ /// \param[in] options Per-RPC options
403
+ /// \param[in] request The (empty) CloseSession request object.
404
+ ::arrow::Result<CloseSessionResult> CloseSession(const FlightCallOptions& options,
405
+ const CloseSessionRequest& request);
406
+
407
+ /// \brief Explicitly shut down and clean up the client.
408
+ ///
409
+ /// For backwards compatibility, this will be implicitly called by
410
+ /// the destructor if not already called, but this gives the
411
+ /// application no chance to handle errors, so it is recommended to
412
+ /// explicitly close the client.
413
+ ///
414
+ /// \since 8.0.0
415
+ Status Close();
416
+
417
+ /// \brief Whether this client supports asynchronous methods.
418
+ bool supports_async() const;
419
+
420
+ /// \brief Check whether this client supports asynchronous methods.
421
+ ///
422
+ /// This is like supports_async(), except that a detailed error message
423
+ /// is returned if async support is not available. If async support is
424
+ /// available, this function returns successfully.
425
+ Status CheckAsyncSupport() const;
426
+
427
+ private:
428
+ FlightClient();
429
+ Status CheckOpen() const;
430
+ std::unique_ptr<internal::ClientTransport> transport_;
431
+ bool closed_;
432
+ int64_t write_size_limit_bytes_;
433
+ };
434
+
435
+ } // namespace flight
436
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_auth.h ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+
22
+ #include "arrow/flight/visibility.h"
23
+ #include "arrow/status.h"
24
+
25
+ namespace arrow {
26
+
27
+ namespace flight {
28
+
29
+ /// \brief A reader for messages from the server during an
30
+ /// authentication handshake.
31
+ class ARROW_FLIGHT_EXPORT ClientAuthReader {
32
+ public:
33
+ virtual ~ClientAuthReader() = default;
34
+ virtual Status Read(std::string* response) = 0;
35
+ };
36
+
37
+ /// \brief A writer for messages to the server during an
38
+ /// authentication handshake.
39
+ class ARROW_FLIGHT_EXPORT ClientAuthSender {
40
+ public:
41
+ virtual ~ClientAuthSender() = default;
42
+ virtual Status Write(const std::string& token) = 0;
43
+ };
44
+
45
+ /// \brief An authentication implementation for a Flight service.
46
+ /// Authentication includes both an initial negotiation and a per-call
47
+ /// token validation. Implementations may choose to use either or both
48
+ /// mechanisms.
49
+ class ARROW_FLIGHT_EXPORT ClientAuthHandler {
50
+ public:
51
+ virtual ~ClientAuthHandler() = default;
52
+ /// \brief Authenticate the client on initial connection. The client
53
+ /// can send messages to/read responses from the server at any time.
54
+ /// \return Status OK if authenticated successfully
55
+ virtual Status Authenticate(ClientAuthSender* outgoing, ClientAuthReader* incoming) = 0;
56
+ /// \brief Get a per-call token.
57
+ /// \param[out] token The token to send to the server.
58
+ virtual Status GetToken(std::string* token) = 0;
59
+ };
60
+
61
+ } // namespace flight
62
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_tracing_middleware.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Middleware implementation for propagating OpenTelemetry spans.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+
24
+ #include "arrow/flight/client_middleware.h"
25
+
26
+ namespace arrow {
27
+ namespace flight {
28
+
29
+ /// \brief Returns a ClientMiddlewareFactory that handles sending OpenTelemetry spans.
30
+ ARROW_FLIGHT_EXPORT std::shared_ptr<ClientMiddlewareFactory>
31
+ MakeTracingClientMiddlewareFactory();
32
+
33
+ } // namespace flight
34
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/middleware.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Interfaces for defining middleware for Flight clients and
19
+ // servers. Currently experimental.
20
+
21
+ #pragma once
22
+
23
+ #include <memory>
24
+ #include <string>
25
+ #include <string_view>
26
+ #include <utility>
27
+
28
+ #include "arrow/flight/types.h"
29
+ #include "arrow/status.h"
30
+
31
+ namespace arrow {
32
+ namespace flight {
33
+
34
+ /// \brief A write-only wrapper around headers for an RPC call.
35
+ class ARROW_FLIGHT_EXPORT AddCallHeaders {
36
+ public:
37
+ virtual ~AddCallHeaders() = default;
38
+
39
+ /// \brief Add a header to be sent to the client.
40
+ ///
41
+ /// \param[in] key The header name. Must be lowercase ASCII; some
42
+ /// transports may reject invalid header names.
43
+ /// \param[in] value The header value. Some transports may only
44
+ /// accept binary header values if the header name ends in "-bin".
45
+ virtual void AddHeader(const std::string& key, const std::string& value) = 0;
46
+ };
47
+
48
+ /// \brief An enumeration of the RPC methods Flight implements.
49
+ enum class FlightMethod : char {
50
+ Invalid = 0,
51
+ Handshake = 1,
52
+ ListFlights = 2,
53
+ GetFlightInfo = 3,
54
+ GetSchema = 4,
55
+ DoGet = 5,
56
+ DoPut = 6,
57
+ DoAction = 7,
58
+ ListActions = 8,
59
+ DoExchange = 9,
60
+ PollFlightInfo = 10,
61
+ };
62
+
63
+ /// \brief Get a human-readable name for a Flight method.
64
+ ARROW_FLIGHT_EXPORT
65
+ std::string ToString(FlightMethod method);
66
+
67
+ /// \brief Information about an instance of a Flight RPC.
68
+ struct ARROW_FLIGHT_EXPORT CallInfo {
69
+ public:
70
+ /// \brief The RPC method of this call.
71
+ FlightMethod method;
72
+ };
73
+
74
+ } // namespace flight
75
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/platform.h ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Internal header. Platform-specific definitions for Flight.
19
+
20
+ #pragma once
21
+
22
+ #ifdef _MSC_VER
23
+
24
+ // The protobuf documentation says that C4251 warnings when using the
25
+ // library are spurious and suppressed when the build the library and
26
+ // compiler, but must be also suppressed in downstream projects
27
+ #pragma warning(disable : 4251)
28
+
29
+ #endif // _MSC_VER
30
+
31
+ #include "arrow/util/config.h" // IWYU pragma: keep
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_auth.h ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ /// \brief Server-side APIs to implement authentication for Flight.
19
+
20
+ #pragma once
21
+
22
+ #include <string>
23
+
24
+ #include "arrow/flight/type_fwd.h"
25
+ #include "arrow/flight/visibility.h"
26
+ #include "arrow/status.h"
27
+
28
+ namespace arrow {
29
+
30
+ namespace flight {
31
+
32
+ /// \brief A reader for messages from the client during an
33
+ /// authentication handshake.
34
+ class ARROW_FLIGHT_EXPORT ServerAuthReader {
35
+ public:
36
+ virtual ~ServerAuthReader() = default;
37
+ virtual Status Read(std::string* token) = 0;
38
+ };
39
+
40
+ /// \brief A writer for messages to the client during an
41
+ /// authentication handshake.
42
+ class ARROW_FLIGHT_EXPORT ServerAuthSender {
43
+ public:
44
+ virtual ~ServerAuthSender() = default;
45
+ virtual Status Write(const std::string& message) = 0;
46
+ };
47
+
48
+ /// \brief An authentication implementation for a Flight service.
49
+ /// Authentication includes both an initial negotiation and a per-call
50
+ /// token validation. Implementations may choose to use either or both
51
+ /// mechanisms.
52
+ /// An implementation may need to track some state, e.g. a mapping of
53
+ /// client tokens to authenticated identities.
54
+ class ARROW_FLIGHT_EXPORT ServerAuthHandler {
55
+ public:
56
+ virtual ~ServerAuthHandler();
57
+ /// \brief Authenticate the client on initial connection. The server
58
+ /// can send and read responses from the client at any time.
59
+ /// \param[in] context The call context.
60
+ /// \param[in] outgoing The writer for messages to the client.
61
+ /// \param[in] incoming The reader for messages from the client.
62
+ /// \return Status OK if this authentication is succeeded.
63
+ virtual Status Authenticate(const ServerCallContext& context,
64
+ ServerAuthSender* outgoing, ServerAuthReader* incoming) {
65
+ // TODO: We can make this pure virtual function when we remove
66
+ // the deprecated version.
67
+ ARROW_SUPPRESS_DEPRECATION_WARNING
68
+ return Authenticate(outgoing, incoming);
69
+ ARROW_UNSUPPRESS_DEPRECATION_WARNING
70
+ }
71
+ /// \brief Authenticate the client on initial connection. The server
72
+ /// can send and read responses from the client at any time.
73
+ /// \param[in] outgoing The writer for messages to the client.
74
+ /// \param[in] incoming The reader for messages from the client.
75
+ /// \return Status OK if this authentication is succeeded.
76
+ /// \deprecated Deprecated in 13.0.0. Implement the Authentication()
77
+ /// with ServerCallContext version instead.
78
+ ARROW_DEPRECATED("Deprecated in 13.0.0. Use ServerCallContext overload instead.")
79
+ virtual Status Authenticate(ServerAuthSender* outgoing, ServerAuthReader* incoming) {
80
+ return Status::NotImplemented(typeid(this).name(),
81
+ "::Authenticate() isn't implemented");
82
+ }
83
+ /// \brief Validate a per-call client token.
84
+ /// \param[in] context The call context.
85
+ /// \param[in] token The client token. May be the empty string if
86
+ /// the client does not provide a token.
87
+ /// \param[out] peer_identity The identity of the peer, if this
88
+ /// authentication method supports it.
89
+ /// \return Status OK if the token is valid, any other status if
90
+ /// validation failed
91
+ virtual Status IsValid(const ServerCallContext& context, const std::string& token,
92
+ std::string* peer_identity) {
93
+ // TODO: We can make this pure virtual function when we remove
94
+ // the deprecated version.
95
+ ARROW_SUPPRESS_DEPRECATION_WARNING
96
+ return IsValid(token, peer_identity);
97
+ ARROW_UNSUPPRESS_DEPRECATION_WARNING
98
+ }
99
+ /// \brief Validate a per-call client token.
100
+ /// \param[in] token The client token. May be the empty string if
101
+ /// the client does not provide a token.
102
+ /// \param[out] peer_identity The identity of the peer, if this
103
+ /// authentication method supports it.
104
+ /// \return Status OK if the token is valid, any other status if
105
+ /// validation failed
106
+ /// \deprecated Deprecated in 13.0.0. Implement the IsValid()
107
+ /// with ServerCallContext version instead.
108
+ ARROW_DEPRECATED("Deprecated in 13.0.0. Use ServerCallContext overload instead.")
109
+ virtual Status IsValid(const std::string& token, std::string* peer_identity) {
110
+ return Status::NotImplemented(typeid(this).name(), "::IsValid() isn't implemented");
111
+ }
112
+ };
113
+
114
+ /// \brief An authentication mechanism that does nothing.
115
+ class ARROW_FLIGHT_EXPORT NoOpAuthHandler : public ServerAuthHandler {
116
+ public:
117
+ ~NoOpAuthHandler() override;
118
+ Status Authenticate(const ServerCallContext& context, ServerAuthSender* outgoing,
119
+ ServerAuthReader* incoming) override;
120
+ Status IsValid(const ServerCallContext& context, const std::string& token,
121
+ std::string* peer_identity) override;
122
+ };
123
+
124
+ } // namespace flight
125
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport.h ADDED
@@ -0,0 +1,302 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ /// \file
19
+ /// Internal (but not private) interface for implementing
20
+ /// alternate network transports in Flight.
21
+ ///
22
+ /// \warning EXPERIMENTAL. Subject to change.
23
+ ///
24
+ /// To implement a transport, implement ServerTransport and
25
+ /// ClientTransport, and register the desired URI schemes with
26
+ /// TransportRegistry. Flight takes care of most of the per-RPC
27
+ /// details; transports only handle connections and providing a I/O
28
+ /// stream implementation (TransportDataStream).
29
+ ///
30
+ /// On the server side:
31
+ ///
32
+ /// 1. Applications subclass FlightServerBase and override RPC handlers.
33
+ /// 2. FlightServerBase::Init will look up and create a ServerTransport
34
+ /// based on the scheme of the Location given to it.
35
+ /// 3. The ServerTransport will start the actual server. (For instance,
36
+ /// for gRPC, it creates a gRPC server and registers a gRPC service.)
37
+ /// That server will handle connections.
38
+ /// 4. The transport should forward incoming calls to the server to the RPC
39
+ /// handlers defined on ServerTransport, which implements the actual
40
+ /// RPC handler using the interfaces here. Any I/O the RPC handler needs
41
+ /// to do is managed by transport-specific implementations of
42
+ /// TransportDataStream.
43
+ /// 5. ServerTransport calls FlightServerBase for the actual application
44
+ /// logic.
45
+ ///
46
+ /// On the client side:
47
+ ///
48
+ /// 1. Applications create a FlightClient with a Location.
49
+ /// 2. FlightClient will look up and create a ClientTransport based on
50
+ /// the scheme of the Location given to it.
51
+ /// 3. When calling a method on FlightClient, FlightClient will delegate to
52
+ /// the ClientTransport. There is some indirection, e.g. for DoGet,
53
+ /// FlightClient only requests that the ClientTransport start the
54
+ /// call and provide it with an I/O stream. The "Flight implementation"
55
+ /// itself still lives in FlightClient.
56
+
57
+ #pragma once
58
+
59
+ #include <functional>
60
+ #include <memory>
61
+ #include <optional>
62
+ #include <string>
63
+ #include <utility>
64
+ #include <vector>
65
+
66
+ #include "arrow/flight/type_fwd.h"
67
+ #include "arrow/flight/types.h"
68
+ #include "arrow/flight/visibility.h"
69
+ #include "arrow/ipc/options.h"
70
+ #include "arrow/type_fwd.h"
71
+
72
+ namespace arrow {
73
+ namespace ipc {
74
+ class Message;
75
+ }
76
+ namespace flight {
77
+ class FlightStatusDetail;
78
+ namespace internal {
79
+
80
+ /// Internal, not user-visible type used for memory-efficient reads
81
+ struct FlightData {
82
+ /// Used only for puts, may be null
83
+ std::unique_ptr<FlightDescriptor> descriptor;
84
+
85
+ /// Non-length-prefixed Message header as described in format/Message.fbs
86
+ std::shared_ptr<Buffer> metadata;
87
+
88
+ /// Application-defined metadata
89
+ std::shared_ptr<Buffer> app_metadata;
90
+
91
+ /// Message body
92
+ std::shared_ptr<Buffer> body;
93
+
94
+ /// Open IPC message from the metadata and body
95
+ ::arrow::Result<std::unique_ptr<ipc::Message>> OpenMessage();
96
+ };
97
+
98
+ /// \brief A transport-specific interface for reading/writing Arrow data.
99
+ ///
100
+ /// New transports will implement this to read/write IPC payloads to
101
+ /// the underlying stream.
102
+ class ARROW_FLIGHT_EXPORT TransportDataStream {
103
+ public:
104
+ virtual ~TransportDataStream() = default;
105
+ /// \brief Attempt to read the next FlightData message.
106
+ ///
107
+ /// \return success true if data was populated, false if there was
108
+ /// an error. For clients, the error can be retrieved from
109
+ /// Finish(Status).
110
+ virtual bool ReadData(FlightData* data);
111
+ /// \brief Attempt to write a FlightPayload.
112
+ ///
113
+ /// \param[in] payload The data to write.
114
+ /// \return true if the message was accepted by the transport, false
115
+ /// if not (e.g. due to client/server disconnect), Status if there
116
+ /// was an error (e.g. with the payload itself).
117
+ virtual arrow::Result<bool> WriteData(const FlightPayload& payload);
118
+ /// \brief Indicate that there are no more writes on this stream.
119
+ ///
120
+ /// This is only a hint for the underlying transport and may not
121
+ /// actually do anything.
122
+ virtual Status WritesDone();
123
+ };
124
+
125
+ /// \brief A transport-specific interface for reading/writing Arrow
126
+ /// data for a client.
127
+ class ARROW_FLIGHT_EXPORT ClientDataStream : public TransportDataStream {
128
+ public:
129
+ /// \brief Attempt to read a non-data message.
130
+ ///
131
+ /// Only implemented for DoPut; mutually exclusive with
132
+ /// ReadData(FlightData*).
133
+ virtual bool ReadPutMetadata(std::shared_ptr<Buffer>* out);
134
+ /// \brief Attempt to cancel the call.
135
+ ///
136
+ /// This is only a hint and may not take effect immediately. The
137
+ /// client should still finish the call with Finish(Status) as usual.
138
+ virtual void TryCancel() {}
139
+ /// \brief Finish the call, reporting the server-sent status and/or
140
+ /// any client-side errors as appropriate.
141
+ ///
142
+ /// Implies WritesDone() and DoFinish().
143
+ ///
144
+ /// \param[in] st A client-side status to combine with the
145
+ /// server-side error. That is, if an error occurs on the
146
+ /// client-side, call Finish(Status) to finish the server-side
147
+ /// call, get the server-side status, and merge the statuses
148
+ /// together so context is not lost.
149
+ Status Finish(Status st);
150
+
151
+ protected:
152
+ /// \brief End the call, returning the final server status.
153
+ ///
154
+ /// For implementors: should imply WritesDone() (even if it does not
155
+ /// directly call it).
156
+ ///
157
+ /// Implies WritesDone().
158
+ virtual Status DoFinish() = 0;
159
+ };
160
+
161
+ /// An implementation of a Flight client for a particular transport.
162
+ ///
163
+ /// Transports should override the methods they are capable of
164
+ /// supporting. The default method implementations return an error.
165
+ class ARROW_FLIGHT_EXPORT ClientTransport {
166
+ public:
167
+ virtual ~ClientTransport() = default;
168
+
169
+ /// Initialize the client.
170
+ virtual Status Init(const FlightClientOptions& options, const Location& location,
171
+ const arrow::util::Uri& uri) = 0;
172
+ /// Close the client. Once this returns, the client is no longer usable.
173
+ virtual Status Close() = 0;
174
+
175
+ virtual Status Authenticate(const FlightCallOptions& options,
176
+ std::unique_ptr<ClientAuthHandler> auth_handler);
177
+ virtual arrow::Result<std::pair<std::string, std::string>> AuthenticateBasicToken(
178
+ const FlightCallOptions& options, const std::string& username,
179
+ const std::string& password);
180
+ virtual Status DoAction(const FlightCallOptions& options, const Action& action,
181
+ std::unique_ptr<ResultStream>* results);
182
+ virtual Status ListActions(const FlightCallOptions& options,
183
+ std::vector<ActionType>* actions);
184
+ virtual Status GetFlightInfo(const FlightCallOptions& options,
185
+ const FlightDescriptor& descriptor,
186
+ std::unique_ptr<FlightInfo>* info);
187
+ virtual void GetFlightInfoAsync(const FlightCallOptions& options,
188
+ const FlightDescriptor& descriptor,
189
+ std::shared_ptr<AsyncListener<FlightInfo>> listener);
190
+ virtual Status PollFlightInfo(const FlightCallOptions& options,
191
+ const FlightDescriptor& descriptor,
192
+ std::unique_ptr<PollInfo>* info);
193
+ virtual arrow::Result<std::unique_ptr<SchemaResult>> GetSchema(
194
+ const FlightCallOptions& options, const FlightDescriptor& descriptor);
195
+ virtual Status ListFlights(const FlightCallOptions& options, const Criteria& criteria,
196
+ std::unique_ptr<FlightListing>* listing);
197
+ virtual Status DoGet(const FlightCallOptions& options, const Ticket& ticket,
198
+ std::unique_ptr<ClientDataStream>* stream);
199
+ virtual Status DoPut(const FlightCallOptions& options,
200
+ std::unique_ptr<ClientDataStream>* stream);
201
+ virtual Status DoExchange(const FlightCallOptions& options,
202
+ std::unique_ptr<ClientDataStream>* stream);
203
+
204
+ bool supports_async() const { return CheckAsyncSupport().ok(); }
205
+ virtual Status CheckAsyncSupport() const {
206
+ return Status::NotImplemented(
207
+ "this Flight transport does not support async operations");
208
+ }
209
+
210
+ static void SetAsyncRpc(AsyncListenerBase* listener, std::unique_ptr<AsyncRpc>&& rpc);
211
+ static AsyncRpc* GetAsyncRpc(AsyncListenerBase* listener);
212
+ static std::unique_ptr<AsyncRpc> ReleaseAsyncRpc(AsyncListenerBase* listener);
213
+ };
214
+
215
+ /// A registry of transport implementations.
216
+ class ARROW_FLIGHT_EXPORT TransportRegistry {
217
+ public:
218
+ using ClientFactory = std::function<arrow::Result<std::unique_ptr<ClientTransport>>()>;
219
+ using ServerFactory = std::function<arrow::Result<std::unique_ptr<ServerTransport>>(
220
+ FlightServerBase*, std::shared_ptr<MemoryManager> memory_manager)>;
221
+
222
+ TransportRegistry();
223
+ ~TransportRegistry();
224
+
225
+ arrow::Result<std::unique_ptr<ClientTransport>> MakeClient(
226
+ const std::string& scheme) const;
227
+ arrow::Result<std::unique_ptr<ServerTransport>> MakeServer(
228
+ const std::string& scheme, FlightServerBase* base,
229
+ std::shared_ptr<MemoryManager> memory_manager) const;
230
+
231
+ Status RegisterClient(const std::string& scheme, ClientFactory factory);
232
+ Status RegisterServer(const std::string& scheme, ServerFactory factory);
233
+
234
+ private:
235
+ class Impl;
236
+ std::unique_ptr<Impl> impl_;
237
+ };
238
+
239
+ /// \brief Get the registry of transport implementations.
240
+ ARROW_FLIGHT_EXPORT
241
+ TransportRegistry* GetDefaultTransportRegistry();
242
+
243
+ //------------------------------------------------------------
244
+ // Async APIs
245
+
246
+ /// \brief Transport-specific state for an async RPC.
247
+ ///
248
+ /// Transport implementations may subclass this to store their own
249
+ /// state, and stash an instance in a user-supplied AsyncListener via
250
+ /// ClientTransport::GetAsyncRpc and ClientTransport::SetAsyncRpc.
251
+ ///
252
+ /// This API is EXPERIMENTAL.
253
+ class ARROW_FLIGHT_EXPORT AsyncRpc {
254
+ public:
255
+ virtual ~AsyncRpc() = default;
256
+ /// \brief Request cancellation of the RPC.
257
+ virtual void TryCancel() {}
258
+
259
+ /// Only needed for DoPut/DoExchange
260
+ virtual void Begin(const FlightDescriptor& descriptor, std::shared_ptr<Schema> schema) {
261
+ }
262
+ /// Only needed for DoPut/DoExchange
263
+ virtual void Write(arrow::flight::FlightStreamChunk chunk) {}
264
+ /// Only needed for DoPut/DoExchange
265
+ virtual void DoneWriting() {}
266
+ };
267
+
268
+ //------------------------------------------------------------
269
+ // Error propagation helpers
270
+
271
+ /// \brief Abstract error status.
272
+ ///
273
+ /// Transport implementations may use side channels (e.g. HTTP
274
+ /// trailers) to convey additional information to reconstruct the
275
+ /// original C++ status for implementations that can use it.
276
+ struct ARROW_FLIGHT_EXPORT TransportStatus {
277
+ TransportStatusCode code;
278
+ std::string message;
279
+
280
+ /// \brief Convert a C++ status to an abstract transport status.
281
+ static TransportStatus FromStatus(const Status& arrow_status);
282
+
283
+ /// \brief Reconstruct a string-encoded TransportStatus.
284
+ static TransportStatus FromCodeStringAndMessage(const std::string& code_str,
285
+ std::string message);
286
+
287
+ /// \brief Convert an abstract transport status to a C++ status.
288
+ Status ToStatus() const;
289
+ };
290
+
291
+ /// \brief Convert the string representation of an Arrow status code
292
+ /// back to an Arrow status.
293
+ ARROW_FLIGHT_EXPORT
294
+ Status ReconstructStatus(const std::string& code_str, const Status& current_status,
295
+ std::optional<std::string> message,
296
+ std::optional<std::string> detail_message,
297
+ std::optional<std::string> detail_bin,
298
+ std::shared_ptr<FlightStatusDetail> detail);
299
+
300
+ } // namespace internal
301
+ } // namespace flight
302
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport_server.h ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <chrono>
21
+ #include <memory>
22
+
23
+ #include "arrow/flight/transport.h"
24
+ #include "arrow/flight/type_fwd.h"
25
+ #include "arrow/flight/visibility.h"
26
+ #include "arrow/type_fwd.h"
27
+
28
+ namespace arrow {
29
+ namespace ipc {
30
+ class Message;
31
+ }
32
+ namespace flight {
33
+ namespace internal {
34
+
35
+ /// \brief A transport-specific interface for reading/writing Arrow
36
+ /// data for a server.
37
+ class ARROW_FLIGHT_EXPORT ServerDataStream : public TransportDataStream {
38
+ public:
39
+ /// \brief Attempt to write a non-data message.
40
+ ///
41
+ /// Only implemented for DoPut; mutually exclusive with
42
+ /// WriteData(const FlightPayload&).
43
+ virtual Status WritePutMetadata(const Buffer& payload);
44
+ };
45
+
46
+ /// \brief An implementation of a Flight server for a particular
47
+ /// transport.
48
+ ///
49
+ /// This class (the transport implementation) implements the underlying
50
+ /// server and handles connections/incoming RPC calls. It should forward RPC
51
+ /// calls to the RPC handlers defined on this class, which work in terms of
52
+ /// the generic interfaces above. The RPC handlers here then forward calls
53
+ /// to the underlying FlightServerBase instance that contains the actual
54
+ /// application RPC method handlers.
55
+ ///
56
+ /// Used by FlightServerBase to manage the server lifecycle.
57
+ class ARROW_FLIGHT_EXPORT ServerTransport {
58
+ public:
59
+ ServerTransport(FlightServerBase* base, std::shared_ptr<MemoryManager> memory_manager)
60
+ : base_(base), memory_manager_(std::move(memory_manager)) {}
61
+ virtual ~ServerTransport() = default;
62
+
63
+ /// \name Server Lifecycle Methods
64
+ /// Transports implement these methods to start/shutdown the underlying
65
+ /// server.
66
+ /// @{
67
+ /// \brief Initialize the server.
68
+ ///
69
+ /// This method should launch the server in a background thread, i.e. it
70
+ /// should not block. Once this returns, the server should be active.
71
+ virtual Status Init(const FlightServerOptions& options,
72
+ const arrow::util::Uri& uri) = 0;
73
+ /// \brief Shutdown the server.
74
+ ///
75
+ /// This should wait for active RPCs to finish. Once this returns, the
76
+ /// server is no longer listening.
77
+ virtual Status Shutdown() = 0;
78
+ /// \brief Shutdown the server with a deadline.
79
+ ///
80
+ /// This should wait for active RPCs to finish, or for the deadline to
81
+ /// expire. Once this returns, the server is no longer listening.
82
+ virtual Status Shutdown(const std::chrono::system_clock::time_point& deadline) = 0;
83
+ /// \brief Wait for the server to shutdown (but do not shut down the server).
84
+ ///
85
+ /// Once this returns, the server is no longer listening.
86
+ virtual Status Wait() = 0;
87
+ /// \brief Get the address the server is listening on, else an empty Location.
88
+ virtual Location location() const = 0;
89
+ ///@}
90
+
91
+ /// \name RPC Handlers
92
+ /// Implementations of RPC handlers for Flight methods using the common
93
+ /// interfaces here. Transports should call these methods from their
94
+ /// server implementation to handle the actual RPC calls.
95
+ ///@{
96
+ /// \brief Get the FlightServerBase.
97
+ ///
98
+ /// Intended as an escape hatch for now since not all methods have been
99
+ /// factored into a transport-agnostic interface.
100
+ FlightServerBase* base() const { return base_; }
101
+ /// \brief Implement DoGet in terms of a transport-level stream.
102
+ ///
103
+ /// \param[in] context The server context.
104
+ /// \param[in] request The request payload.
105
+ /// \param[in] stream The transport-specific data stream
106
+ /// implementation. Must implement WriteData(const
107
+ /// FlightPayload&).
108
+ Status DoGet(const ServerCallContext& context, const Ticket& request,
109
+ ServerDataStream* stream);
110
+ /// \brief Implement DoPut in terms of a transport-level stream.
111
+ ///
112
+ /// \param[in] context The server context.
113
+ /// \param[in] stream The transport-specific data stream
114
+ /// implementation. Must implement ReadData(FlightData*)
115
+ /// and WritePutMetadata(const Buffer&).
116
+ Status DoPut(const ServerCallContext& context, ServerDataStream* stream);
117
+ /// \brief Implement DoExchange in terms of a transport-level stream.
118
+ ///
119
+ /// \param[in] context The server context.
120
+ /// \param[in] stream The transport-specific data stream
121
+ /// implementation. Must implement ReadData(FlightData*)
122
+ /// and WriteData(const FlightPayload&).
123
+ Status DoExchange(const ServerCallContext& context, ServerDataStream* stream);
124
+ ///@}
125
+
126
+ protected:
127
+ FlightServerBase* base_;
128
+ std::shared_ptr<MemoryManager> memory_manager_;
129
+ };
130
+
131
+ } // namespace internal
132
+ } // namespace flight
133
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/type_fwd.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ namespace arrow {
21
+ namespace internal {
22
+ class Uri;
23
+ }
24
+ namespace flight {
25
+ struct Action;
26
+ struct ActionType;
27
+ template <typename T>
28
+ class AsyncListener;
29
+ class AsyncListenerBase;
30
+ class AsyncRpc;
31
+ struct BasicAuth;
32
+ class ClientAuthHandler;
33
+ class ClientMiddleware;
34
+ class ClientMiddlewareFactory;
35
+ struct Criteria;
36
+ class FlightCallOptions;
37
+ struct FlightClientOptions;
38
+ struct FlightDescriptor;
39
+ struct FlightEndpoint;
40
+ class FlightInfo;
41
+ class PollInfo;
42
+ class FlightListing;
43
+ class FlightMetadataReader;
44
+ class FlightMetadataWriter;
45
+ struct FlightPayload;
46
+ class FlightServerBase;
47
+ class FlightServerOptions;
48
+ class FlightStreamReader;
49
+ class FlightStreamWriter;
50
+ struct Location;
51
+ struct Result;
52
+ class ResultStream;
53
+ struct SchemaResult;
54
+ class ServerCallContext;
55
+ class ServerMiddleware;
56
+ class ServerMiddlewareFactory;
57
+ struct Ticket;
58
+ namespace internal {
59
+ class AsyncRpc;
60
+ class ClientTransport;
61
+ struct FlightData;
62
+ class ServerTransport;
63
+ } // namespace internal
64
+ } // namespace flight
65
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types_async.h ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+
22
+ #include "arrow/flight/type_fwd.h"
23
+ #include "arrow/flight/types.h"
24
+ #include "arrow/ipc/options.h"
25
+ #include "arrow/type_fwd.h"
26
+
27
+ namespace arrow::flight {
28
+
29
+ /// \defgroup flight-async Async Flight Types
30
+ /// Common types used for asynchronous Flight APIs.
31
+ /// @{
32
+
33
+ /// \brief Non-templated state for an async RPC.
34
+ ///
35
+ /// This API is EXPERIMENTAL.
36
+ class ARROW_FLIGHT_EXPORT AsyncListenerBase {
37
+ public:
38
+ AsyncListenerBase();
39
+ virtual ~AsyncListenerBase();
40
+
41
+ /// \brief Request cancellation of the RPC.
42
+ ///
43
+ /// The RPC is not cancelled until AsyncListener::OnFinish is called.
44
+ void TryCancel();
45
+
46
+ private:
47
+ friend class arrow::flight::internal::ClientTransport;
48
+
49
+ /// Transport-specific state for this RPC. Transport
50
+ /// implementations may store and retrieve state here via
51
+ /// ClientTransport::SetAsyncRpc and ClientTransport::GetAsyncRpc.
52
+ std::unique_ptr<internal::AsyncRpc> rpc_state_;
53
+ };
54
+
55
+ /// \brief Callbacks for results from async RPCs.
56
+ ///
57
+ /// A single listener may not be used for multiple concurrent RPC
58
+ /// calls. The application MUST hold the listener alive until
59
+ /// OnFinish() is called and has finished.
60
+ ///
61
+ /// This API is EXPERIMENTAL.
62
+ template <typename T>
63
+ class ARROW_FLIGHT_EXPORT AsyncListener : public AsyncListenerBase {
64
+ public:
65
+ /// \brief Get the next server result.
66
+ ///
67
+ /// This will never be called concurrently with itself or OnFinish.
68
+ virtual void OnNext(T message) = 0;
69
+ /// \brief Get the final status.
70
+ ///
71
+ /// This will never be called concurrently with itself or OnNext. If the
72
+ /// error comes from the remote server, then a TransportStatusDetail will be
73
+ /// attached. Otherwise, the error is generated by the client-side
74
+ /// transport and will not have a TransportStatusDetail.
75
+ virtual void OnFinish(Status status) = 0;
76
+ };
77
+
78
+ /// @}
79
+
80
+ } // namespace arrow::flight
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/result.h"
21
+
22
+ namespace arrow {
23
+
24
+ template <typename InputIterator, typename OutputIterator, typename UnaryOperation>
25
+ Status MaybeTransform(InputIterator first, InputIterator last, OutputIterator out,
26
+ UnaryOperation unary_op) {
27
+ for (; first != last; ++first, (void)++out) {
28
+ ARROW_ASSIGN_OR_RAISE(*out, unary_op(*first));
29
+ }
30
+ return Status::OK();
31
+ }
32
+
33
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+
22
+ #include "arrow/memory_pool.h"
23
+ #include "arrow/type_fwd.h"
24
+ #include "arrow/util/bit_util.h"
25
+
26
+ namespace arrow {
27
+ namespace internal {
28
+
29
// Describes how BitmapWordAlign() splits a bitmap range into a bit-by-bit
// leading prefix, a word-aligned middle, and a bit-by-bit trailing suffix.
struct BitmapWordAlignParams {
  int64_t leading_bits;         // number of bits before the word-aligned area
  int64_t trailing_bits;        // number of bits after the word-aligned area
  int64_t trailing_bit_offset;  // bit offset (from `data`) of the trailing area
  const uint8_t* aligned_start;  // first byte of the word-aligned area
  int64_t aligned_bits;         // aligned_words * ALIGN_IN_BITS
  int64_t aligned_words;        // number of whole aligned words
};

// Compute parameters for accessing a bitmap using aligned word instructions.
// The returned parameters describe:
// - a leading area of size `leading_bits` before the aligned words
// - a word-aligned area of size `aligned_bits`
// - a trailing area of size `trailing_bits` after the aligned words
template <uint64_t ALIGN_IN_BYTES>
inline BitmapWordAlignParams BitmapWordAlign(const uint8_t* data, int64_t bit_offset,
                                             int64_t length) {
  static_assert(bit_util::IsPowerOf2(ALIGN_IN_BYTES),
                "ALIGN_IN_BYTES should be a positive power of two");
  constexpr uint64_t ALIGN_IN_BITS = ALIGN_IN_BYTES * 8;

  BitmapWordAlignParams p;

  // Compute a "bit address" that we can align up to ALIGN_IN_BITS.
  // We don't care about losing the upper bits since we are only interested in the
  // difference between both addresses.
  const uint64_t bit_addr =
      reinterpret_cast<size_t>(data) * 8 + static_cast<uint64_t>(bit_offset);
  const uint64_t aligned_bit_addr = bit_util::RoundUpToPowerOf2(bit_addr, ALIGN_IN_BITS);

  // The leading area is whatever lies before the first aligned word, clamped
  // to `length` (the whole range may be shorter than one alignment step).
  p.leading_bits = std::min<int64_t>(length, aligned_bit_addr - bit_addr);
  // Whole aligned words fitting in what remains after the leading bits.
  p.aligned_words = (length - p.leading_bits) / ALIGN_IN_BITS;
  p.aligned_bits = p.aligned_words * ALIGN_IN_BITS;
  // Whatever is left over must be processed bit by bit at the tail.
  p.trailing_bits = length - p.leading_bits - p.aligned_bits;
  p.trailing_bit_offset = bit_offset + p.leading_bits + p.aligned_bits;

  // By construction (bit_offset + leading_bits) is a multiple of 8, so this
  // points exactly at the first aligned byte.
  p.aligned_start = data + (bit_offset + p.leading_bits) / 8;
  return p;
}
68
+ } // namespace internal
69
+
70
namespace util {

// Functions to check if the provided Arrow object is aligned by the specified alignment

/// \brief Special alignment value to use data type-specific alignment
///
/// If this is passed as the `alignment` in one of the CheckAlignment or EnsureAlignment
/// functions, then the function will ensure each buffer is suitably aligned
/// for the data type of the array. For example, given an int32 buffer the values
/// buffer's address must be a multiple of 4. Given a large_string buffer the offsets
/// buffer's address must be a multiple of 8.
constexpr int64_t kValueAlignment = -3;

/// \brief Calculate if the buffer's address is a multiple of `alignment`
///
/// If `alignment` is less than or equal to 0 then this method will always return true
/// \param buffer the buffer to check
/// \param alignment the alignment (in bytes) to check for
ARROW_EXPORT bool CheckAlignment(const Buffer& buffer, int64_t alignment);
/// \brief Calculate if all buffers in the array data are aligned
///
/// This will also check the buffers in the dictionary and any children
/// \param array the array data to check
/// \param alignment the alignment (in bytes) to check for
ARROW_EXPORT bool CheckAlignment(const ArrayData& array, int64_t alignment);
/// \brief Calculate if all buffers in the array are aligned
///
/// This will also check the buffers in the dictionary and any children
/// \param array the array to check
/// \param alignment the alignment (in bytes) to check for
ARROW_EXPORT bool CheckAlignment(const Array& array, int64_t alignment);

// Following functions require an additional boolean vector which stores the
// alignment check bits of the constituent objects.
// For example, needs_alignment vector for a ChunkedArray will contain the
// check bits of the constituent Arrays.
// The boolean vector check was introduced to minimize the repetitive checks
// of the constituent objects during the EnsureAlignment function where certain
// objects can be ignored for further checking if we already know that they are
// completely aligned.

/// \brief Calculate which (if any) chunks in a chunked array are unaligned
/// \param array the array to check
/// \param alignment the alignment (in bytes) to check for
/// \param needs_alignment an output vector that will store the results of the check
///        it must be set to a valid vector. Extra elements will be added to the end
///        of the vector for each chunk that is checked. `true` will be stored if
///        the chunk is unaligned.
/// \param offset the index of the chunk to start checking
/// \return true if all chunks (starting at `offset`) are aligned, false otherwise
ARROW_EXPORT bool CheckAlignment(const ChunkedArray& array, int64_t alignment,
                                 std::vector<bool>* needs_alignment, int offset = 0);

/// \brief calculate which (if any) columns in a record batch are unaligned
/// \param batch the batch to check
/// \param alignment the alignment (in bytes) to check for
/// \param needs_alignment an output vector that will store the results of the
///        check. It must be set to a valid vector. Extra elements will be added
///        to the end of the vector for each column that is checked. `true` will be
///        stored if the column is unaligned.
/// \return true if all columns are aligned, false otherwise
ARROW_EXPORT bool CheckAlignment(const RecordBatch& batch, int64_t alignment,
                                 std::vector<bool>* needs_alignment);

/// \brief calculate which (if any) columns in a table are unaligned
/// \param table the table to check
/// \param alignment the alignment (in bytes) to check for
/// \param needs_alignment an output vector that will store the results of the
///        check. It must be set to a valid vector. Extra elements will be added
///        to the end of the vector for each column that is checked. `true` will be
///        stored if the column is unaligned.
/// \return true if all columns are aligned, false otherwise
ARROW_EXPORT bool CheckAlignment(const Table& table, int64_t alignment,
                                 std::vector<bool>* needs_alignment);

/// \brief return a buffer that has the given alignment and the same data as the input
/// buffer
///
/// If the input buffer is already aligned then this method will return the input buffer
/// If the input buffer is not already aligned then this method will allocate a new
/// buffer. The alignment of the new buffer will have at least
/// max(kDefaultBufferAlignment, alignment) bytes of alignment.
///
/// \param buffer the buffer to check
/// \param alignment the alignment (in bytes) to check for
/// \param memory_pool a memory pool that will be used to allocate a new buffer if the
///        input buffer is not sufficiently aligned
ARROW_EXPORT Result<std::shared_ptr<Buffer>> EnsureAlignment(
    std::shared_ptr<Buffer> buffer, int64_t alignment, MemoryPool* memory_pool);

/// \brief return an array data where all buffers are aligned by the given alignment
///
/// If any input buffer is already aligned then this method will reuse that same input
/// buffer.
///
/// \param array_data the array data to check
/// \param alignment the alignment (in bytes) to check for
/// \param memory_pool a memory pool that will be used to allocate new buffers if any
///        input buffer is not sufficiently aligned
ARROW_EXPORT Result<std::shared_ptr<ArrayData>> EnsureAlignment(
    std::shared_ptr<ArrayData> array_data, int64_t alignment, MemoryPool* memory_pool);

/// \brief return an array where all buffers are aligned by the given alignment
///
/// If any input buffer is already aligned then this method will reuse that same input
/// buffer.
///
/// \param array the array to check
/// \param alignment the alignment (in bytes) to check for
/// \param memory_pool a memory pool that will be used to allocate new buffers if any
///        input buffer is not sufficiently aligned
ARROW_EXPORT Result<std::shared_ptr<Array>> EnsureAlignment(std::shared_ptr<Array> array,
                                                            int64_t alignment,
                                                            MemoryPool* memory_pool);

/// \brief return a chunked array where all buffers are aligned by the given alignment
///
/// If any input buffer is already aligned then this method will reuse that same input
/// buffer.
///
/// \param array the chunked array to check
/// \param alignment the alignment (in bytes) to check for
/// \param memory_pool a memory pool that will be used to allocate new buffers if any
///        input buffer is not sufficiently aligned
ARROW_EXPORT Result<std::shared_ptr<ChunkedArray>> EnsureAlignment(
    std::shared_ptr<ChunkedArray> array, int64_t alignment, MemoryPool* memory_pool);

/// \brief return a record batch where all buffers are aligned by the given alignment
///
/// If any input buffer is already aligned then this method will reuse that same input
/// buffer.
///
/// \param batch the batch to check
/// \param alignment the alignment (in bytes) to check for
/// \param memory_pool a memory pool that will be used to allocate new buffers if any
///        input buffer is not sufficiently aligned
ARROW_EXPORT Result<std::shared_ptr<RecordBatch>> EnsureAlignment(
    std::shared_ptr<RecordBatch> batch, int64_t alignment, MemoryPool* memory_pool);

/// \brief return a table where all buffers are aligned by the given alignment
///
/// If any input buffer is already aligned then this method will reuse that same input
/// buffer.
///
/// \param table the table to check
/// \param alignment the alignment (in bytes) to check for
/// \param memory_pool a memory pool that will be used to allocate new buffers if any
///        input buffer is not sufficiently aligned
ARROW_EXPORT Result<std::shared_ptr<Table>> EnsureAlignment(std::shared_ptr<Table> table,
                                                            int64_t alignment,
                                                            MemoryPool* memory_pool);

}  // namespace util
221
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+
22
+ #include "arrow/type_fwd.h"
23
+
24
namespace arrow {

/// An AsyncGenerator<T> is a callable which, each time it is invoked, returns
/// a Future resolving to the next item of an asynchronous sequence.
template <typename T>
using AsyncGenerator = std::function<Future<T>()>;

// Forward declarations of the async generator implementation classes, so that
// headers can mention these types without pulling in their full definitions.

template <typename T, typename V>
class MappingGenerator;

template <typename T, typename ComesAfter, typename IsNext>
class SequencingGenerator;

template <typename T, typename V>
class TransformingGenerator;

template <typename T>
class SerialReadaheadGenerator;

template <typename T>
class ReadaheadGenerator;

template <typename T>
class PushGenerator;

template <typename T>
class MergedGenerator;

template <typename T>
struct Enumerated;

template <typename T>
class EnumeratingGenerator;

template <typename T>
class TransferringGenerator;

template <typename T>
class BackgroundGenerator;

template <typename T>
class GeneratorIterator;

template <typename T>
struct CancellableGenerator;

template <typename T>
class DefaultIfEmptyGenerator;

}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_util.h ADDED
@@ -0,0 +1,460 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <functional>
22
+ #include <list>
23
+ #include <memory>
24
+
25
+ #include "arrow/result.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/util/cancel.h"
28
+ #include "arrow/util/functional.h"
29
+ #include "arrow/util/future.h"
30
+ #include "arrow/util/iterator.h"
31
+ #include "arrow/util/mutex.h"
32
+ #include "arrow/util/thread_pool.h"
33
+ #include "arrow/util/tracing.h"
34
+
35
+ namespace arrow {
36
+
37
+ using internal::FnOnce;
38
+
39
+ namespace util {
40
+
41
+ /// A utility which keeps tracks of, and schedules, asynchronous tasks
42
+ ///
43
+ /// An asynchronous task has a synchronous component and an asynchronous component.
44
+ /// The synchronous component typically schedules some kind of work on an external
45
+ /// resource (e.g. the I/O thread pool or some kind of kernel-based asynchronous
46
+ /// resource like io_uring). The asynchronous part represents the work
47
+ /// done on that external resource. Executing the synchronous part will be referred
48
+ /// to as "submitting the task" since this usually includes submitting the asynchronous
49
+ /// portion to the external thread pool.
50
+ ///
51
+ /// By default the scheduler will submit the task (execute the synchronous part) as
52
+ /// soon as it is added, assuming the underlying thread pool hasn't terminated or the
53
+ /// scheduler hasn't aborted. In this mode, the scheduler is simply acting as
54
+ /// a simple task group.
55
+ ///
56
+ /// A task scheduler starts with an initial task. That task, and all subsequent tasks
57
+ /// are free to add subtasks. Once all submitted tasks finish the scheduler will
58
+ /// finish. Note, it is not an error to add additional tasks after a scheduler has
59
+ /// aborted. These tasks will be ignored and never submitted. The scheduler returns a
60
+ /// future which will complete when all submitted tasks have finished executing. Once all
61
+ /// tasks have been finished the scheduler is invalid and should no longer be used.
62
+ ///
63
+ /// Task failure (either the synchronous portion or the asynchronous portion) will cause
64
+ /// the scheduler to enter an aborted state. The first such failure will be reported in
65
+ /// the final task future.
66
class ARROW_EXPORT AsyncTaskScheduler {
 public:
  /// Destructor for AsyncTaskScheduler
  ///
  /// The lifetime of the task scheduler is managed automatically. The scheduler
  /// will remain valid while any tasks are running (and can always be safely
  /// accessed within tasks) and will be destroyed as soon as all tasks have finished.
  virtual ~AsyncTaskScheduler() = default;
  /// An interface for a task
  ///
  /// Users may want to override this, for example, to add priority
  /// information for use by a queue.
  class Task {
   public:
    virtual ~Task() = default;
    /// Submit the task
    ///
    /// This will be called by the scheduler at most once when there
    /// is space to run the task. This is expected to be a fairly quick
    /// function that simply submits the actual task work to an external
    /// resource (e.g. I/O thread pool).
    ///
    /// If this call fails then the scheduler will enter an aborted state.
    virtual Result<Future<>> operator()() = 0;
    /// The cost of the task
    ///
    /// A ThrottledAsyncTaskScheduler can be used to limit the number of concurrent tasks.
    /// A custom cost may be used, for example, if you would like to limit the number of
    /// tasks based on the total expected RAM usage of the tasks (this is done in the
    /// scanner)
    virtual int cost() const { return 1; }
    /// The name of the task
    ///
    /// This is used for debugging and traceability. The returned view must remain
    /// valid for the lifetime of the task.
    virtual std::string_view name() const = 0;

    /// a span tied to the lifetime of the task, for internal use only
    tracing::Span span;
  };

  /// Add a task to the scheduler
  ///
  /// If the scheduler is in an aborted state this call will return false and the task
  /// will never be run. This is harmless and does not need to be guarded against.
  ///
  /// The return value for this call can usually be ignored. There is little harm in
  /// attempting to add tasks to an aborted scheduler. It is only included for callers
  /// that want to avoid future task generation to save effort.
  ///
  /// \param task the task to submit
  ///
  /// A task's name must remain valid for the duration of the task. It is used for
  /// debugging (e.g. when debugging a deadlock to see which tasks still remain) and for
  /// traceability (the name will be used for spans assigned to the task)
  ///
  /// \return true if the task was submitted or queued, false if the task was ignored
  virtual bool AddTask(std::unique_ptr<Task> task) = 0;

  /// Adds an async generator to the scheduler
  ///
  /// The async generator will be visited, one item at a time. Submitting a task
  /// will consist of polling the generator for the next future. The generator's future
  /// will then represent the task itself.
  ///
  /// This visits the task serially without readahead. If readahead or parallelism
  /// is desired then it should be added in the generator itself.
  ///
  /// The generator itself will be kept alive until all tasks have been completed.
  /// However, if the scheduler is aborted, the generator will be destroyed as soon as the
  /// next item would be requested.
  ///
  /// \param generator the generator to submit to the scheduler
  /// \param visitor a function which visits each generator future as it completes
  /// \param name a name which will be used for each submitted task
  template <typename T>
  bool AddAsyncGenerator(std::function<Future<T>()> generator,
                         std::function<Status(const T&)> visitor, std::string_view name);

  /// A minimal Task implementation wrapping a callable.
  ///
  /// The name may either be borrowed (string_view constructor) or owned by the
  /// task itself (string constructor); owned_name_ keeps the owned copy alive
  /// so that name_ always refers to valid storage.
  template <typename Callable>
  struct SimpleTask : public Task {
    SimpleTask(Callable callable, std::string_view name)
        : callable(std::move(callable)), name_(name) {}
    SimpleTask(Callable callable, std::string name)
        : callable(std::move(callable)), owned_name_(std::move(name)) {
      name_ = *owned_name_;
    }
    Result<Future<>> operator()() override { return callable(); }
    std::string_view name() const override { return name_; }
    Callable callable;
    std::string_view name_;
    std::optional<std::string> owned_name_;
  };

  /// Add a task with cost 1 to the scheduler
  ///
  /// \param callable a "submit" function that should return a future
  /// \param name a name for the task
  ///
  /// `name` must remain valid until the task has been submitted AND the returned
  /// future completes. It is used for debugging and tracing.
  ///
  /// \see AddTask for more details
  template <typename Callable>
  bool AddSimpleTask(Callable callable, std::string_view name) {
    return AddTask(std::make_unique<SimpleTask<Callable>>(std::move(callable), name));
  }

  /// Add a task with cost 1 to the scheduler
  ///
  /// This is an overload of \see AddSimpleTask that keeps `name` alive
  /// in the task.
  template <typename Callable>
  bool AddSimpleTask(Callable callable, std::string name) {
    return AddTask(
        std::make_unique<SimpleTask<Callable>>(std::move(callable), std::move(name)));
  }

  /// Construct a scheduler
  ///
  /// \param initial_task The initial task which is responsible for adding
  ///        the first subtasks to the scheduler.
  /// \param abort_callback A callback that will be triggered immediately after a task
  ///        fails while other tasks may still be running. Nothing needs to be done here,
  ///        when a task fails the scheduler will stop accepting new tasks and eventually
  ///        return the error. However, this callback can be used to more quickly end
  ///        long running tasks that have already been submitted. Defaults to doing
  ///        nothing.
  /// \param stop_token An optional stop token that will allow cancellation of the
  ///        scheduler. This will be checked before each task is submitted and, in the
  ///        event of a cancellation, the scheduler will enter an aborted state. This is
  ///        a graceful cancellation and submitted tasks will still complete.
  /// \return A future that will be completed when the initial task and all subtasks have
  ///         finished.
  static Future<> Make(
      FnOnce<Status(AsyncTaskScheduler*)> initial_task,
      FnOnce<void(const Status&)> abort_callback = [](const Status&) {},
      StopToken stop_token = StopToken::Unstoppable());

  /// A span tracking execution of the scheduler's tasks, for internal use only
  virtual const tracing::Span& span() const = 0;
};
208
+
209
/// An AsyncTaskScheduler that limits how much task cost may run concurrently,
/// queueing tasks that cannot be submitted immediately (see Make below).
class ARROW_EXPORT ThrottledAsyncTaskScheduler : public AsyncTaskScheduler {
 public:
  /// An interface for a task queue
  ///
  /// A queue's methods will not be called concurrently
  class Queue {
   public:
    virtual ~Queue() = default;
    /// Push a task to the queue
    ///
    /// \param task the task to enqueue
    virtual void Push(std::unique_ptr<Task> task) = 0;
    /// Pop the next task from the queue
    virtual std::unique_ptr<Task> Pop() = 0;
    /// Peek the next task in the queue
    virtual const Task& Peek() = 0;
    /// Check if the queue is empty
    virtual bool Empty() = 0;
    /// Purge the queue of all items
    virtual void Purge() = 0;
    /// The number of tasks currently in the queue
    virtual std::size_t Size() const = 0;
  };

  /// An interface controlling how much cost may be in flight at once
  class Throttle {
   public:
    virtual ~Throttle() = default;
    /// Acquire amt permits
    ///
    /// If nullopt is returned then the permits were immediately
    /// acquired and the caller can proceed. If a future is returned then the caller
    /// should wait for the future to complete first. When the returned future completes
    /// the permits have NOT been acquired and the caller must call Acquire again
    ///
    /// \param amt the number of permits to acquire
    virtual std::optional<Future<>> TryAcquire(int amt) = 0;
    /// Release amt permits
    ///
    /// This will possibly complete waiting futures and should probably not be
    /// called while holding locks.
    ///
    /// \param amt the number of permits to release
    virtual void Release(int amt) = 0;

    /// The size of the largest task that can run
    ///
    /// Incoming tasks will have their cost latched to this value to ensure
    /// they can still run (although they will be the only thing allowed to
    /// run at that time).
    virtual int Capacity() = 0;

    /// Pause the throttle
    ///
    /// Any tasks that have been submitted already will continue. However, no new tasks
    /// will be run until the throttle is resumed.
    virtual void Pause() = 0;
    /// Resume the throttle
    ///
    /// Allows task to be submitted again. If there is a max_concurrent_cost limit then
    /// it will still apply.
    virtual void Resume() = 0;
  };

  /// Pause the scheduler's throttle
  ///
  /// Any tasks that have been submitted already will continue. However, no new tasks
  /// will be run until the throttle is resumed.
  virtual void Pause() = 0;
  /// Resume the scheduler's throttle
  ///
  /// Allows task to be submitted again. If there is a max_concurrent_cost limit then
  /// it will still apply.
  virtual void Resume() = 0;
  /// Return the number of tasks queued but not yet submitted
  virtual std::size_t QueueSize() = 0;

  /// Create a throttled view of a scheduler
  ///
  /// Tasks added via this view will be subjected to the throttle and, if the tasks cannot
  /// run immediately, will be placed into a queue.
  ///
  /// Although a shared_ptr is returned it should generally be assumed that the caller
  /// is being given exclusive ownership. The shared_ptr is used to share the view with
  /// queued and submitted tasks and the lifetime of those is unpredictable. It is
  /// important the caller keep the returned pointer alive for as long as they plan to add
  /// tasks to the view.
  ///
  /// \param scheduler a scheduler to submit tasks to after throttling
  ///
  /// This can be the root scheduler, another throttled scheduler, or a task group. These
  /// are all composable.
  ///
  /// \param max_concurrent_cost the maximum amount of cost allowed to run at any one time
  ///
  /// If a task is added that has a cost greater than max_concurrent_cost then its cost
  /// will be reduced to max_concurrent_cost so that it is still possible for the task to
  /// run.
  ///
  /// \param queue the queue to use when tasks cannot be submitted
  ///
  /// By default a FIFO queue will be used. However, a custom queue can be provided if
  /// some tasks have higher priority than other tasks.
  static std::shared_ptr<ThrottledAsyncTaskScheduler> Make(
      AsyncTaskScheduler* scheduler, int max_concurrent_cost,
      std::unique_ptr<Queue> queue = NULLPTR);

  /// @brief Create a ThrottledAsyncTaskScheduler using a custom throttle
  ///
  /// \see Make
  static std::shared_ptr<ThrottledAsyncTaskScheduler> MakeWithCustomThrottle(
      AsyncTaskScheduler* scheduler, std::unique_ptr<Throttle> throttle,
      std::unique_ptr<Queue> queue = NULLPTR);
};
321
+
322
+ /// A utility to keep track of a collection of tasks
323
+ ///
324
+ /// Often it is useful to keep track of some state that only needs to stay alive
325
+ /// for some small collection of tasks, or to perform some kind of final cleanup
326
+ /// when a collection of tasks is finished.
327
+ ///
328
+ /// For example, when scanning, we need to keep the file reader alive while all scan
329
+ /// tasks run for a given file, and then we can gracefully close it when we finish the
330
+ /// file.
331
class ARROW_EXPORT AsyncTaskGroup : public AsyncTaskScheduler {
 public:
  /// Destructor for the task group
  ///
  /// The destructor might trigger the finish callback. If the finish callback fails
  /// then the error will be reported as a task on the scheduler.
  ///
  /// Failure to destroy the async task group will not prevent the scheduler from
  /// finishing. If the scheduler finishes before the async task group is done then
  /// the finish callback will be run immediately when the async task group finishes.
  ///
  /// If the scheduler has aborted then the finish callback will not run.
  ///
  /// Note: this destructor is implicitly virtual via the base class.
  ~AsyncTaskGroup() = default;
  /// Create an async task group
  ///
  /// The finish callback will not run until the task group is destroyed and all
  /// tasks are finished so you will generally want to reset / destroy the returned
  /// unique_ptr at some point.
  ///
  /// \param scheduler The underlying scheduler to submit tasks to
  /// \param finish_callback A callback that will be run only after the task group has
  ///                        been destroyed and all tasks added by the group have
  ///                        finished.
  ///
  /// Note: in error scenarios the finish callback may not run. However, it will still,
  /// of course, be destroyed.
  static std::unique_ptr<AsyncTaskGroup> Make(AsyncTaskScheduler* scheduler,
                                              FnOnce<Status()> finish_callback);
};
360
+
361
+ /// Create a task group that is also throttled
362
+ ///
363
+ /// This is a utility factory that creates a throttled view of a scheduler and then
364
+ /// wraps that throttled view with a task group that destroys the throttle when finished.
365
+ ///
366
+ /// \see ThrottledAsyncTaskScheduler
367
+ /// \see AsyncTaskGroup
368
+ /// \param target the underlying scheduler to submit tasks to
369
+ /// \param max_concurrent_cost the maximum amount of cost allowed to run at any one time
370
+ /// \param queue the queue to use when tasks cannot be submitted
371
+ /// \param finish_callback A callback that will be run only after the task group has
372
+ /// been destroyed and all tasks added by the group have finished
373
+ ARROW_EXPORT std::unique_ptr<ThrottledAsyncTaskScheduler> MakeThrottledAsyncTaskGroup(
374
+ AsyncTaskScheduler* target, int max_concurrent_cost,
375
+ std::unique_ptr<ThrottledAsyncTaskScheduler::Queue> queue,
376
+ FnOnce<Status()> finish_callback);
377
+
378
+ // Defined down here to avoid circular dependency between AsyncTaskScheduler and
379
+ // AsyncTaskGroup
380
// Defined down here to avoid circular dependency between AsyncTaskScheduler and
// AsyncTaskGroup
//
// Implementation strategy: one SubmitTask is scheduled per generator item. Each task
// pulls the next item, feeds it to `visitor`, and — if the stream has not ended —
// re-queues a fresh SubmitTask on a private AsyncTaskGroup. Items are therefore
// visited strictly one at a time, but each visit runs as a scheduler task.
template <typename T>
bool AsyncTaskScheduler::AddAsyncGenerator(std::function<Future<T>()> generator,
                                           std::function<Status(const T&)> visitor,
                                           std::string_view name) {
  // Shared mutable state threaded (by unique ownership) from task to task.
  struct State {
    State(std::function<Future<T>()> generator, std::function<Status(const T&)> visitor,
          std::unique_ptr<AsyncTaskGroup> task_group, std::string_view name)
        : generator(std::move(generator)),
          visitor(std::move(visitor)),
          task_group(std::move(task_group)),
          name(name) {}
    std::function<Future<T>()> generator;
    std::function<Status(const T&)> visitor;
    // Keeps the chain of SubmitTasks registered with the scheduler; destroyed when
    // the final task finishes, which releases the group.
    std::unique_ptr<AsyncTaskGroup> task_group;
    std::string_view name;
  };
  // A task that consumes generator items until it has to wait on an unfinished future.
  struct SubmitTask : public Task {
    explicit SubmitTask(std::unique_ptr<State> state_holder)
        : state_holder(std::move(state_holder)) {}

    // Continuation attached to an unfinished generator future; visits the item and,
    // if the stream continues, schedules the next SubmitTask.
    struct SubmitTaskCallback {
      SubmitTaskCallback(std::unique_ptr<State> state_holder, Future<> task_completion)
          : state_holder(std::move(state_holder)),
            task_completion(std::move(task_completion)) {}
      void operator()(const Result<T>& maybe_item) {
        if (!maybe_item.ok()) {
          // Propagate the generator's error as this task's result.
          task_completion.MarkFinished(maybe_item.status());
          return;
        }
        const auto& item = *maybe_item;
        if (IsIterationEnd(item)) {
          // Stream exhausted; nothing more to schedule.
          task_completion.MarkFinished();
          return;
        }
        Status visit_st = state_holder->visitor(item);
        if (!visit_st.ok()) {
          task_completion.MarkFinished(std::move(visit_st));
          return;
        }
        // Re-queue: ownership of State moves into the next SubmitTask, so AddTask
        // must run before we touch state_holder again (we don't).
        state_holder->task_group->AddTask(
            std::make_unique<SubmitTask>(std::move(state_holder)));
        task_completion.MarkFinished();
      }
      std::unique_ptr<State> state_holder;
      Future<> task_completion;
    };

    Result<Future<>> operator()() {
      Future<> task = Future<>::Make();
      // Consume as many items as we can (those that are already finished)
      // synchronously to avoid recursion / stack overflow.
      while (true) {
        Future<T> next = state_holder->generator();
        // TryAddCallback only succeeds if `next` is not yet finished; in that case
        // the callback continues the chain asynchronously and we return our future.
        if (next.TryAddCallback(
                [&] { return SubmitTaskCallback(std::move(state_holder), task); })) {
          return task;
        }
        // `next` already finished: handle its result inline.
        ARROW_ASSIGN_OR_RAISE(T item, next.result());
        if (IsIterationEnd(item)) {
          task.MarkFinished();
          return task;
        }
        ARROW_RETURN_NOT_OK(state_holder->visitor(item));
      }
    }

    std::string_view name() const { return state_holder->name; }

    std::unique_ptr<State> state_holder;
  };
  // Private group so the generator chain has its own lifetime within `this` scheduler.
  std::unique_ptr<AsyncTaskGroup> task_group =
      AsyncTaskGroup::Make(this, [] { return Status::OK(); });
  AsyncTaskGroup* task_group_view = task_group.get();
  std::unique_ptr<State> state_holder = std::make_unique<State>(
      std::move(generator), std::move(visitor), std::move(task_group), name);
  task_group_view->AddTask(std::make_unique<SubmitTask>(std::move(state_holder)));
  // Always reports success here; failures surface through the scheduler's tasks.
  return true;
}
458
+
459
+ } // namespace util
460
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+ #include <string_view>
22
+
23
+ #include "arrow/util/visibility.h"
24
+
25
+ namespace arrow {
26
+ namespace util {
27
+
28
+ ARROW_EXPORT
29
+ std::string base64_encode(std::string_view s);
30
+
31
+ ARROW_EXPORT
32
+ std::string base64_decode(std::string_view s);
33
+
34
+ } // namespace util
35
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string_view>
21
+ #include <utility>
22
+
23
+ #include "arrow/type.h"
24
+ #include "arrow/util/span.h"
25
+
26
+ namespace arrow::util {
27
+
28
inline BinaryViewType::c_type ToInlineBinaryView(const void* data, int32_t size) {
  // Small string: inlined. Bytes beyond size are zeroed
  // (the aggregate init below value-initializes the inline data array, so memcpy
  // only needs to fill the first `size` bytes).
  // NOTE(review): assumes size <= BinaryViewType::kInlineSize — ToBinaryView checks
  // this before delegating here; not re-validated. Confirm for any direct callers.
  BinaryViewType::c_type out;
  out.inlined = {size, {}};
  memcpy(&out.inlined.data, data, size);
  return out;
}
35
+
36
+ inline BinaryViewType::c_type ToInlineBinaryView(std::string_view v) {
37
+ return ToInlineBinaryView(v.data(), static_cast<int32_t>(v.size()));
38
+ }
39
+
40
inline BinaryViewType::c_type ToBinaryView(const void* data, int32_t size,
                                           int32_t buffer_index, int32_t offset) {
  // Values short enough to fit in the view itself are stored inline.
  if (size <= BinaryViewType::kInlineSize) {
    return ToInlineBinaryView(data, size);
  }

  // Large string: store index/offset.
  // Only the first kPrefixSize bytes are copied into the view; the full data stays
  // in the referenced buffer and is located via (buffer_index, offset).
  BinaryViewType::c_type out;
  out.ref = {size, {}, buffer_index, offset};
  memcpy(&out.ref.prefix, data, sizeof(out.ref.prefix));
  return out;
}
52
+
53
+ inline BinaryViewType::c_type ToBinaryView(std::string_view v, int32_t buffer_index,
54
+ int32_t offset) {
55
+ return ToBinaryView(v.data(), static_cast<int32_t>(v.size()), buffer_index, offset);
56
+ }
57
+
58
// Materialize a string_view over the bytes a binary view refers to.
// `data_buffers` is the array of data buffers the view's (buffer_index, offset)
// pair indexes into; it is only consulted for non-inline views.
template <typename BufferPtr>
std::string_view FromBinaryView(const BinaryViewType::c_type& v,
                                const BufferPtr* data_buffers) {
  // Inline views point at bytes stored in the view struct itself.
  auto* data = v.is_inline() ? v.inlined.data.data()
                             : data_buffers[v.ref.buffer_index]->data() + v.ref.offset;
  return {reinterpret_cast<const char*>(data), static_cast<size_t>(v.size())};
}
// Deleted for rvalue views: for an inline view the returned string_view would
// dangle, pointing into the destroyed temporary.
template <typename BufferPtr>
std::string_view FromBinaryView(BinaryViewType::c_type&&, const BufferPtr*) = delete;
67
+
68
// Compare two binary views for byte equality, resolving out-of-line data through
// the given buffer arrays.
template <typename BufferPtr>
bool EqualBinaryView(BinaryViewType::c_type l, BinaryViewType::c_type r,
                     const BufferPtr* l_buffers, const BufferPtr* r_buffers) {
  // In both layouts the leading 8 bytes hold the size followed by the first data
  // bytes (the prefix), so one 64-bit compare rejects most mismatches cheaply.
  int64_t l_size_and_prefix, r_size_and_prefix;
  memcpy(&l_size_and_prefix, &l, sizeof(l_size_and_prefix));
  memcpy(&r_size_and_prefix, &r, sizeof(r_size_and_prefix));

  if (l_size_and_prefix != r_size_and_prefix) return false;

  if (l.is_inline()) {
    // The columnar spec mandates that the inlined part be zero-padded, so we can compare
    // a word at a time regardless of the exact size.
    int64_t l_inlined, r_inlined;
    memcpy(&l_inlined, l.inline_data() + BinaryViewType::kPrefixSize, sizeof(l_inlined));
    memcpy(&r_inlined, r.inline_data() + BinaryViewType::kPrefixSize, sizeof(r_inlined));
    return l_inlined == r_inlined;
  }

  // Sizes are equal and this is not inline, therefore both are out
  // of line and have kPrefixSize first in common.
  const uint8_t* l_data = l_buffers[l.ref.buffer_index]->data() + l.ref.offset;
  const uint8_t* r_data = r_buffers[r.ref.buffer_index]->data() + r.ref.offset;
  return memcmp(l_data + BinaryViewType::kPrefixSize,
                r_data + BinaryViewType::kPrefixSize,
                l.size() - BinaryViewType::kPrefixSize) == 0;
}
94
+
95
+ } // namespace arrow::util
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h ADDED
@@ -0,0 +1,515 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cassert>
21
+ #include <cstdint>
22
+ #include <cstring>
23
+ #include <string>
24
+
25
+ #include "arrow/util/bit_util.h"
26
+ #include "arrow/util/bitmap_reader.h"
27
+ #include "arrow/util/endian.h"
28
+ #include "arrow/util/macros.h"
29
+ #include "arrow/util/visibility.h"
30
+
31
+ namespace arrow {
32
+ namespace internal {
33
+
34
/// The length and value of one run of identical bits in a bitmap.
struct BitRun {
  int64_t length;
  // Whether bits are set at this point.
  bool set;

  /// Render as "{Length: <n>, set=<0|1>}" for debugging output.
  std::string ToString() const {
    std::string repr = "{Length: ";
    repr += std::to_string(length);
    repr += ", set=";
    repr += std::to_string(set);
    repr += "}";
    return repr;
  }
};

inline bool operator==(const BitRun& lhs, const BitRun& rhs) {
  return (lhs.length == rhs.length) && (lhs.set == rhs.set);
}

inline bool operator!=(const BitRun& lhs, const BitRun& rhs) { return !(lhs == rhs); }
52
+
53
+ class BitRunReaderLinear {
54
+ public:
55
+ BitRunReaderLinear(const uint8_t* bitmap, int64_t start_offset, int64_t length)
56
+ : reader_(bitmap, start_offset, length) {}
57
+
58
+ BitRun NextRun() {
59
+ BitRun rl = {/*length=*/0, reader_.IsSet()};
60
+ // Advance while the values are equal and not at the end of list.
61
+ while (reader_.position() < reader_.length() && reader_.IsSet() == rl.set) {
62
+ rl.length++;
63
+ reader_.Next();
64
+ }
65
+ return rl;
66
+ }
67
+
68
+ private:
69
+ BitmapReader reader_;
70
+ };
71
+
72
#if ARROW_LITTLE_ENDIAN
/// A convenience class for counting the number of contiguous set/unset bits
/// in a bitmap.
///
/// Word-at-a-time implementation; only compiled on little-endian targets where
/// a raw 64-bit load of the bitmap lines up with bit order.
class ARROW_EXPORT BitRunReader {
 public:
  /// \brief Constructs new BitRunReader.
  ///
  /// \param[in] bitmap source data
  /// \param[in] start_offset bit offset into the source data
  /// \param[in] length number of bits to copy
  BitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length);

  /// Returns a new BitRun containing the number of contiguous
  /// bits with the same value. length == 0 indicates the
  /// end of the bitmap.
  BitRun NextRun() {
    if (ARROW_PREDICT_FALSE(position_ >= length_)) {
      return {/*length=*/0, false};
    }
    // This implementation relies on an efficient implementation of
    // CountTrailingZeros and assumes that runs are more often than
    // not. The logic is to incrementally find the next bit change
    // from the current position. This is done by zeroing all
    // bits in word_ up to position_ and using the TrailingZeroCount
    // to find the index of the next set bit.

    // The runs alternate on each call, so flip the bit.
    current_run_bit_set_ = !current_run_bit_set_;

    int64_t start_position = position_;
    int64_t start_bit_offset = start_position & 63;
    // Invert the word for proper use of CountTrailingZeros and
    // clear bits so CountTrailingZeros can do its magic.
    word_ = ~word_ & ~bit_util::LeastSignificantBitMask(start_bit_offset);

    // Go forward until the next change from unset to set.
    int64_t new_bits = bit_util::CountTrailingZeros(word_) - start_bit_offset;
    position_ += new_bits;

    if (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
        ARROW_PREDICT_TRUE(position_ < length_)) {
      // Continue extending position while we can advance an entire word.
      // (updates position_ accordingly).
      AdvanceUntilChange();
    }

    return {/*length=*/position_ - start_position, current_run_bit_set_};
  }

 private:
  // Keep consuming whole words until a bit with the opposite value appears
  // (or the bitmap is exhausted).
  void AdvanceUntilChange() {
    int64_t new_bits = 0;
    do {
      // Advance the position of the bitmap for loading.
      bitmap_ += sizeof(uint64_t);
      LoadNextWord();
      new_bits = bit_util::CountTrailingZeros(word_);
      // Continue calculating run length.
      position_ += new_bits;
    } while (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
             ARROW_PREDICT_TRUE(position_ < length_) && new_bits > 0);
  }

  void LoadNextWord() { return LoadWord(length_ - position_); }

  // Helper method for loading the next word.
  void LoadWord(int64_t bits_remaining) {
    word_ = 0;
    // we need at least an extra byte in this case.
    if (ARROW_PREDICT_TRUE(bits_remaining >= 64)) {
      std::memcpy(&word_, bitmap_, 8);
    } else {
      int64_t bytes_to_load = bit_util::BytesForBits(bits_remaining);
      auto word_ptr = reinterpret_cast<uint8_t*>(&word_);
      std::memcpy(word_ptr, bitmap_, bytes_to_load);
      // Ensure stoppage at last bit in bitmap by reversing the next higher
      // order bit.
      bit_util::SetBitTo(word_ptr, bits_remaining,
                         !bit_util::GetBit(word_ptr, bits_remaining - 1));
    }

    // Two cases:
    // 1. For unset, CountTrailingZeros works naturally so we don't
    // invert the word.
    // 2. Otherwise invert so we can use CountTrailingZeros.
    if (current_run_bit_set_) {
      word_ = ~word_;
    }
  }
  const uint8_t* bitmap_;     // start of the word currently loaded in word_
  int64_t position_;          // bit index of the next unconsumed bit
  int64_t length_;            // total number of bits to read
  uint64_t word_;             // buffered word (inverted while scanning a set run)
  bool current_run_bit_set_;  // value of the bits in the run being produced
};
#else
// Big-endian: fall back to the bit-at-a-time implementation.
using BitRunReader = BitRunReaderLinear;
#endif
170
+
171
/// One run of consecutive set bits: its starting bit position and length.
struct SetBitRun {
  int64_t position;
  int64_t length;

  /// A zero-length run signals that the reader is exhausted.
  bool AtEnd() const { return length == 0; }

  /// Render as "{pos=<p>, len=<l>}" for debugging output.
  std::string ToString() const {
    std::string repr = "{pos=";
    repr += std::to_string(position);
    repr += ", len=";
    repr += std::to_string(length);
    repr += "}";
    return repr;
  }

  bool operator==(const SetBitRun& other) const {
    return (position == other.position) && (length == other.length);
  }
  bool operator!=(const SetBitRun& other) const { return !(*this == other); }
};
189
+
190
/// Reader that yields only runs of *set* bits (zeros are skipped), scanning
/// front-to-back (Reverse = false) or back-to-front (Reverse = true), one
/// 64-bit word at a time.
template <bool Reverse>
class BaseSetBitRunReader {
 public:
  /// \brief Constructs new SetBitRunReader.
  ///
  /// \param[in] bitmap source data
  /// \param[in] start_offset bit offset into the source data
  /// \param[in] length number of bits to copy
  ARROW_NOINLINE
  BaseSetBitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
      : bitmap_(util::MakeNonNull(bitmap)),
        length_(length),
        remaining_(length_),
        current_word_(0),
        current_num_bits_(0) {
    // Pre-load the partial byte at the ragged edge (the end for reverse reads,
    // the start for forward reads) so the main loop only sees whole words.
    if (Reverse) {
      bitmap_ += (start_offset + length) / 8;
      const int8_t end_bit_offset = static_cast<int8_t>((start_offset + length) % 8);
      if (length > 0 && end_bit_offset) {
        // Get LSBs from last byte
        ++bitmap_;
        current_num_bits_ =
            std::min(static_cast<int32_t>(length), static_cast<int32_t>(end_bit_offset));
        current_word_ = LoadPartialWord(8 - end_bit_offset, current_num_bits_);
      }
    } else {
      bitmap_ += start_offset / 8;
      const int8_t bit_offset = static_cast<int8_t>(start_offset % 8);
      if (length > 0 && bit_offset) {
        // Get MSBs from first byte
        current_num_bits_ =
            std::min(static_cast<int32_t>(length), static_cast<int32_t>(8 - bit_offset));
        current_word_ = LoadPartialWord(bit_offset, current_num_bits_);
      }
    }
  }

  /// Return the next run of set bits; {0, 0} signals the end of the bitmap.
  ARROW_NOINLINE
  SetBitRun NextRun() {
    int64_t pos = 0;
    int64_t len = 0;
    if (current_num_bits_) {
      const auto run = FindCurrentRun();
      assert(remaining_ >= 0);
      if (run.length && current_num_bits_) {
        // The run ends in current_word_
        return AdjustRun(run);
      }
      pos = run.position;
      len = run.length;
    }
    if (!len) {
      // We didn't get any ones in current_word_, so we can skip any zeros
      // in the following words
      SkipNextZeros();
      if (remaining_ == 0) {
        return {0, 0};
      }
      assert(current_num_bits_);
      pos = position();
    } else if (!current_num_bits_) {
      if (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
        current_word_ = LoadFullWord();
        current_num_bits_ = 64;
      } else if (remaining_ > 0) {
        current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
        current_num_bits_ = static_cast<int32_t>(remaining_);
      } else {
        // No bits remaining, perhaps we found a run?
        return AdjustRun({pos, len});
      }
      // If current word starts with a zero, we got a full run
      if (!(current_word_ & kFirstBit)) {
        return AdjustRun({pos, len});
      }
    }
    // Current word should now start with a set bit
    len += CountNextOnes();
    return AdjustRun({pos, len});
  }

 protected:
  // Bit position of the next unconsumed bit, in forward orientation.
  int64_t position() const {
    if (Reverse) {
      return remaining_;
    } else {
      return length_ - remaining_;
    }
  }

  // Reverse runs are discovered end-first; rebase position to the run's start.
  SetBitRun AdjustRun(SetBitRun run) {
    if (Reverse) {
      assert(run.position >= run.length);
      run.position -= run.length;
    }
    return run;
  }

  uint64_t LoadFullWord() {
    uint64_t word;
    if (Reverse) {
      bitmap_ -= 8;
    }
    memcpy(&word, bitmap_, 8);
    if (!Reverse) {
      bitmap_ += 8;
    }
    return bit_util::ToLittleEndian(word);
  }

  // Load fewer than 64 bits, aligned so the "first" bit (per direction) sits at
  // kFirstBit, with out-of-range bits masked to zero.
  uint64_t LoadPartialWord(int8_t bit_offset, int64_t num_bits) {
    assert(num_bits > 0);
    uint64_t word = 0;
    const int64_t num_bytes = bit_util::BytesForBits(num_bits);
    if (Reverse) {
      // Read in the most significant bytes of the word
      bitmap_ -= num_bytes;
      memcpy(reinterpret_cast<char*>(&word) + 8 - num_bytes, bitmap_, num_bytes);
      // XXX MostSignificantBitmask
      return (bit_util::ToLittleEndian(word) << bit_offset) &
             ~bit_util::LeastSignificantBitMask(64 - num_bits);
    } else {
      memcpy(&word, bitmap_, num_bytes);
      bitmap_ += num_bytes;
      return (bit_util::ToLittleEndian(word) >> bit_offset) &
             bit_util::LeastSignificantBitMask(num_bits);
    }
  }

  // Advance past a run of zeros, leaving current_word_ positioned on the next
  // set bit (or remaining_ == 0 if the bitmap ends in zeros).
  void SkipNextZeros() {
    assert(current_num_bits_ == 0);
    while (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
      current_word_ = LoadFullWord();
      const auto num_zeros = CountFirstZeros(current_word_);
      if (num_zeros < 64) {
        // Run of zeros ends here
        current_word_ = ConsumeBits(current_word_, num_zeros);
        current_num_bits_ = 64 - num_zeros;
        remaining_ -= num_zeros;
        assert(remaining_ >= 0);
        assert(current_num_bits_ >= 0);
        return;
      }
      remaining_ -= 64;
    }
    // Run of zeros continues in last bitmap word
    if (remaining_ > 0) {
      current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
      current_num_bits_ = static_cast<int32_t>(remaining_);
      const auto num_zeros =
          std::min<int32_t>(current_num_bits_, CountFirstZeros(current_word_));
      current_word_ = ConsumeBits(current_word_, num_zeros);
      current_num_bits_ -= num_zeros;
      remaining_ -= num_zeros;
      assert(remaining_ >= 0);
      assert(current_num_bits_ >= 0);
    }
  }

  // Count the length of the run of ones starting at the current position,
  // spanning word boundaries as needed.
  int64_t CountNextOnes() {
    assert(current_word_ & kFirstBit);

    int64_t len;
    if (~current_word_) {
      const auto num_ones = CountFirstZeros(~current_word_);
      assert(num_ones <= current_num_bits_);
      assert(num_ones <= remaining_);
      remaining_ -= num_ones;
      current_word_ = ConsumeBits(current_word_, num_ones);
      current_num_bits_ -= num_ones;
      if (current_num_bits_) {
        // Run of ones ends here
        return num_ones;
      }
      len = num_ones;
    } else {
      // current_word_ is all ones
      remaining_ -= 64;
      current_num_bits_ = 0;
      len = 64;
    }

    while (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
      current_word_ = LoadFullWord();
      const auto num_ones = CountFirstZeros(~current_word_);
      len += num_ones;
      remaining_ -= num_ones;
      if (num_ones < 64) {
        // Run of ones ends here
        current_word_ = ConsumeBits(current_word_, num_ones);
        current_num_bits_ = 64 - num_ones;
        return len;
      }
    }
    // Run of ones continues in last bitmap word
    if (remaining_ > 0) {
      current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
      current_num_bits_ = static_cast<int32_t>(remaining_);
      const auto num_ones = CountFirstZeros(~current_word_);
      assert(num_ones <= current_num_bits_);
      assert(num_ones <= remaining_);
      current_word_ = ConsumeBits(current_word_, num_ones);
      current_num_bits_ -= num_ones;
      remaining_ -= num_ones;
      len += num_ones;
    }
    return len;
  }

  // Find a run entirely within the currently buffered word (if any).
  SetBitRun FindCurrentRun() {
    // Skip any pending zeros
    const auto num_zeros = CountFirstZeros(current_word_);
    if (num_zeros >= current_num_bits_) {
      remaining_ -= current_num_bits_;
      current_word_ = 0;
      current_num_bits_ = 0;
      return {0, 0};
    }
    assert(num_zeros <= remaining_);
    current_word_ = ConsumeBits(current_word_, num_zeros);
    current_num_bits_ -= num_zeros;
    remaining_ -= num_zeros;
    const int64_t pos = position();
    // Count any ones
    const auto num_ones = CountFirstZeros(~current_word_);
    assert(num_ones <= current_num_bits_);
    assert(num_ones <= remaining_);
    current_word_ = ConsumeBits(current_word_, num_ones);
    current_num_bits_ -= num_ones;
    remaining_ -= num_ones;
    return {pos, num_ones};
  }

  // Direction-dependent primitives; specialized below.
  inline int CountFirstZeros(uint64_t word);
  inline uint64_t ConsumeBits(uint64_t word, int32_t num_bits);

  const uint8_t* bitmap_;
  const int64_t length_;
  int64_t remaining_;         // bits not yet consumed
  uint64_t current_word_;     // buffered bits, aligned so the next bit is kFirstBit
  int32_t current_num_bits_;  // number of valid bits left in current_word_

  // The bit examined first: MSB when reading in reverse, LSB when forward.
  static constexpr uint64_t kFirstBit = Reverse ? 0x8000000000000000ULL : 1;
};
434
+
435
// Direction-dependent primitives: "first" means the lowest-order bits for the
// forward reader and the highest-order bits for the reverse reader.

template <>
inline int BaseSetBitRunReader<false>::CountFirstZeros(uint64_t word) {
  return bit_util::CountTrailingZeros(word);
}

template <>
inline int BaseSetBitRunReader<true>::CountFirstZeros(uint64_t word) {
  return bit_util::CountLeadingZeros(word);
}

// Drop `num_bits` already-processed bits off the "front" of the word.
template <>
inline uint64_t BaseSetBitRunReader<false>::ConsumeBits(uint64_t word, int32_t num_bits) {
  return word >> num_bits;
}

template <>
inline uint64_t BaseSetBitRunReader<true>::ConsumeBits(uint64_t word, int32_t num_bits) {
  return word << num_bits;
}
454
+
455
+ using SetBitRunReader = BaseSetBitRunReader</*Reverse=*/false>;
456
+ using ReverseSetBitRunReader = BaseSetBitRunReader</*Reverse=*/true>;
457
+
458
+ // Functional-style bit run visitors.
459
+
460
+ // XXX: Try to make this function small so the compiler can inline and optimize
461
+ // the `visit` function, which is normally a hot loop with vectorizable code.
462
+ // - don't inline SetBitRunReader constructor, it doesn't hurt performance
463
+ // - un-inline NextRun hurts 'many null' cases a bit, but improves normal cases
464
+ template <typename Visit>
465
+ inline Status VisitSetBitRuns(const uint8_t* bitmap, int64_t offset, int64_t length,
466
+ Visit&& visit) {
467
+ if (bitmap == NULLPTR) {
468
+ // Assuming all set (as in a null bitmap)
469
+ return visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
470
+ }
471
+ SetBitRunReader reader(bitmap, offset, length);
472
+ while (true) {
473
+ const auto run = reader.NextRun();
474
+ if (run.length == 0) {
475
+ break;
476
+ }
477
+ ARROW_RETURN_NOT_OK(visit(run.position, run.length));
478
+ }
479
+ return Status::OK();
480
+ }
481
+
482
+ template <typename Visit>
483
+ inline void VisitSetBitRunsVoid(const uint8_t* bitmap, int64_t offset, int64_t length,
484
+ Visit&& visit) {
485
+ if (bitmap == NULLPTR) {
486
+ // Assuming all set (as in a null bitmap)
487
+ visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
488
+ return;
489
+ }
490
+ SetBitRunReader reader(bitmap, offset, length);
491
+ while (true) {
492
+ const auto run = reader.NextRun();
493
+ if (run.length == 0) {
494
+ break;
495
+ }
496
+ visit(run.position, run.length);
497
+ }
498
+ }
499
+
500
// Buffer-accepting overloads: a null Buffer is treated like a null bitmap,
// i.e. every bit is considered set.
template <typename Visit>
inline Status VisitSetBitRuns(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
                              int64_t length, Visit&& visit) {
  return VisitSetBitRuns(bitmap ? bitmap->data() : NULLPTR, offset, length,
                         std::forward<Visit>(visit));
}

template <typename Visit>
inline void VisitSetBitRunsVoid(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
                                int64_t length, Visit&& visit) {
  VisitSetBitRunsVoid(bitmap ? bitmap->data() : NULLPTR, offset, length,
                      std::forward<Visit>(visit));
}
513
+
514
+ } // namespace internal
515
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_stream_utils.h ADDED
@@ -0,0 +1,529 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // From Apache Impala (incubating) as of 2016-01-29
19
+
20
+ #pragma once
21
+
22
+ #include <algorithm>
23
+ #include <cstdint>
24
+ #include <cstring>
25
+
26
+ #include "arrow/util/bit_util.h"
27
+ #include "arrow/util/bpacking.h"
28
+ #include "arrow/util/logging.h"
29
+ #include "arrow/util/macros.h"
30
+ #include "arrow/util/ubsan.h"
31
+
32
+ namespace arrow {
33
+ namespace bit_util {
34
+
35
/// Utility class to write bit/byte streams. This class can write data to either be
/// bit packed or byte aligned (and a single stream that has a mix of both).
/// This class does not allocate memory.
class BitWriter {
 public:
  /// buffer: buffer to write bits to. Buffer should be preallocated with
  /// 'buffer_len' bytes.
  BitWriter(uint8_t* buffer, int buffer_len) : buffer_(buffer), max_bytes_(buffer_len) {
    Clear();
  }

  /// Rewind to the start of the buffer, discarding any buffered bits.
  void Clear() {
    buffered_values_ = 0;
    byte_offset_ = 0;
    bit_offset_ = 0;
  }

  /// The number of current bytes written, including the current byte (i.e. may include a
  /// fraction of a byte). Includes buffered values.
  int bytes_written() const {
    return byte_offset_ + static_cast<int>(bit_util::BytesForBits(bit_offset_));
  }
  uint8_t* buffer() const { return buffer_; }
  int buffer_len() const { return max_bytes_; }

  /// Writes a value to buffered_values_, flushing to buffer_ if necessary. This is bit
  /// packed. Returns false if there was not enough space. num_bits must be <= 32.
  bool PutValue(uint64_t v, int num_bits);

  /// Writes v to the next aligned byte using num_bytes. If T is larger than
  /// num_bytes, the extra high-order bytes will be ignored. Returns false if
  /// there was not enough space.
  /// Assume the v is stored in buffer_ as a little-endian format
  template <typename T>
  bool PutAligned(T v, int num_bytes);

  /// Write a Vlq encoded int to the buffer. Returns false if there was not enough
  /// room. The value is written byte aligned.
  /// For more details on vlq:
  /// en.wikipedia.org/wiki/Variable-length_quantity
  bool PutVlqInt(uint32_t v);

  // Writes an int zigzag encoded.
  bool PutZigZagVlqInt(int32_t v);

  /// Write a Vlq encoded int64 to the buffer. Returns false if there was not enough
  /// room. The value is written byte aligned.
  /// For more details on vlq:
  /// en.wikipedia.org/wiki/Variable-length_quantity
  bool PutVlqInt(uint64_t v);

  // Writes an int64 zigzag encoded.
  bool PutZigZagVlqInt(int64_t v);

  /// Get a pointer to the next aligned byte and advance the underlying buffer
  /// by num_bytes.
  /// Returns NULL if there was not enough space.
  uint8_t* GetNextBytePtr(int num_bytes = 1);

  /// Flushes all buffered values to the buffer. Call this when done writing to
  /// the buffer. If 'align' is true, buffered_values_ is reset and any future
  /// writes will be written to the next byte boundary.
  void Flush(bool align = false);

 private:
  uint8_t* buffer_;  // Destination buffer (not owned by this class)
  int max_bytes_;    // Capacity of buffer_ in bytes

  /// Bit-packed values are initially written to this variable before being memcpy'd to
  /// buffer_. This is faster than writing values byte by byte directly to buffer_.
  uint64_t buffered_values_;

  int byte_offset_;  // Offset in buffer_
  int bit_offset_;   // Offset in buffered_values_
};
110
+
111
+ namespace detail {
112
+
113
+ inline uint64_t ReadLittleEndianWord(const uint8_t* buffer, int bytes_remaining) {
114
+ uint64_t le_value = 0;
115
+ if (ARROW_PREDICT_TRUE(bytes_remaining >= 8)) {
116
+ memcpy(&le_value, buffer, 8);
117
+ } else {
118
+ memcpy(&le_value, buffer, bytes_remaining);
119
+ }
120
+ return arrow::bit_util::FromLittleEndian(le_value);
121
+ }
122
+
123
+ } // namespace detail
124
+
125
+ /// Utility class to read bit/byte stream. This class can read bits or bytes
126
+ /// that are either byte aligned or not. It also has utilities to read multiple
127
+ /// bytes in one read (e.g. encoded int).
128
+ class BitReader {
129
+ public:
130
+ BitReader() = default;
131
+
132
+ /// 'buffer' is the buffer to read from. The buffer's length is 'buffer_len'.
133
+ BitReader(const uint8_t* buffer, int buffer_len) : BitReader() {
134
+ Reset(buffer, buffer_len);
135
+ }
136
+
137
+ void Reset(const uint8_t* buffer, int buffer_len) {
138
+ buffer_ = buffer;
139
+ max_bytes_ = buffer_len;
140
+ byte_offset_ = 0;
141
+ bit_offset_ = 0;
142
+ buffered_values_ =
143
+ detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_);
144
+ }
145
+
146
+ /// Gets the next value from the buffer. Returns true if 'v' could be read or false if
147
+ /// there are not enough bytes left.
148
+ template <typename T>
149
+ bool GetValue(int num_bits, T* v);
150
+
151
+ /// Get a number of values from the buffer. Return the number of values actually read.
152
+ template <typename T>
153
+ int GetBatch(int num_bits, T* v, int batch_size);
154
+
155
+ /// Reads a 'num_bytes'-sized value from the buffer and stores it in 'v'. T
156
+ /// needs to be a little-endian native type and big enough to store
157
+ /// 'num_bytes'. The value is assumed to be byte-aligned so the stream will
158
+ /// be advanced to the start of the next byte before 'v' is read. Returns
159
+ /// false if there are not enough bytes left.
160
+ /// Assume the v was stored in buffer_ as a little-endian format
161
+ template <typename T>
162
+ bool GetAligned(int num_bytes, T* v);
163
+
164
+ /// Advances the stream by a number of bits. Returns true if succeed or false if there
165
+ /// are not enough bits left.
166
+ bool Advance(int64_t num_bits);
167
+
168
+ /// Reads a vlq encoded int from the stream. The encoded int must start at
169
+ /// the beginning of a byte. Return false if there were not enough bytes in
170
+ /// the buffer.
171
+ bool GetVlqInt(uint32_t* v);
172
+
173
+ // Reads a zigzag encoded int `into` v.
174
+ bool GetZigZagVlqInt(int32_t* v);
175
+
176
+ /// Reads a vlq encoded int64 from the stream. The encoded int must start at
177
+ /// the beginning of a byte. Return false if there were not enough bytes in
178
+ /// the buffer.
179
+ bool GetVlqInt(uint64_t* v);
180
+
181
+ // Reads a zigzag encoded int64 `into` v.
182
+ bool GetZigZagVlqInt(int64_t* v);
183
+
184
+ /// Returns the number of bytes left in the stream, not including the current
185
+ /// byte (i.e., there may be an additional fraction of a byte).
186
+ int bytes_left() const {
187
+ return max_bytes_ -
188
+ (byte_offset_ + static_cast<int>(bit_util::BytesForBits(bit_offset_)));
189
+ }
190
+
191
+ /// Maximum byte length of a vlq encoded int
192
+ static constexpr int kMaxVlqByteLength = 5;
193
+
194
+ /// Maximum byte length of a vlq encoded int64
195
+ static constexpr int kMaxVlqByteLengthForInt64 = 10;
196
+
197
+ private:
198
+ const uint8_t* buffer_;
199
+ int max_bytes_;
200
+
201
+ /// Bytes are memcpy'd from buffer_ and values are read from this variable. This is
202
+ /// faster than reading values byte by byte directly from buffer_.
203
+ uint64_t buffered_values_;
204
+
205
+ int byte_offset_; // Offset in buffer_
206
+ int bit_offset_; // Offset in buffered_values_
207
+ };
208
+
209
/// Append the low 'num_bits' bits of 'v' to the stream. Bits accumulate in
/// buffered_values_ and are spilled to buffer_ in 8-byte little-endian chunks
/// once 64 bits are pending.
inline bool BitWriter::PutValue(uint64_t v, int num_bits) {
  DCHECK_LE(num_bits, 64);
  if (num_bits < 64) {
    // Caller must not pass stray high bits; they would corrupt neighboring values.
    DCHECK_EQ(v >> num_bits, 0) << "v = " << v << ", num_bits = " << num_bits;
  }

  // Fail if the value would run past the end of the buffer.
  if (ARROW_PREDICT_FALSE(byte_offset_ * 8 + bit_offset_ + num_bits > max_bytes_ * 8))
    return false;

  buffered_values_ |= v << bit_offset_;
  bit_offset_ += num_bits;

  if (ARROW_PREDICT_FALSE(bit_offset_ >= 64)) {
    // Flush buffered_values_ and write out bits of v that did not fit
    buffered_values_ = arrow::bit_util::ToLittleEndian(buffered_values_);
    memcpy(buffer_ + byte_offset_, &buffered_values_, 8);
    buffered_values_ = 0;
    byte_offset_ += 8;
    bit_offset_ -= 64;
    // A shift by exactly 64 would be undefined behavior, so special-case it.
    buffered_values_ =
        (num_bits - bit_offset_ == 64) ? 0 : (v >> (num_bits - bit_offset_));
  }
  DCHECK_LT(bit_offset_, 64);
  return true;
}
234
+
235
+ inline void BitWriter::Flush(bool align) {
236
+ int num_bytes = static_cast<int>(bit_util::BytesForBits(bit_offset_));
237
+ DCHECK_LE(byte_offset_ + num_bytes, max_bytes_);
238
+ auto buffered_values = arrow::bit_util::ToLittleEndian(buffered_values_);
239
+ memcpy(buffer_ + byte_offset_, &buffered_values, num_bytes);
240
+
241
+ if (align) {
242
+ buffered_values_ = 0;
243
+ byte_offset_ += num_bytes;
244
+ bit_offset_ = 0;
245
+ }
246
+ }
247
+
248
+ inline uint8_t* BitWriter::GetNextBytePtr(int num_bytes) {
249
+ Flush(/* align */ true);
250
+ DCHECK_LE(byte_offset_, max_bytes_);
251
+ if (byte_offset_ + num_bytes > max_bytes_) return NULL;
252
+ uint8_t* ptr = buffer_ + byte_offset_;
253
+ byte_offset_ += num_bytes;
254
+ return ptr;
255
+ }
256
+
257
+ template <typename T>
258
+ inline bool BitWriter::PutAligned(T val, int num_bytes) {
259
+ uint8_t* ptr = GetNextBytePtr(num_bytes);
260
+ if (ptr == NULL) return false;
261
+ val = arrow::bit_util::ToLittleEndian(val);
262
+ memcpy(ptr, &val, num_bytes);
263
+ return true;
264
+ }
265
+
266
+ namespace detail {
267
+
268
// Core single-value decode shared by BitReader::GetValue()/GetBatch():
// extract 'num_bits' bits at *bit_offset from *buffered_values into *v,
// refilling the 64-bit cache from 'buffer' when the read crosses a word
// boundary. All cursor state is passed by pointer and updated in place.
template <typename T>
inline void GetValue_(int num_bits, T* v, int max_bytes, const uint8_t* buffer,
                      int* bit_offset, int* byte_offset, uint64_t* buffered_values) {
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4800)
#endif
  *v = static_cast<T>(bit_util::TrailingBits(*buffered_values, *bit_offset + num_bits) >>
                      *bit_offset);
#ifdef _MSC_VER
#pragma warning(pop)
#endif
  *bit_offset += num_bits;
  if (*bit_offset >= 64) {
    // The value straddled the cached word: advance and reload the cache.
    *byte_offset += 8;
    *bit_offset -= 64;

    *buffered_values =
        detail::ReadLittleEndianWord(buffer + *byte_offset, max_bytes - *byte_offset);
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4800 4805)
#endif
    // Read bits of v that crossed into new buffered_values_
    if (ARROW_PREDICT_TRUE(num_bits - *bit_offset < static_cast<int>(8 * sizeof(T)))) {
      // if shift exponent(num_bits - *bit_offset) is not less than sizeof(T), *v will not
      // change and the following code may cause a runtime error that the shift exponent
      // is too large
      *v = *v | static_cast<T>(bit_util::TrailingBits(*buffered_values, *bit_offset)
                               << (num_bits - *bit_offset));
    }
#ifdef _MSC_VER
#pragma warning(pop)
#endif
    DCHECK_LE(*bit_offset, 64);
  }
}
305
+
306
+ } // namespace detail
307
+
308
+ template <typename T>
309
+ inline bool BitReader::GetValue(int num_bits, T* v) {
310
+ return GetBatch(num_bits, v, 1) == 1;
311
+ }
312
+
313
/// Decode up to 'batch_size' bit-packed values of 'num_bits' each into 'v'.
/// Returns the number of values actually read (clamped to the input remaining).
template <typename T>
inline int BitReader::GetBatch(int num_bits, T* v, int batch_size) {
  DCHECK(buffer_ != NULL);
  DCHECK_LE(num_bits, static_cast<int>(sizeof(T) * 8)) << "num_bits: " << num_bits;

  // Work on local copies of the cursor; state is written back once at the end.
  int bit_offset = bit_offset_;
  int byte_offset = byte_offset_;
  uint64_t buffered_values = buffered_values_;
  int max_bytes = max_bytes_;
  const uint8_t* buffer = buffer_;

  const int64_t needed_bits = num_bits * static_cast<int64_t>(batch_size);
  constexpr uint64_t kBitsPerByte = 8;
  const int64_t remaining_bits =
      static_cast<int64_t>(max_bytes - byte_offset) * kBitsPerByte - bit_offset;
  if (remaining_bits < needed_bits) {
    // Not enough input left: shrink the batch to what is available.
    batch_size = static_cast<int>(remaining_bits / num_bits);
  }

  int i = 0;
  // Consume values one at a time until the read position is byte-aligned,
  // which the bulk unpack routines below require.
  if (ARROW_PREDICT_FALSE(bit_offset != 0)) {
    for (; i < batch_size && bit_offset != 0; ++i) {
      detail::GetValue_(num_bits, &v[i], max_bytes, buffer, &bit_offset, &byte_offset,
                        &buffered_values);
    }
  }

  if (sizeof(T) == 4) {
    // 32-bit outputs: unpack directly into the destination.
    int num_unpacked =
        internal::unpack32(reinterpret_cast<const uint32_t*>(buffer + byte_offset),
                           reinterpret_cast<uint32_t*>(v + i), batch_size - i, num_bits);
    i += num_unpacked;
    byte_offset += num_unpacked * num_bits / 8;
  } else if (sizeof(T) == 8 && num_bits > 32) {
    // Use unpack64 only if num_bits is larger than 32
    // TODO (ARROW-13677): improve the performance of internal::unpack64
    // and remove the restriction of num_bits
    int num_unpacked =
        internal::unpack64(buffer + byte_offset, reinterpret_cast<uint64_t*>(v + i),
                           batch_size - i, num_bits);
    i += num_unpacked;
    byte_offset += num_unpacked * num_bits / 8;
  } else {
    // Other widths: unpack into a uint32_t scratch buffer, then cast out to T.
    // TODO: revisit this limit if necessary
    DCHECK_LE(num_bits, 32);
    const int buffer_size = 1024;
    uint32_t unpack_buffer[buffer_size];
    while (i < batch_size) {
      int unpack_size = std::min(buffer_size, batch_size - i);
      int num_unpacked =
          internal::unpack32(reinterpret_cast<const uint32_t*>(buffer + byte_offset),
                             unpack_buffer, unpack_size, num_bits);
      if (num_unpacked == 0) {
        break;
      }
      for (int k = 0; k < num_unpacked; ++k) {
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4800)
#endif
        v[i + k] = static_cast<T>(unpack_buffer[k]);
#ifdef _MSC_VER
#pragma warning(pop)
#endif
      }
      i += num_unpacked;
      byte_offset += num_unpacked * num_bits / 8;
    }
  }

  // Refill the 64-bit read cache at the new position, then decode any
  // leftover values (fewer than one unpack step) one by one.
  buffered_values =
      detail::ReadLittleEndianWord(buffer + byte_offset, max_bytes - byte_offset);

  for (; i < batch_size; ++i) {
    detail::GetValue_(num_bits, &v[i], max_bytes, buffer, &bit_offset, &byte_offset,
                      &buffered_values);
  }

  // Commit the updated cursor back to the reader.
  bit_offset_ = bit_offset;
  byte_offset_ = byte_offset;
  buffered_values_ = buffered_values;

  return batch_size;
}
397
+
398
/// Read a 'num_bytes'-sized little-endian value at the next byte boundary.
/// Any pending partial bits are skipped first; returns false on overrun.
template <typename T>
inline bool BitReader::GetAligned(int num_bytes, T* v) {
  if (ARROW_PREDICT_FALSE(num_bytes > static_cast<int>(sizeof(T)))) {
    return false;
  }

  int bytes_read = static_cast<int>(bit_util::BytesForBits(bit_offset_));
  if (ARROW_PREDICT_FALSE(byte_offset_ + bytes_read + num_bytes > max_bytes_)) {
    return false;
  }

  // Advance byte_offset to next unread byte and read num_bytes
  byte_offset_ += bytes_read;
  if constexpr (std::is_same_v<T, bool>) {
    // ARROW-18031: if we're trying to get an aligned bool, just check
    // the LSB of the next byte and move on. If we memcpy + FromLittleEndian
    // as usual, we have potential undefined behavior for bools if the value
    // isn't 0 or 1
    *v = *(buffer_ + byte_offset_) & 1;
  } else {
    memcpy(v, buffer_ + byte_offset_, num_bytes);
    *v = arrow::bit_util::FromLittleEndian(*v);
  }
  byte_offset_ += num_bytes;

  // Reset the bit cursor and refill the 64-bit read cache at the new position.
  bit_offset_ = 0;
  buffered_values_ =
      detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_);
  return true;
}
428
+
429
+ inline bool BitReader::Advance(int64_t num_bits) {
430
+ int64_t bits_required = bit_offset_ + num_bits;
431
+ int64_t bytes_required = bit_util::BytesForBits(bits_required);
432
+ if (ARROW_PREDICT_FALSE(bytes_required > max_bytes_ - byte_offset_)) {
433
+ return false;
434
+ }
435
+ byte_offset_ += static_cast<int>(bits_required >> 3);
436
+ bit_offset_ = static_cast<int>(bits_required & 7);
437
+ buffered_values_ =
438
+ detail::ReadLittleEndianWord(buffer_ + byte_offset_, max_bytes_ - byte_offset_);
439
+ return true;
440
+ }
441
+
442
+ inline bool BitWriter::PutVlqInt(uint32_t v) {
443
+ bool result = true;
444
+ while ((v & 0xFFFFFF80UL) != 0UL) {
445
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>((v & 0x7F) | 0x80), 1);
446
+ v >>= 7;
447
+ }
448
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>(v & 0x7F), 1);
449
+ return result;
450
+ }
451
+
452
+ inline bool BitReader::GetVlqInt(uint32_t* v) {
453
+ uint32_t tmp = 0;
454
+
455
+ for (int i = 0; i < kMaxVlqByteLength; i++) {
456
+ uint8_t byte = 0;
457
+ if (ARROW_PREDICT_FALSE(!GetAligned<uint8_t>(1, &byte))) {
458
+ return false;
459
+ }
460
+ tmp |= static_cast<uint32_t>(byte & 0x7F) << (7 * i);
461
+
462
+ if ((byte & 0x80) == 0) {
463
+ *v = tmp;
464
+ return true;
465
+ }
466
+ }
467
+
468
+ return false;
469
+ }
470
+
471
// Writes an int zigzag encoded (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...) so that
// small magnitudes produce short varints, then emits it as a VLQ int.
inline bool BitWriter::PutZigZagVlqInt(int32_t v) {
  uint32_t u_v = ::arrow::util::SafeCopy<uint32_t>(v);
  // (v >> 31) spreads the sign across all bits, flipping them for negatives.
  u_v = (u_v << 1) ^ static_cast<uint32_t>(v >> 31);
  return PutVlqInt(u_v);
}
476
+
477
// Reads a zigzag encoded int `into` v.
inline bool BitReader::GetZigZagVlqInt(int32_t* v) {
  uint32_t u;
  if (!GetVlqInt(&u)) return false;
  // Undo zigzag: the low bit selects the sign, the remaining bits the magnitude.
  u = (u >> 1) ^ (~(u & 1) + 1);
  *v = ::arrow::util::SafeCopy<int32_t>(u);
  return true;
}
484
+
485
+ inline bool BitWriter::PutVlqInt(uint64_t v) {
486
+ bool result = true;
487
+ while ((v & 0xFFFFFFFFFFFFFF80ULL) != 0ULL) {
488
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>((v & 0x7F) | 0x80), 1);
489
+ v >>= 7;
490
+ }
491
+ result &= PutAligned<uint8_t>(static_cast<uint8_t>(v & 0x7F), 1);
492
+ return result;
493
+ }
494
+
495
+ inline bool BitReader::GetVlqInt(uint64_t* v) {
496
+ uint64_t tmp = 0;
497
+
498
+ for (int i = 0; i < kMaxVlqByteLengthForInt64; i++) {
499
+ uint8_t byte = 0;
500
+ if (ARROW_PREDICT_FALSE(!GetAligned<uint8_t>(1, &byte))) {
501
+ return false;
502
+ }
503
+ tmp |= static_cast<uint64_t>(byte & 0x7F) << (7 * i);
504
+
505
+ if ((byte & 0x80) == 0) {
506
+ *v = tmp;
507
+ return true;
508
+ }
509
+ }
510
+
511
+ return false;
512
+ }
513
+
514
// Writes an int64 zigzag encoded (0 -> 0, -1 -> 1, 1 -> 2, ...), then emits
// it as a VLQ int64.
inline bool BitWriter::PutZigZagVlqInt(int64_t v) {
  uint64_t u_v = ::arrow::util::SafeCopy<uint64_t>(v);
  // (v >> 63) spreads the sign across all bits, flipping them for negatives.
  u_v = (u_v << 1) ^ static_cast<uint64_t>(v >> 63);
  return PutVlqInt(u_v);
}
519
+
520
// Reads a zigzag encoded int64 `into` v.
inline bool BitReader::GetZigZagVlqInt(int64_t* v) {
  uint64_t u;
  if (!GetVlqInt(&u)) return false;
  // Undo zigzag: the low bit selects the sign, the remaining bits the magnitude.
  u = (u >> 1) ^ (~(u & 1) + 1);
  *v = ::arrow::util::SafeCopy<int64_t>(u);
  return true;
}
527
+
528
+ } // namespace bit_util
529
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_visit.h ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+
22
+ #include "arrow/util/bit_util.h"
23
+ #include "arrow/util/bitmap_reader.h"
24
+
25
+ namespace arrow {
26
+ namespace internal {
27
+
28
+ // A function that visits each bit in a bitmap and calls a visitor function with a
29
+ // boolean representation of that bit. This is intended to be analogous to
30
+ // GenerateBits.
31
+ template <class Visitor>
32
+ void VisitBits(const uint8_t* bitmap, int64_t start_offset, int64_t length,
33
+ Visitor&& visit) {
34
+ BitmapReader reader(bitmap, start_offset, length);
35
+ for (int64_t index = 0; index < length; ++index) {
36
+ visit(reader.IsSet());
37
+ reader.Next();
38
+ }
39
+ }
40
+
41
// Like VisitBits(), but unrolls its main loop for better performance.
// Handles a ragged prefix and suffix with VisitBits() and visits the
// byte-aligned middle eight bits at a time.
template <class Visitor>
void VisitBitsUnrolled(const uint8_t* bitmap, int64_t start_offset, int64_t length,
                       Visitor&& visit) {
  if (length == 0) {
    return;
  }

  // Start by visiting any bits preceding the first full byte.
  int64_t num_bits_before_full_bytes =
      bit_util::RoundUpToMultipleOf8(start_offset) - start_offset;
  // Truncate num_bits_before_full_bytes if it is greater than length.
  if (num_bits_before_full_bytes > length) {
    num_bits_before_full_bytes = length;
  }
  // Use the non loop-unrolled VisitBits since we don't want to add branches
  VisitBits<Visitor>(bitmap, start_offset, num_bits_before_full_bytes, visit);

  // Shift the start pointer to the first full byte and compute the
  // number of full bytes to be read.
  const uint8_t* first_full_byte = bitmap + bit_util::CeilDiv(start_offset, 8);
  const int64_t num_full_bytes = (length - num_bits_before_full_bytes) / 8;

  // Iterate over each full byte of the input bitmap and call the visitor in
  // a loop-unrolled manner.
  for (int64_t byte_index = 0; byte_index < num_full_bytes; ++byte_index) {
    // Get the current bit-packed byte value from the bitmap.
    const uint8_t byte = *(first_full_byte + byte_index);

    // Execute the visitor function on each bit of the current byte.
    visit(bit_util::GetBitFromByte(byte, 0));
    visit(bit_util::GetBitFromByte(byte, 1));
    visit(bit_util::GetBitFromByte(byte, 2));
    visit(bit_util::GetBitFromByte(byte, 3));
    visit(bit_util::GetBitFromByte(byte, 4));
    visit(bit_util::GetBitFromByte(byte, 5));
    visit(bit_util::GetBitFromByte(byte, 6));
    visit(bit_util::GetBitFromByte(byte, 7));
  }

  // Write any leftover bits in the last byte.
  const int64_t num_bits_after_full_bytes = (length - num_bits_before_full_bytes) % 8;
  VisitBits<Visitor>(first_full_byte + num_full_bytes, 0, num_bits_after_full_bytes,
                     visit);
}
86
+
87
+ } // namespace internal
88
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <cstring>
22
+
23
+ #include "arrow/util/bit_util.h"
24
+ #include "arrow/util/endian.h"
25
+ #include "arrow/util/macros.h"
26
+
27
+ namespace arrow {
28
+ namespace internal {
29
+
30
+ class BitmapWriter {
31
+ // A sequential bitwise writer that preserves surrounding bit values.
32
+
33
+ public:
34
+ BitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
35
+ : bitmap_(bitmap), position_(0), length_(length) {
36
+ byte_offset_ = start_offset / 8;
37
+ bit_mask_ = bit_util::kBitmask[start_offset % 8];
38
+ if (length > 0) {
39
+ current_byte_ = bitmap[byte_offset_];
40
+ } else {
41
+ current_byte_ = 0;
42
+ }
43
+ }
44
+
45
+ void Set() { current_byte_ |= bit_mask_; }
46
+
47
+ void Clear() { current_byte_ &= bit_mask_ ^ 0xFF; }
48
+
49
+ void Next() {
50
+ bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
51
+ ++position_;
52
+ if (bit_mask_ == 0) {
53
+ // Finished this byte, need advancing
54
+ bit_mask_ = 0x01;
55
+ bitmap_[byte_offset_++] = current_byte_;
56
+ if (ARROW_PREDICT_TRUE(position_ < length_)) {
57
+ current_byte_ = bitmap_[byte_offset_];
58
+ }
59
+ }
60
+ }
61
+
62
+ void Finish() {
63
+ // Store current byte if we didn't went past bitmap storage
64
+ if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) {
65
+ bitmap_[byte_offset_] = current_byte_;
66
+ }
67
+ }
68
+
69
+ int64_t position() const { return position_; }
70
+
71
+ private:
72
+ uint8_t* bitmap_;
73
+ int64_t position_;
74
+ int64_t length_;
75
+
76
+ uint8_t current_byte_;
77
+ uint8_t bit_mask_;
78
+ int64_t byte_offset_;
79
+ };
80
+
81
class FirstTimeBitmapWriter {
  // Like BitmapWriter, but any bit values *following* the bits written
  // might be clobbered. It is hence faster than BitmapWriter, and can
  // also avoid false positives with Valgrind.

 public:
  FirstTimeBitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
      : bitmap_(bitmap), position_(0), length_(length) {
    current_byte_ = 0;
    byte_offset_ = start_offset / 8;
    bit_mask_ = bit_util::kBitmask[start_offset % 8];
    if (length > 0) {
      // Keep only the bits preceding start_offset; later bits may be clobbered.
      current_byte_ =
          bitmap[byte_offset_] & bit_util::kPrecedingBitmask[start_offset % 8];
    } else {
      current_byte_ = 0;
    }
  }

  /// Appends number_of_bits from word to valid_bits and valid_bits_offset.
  ///
  /// \param[in] word The LSB bitmap to append. Any bits past number_of_bits are assumed
  /// to be unset (i.e. 0).
  /// \param[in] number_of_bits The number of bits to append from word.
  void AppendWord(uint64_t word, int64_t number_of_bits) {
    if (ARROW_PREDICT_FALSE(number_of_bits == 0)) {
      return;
    }

    // Location that the first byte needs to be written to.
    uint8_t* append_position = bitmap_ + byte_offset_;

    // Update state variables except for current_byte_ here.
    position_ += number_of_bits;
    int64_t bit_offset = bit_util::CountTrailingZeros(static_cast<uint32_t>(bit_mask_));
    bit_mask_ = bit_util::kBitmask[(bit_offset + number_of_bits) % 8];
    byte_offset_ += (bit_offset + number_of_bits) / 8;

    if (bit_offset != 0) {
      // We are in the middle of the byte. This code updates the byte and shifts
      // bits appropriately within word so it can be memcpy'd below.
      int64_t bits_to_carry = 8 - bit_offset;
      // Carry over bits from word to current_byte_. We assume any extra bits in word
      // unset so no additional accounting is needed for when number_of_bits <
      // bits_to_carry.
      current_byte_ |= (word & bit_util::kPrecedingBitmask[bits_to_carry]) << bit_offset;
      // Check if everything is transferred into current_byte_.
      if (ARROW_PREDICT_FALSE(number_of_bits < bits_to_carry)) {
        return;
      }
      *append_position = current_byte_;
      append_position++;
      // Move the carry bits off of word.
      word = word >> bits_to_carry;
      number_of_bits -= bits_to_carry;
    }
    word = bit_util::ToLittleEndian(word);
    int64_t bytes_for_word = ::arrow::bit_util::BytesForBits(number_of_bits);
    std::memcpy(append_position, &word, bytes_for_word);
    // At this point, the previous current_byte_ has been written to bitmap_.
    // The new current_byte_ is either the last relevant byte in 'word'
    // or cleared if the new position is byte aligned (i.e. a fresh byte).
    if (bit_mask_ == 0x1) {
      current_byte_ = 0;
    } else {
      current_byte_ = *(append_position + bytes_for_word - 1);
    }
  }

  void Set() { current_byte_ |= bit_mask_; }

  // No-op: a first-time writer starts each byte zeroed, so unset bits stay 0.
  void Clear() {}

  void Next() {
    bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
    ++position_;
    if (bit_mask_ == 0) {
      // Finished this byte, need advancing
      bit_mask_ = 0x01;
      bitmap_[byte_offset_++] = current_byte_;
      current_byte_ = 0;
    }
  }

  void Finish() {
    // Store the current byte unless we already wrote past the bitmap storage.
    if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) {
      bitmap_[byte_offset_] = current_byte_;
    }
  }

  int64_t position() const { return position_; }

 private:
  uint8_t* bitmap_;
  int64_t position_;
  int64_t length_;

  uint8_t current_byte_;
  uint8_t bit_mask_;
  int64_t byte_offset_;
};
183
+
184
// Word-at-a-time bitmap writer. When `may_have_byte_offset` is true the
// destination may start mid-byte, and each incoming word is split across two
// adjacent destination words without touching bits outside the target range.
template <typename Word, bool may_have_byte_offset = true>
class BitmapWordWriter {
 public:
  BitmapWordWriter() = default;
  BitmapWordWriter(uint8_t* bitmap, int64_t offset, int64_t length)
      : offset_(static_cast<int64_t>(may_have_byte_offset) * (offset % 8)),
        bitmap_(bitmap + offset / 8),
        bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)),
        mask_((1U << offset_) - 1) {
    if (offset_) {
      // Preload existing destination bits so the low `offset_` bits of the
      // first word/byte written back are preserved.
      if (length >= static_cast<int>(sizeof(Word) * 8)) {
        current_data.word_ = load<Word>(bitmap_);
      } else if (length > 0) {
        current_data.epi.byte_ = load<uint8_t>(bitmap_);
      }
    }
  }

  void PutNextWord(Word word) {
    if (may_have_byte_offset && offset_) {
      // split one word into two adjacent words, don't touch unused bits
      // |<------ word ----->|
      // +-----+-------------+
      // |  A  |     B       |
      // +-----+-------------+
      //       |             |
      //       v             v offset
      // +-------------+-----+-------------+-----+
      // |     ---     |  A  |      B      | --- |
      // +-------------+-----+-------------+-----+
      // |<------ next ----->|<---- current ---->|
      word = (word << offset_) | (word >> (sizeof(Word) * 8 - offset_));
      Word next_word = load<Word>(bitmap_ + sizeof(Word));
      current_data.word_ = (current_data.word_ & mask_) | (word & ~mask_);
      next_word = (next_word & ~mask_) | (word & mask_);
      store<Word>(bitmap_, current_data.word_);
      store<Word>(bitmap_ + sizeof(Word), next_word);
      current_data.word_ = next_word;
    } else {
      store<Word>(bitmap_, word);
    }
    bitmap_ += sizeof(Word);
  }

  void PutNextTrailingByte(uint8_t byte, int valid_bits) {
    if (valid_bits == 8) {
      // Full byte: same two-way split as PutNextWord, at byte granularity.
      if (may_have_byte_offset && offset_) {
        byte = (byte << offset_) | (byte >> (8 - offset_));
        uint8_t next_byte = load<uint8_t>(bitmap_ + 1);
        current_data.epi.byte_ = (current_data.epi.byte_ & mask_) | (byte & ~mask_);
        next_byte = (next_byte & ~mask_) | (byte & mask_);
        store<uint8_t>(bitmap_, current_data.epi.byte_);
        store<uint8_t>(bitmap_ + 1, next_byte);
        current_data.epi.byte_ = next_byte;
      } else {
        store<uint8_t>(bitmap_, byte);
      }
      ++bitmap_;
    } else {
      // Partial byte: fall back to a bit-by-bit writer for the ragged tail.
      assert(valid_bits > 0);
      assert(valid_bits < 8);
      assert(bitmap_ + bit_util::BytesForBits(offset_ + valid_bits) <= bitmap_end_);
      internal::BitmapWriter writer(bitmap_, offset_, valid_bits);
      for (int i = 0; i < valid_bits; ++i) {
        (byte & 0x01) ? writer.Set() : writer.Clear();
        writer.Next();
        byte >>= 1;
      }
      writer.Finish();
    }
  }

 private:
  int64_t offset_;   // Bit offset within the first destination byte (0 if aligned)
  uint8_t* bitmap_;  // Current destination position (not owned)

  const uint8_t* bitmap_end_;  // One past the last writable byte (bounds checks)
  uint64_t mask_;              // Low `offset_` bits set; selects preserved bits
  union {
    Word word_;
    struct {
#if ARROW_LITTLE_ENDIAN == 0
      uint8_t padding_bytes_[sizeof(Word) - 1];
#endif
      uint8_t byte_;
    } epi;
  } current_data;

  template <typename DType>
  DType load(const uint8_t* bitmap) {
    assert(bitmap + sizeof(DType) <= bitmap_end_);
    return bit_util::ToLittleEndian(util::SafeLoadAs<DType>(bitmap));
  }

  template <typename DType>
  void store(uint8_t* bitmap, DType data) {
    assert(bitmap + sizeof(DType) <= bitmap_end_);
    util::SafeStore(bitmap, bit_util::FromLittleEndian(data));
  }
};
284
+
285
+ } // namespace internal
286
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitset_stack.h ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <array>
22
+ #include <bitset>
23
+ #include <cassert>
24
+ #include <cstdint>
25
+ #include <cstring>
26
+ #include <memory>
27
+ #include <string>
28
+ #include <string_view>
29
+ #include <type_traits>
30
+ #include <utility>
31
+ #include <vector>
32
+
33
+ #include "arrow/buffer.h"
34
+ #include "arrow/memory_pool.h"
35
+ #include "arrow/result.h"
36
+ #include "arrow/type_fwd.h"
37
+ #include "arrow/util/bit_util.h"
38
+ #include "arrow/util/compare.h"
39
+ #include "arrow/util/functional.h"
40
+ #include "arrow/util/macros.h"
41
+ #include "arrow/util/string_builder.h"
42
+ #include "arrow/util/type_traits.h"
43
+ #include "arrow/util/visibility.h"
44
+
45
+ namespace arrow {
46
+ namespace internal {
47
+
48
/// \brief Store a stack of bitsets efficiently. The top bitset may be
/// accessed and its bits may be modified, but it may not be resized.
class BitsetStack {
 public:
  using reference = typename std::vector<bool>::reference;

  /// \brief push a bitset onto the stack
  /// \param size number of bits in the next bitset
  /// \param value initial value for bits in the pushed bitset
  void Push(int size, bool value) {
    const int start = TotalBits();
    starts_.push_back(start);
    flags_.resize(start + size, value);
  }

  /// \brief number of bits in the bitset at the top of the stack
  int TopSize() const { return starts_.empty() ? 0 : TotalBits() - starts_.back(); }

  /// \brief pop a bitset off the stack
  void Pop() {
    flags_.resize(starts_.back());
    starts_.pop_back();
  }

  /// \brief get the value of a bit in the top bitset
  /// \param i index of the bit to access
  bool operator[](int i) const { return flags_[starts_.back() + i]; }

  /// \brief get a mutable reference to a bit in the top bitset
  /// \param i index of the bit to access
  reference operator[](int i) { return flags_[starts_.back() + i]; }

 private:
  // All stacked bitsets share one packed vector<bool>; starts_ records the
  // bit offset at which each stacked bitset begins.
  int TotalBits() const { return static_cast<int>(flags_.size()); }
  std::vector<bool> flags_;
  std::vector<int> starts_;
};
87
+
88
+ } // namespace internal
89
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/endian.h"
21
+ #include "arrow/util/visibility.h"
22
+
23
+ #include <stdint.h>
24
+
25
+ namespace arrow {
26
+ namespace internal {
27
+
28
+ ARROW_EXPORT
29
+ int unpack32(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
30
+ ARROW_EXPORT
31
+ int unpack64(const uint8_t* in, uint64_t* out, int batch_size, int num_bits);
32
+
33
+ } // namespace internal
34
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/byte_size.h ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+
22
+ #include "arrow/type_fwd.h"
23
+
24
+ namespace arrow {
25
+
26
+ namespace util {
27
+
28
+ /// \brief The sum of bytes in each buffer referenced by the array
29
+ ///
30
+ /// Note: An array may only reference a portion of a buffer.
31
+ /// This method will overestimate in this case and return the
32
+ /// byte size of the entire buffer.
33
+ /// Note: If a buffer is referenced multiple times then it will
34
+ /// only be counted once.
35
+ ARROW_EXPORT int64_t TotalBufferSize(const ArrayData& array_data);
36
+ /// \brief The sum of bytes in each buffer referenced by the array
37
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
38
+ ARROW_EXPORT int64_t TotalBufferSize(const Array& array);
39
+ /// \brief The sum of bytes in each buffer referenced by the array
40
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
41
+ ARROW_EXPORT int64_t TotalBufferSize(const ChunkedArray& chunked_array);
42
+ /// \brief The sum of bytes in each buffer referenced by the batch
43
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
44
+ ARROW_EXPORT int64_t TotalBufferSize(const RecordBatch& record_batch);
45
+ /// \brief The sum of bytes in each buffer referenced by the table
46
+ /// \see TotalBufferSize(const ArrayData& array_data) for details
47
+ ARROW_EXPORT int64_t TotalBufferSize(const Table& table);
48
+
49
+ /// \brief Calculate the buffer ranges referenced by the array
50
+ ///
51
+ /// These ranges will take into account array offsets
52
+ ///
53
+ /// The ranges may contain duplicates
54
+ ///
55
+ /// Dictionary arrays will ignore the offset of their containing array
56
+ ///
57
+ /// The return value will be a struct array corresponding to the schema:
58
+ /// schema({field("start", uint64()), field("offset", uint64()), field("length",
59
+ /// uint64())})
60
+ ARROW_EXPORT Result<std::shared_ptr<Array>> ReferencedRanges(const ArrayData& array_data);
61
+
62
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
63
+ ///
64
+ /// Unlike TotalBufferSize this method will account for array
65
+ /// offsets.
66
+ ///
67
+ /// If buffers are shared between arrays then the shared
68
+ /// portion will be counted multiple times.
69
+ ///
70
+ /// Dictionary arrays will always be counted in their entirety
71
+ /// even if the array only references a portion of the dictionary.
72
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const ArrayData& array_data);
73
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
74
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
75
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const Array& array_data);
76
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
77
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
78
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const ChunkedArray& array_data);
79
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
80
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
81
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const RecordBatch& array_data);
82
+ /// \brief Returns the sum of bytes from all buffer ranges referenced
83
+ /// \see ReferencedBufferSize(const ArrayData& array_data) for details
84
+ ARROW_EXPORT Result<int64_t> ReferencedBufferSize(const Table& array_data);
85
+
86
+ } // namespace util
87
+
88
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <type_traits>
22
+ #include <utility>
23
+
24
+ namespace arrow {
25
+ namespace internal {
26
+
27
/// \brief Downcast between class types, verified with dynamic_cast in debug
/// builds and performed as an unchecked static_cast in release (NDEBUG) builds.
template <typename OutputType, typename InputType>
inline OutputType checked_cast(InputType&& value) {
  using Input = typename std::remove_pointer<
      typename std::remove_reference<InputType>::type>::type;
  using Output = typename std::remove_pointer<
      typename std::remove_reference<OutputType>::type>::type;
  static_assert(std::is_class<Input>::value,
                "checked_cast input type must be a class");
  static_assert(std::is_class<Output>::value,
                "checked_cast output type must be a class");
#ifndef NDEBUG
  return dynamic_cast<OutputType>(value);
#else
  return static_cast<OutputType>(value);
#endif
}
41
+
42
/// \brief Downcast a shared_ptr, verified with dynamic_pointer_cast in debug
/// builds and performed as an unchecked static_pointer_cast in release builds.
template <class T, class U>
std::shared_ptr<T> checked_pointer_cast(std::shared_ptr<U> r) noexcept {
#ifndef NDEBUG
  return std::dynamic_pointer_cast<T>(std::move(r));
#else
  return std::static_pointer_cast<T>(std::move(r));
#endif
}
50
+
51
/// \brief Downcast a unique_ptr, verified with dynamic_cast in debug builds
/// and performed as an unchecked static_cast in release builds. Ownership is
/// transferred to the returned pointer.
template <class T, class U>
std::unique_ptr<T> checked_pointer_cast(std::unique_ptr<U> r) noexcept {
  auto* raw = r.release();
#ifndef NDEBUG
  return std::unique_ptr<T>(dynamic_cast<T*>(raw));
#else
  return std::unique_ptr<T>(static_cast<T*>(raw));
#endif
}
59
+
60
+ } // namespace internal
61
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/cpu_info.h ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // From Apache Impala (incubating) as of 2016-01-29. Pared down to a minimal
19
+ // set of functions needed for Apache Arrow / Apache parquet-cpp
20
+
21
+ #pragma once
22
+
23
+ #include <cstdint>
24
+ #include <memory>
25
+ #include <string>
26
+
27
+ #include "arrow/util/macros.h"
28
+ #include "arrow/util/visibility.h"
29
+
30
+ namespace arrow {
31
+ namespace internal {
32
+
33
+ /// CpuInfo is an interface to query for cpu information at runtime. The caller can
34
+ /// ask for the sizes of the caches and what hardware features are supported.
35
+ /// On Linux, this information is pulled from a couple of sys files (/proc/cpuinfo and
36
+ /// /sys/devices)
37
+ class ARROW_EXPORT CpuInfo {
38
+ public:
39
+ ~CpuInfo();
40
+
41
+ /// x86 features
42
+ static constexpr int64_t SSSE3 = (1LL << 0);
43
+ static constexpr int64_t SSE4_1 = (1LL << 1);
44
+ static constexpr int64_t SSE4_2 = (1LL << 2);
45
+ static constexpr int64_t POPCNT = (1LL << 3);
46
+ static constexpr int64_t AVX = (1LL << 4);
47
+ static constexpr int64_t AVX2 = (1LL << 5);
48
+ static constexpr int64_t AVX512F = (1LL << 6);
49
+ static constexpr int64_t AVX512CD = (1LL << 7);
50
+ static constexpr int64_t AVX512VL = (1LL << 8);
51
+ static constexpr int64_t AVX512DQ = (1LL << 9);
52
+ static constexpr int64_t AVX512BW = (1LL << 10);
53
+ static constexpr int64_t AVX512 = AVX512F | AVX512CD | AVX512VL | AVX512DQ | AVX512BW;
54
+ static constexpr int64_t BMI1 = (1LL << 11);
55
+ static constexpr int64_t BMI2 = (1LL << 12);
56
+
57
+ /// Arm features
58
+ static constexpr int64_t ASIMD = (1LL << 32);
59
+
60
+ /// Cache enums for L1 (data), L2 and L3
61
+ enum class CacheLevel { L1 = 0, L2, L3, Last = L3 };
62
+
63
+ /// CPU vendors
64
+ enum class Vendor { Unknown, Intel, AMD };
65
+
66
+ static const CpuInfo* GetInstance();
67
+
68
+ /// Returns all the flags for this cpu
69
+ int64_t hardware_flags() const;
70
+
71
+ /// Returns the number of cores (including hyper-threaded) on this machine.
72
+ int num_cores() const;
73
+
74
+ /// Returns the vendor of the cpu.
75
+ Vendor vendor() const;
76
+
77
+ /// Returns the model name of the cpu (e.g. Intel i7-2600)
78
+ const std::string& model_name() const;
79
+
80
+ /// Returns the size of the cache in KB at this cache level
81
+ int64_t CacheSize(CacheLevel level) const;
82
+
83
+ /// \brief Returns whether or not the given feature is enabled.
84
+ ///
85
+ /// IsSupported() is true iff IsDetected() is also true and the feature
86
+ /// wasn't disabled by the user (for example by setting the ARROW_USER_SIMD_LEVEL
87
+ /// environment variable).
88
+ bool IsSupported(int64_t flags) const;
89
+
90
+ /// Returns whether or not the given feature is available on the CPU.
91
+ bool IsDetected(int64_t flags) const;
92
+
93
+ /// Determine if the CPU meets the minimum CPU requirements and if not, issue an error
94
+ /// and terminate.
95
+ void VerifyCpuRequirements() const;
96
+
97
+ /// Toggle a hardware feature on and off. It is not valid to turn on a feature
98
+ /// that the underlying hardware cannot support. This is useful for testing.
99
+ void EnableFeature(int64_t flag, bool enable);
100
+
101
+ bool HasEfficientBmi2() const {
102
+ // BMI2 (pext, pdep) is only efficient on Intel X86 processors.
103
+ return vendor() == Vendor::Intel && IsSupported(BMI2);
104
+ }
105
+
106
+ private:
107
+ CpuInfo();
108
+
109
+ struct Impl;
110
+ std::unique_ptr<Impl> impl_;
111
+ };
112
+
113
+ } // namespace internal
114
+ } // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include <cstddef>
19
+ #include <cstdint>
20
+
21
+ #include "arrow/util/visibility.h"
22
+
23
+ namespace arrow {
24
+ namespace internal {
25
+
26
+ /// \brief Compute the CRC32 checksum of the given data
27
+ ///
28
+ /// This function computes CRC32 with the polynomial 0x04C11DB7,
29
+ /// as used in zlib and others (note this is different from CRC32C).
30
+ /// To compute a running CRC32, pass the previous value in `prev`,
31
+ /// otherwise `prev` should be 0.
32
+ ARROW_EXPORT
33
+ uint32_t crc32(uint32_t prev, const void* data, size_t length);
34
+
35
+ } // namespace internal
36
+ } // namespace arrow