Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +2 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h +323 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h +120 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h +128 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h +233 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h +51 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h +321 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h +53 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h +466 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h +1717 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h +697 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h +134 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h +489 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h +295 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h +394 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h +81 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h +752 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/key_hash.h +223 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/key_map.h +288 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/light_array.h +451 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h +120 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h +121 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/row/grouper.h +184 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h +58 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h +290 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h +275 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h +31 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h +237 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h +565 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h +247 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/hdfs.h +114 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/localfs.h +126 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/mockfs.h +133 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/path_util.h +174 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3_test_util.h +101 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3fs.h +396 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/test_util.h +252 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/type_fwd.h +52 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h +150 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/json_simple.h +71 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/message.h +565 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/options.h +178 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/writer.h +475 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/api.h +21 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunked_builder.h +68 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunker.h +35 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/converter.h +94 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_writer.h +48 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/options.h +74 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/parser.h +107 -0
.gitattributes
CHANGED
@@ -182,3 +182,5 @@ env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1500 filter
 env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/pyarrow/libparquet.so.1500 filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1500 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1500 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/adapter.h
ADDED
@@ -0,0 +1,323 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <memory>
#include <vector>

#include "arrow/adapters/orc/options.h"
#include "arrow/io/interfaces.h"
#include "arrow/memory_pool.h"
#include "arrow/record_batch.h"
#include "arrow/status.h"
#include "arrow/type.h"
#include "arrow/type_fwd.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace adapters {
namespace orc {

/// \brief Information about an ORC stripe
struct StripeInformation {
  /// \brief Offset of the stripe from the start of the file, in bytes
  int64_t offset;
  /// \brief Length of the stripe, in bytes
  int64_t length;
  /// \brief Number of rows in the stripe
  int64_t num_rows;
  /// \brief Index of the first row of the stripe
  int64_t first_row_id;
};

/// \class ORCFileReader
/// \brief Read an Arrow Table or RecordBatch from an ORC file.
class ARROW_EXPORT ORCFileReader {
 public:
  ~ORCFileReader();

  /// \brief Creates a new ORC reader
  ///
  /// \param[in] file the data source
  /// \param[in] pool a MemoryPool to use for buffer allocations
  /// \return the returned reader object
  static Result<std::unique_ptr<ORCFileReader>> Open(
      const std::shared_ptr<io::RandomAccessFile>& file, MemoryPool* pool);

  /// \brief Return the schema read from the ORC file
  ///
  /// \return the returned Schema object
  Result<std::shared_ptr<Schema>> ReadSchema();

  /// \brief Read the file as a Table
  ///
  /// The table will be composed of one record batch per stripe.
  ///
  /// \return the returned Table
  Result<std::shared_ptr<Table>> Read();

  /// \brief Read the file as a Table
  ///
  /// The table will be composed of one record batch per stripe.
  ///
  /// \param[in] schema the Table schema
  /// \return the returned Table
  Result<std::shared_ptr<Table>> Read(const std::shared_ptr<Schema>& schema);

  /// \brief Read the file as a Table
  ///
  /// The table will be composed of one record batch per stripe.
  ///
  /// \param[in] include_indices the selected field indices to read
  /// \return the returned Table
  Result<std::shared_ptr<Table>> Read(const std::vector<int>& include_indices);

  /// \brief Read the file as a Table
  ///
  /// The table will be composed of one record batch per stripe.
  ///
  /// \param[in] include_names the selected field names to read
  /// \return the returned Table
  Result<std::shared_ptr<Table>> Read(const std::vector<std::string>& include_names);

  /// \brief Read the file as a Table
  ///
  /// The table will be composed of one record batch per stripe.
  ///
  /// \param[in] schema the Table schema
  /// \param[in] include_indices the selected field indices to read
  /// \return the returned Table
  Result<std::shared_ptr<Table>> Read(const std::shared_ptr<Schema>& schema,
                                      const std::vector<int>& include_indices);

  /// \brief Read a single stripe as a RecordBatch
  ///
  /// \param[in] stripe the stripe index
  /// \return the returned RecordBatch
  Result<std::shared_ptr<RecordBatch>> ReadStripe(int64_t stripe);

  /// \brief Read a single stripe as a RecordBatch
  ///
  /// \param[in] stripe the stripe index
  /// \param[in] include_indices the selected field indices to read
  /// \return the returned RecordBatch
  Result<std::shared_ptr<RecordBatch>> ReadStripe(
      int64_t stripe, const std::vector<int>& include_indices);

  /// \brief Read a single stripe as a RecordBatch
  ///
  /// \param[in] stripe the stripe index
  /// \param[in] include_names the selected field names to read
  /// \return the returned RecordBatch
  Result<std::shared_ptr<RecordBatch>> ReadStripe(
      int64_t stripe, const std::vector<std::string>& include_names);

  /// \brief Seek to designated row. Invoke NextStripeReader() after seek
  /// will return stripe reader starting from designated row.
  ///
  /// \param[in] row_number the rows number to seek
  Status Seek(int64_t row_number);

  /// \brief Get a stripe level record batch iterator.
  ///
  /// Each record batch will have up to `batch_size` rows.
  /// NextStripeReader serves as a fine-grained alternative to ReadStripe
  /// which may cause OOM issues by loading the whole stripe into memory.
  ///
  /// Note this will only read rows for the current stripe, not the entire
  /// file.
  ///
  /// \param[in] batch_size the maximum number of rows in each record batch
  /// \return the returned stripe reader
  Result<std::shared_ptr<RecordBatchReader>> NextStripeReader(int64_t batch_size);

  /// \brief Get a stripe level record batch iterator.
  ///
  /// Each record batch will have up to `batch_size` rows.
  /// NextStripeReader serves as a fine-grained alternative to ReadStripe
  /// which may cause OOM issues by loading the whole stripe into memory.
  ///
  /// Note this will only read rows for the current stripe, not the entire
  /// file.
  ///
  /// \param[in] batch_size the maximum number of rows in each record batch
  /// \param[in] include_indices the selected field indices to read
  /// \return the stripe reader
  Result<std::shared_ptr<RecordBatchReader>> NextStripeReader(
      int64_t batch_size, const std::vector<int>& include_indices);

  /// \brief Get a record batch iterator for the entire file.
  ///
  /// Each record batch will have up to `batch_size` rows.
  ///
  /// \param[in] batch_size the maximum number of rows in each record batch
  /// \param[in] include_names the selected field names to read, if not empty
  /// (otherwise all fields are read)
  /// \return the record batch iterator
  Result<std::shared_ptr<RecordBatchReader>> GetRecordBatchReader(
      int64_t batch_size, const std::vector<std::string>& include_names);

  /// \brief The number of stripes in the file
  int64_t NumberOfStripes();

  /// \brief The number of rows in the file
  int64_t NumberOfRows();

  /// \brief StripeInformation for each stripe.
  StripeInformation GetStripeInformation(int64_t stripe);

  /// \brief Get the format version of the file.
  /// Currently known values are 0.11 and 0.12.
  ///
  /// \return The FileVersion of the ORC file.
  FileVersion GetFileVersion();

  /// \brief Get the software instance and version that wrote this file.
  ///
  /// \return a user-facing string that specifies the software version
  std::string GetSoftwareVersion();

  /// \brief Get the compression kind of the file.
  ///
  /// \return The kind of compression in the ORC file.
  Result<Compression::type> GetCompression();

  /// \brief Get the buffer size for the compression.
  ///
  /// \return Number of bytes to buffer for the compression codec.
  int64_t GetCompressionSize();

  /// \brief Get the number of rows per an entry in the row index.
  /// \return the number of rows per an entry in the row index or 0 if there
  /// is no row index.
  int64_t GetRowIndexStride();

  /// \brief Get ID of writer that generated the file.
  ///
  /// \return UNKNOWN_WRITER if the writer ID is undefined
  WriterId GetWriterId();

  /// \brief Get the writer id value when getWriterId() returns an unknown writer.
  ///
  /// \return the integer value of the writer ID.
  int32_t GetWriterIdValue();

  /// \brief Get the version of the writer.
  ///
  /// \return the version of the writer.

  WriterVersion GetWriterVersion();

  /// \brief Get the number of stripe statistics in the file.
  ///
  /// \return the number of stripe statistics
  int64_t GetNumberOfStripeStatistics();

  /// \brief Get the length of the data stripes in the file.
  ///
  /// \return return the number of bytes in stripes
  int64_t GetContentLength();

  /// \brief Get the length of the file stripe statistics.
  ///
  /// \return the number of compressed bytes in the file stripe statistics
  int64_t GetStripeStatisticsLength();

  /// \brief Get the length of the file footer.
  ///
  /// \return the number of compressed bytes in the file footer
  int64_t GetFileFooterLength();

  /// \brief Get the length of the file postscript.
  ///
  /// \return the number of bytes in the file postscript
  int64_t GetFilePostscriptLength();

  /// \brief Get the total length of the file.
  ///
  /// \return the number of bytes in the file
  int64_t GetFileLength();

  /// \brief Get the serialized file tail.
  /// Useful if another reader of the same file wants to avoid re-reading
  /// the file tail. See ReadOptions.SetSerializedFileTail().
  ///
  /// \return a string of bytes with the file tail
  std::string GetSerializedFileTail();

  /// \brief Return the metadata read from the ORC file
  ///
  /// \return A KeyValueMetadata object containing the ORC metadata
  Result<std::shared_ptr<const KeyValueMetadata>> ReadMetadata();

 private:
  class Impl;
  std::unique_ptr<Impl> impl_;
  ORCFileReader();
};

/// \class ORCFileWriter
/// \brief Write an Arrow Table or RecordBatch to an ORC file.
class ARROW_EXPORT ORCFileWriter {
 public:
  ~ORCFileWriter();
  /// \brief Creates a new ORC writer.
  ///
  /// \param[in] output_stream a pointer to the io::OutputStream to write into
  /// \param[in] write_options the ORC writer options for Arrow
  /// \return the returned writer object
  static Result<std::unique_ptr<ORCFileWriter>> Open(
      io::OutputStream* output_stream,
      const WriteOptions& write_options = WriteOptions());

  /// \brief Write a table. This can be called multiple times.
  ///
  /// Tables passed in subsequent calls must match the schema of the table that was
  /// written first.
  ///
  /// \param[in] table the Arrow table from which data is extracted.
  /// \return Status
  Status Write(const Table& table);

  /// \brief Write a RecordBatch. This can be called multiple times.
  ///
  /// RecordBatches passed in subsequent calls must match the schema of the
  /// RecordBatch that was written first.
  ///
  /// \param[in] record_batch the Arrow RecordBatch from which data is extracted.
  /// \return Status
  Status Write(const RecordBatch& record_batch);

  /// \brief Close an ORC writer (orc::Writer)
  ///
  /// \return Status
  Status Close();

 private:
  class Impl;
  std::unique_ptr<Impl> impl_;

 private:
  ORCFileWriter();
};

}  // namespace orc
}  // namespace adapters
}  // namespace arrow
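For orientation, a minimal sketch of reading an ORC file through the reader declared above; the file path, the use of io::ReadableFile, and the default memory pool are illustrative assumptions rather than anything mandated by this header, and error handling is reduced to Status propagation.

#include <memory>
#include "arrow/adapters/orc/adapter.h"
#include "arrow/io/file.h"
#include "arrow/result.h"
#include "arrow/table.h"

arrow::Status ReadOrcExample() {
  // Open the ORC file for random access (the path is a placeholder).
  ARROW_ASSIGN_OR_RAISE(auto input, arrow::io::ReadableFile::Open("data.orc"));
  // Create the reader with the default memory pool.
  ARROW_ASSIGN_OR_RAISE(auto reader, arrow::adapters::orc::ORCFileReader::Open(
                                         input, arrow::default_memory_pool()));
  // Materialize the whole file as a Table (one record batch per stripe).
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Table> table, reader->Read());
  return arrow::Status::OK();
}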
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/orc/options.h
ADDED
@@ -0,0 +1,120 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <vector>

#include "arrow/io/interfaces.h"
#include "arrow/status.h"
#include "arrow/util/type_fwd.h"
#include "arrow/util/visibility.h"

namespace arrow {

namespace adapters {

namespace orc {

enum class WriterId : int32_t {
  kOrcJava = 0,
  kOrcCpp = 1,
  kPresto = 2,
  kScritchleyGo = 3,
  kTrino = 4,
  kUnknown = INT32_MAX
};

enum class WriterVersion : int32_t {
  kOriginal = 0,
  kHive8732 = 1,
  kHive4243 = 2,
  kHive12055 = 3,
  kHive13083 = 4,
  kOrc101 = 5,
  kOrc135 = 6,
  kOrc517 = 7,
  kOrc203 = 8,
  kOrc14 = 9,
  kMax = INT32_MAX
};

enum class CompressionStrategy : int32_t { kSpeed = 0, kCompression };

class ARROW_EXPORT FileVersion {
 private:
  int32_t major_version_;
  int32_t minor_version_;

 public:
  static const FileVersion& v_0_11();
  static const FileVersion& v_0_12();

  FileVersion(int32_t major, int32_t minor)
      : major_version_(major), minor_version_(minor) {}

  /**
   * Get major version
   */
  int32_t major_version() const { return this->major_version_; }

  /**
   * Get minor version
   */
  int32_t minor_version() const { return this->minor_version_; }

  bool operator==(const FileVersion& right) const {
    return this->major_version() == right.major_version() &&
           this->minor_version() == right.minor_version();
  }

  bool operator!=(const FileVersion& right) const { return !(*this == right); }

  std::string ToString() const;
};

/// Options for the ORC Writer
struct ARROW_EXPORT WriteOptions {
  /// Number of rows the ORC writer writes at a time, default 1024
  int64_t batch_size = 1024;
  /// Which ORC file version to use, default FileVersion(0, 12)
  FileVersion file_version = FileVersion(0, 12);
  /// Size of each ORC stripe in bytes, default 64 MiB
  int64_t stripe_size = 64 * 1024 * 1024;
  /// The compression codec of the ORC file, there is no compression by default
  Compression::type compression = Compression::UNCOMPRESSED;
  /// The size of each compression block in bytes, default 64 KiB
  int64_t compression_block_size = 64 * 1024;
  /// The compression strategy i.e. speed vs size reduction, default
  /// CompressionStrategy::kSpeed
  CompressionStrategy compression_strategy = CompressionStrategy::kSpeed;
  /// The number of rows per an entry in the row index, default 10000
  int64_t row_index_stride = 10000;
  /// The padding tolerance, default 0.0
  double padding_tolerance = 0.0;
  /// The dictionary key size threshold. 0 to disable dictionary encoding.
  /// 1 to always enable dictionary encoding, default 0.0
  double dictionary_key_size_threshold = 0.0;
  /// The array of columns that use the bloom filter, default empty
  std::vector<int64_t> bloom_filter_columns;
  /// The upper limit of the false-positive rate of the bloom filter, default 0.05
  double bloom_filter_fpp = 0.05;
};

}  // namespace orc
}  // namespace adapters
}  // namespace arrow
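A hedged sketch of how WriteOptions combines with the ORCFileWriter declared in adapter.h; the compression codec, stripe size, row index stride, and output path below are illustrative assumptions, every other field keeps its documented default.

#include "arrow/adapters/orc/adapter.h"
#include "arrow/adapters/orc/options.h"
#include "arrow/io/file.h"
#include "arrow/result.h"
#include "arrow/table.h"

arrow::Status WriteOrcExample(const arrow::Table& table) {
  arrow::adapters::orc::WriteOptions options;
  options.compression = arrow::Compression::ZSTD;  // assumes the ORC build has ZSTD
  options.stripe_size = 128 * 1024 * 1024;         // 128 MiB stripes
  options.row_index_stride = 20000;

  // Output path is a placeholder.
  ARROW_ASSIGN_OR_RAISE(auto sink, arrow::io::FileOutputStream::Open("data.orc"));
  ARROW_ASSIGN_OR_RAISE(auto writer, arrow::adapters::orc::ORCFileWriter::Open(
                                         sink.get(), options));
  ARROW_RETURN_NOT_OK(writer->Write(table));
  return writer->Close();
}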
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/adapters/tensorflow/convert.h
ADDED
@@ -0,0 +1,128 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>

#include "tensorflow/core/framework/op.h"

#include "arrow/type.h"

// These utilities are supposed to be included in TensorFlow operators
// that need to be compiled separately from Arrow because of ABI issues.
// They therefore need to be header-only.

namespace arrow {

namespace adapters {

namespace tensorflow {

Status GetArrowType(::tensorflow::DataType dtype, std::shared_ptr<DataType>* out) {
  switch (dtype) {
    case ::tensorflow::DT_BOOL:
      *out = arrow::boolean();
      break;
    case ::tensorflow::DT_FLOAT:
      *out = arrow::float32();
      break;
    case ::tensorflow::DT_DOUBLE:
      *out = arrow::float64();
      break;
    case ::tensorflow::DT_HALF:
      *out = arrow::float16();
      break;
    case ::tensorflow::DT_INT8:
      *out = arrow::int8();
      break;
    case ::tensorflow::DT_INT16:
      *out = arrow::int16();
      break;
    case ::tensorflow::DT_INT32:
      *out = arrow::int32();
      break;
    case ::tensorflow::DT_INT64:
      *out = arrow::int64();
      break;
    case ::tensorflow::DT_UINT8:
      *out = arrow::uint8();
      break;
    case ::tensorflow::DT_UINT16:
      *out = arrow::uint16();
      break;
    case ::tensorflow::DT_UINT32:
      *out = arrow::uint32();
      break;
    case ::tensorflow::DT_UINT64:
      *out = arrow::uint64();
      break;
    default:
      return Status::TypeError("TensorFlow data type is not supported");
  }
  return Status::OK();
}

Status GetTensorFlowType(std::shared_ptr<DataType> dtype, ::tensorflow::DataType* out) {
  switch (dtype->id()) {
    case Type::BOOL:
      *out = ::tensorflow::DT_BOOL;
      break;
    case Type::UINT8:
      *out = ::tensorflow::DT_UINT8;
      break;
    case Type::INT8:
      *out = ::tensorflow::DT_INT8;
      break;
    case Type::UINT16:
      *out = ::tensorflow::DT_UINT16;
      break;
    case Type::INT16:
      *out = ::tensorflow::DT_INT16;
      break;
    case Type::UINT32:
      *out = ::tensorflow::DT_UINT32;
      break;
    case Type::INT32:
      *out = ::tensorflow::DT_INT32;
      break;
    case Type::UINT64:
      *out = ::tensorflow::DT_UINT64;
      break;
    case Type::INT64:
      *out = ::tensorflow::DT_INT64;
      break;
    case Type::HALF_FLOAT:
      *out = ::tensorflow::DT_HALF;
      break;
    case Type::FLOAT:
      *out = ::tensorflow::DT_FLOAT;
      break;
    case Type::DOUBLE:
      *out = ::tensorflow::DT_DOUBLE;
      break;
    default:
      return Status::TypeError("Arrow data type is not supported");
  }
  return arrow::Status::OK();
}

}  // namespace tensorflow

}  // namespace adapters

}  // namespace arrow
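A brief sketch of the two conversion helpers above; it assumes a build where the TensorFlow headers pulled in by convert.h are available, and the DT_FLOAT example is arbitrary.

#include <memory>
#include "arrow/adapters/tensorflow/convert.h"
#include "arrow/status.h"

arrow::Status DtypeRoundTripExample() {
  // TensorFlow dtype -> Arrow type (DT_FLOAT maps to arrow::float32()).
  std::shared_ptr<arrow::DataType> arrow_type;
  ARROW_RETURN_NOT_OK(
      arrow::adapters::tensorflow::GetArrowType(::tensorflow::DT_FLOAT, &arrow_type));

  // Arrow type -> TensorFlow dtype, completing the round trip.
  ::tensorflow::DataType tf_type;
  return arrow::adapters::tensorflow::GetTensorFlowType(arrow_type, &tf_type);
}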
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h
ADDED
@@ -0,0 +1,233 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

/// \file abi.h Arrow C Data Interface
///
/// The Arrow C Data interface defines a very small, stable set
/// of C definitions which can be easily copied into any project's
/// source code and vendored to be used for columnar data interchange
/// in the Arrow format. For non-C/C++ languages and runtimes,
/// it should be almost as easy to translate the C definitions into
/// the corresponding C FFI declarations.
///
/// Applications and libraries can therefore work with Arrow memory
/// without necessarily using the Arrow libraries or reinventing
/// the wheel. Developers can choose between tight integration
/// with the Arrow software project or minimal integration with
/// the Arrow format only.

#pragma once

#include <stdint.h>

// Spec and documentation: https://arrow.apache.org/docs/format/CDataInterface.html

#ifdef __cplusplus
extern "C" {
#endif

#ifndef ARROW_C_DATA_INTERFACE
#define ARROW_C_DATA_INTERFACE

#define ARROW_FLAG_DICTIONARY_ORDERED 1
#define ARROW_FLAG_NULLABLE 2
#define ARROW_FLAG_MAP_KEYS_SORTED 4

struct ArrowSchema {
  // Array type description
  const char* format;
  const char* name;
  const char* metadata;
  int64_t flags;
  int64_t n_children;
  struct ArrowSchema** children;
  struct ArrowSchema* dictionary;

  // Release callback
  void (*release)(struct ArrowSchema*);
  // Opaque producer-specific data
  void* private_data;
};

struct ArrowArray {
  // Array data description
  int64_t length;
  int64_t null_count;
  int64_t offset;
  int64_t n_buffers;
  int64_t n_children;
  const void** buffers;
  struct ArrowArray** children;
  struct ArrowArray* dictionary;

  // Release callback
  void (*release)(struct ArrowArray*);
  // Opaque producer-specific data
  void* private_data;
};

#endif  // ARROW_C_DATA_INTERFACE

#ifndef ARROW_C_DEVICE_DATA_INTERFACE
#define ARROW_C_DEVICE_DATA_INTERFACE

// Spec and Documentation: https://arrow.apache.org/docs/format/CDeviceDataInterface.html

// DeviceType for the allocated memory
typedef int32_t ArrowDeviceType;

// CPU device, same as using ArrowArray directly
#define ARROW_DEVICE_CPU 1
// CUDA GPU Device
#define ARROW_DEVICE_CUDA 2
// Pinned CUDA CPU memory by cudaMallocHost
#define ARROW_DEVICE_CUDA_HOST 3
// OpenCL Device
#define ARROW_DEVICE_OPENCL 4
// Vulkan buffer for next-gen graphics
#define ARROW_DEVICE_VULKAN 7
// Metal for Apple GPU
#define ARROW_DEVICE_METAL 8
// Verilog simulator buffer
#define ARROW_DEVICE_VPI 9
// ROCm GPUs for AMD GPUs
#define ARROW_DEVICE_ROCM 10
// Pinned ROCm CPU memory allocated by hipMallocHost
#define ARROW_DEVICE_ROCM_HOST 11
// Reserved for extension
#define ARROW_DEVICE_EXT_DEV 12
// CUDA managed/unified memory allocated by cudaMallocManaged
#define ARROW_DEVICE_CUDA_MANAGED 13
// unified shared memory allocated on a oneAPI non-partitioned device.
#define ARROW_DEVICE_ONEAPI 14
// GPU support for next-gen WebGPU standard
#define ARROW_DEVICE_WEBGPU 15
// Qualcomm Hexagon DSP
#define ARROW_DEVICE_HEXAGON 16

struct ArrowDeviceArray {
  // the Allocated Array
  //
  // the buffers in the array (along with the buffers of any
  // children) are what is allocated on the device.
  struct ArrowArray array;
  // The device id to identify a specific device
  int64_t device_id;
  // The type of device which can access this memory.
  ArrowDeviceType device_type;
  // An event-like object to synchronize on if needed.
  void* sync_event;
  // Reserved bytes for future expansion.
  int64_t reserved[3];
};

#endif  // ARROW_C_DEVICE_DATA_INTERFACE

#ifndef ARROW_C_STREAM_INTERFACE
#define ARROW_C_STREAM_INTERFACE

struct ArrowArrayStream {
  // Callback to get the stream type
  // (will be the same for all arrays in the stream).
  //
  // Return value: 0 if successful, an `errno`-compatible error code otherwise.
  //
  // If successful, the ArrowSchema must be released independently from the stream.
  int (*get_schema)(struct ArrowArrayStream*, struct ArrowSchema* out);

  // Callback to get the next array
  // (if no error and the array is released, the stream has ended)
  //
  // Return value: 0 if successful, an `errno`-compatible error code otherwise.
  //
  // If successful, the ArrowArray must be released independently from the stream.
  int (*get_next)(struct ArrowArrayStream*, struct ArrowArray* out);

  // Callback to get optional detailed error information.
  // This must only be called if the last stream operation failed
  // with a non-0 return code.
  //
  // Return value: pointer to a null-terminated character array describing
  // the last error, or NULL if no description is available.
  //
  // The returned pointer is only valid until the next operation on this stream
  // (including release).
  const char* (*get_last_error)(struct ArrowArrayStream*);

  // Release callback: release the stream's own resources.
  // Note that arrays returned by `get_next` must be individually released.
  void (*release)(struct ArrowArrayStream*);

  // Opaque producer-specific data
  void* private_data;
};

#endif  // ARROW_C_STREAM_INTERFACE

#ifndef ARROW_C_DEVICE_STREAM_INTERFACE
#define ARROW_C_DEVICE_STREAM_INTERFACE

// Equivalent to ArrowArrayStream, but for ArrowDeviceArrays.
//
// This stream is intended to provide a stream of data on a single
// device, if a producer wants data to be produced on multiple devices
// then multiple streams should be provided. One per device.
struct ArrowDeviceArrayStream {
  // The device that this stream produces data on.
  ArrowDeviceType device_type;

  // Callback to get the stream schema
  // (will be the same for all arrays in the stream).
  //
  // Return value 0 if successful, an `errno`-compatible error code otherwise.
  //
  // If successful, the ArrowSchema must be released independently from the stream.
  // The schema should be accessible via CPU memory.
  int (*get_schema)(struct ArrowDeviceArrayStream* self, struct ArrowSchema* out);

  // Callback to get the next array
  // (if no error and the array is released, the stream has ended)
  //
  // Return value: 0 if successful, an `errno`-compatible error code otherwise.
  //
  // If successful, the ArrowDeviceArray must be released independently from the stream.
  int (*get_next)(struct ArrowDeviceArrayStream* self, struct ArrowDeviceArray* out);

  // Callback to get optional detailed error information.
  // This must only be called if the last stream operation failed
  // with a non-0 return code.
  //
  // Return value: pointer to a null-terminated character array describing
  // the last error, or NULL if no description is available.
  //
  // The returned pointer is only valid until the next operation on this stream
  // (including release).
  const char* (*get_last_error)(struct ArrowDeviceArrayStream* self);

  // Release callback: release the stream's own resources.
  // Note that arrays returned by `get_next` must be individually released.
  void (*release)(struct ArrowDeviceArrayStream* self);

  // Opaque producer-specific data
  void* private_data;
};

#endif  // ARROW_C_DEVICE_STREAM_INTERFACE

#ifdef __cplusplus
}
#endif
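A consumer-side sketch of draining an ArrowArrayStream according to the contract documented above; the processing step is left as a comment and the fallback error string is an assumption for illustration.

#include <cstdio>
#include "arrow/c/abi.h"

// Every ArrowSchema/ArrowArray received from the stream, and the stream itself,
// must be released exactly once by the consumer.
int ConsumeStream(struct ArrowArrayStream* stream) {
  struct ArrowSchema schema;
  int rc = stream->get_schema(stream, &schema);
  if (rc != 0) {
    const char* err = stream->get_last_error(stream);
    std::fprintf(stderr, "get_schema failed: %s\n", err ? err : "(no details)");
    stream->release(stream);
    return rc;
  }
  schema.release(&schema);  // done inspecting the schema

  while (true) {
    struct ArrowArray array;
    rc = stream->get_next(stream, &array);
    if (rc != 0) break;                   // error: fall through and release the stream
    if (array.release == nullptr) break;  // a released array signals end of stream
    // ... process array.length rows here ...
    array.release(&array);
  }
  stream->release(stream);
  return rc;
}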
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h
ADDED
@@ -0,0 +1,51 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include "arrow/array/array_base.h"
#include "arrow/c/dlpack_abi.h"

namespace arrow::dlpack {

/// \brief Export Arrow array as DLPack tensor.
///
/// DLMangedTensor is produced as defined by the DLPack protocol,
/// see https://dmlc.github.io/dlpack/latest/.
///
/// Data types for which the protocol is supported are
/// integer and floating-point data types.
///
/// DLPack protocol only supports arrays with one contiguous
/// memory region which means Arrow Arrays with validity buffers
/// are not supported.
///
/// \param[in] arr Arrow array
/// \return DLManagedTensor struct
ARROW_EXPORT
Result<DLManagedTensor*> ExportArray(const std::shared_ptr<Array>& arr);

/// \brief Get DLDevice with enumerator specifying the
/// type of the device data is stored on and index of the
/// device which is 0 by default for CPU.
///
/// \param[in] arr Arrow array
/// \return DLDevice struct
ARROW_EXPORT
Result<DLDevice> ExportDevice(const std::shared_ptr<Array>& arr);

}  // namespace arrow::dlpack
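A minimal sketch of exporting an array through the two functions above; per the documented restrictions it assumes `arr` is a primitive integer or floating-point array without a validity buffer, and the immediate call to the deleter stands in for a real DLPack consumer.

#include <memory>
#include "arrow/array.h"
#include "arrow/c/dlpack.h"
#include "arrow/result.h"

arrow::Status ShareWithDlpack(const std::shared_ptr<arrow::Array>& arr) {
  // Produce the DLPack tensor and the device descriptor for the array's data.
  ARROW_ASSIGN_OR_RAISE(DLManagedTensor* tensor, arrow::dlpack::ExportArray(arr));
  ARROW_ASSIGN_OR_RAISE(DLDevice device, arrow::dlpack::ExportDevice(arr));
  (void)device;
  // ... hand `tensor` to a DLPack-aware framework here; the consumer owns it ...
  if (tensor->deleter != nullptr) tensor->deleter(tensor);  // consumer's duty
  return arrow::Status::OK();
}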
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h
ADDED
@@ -0,0 +1,321 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Taken from:
|
2 |
+
// https://github.com/dmlc/dlpack/blob/ca4d00ad3e2e0f410eeab3264d21b8a39397f362/include/dlpack/dlpack.h
|
3 |
+
/*!
|
4 |
+
* Copyright (c) 2017 by Contributors
|
5 |
+
* \file dlpack.h
|
6 |
+
* \brief The common header of DLPack.
|
7 |
+
*/
|
8 |
+
#ifndef DLPACK_DLPACK_H_
|
9 |
+
#define DLPACK_DLPACK_H_
|
10 |
+
|
11 |
+
/**
|
12 |
+
* \brief Compatibility with C++
|
13 |
+
*/
|
14 |
+
#ifdef __cplusplus
|
15 |
+
#define DLPACK_EXTERN_C extern "C"
|
16 |
+
#else
|
17 |
+
#define DLPACK_EXTERN_C
|
18 |
+
#endif
|
19 |
+
|
20 |
+
/*! \brief The current major version of dlpack */
|
21 |
+
#define DLPACK_MAJOR_VERSION 1
|
22 |
+
|
23 |
+
/*! \brief The current minor version of dlpack */
|
24 |
+
#define DLPACK_MINOR_VERSION 0
|
25 |
+
|
26 |
+
/*! \brief DLPACK_DLL prefix for windows */
|
27 |
+
#ifdef _WIN32
|
28 |
+
#ifdef DLPACK_EXPORTS
|
29 |
+
#define DLPACK_DLL __declspec(dllexport)
|
30 |
+
#else
|
31 |
+
#define DLPACK_DLL __declspec(dllimport)
|
32 |
+
#endif
|
33 |
+
#else
|
34 |
+
#define DLPACK_DLL
|
35 |
+
#endif
|
36 |
+
|
37 |
+
#include <stddef.h>
|
38 |
+
#include <stdint.h>
|
39 |
+
|
40 |
+
#ifdef __cplusplus
|
41 |
+
extern "C" {
|
42 |
+
#endif
|
43 |
+
|
44 |
+
/*!
|
45 |
+
* \brief The DLPack version.
|
46 |
+
*
|
47 |
+
* A change in major version indicates that we have changed the
|
48 |
+
* data layout of the ABI - DLManagedTensorVersioned.
|
49 |
+
*
|
50 |
+
* A change in minor version indicates that we have added new
|
51 |
+
* code, such as a new device type, but the ABI is kept the same.
|
52 |
+
*
|
53 |
+
* If an obtained DLPack tensor has a major version that disagrees
|
54 |
+
* with the version number specified in this header file
|
55 |
+
* (i.e. major != DLPACK_MAJOR_VERSION), the consumer must call the deleter
|
56 |
+
* (and it is safe to do so). It is not safe to access any other fields
|
57 |
+
* as the memory layout will have changed.
|
58 |
+
*
|
59 |
+
* In the case of a minor version mismatch, the tensor can be safely used as
|
60 |
+
* long as the consumer knows how to interpret all fields. Minor version
|
61 |
+
* updates indicate the addition of enumeration values.
|
62 |
+
*/
|
63 |
+
typedef struct {
|
64 |
+
/*! \brief DLPack major version. */
|
65 |
+
uint32_t major;
|
66 |
+
/*! \brief DLPack minor version. */
|
67 |
+
uint32_t minor;
|
68 |
+
} DLPackVersion;
|
69 |
+
|
70 |
+
/*!
|
71 |
+
* \brief The device type in DLDevice.
|
72 |
+
*/
|
73 |
+
#ifdef __cplusplus
|
74 |
+
typedef enum : int32_t {
|
75 |
+
#else
|
76 |
+
typedef enum {
|
77 |
+
#endif
|
78 |
+
/*! \brief CPU device */
|
79 |
+
kDLCPU = 1,
|
80 |
+
/*! \brief CUDA GPU device */
|
81 |
+
kDLCUDA = 2,
|
82 |
+
/*!
|
83 |
+
* \brief Pinned CUDA CPU memory by cudaMallocHost
|
84 |
+
*/
|
85 |
+
kDLCUDAHost = 3,
|
86 |
+
/*! \brief OpenCL devices. */
|
87 |
+
kDLOpenCL = 4,
|
88 |
+
/*! \brief Vulkan buffer for next generation graphics. */
|
89 |
+
kDLVulkan = 7,
|
90 |
+
/*! \brief Metal for Apple GPU. */
|
91 |
+
kDLMetal = 8,
|
92 |
+
/*! \brief Verilog simulator buffer */
|
93 |
+
kDLVPI = 9,
|
94 |
+
/*! \brief ROCm GPUs for AMD GPUs */
|
95 |
+
kDLROCM = 10,
|
96 |
+
/*!
|
97 |
+
* \brief Pinned ROCm CPU memory allocated by hipMallocHost
|
98 |
+
*/
|
99 |
+
kDLROCMHost = 11,
|
100 |
+
/*!
|
101 |
+
* \brief Reserved extension device type,
|
102 |
+
* used for quickly test extension device
|
103 |
+
* The semantics can differ depending on the implementation.
|
104 |
+
*/
|
105 |
+
kDLExtDev = 12,
|
106 |
+
/*!
|
107 |
+
* \brief CUDA managed/unified memory allocated by cudaMallocManaged
|
108 |
+
*/
|
109 |
+
kDLCUDAManaged = 13,
|
110 |
+
/*!
|
111 |
+
* \brief Unified shared memory allocated on a oneAPI non-partititioned
|
112 |
+
* device. Call to oneAPI runtime is required to determine the device
|
113 |
+
* type, the USM allocation type and the sycl context it is bound to.
|
114 |
+
*
|
115 |
+
*/
|
116 |
+
kDLOneAPI = 14,
|
117 |
+
/*! \brief GPU support for next generation WebGPU standard. */
|
118 |
+
kDLWebGPU = 15,
|
119 |
+
/*! \brief Qualcomm Hexagon DSP */
|
120 |
+
kDLHexagon = 16,
|
121 |
+
} DLDeviceType;
|
122 |
+
|
123 |
+
/*!
|
124 |
+
* \brief A Device for Tensor and operator.
|
125 |
+
*/
|
126 |
+
typedef struct {
|
127 |
+
/*! \brief The device type used in the device. */
|
128 |
+
DLDeviceType device_type;
|
129 |
+
/*!
|
130 |
+
* \brief The device index.
|
131 |
+
* For vanilla CPU memory, pinned memory, or managed memory, this is set to 0.
|
132 |
+
*/
|
133 |
+
int32_t device_id;
|
134 |
+
} DLDevice;
|
135 |
+
|
136 |
+
/*!
|
137 |
+
* \brief The type code options DLDataType.
|
138 |
+
*/
|
139 |
+
typedef enum {
|
140 |
+
/*! \brief signed integer */
|
141 |
+
kDLInt = 0U,
|
142 |
+
/*! \brief unsigned integer */
|
143 |
+
kDLUInt = 1U,
|
144 |
+
/*! \brief IEEE floating point */
|
145 |
+
kDLFloat = 2U,
|
146 |
+
/*!
|
147 |
+
* \brief Opaque handle type, reserved for testing purposes.
|
148 |
+
* Frameworks need to agree on the handle data type for the exchange to be well-defined.
|
149 |
+
*/
|
150 |
+
kDLOpaqueHandle = 3U,
|
151 |
+
/*! \brief bfloat16 */
|
152 |
+
kDLBfloat = 4U,
|
153 |
+
/*!
|
154 |
+
* \brief complex number
|
155 |
+
* (C/C++/Python layout: compact struct per complex number)
|
156 |
+
*/
|
157 |
+
kDLComplex = 5U,
|
158 |
+
/*! \brief boolean */
|
159 |
+
kDLBool = 6U,
|
160 |
+
} DLDataTypeCode;
|
161 |
+
|
162 |
+
/*!
|
163 |
+
* \brief The data type the tensor can hold. The data type is assumed to follow the
|
164 |
+
* native endian-ness. An explicit error message should be raised when attempting to
|
165 |
+
* export an array with non-native endianness
|
166 |
+
*
|
167 |
+
* Examples
|
168 |
+
* - float: type_code = 2, bits = 32, lanes = 1
|
169 |
+
* - float4(vectorized 4 float): type_code = 2, bits = 32, lanes = 4
|
170 |
+
* - int8: type_code = 0, bits = 8, lanes = 1
|
171 |
+
* - std::complex<float>: type_code = 5, bits = 64, lanes = 1
|
172 |
+
* - bool: type_code = 6, bits = 8, lanes = 1 (as per common array library convention,
|
173 |
+
* the underlying storage size of bool is 8 bits)
|
174 |
+
*/
|
175 |
+
typedef struct {
|
176 |
+
/*!
|
177 |
+
* \brief Type code of base types.
|
178 |
+
* We keep it uint8_t instead of DLDataTypeCode for minimal memory
|
179 |
+
* footprint, but the value should be one of DLDataTypeCode enum values.
|
180 |
+
* */
|
181 |
+
uint8_t code;
|
182 |
+
/*!
|
183 |
+
* \brief Number of bits, common choices are 8, 16, 32.
|
184 |
+
*/
|
185 |
+
uint8_t bits;
|
186 |
+
/*! \brief Number of lanes in the type, used for vector types. */
|
187 |
+
uint16_t lanes;
|
188 |
+
} DLDataType;
|
189 |
+
|
190 |
+
/*!
|
191 |
+
* \brief Plain C Tensor object, does not manage memory.
|
192 |
+
*/
|
193 |
+
typedef struct {
|
194 |
+
/*!
|
195 |
+
* \brief The data pointer points to the allocated data. This will be CUDA
|
196 |
+
* device pointer or cl_mem handle in OpenCL. It may be opaque on some device
|
197 |
+
* types. This pointer is always aligned to 256 bytes as in CUDA. The
|
198 |
+
* `byte_offset` field should be used to point to the beginning of the data.
|
199 |
+
*
|
200 |
+
* Note that as of Nov 2021, multiply libraries (CuPy, PyTorch, TensorFlow,
|
201 |
+
* TVM, perhaps others) do not adhere to this 256 byte aligment requirement
|
202 |
+
* on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed
|
203 |
+
* (after which this note will be updated); at the moment it is recommended
|
204 |
+
* to not rely on the data pointer being correctly aligned.
|
205 |
+
*
|
206 |
+
* For given DLTensor, the size of memory required to store the contents of
|
207 |
+
* data is calculated as follows:
|
208 |
+
*
|
209 |
+
* \code{.c}
|
210 |
+
   * static inline size_t GetDataSize(const DLTensor* t) {
   *   size_t size = 1;
   *   for (tvm_index_t i = 0; i < t->ndim; ++i) {
   *     size *= t->shape[i];
   *   }
   *   size *= (t->dtype.bits * t->dtype.lanes + 7) / 8;
   *   return size;
   * }
   * \endcode
   */
  void* data;
  /*! \brief The device of the tensor */
  DLDevice device;
  /*! \brief Number of dimensions */
  int32_t ndim;
  /*! \brief The data type of the pointer*/
  DLDataType dtype;
  /*! \brief The shape of the tensor */
  int64_t* shape;
  /*!
   * \brief strides of the tensor (in number of elements, not bytes)
   *  can be NULL, indicating tensor is compact and row-majored.
   */
  int64_t* strides;
  /*! \brief The offset in bytes to the beginning pointer to data */
  uint64_t byte_offset;
} DLTensor;

/*!
 * \brief C Tensor object, manage memory of DLTensor. This data structure is
 *  intended to facilitate the borrowing of DLTensor by another framework. It is
 *  not meant to transfer the tensor. When the borrowing framework doesn't need
 *  the tensor, it should call the deleter to notify the host that the resource
 *  is no longer needed.
 *
 * \note This data structure is used as Legacy DLManagedTensor
 *       in DLPack exchange and is deprecated after DLPack v0.8
 *       Use DLManagedTensorVersioned instead.
 *       This data structure may get renamed or deleted in future versions.
 *
 * \sa DLManagedTensorVersioned
 */
typedef struct DLManagedTensor {
  /*! \brief DLTensor which is being memory managed */
  DLTensor dl_tensor;
  /*! \brief the context of the original host framework of DLManagedTensor in
   *   which DLManagedTensor is used in the framework. It can also be NULL.
   */
  void* manager_ctx;
  /*!
   * \brief Destructor - this should be called
   * to destruct the manager_ctx which backs the DLManagedTensor. It can be
   * NULL if there is no way for the caller to provide a reasonable destructor.
   * The destructors deletes the argument self as well.
   */
  void (*deleter)(struct DLManagedTensor* self);
} DLManagedTensor;

// bit masks used in in the DLManagedTensorVersioned

/*! \brief bit mask to indicate that the tensor is read only. */
#define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL)

/*!
 * \brief A versioned and managed C Tensor object, manage memory of DLTensor.
 *
 * This data structure is intended to facilitate the borrowing of DLTensor by
 * another framework. It is not meant to transfer the tensor. When the borrowing
 * framework doesn't need the tensor, it should call the deleter to notify the
 * host that the resource is no longer needed.
 *
 * \note This is the current standard DLPack exchange data structure.
 */
struct DLManagedTensorVersioned {
  /*!
   * \brief The API and ABI version of the current managed Tensor
   */
  DLPackVersion version;
  /*!
   * \brief the context of the original host framework.
   *
   * Stores DLManagedTensorVersioned is used in the
   * framework. It can also be NULL.
   */
  void* manager_ctx;
  /*!
   * \brief Destructor.
   *
   * This should be called to destruct manager_ctx which holds the
   * DLManagedTensorVersioned. It can be NULL if there is no way for the caller to provide
   * a reasonable destructor. The destructors deletes the argument self as well.
   */
  void (*deleter)(struct DLManagedTensorVersioned* self);
  /*!
   * \brief Additional bitmask flags information about the tensor.
   *
   * By default the flags should be set to 0.
   *
   * \note Future ABI changes should keep everything until this field
   *       stable, to ensure that deleter can be correctly called.
   *
   * \sa DLPACK_FLAG_BITMASK_READ_ONLY
   */
  uint64_t flags;
  /*! \brief DLTensor which is being memory managed */
  DLTensor dl_tensor;
};

#ifdef __cplusplus
}  // DLPACK_EXTERN_C
#endif
#endif  // DLPACK_DLPACK_H_
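The snippet below is not part of the uploaded header; it is a minimal producer-side sketch of the borrowing contract documented above. It assumes the kDLCPU/kDLFloat enums, the DLPackVersion fields, and the DLPACK_MAJOR_VERSION/DLPACK_MINOR_VERSION macros defined earlier in this header, and the include path is illustrative. The consumer is expected to call deleter once it no longer needs the tensor, which here frees both the payload and the wrapper.

// Illustrative producer-side sketch (assumed helper, not part of the header):
// wrap a heap-allocated float buffer so another framework can borrow it and
// release it through the deleter.
#include <cstdint>
#include <vector>
#include "arrow/c/dlpack_abi.h"  // assumed include path for the header above

// Owner object kept alive via manager_ctx until the consumer calls deleter.
struct OwnedBuffer {
  std::vector<float> values;
  std::vector<int64_t> shape;
  DLManagedTensorVersioned managed;
};

static void OwnedBufferDeleter(DLManagedTensorVersioned* self) {
  // The deleter releases manager_ctx and, with it, `self` (which lives inside it).
  delete static_cast<OwnedBuffer*>(self->manager_ctx);
}

DLManagedTensorVersioned* ExportVector(std::vector<float> data) {
  auto* owner = new OwnedBuffer{std::move(data), {}, {}};
  owner->shape = {static_cast<int64_t>(owner->values.size())};

  DLManagedTensorVersioned* m = &owner->managed;
  m->version = {DLPACK_MAJOR_VERSION, DLPACK_MINOR_VERSION};
  m->manager_ctx = owner;
  m->deleter = OwnedBufferDeleter;
  m->flags = 0;  // writable; set DLPACK_FLAG_BITMASK_READ_ONLY otherwise

  DLTensor* t = &m->dl_tensor;
  t->data = owner->values.data();
  t->device = {kDLCPU, 0};
  t->ndim = 1;
  t->dtype = {kDLFloat, 32, 1};
  t->shape = owner->shape.data();
  t->strides = nullptr;  // compact, row-major
  t->byte_offset = 0;
  return m;
}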
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h
ADDED
@@ -0,0 +1,53 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// NOTE: API is EXPERIMENTAL and will change without going through a
// deprecation cycle

#pragma once

/// \defgroup compute-functions Abstract compute function API
/// @{
/// @}

/// \defgroup compute-concrete-options Concrete option classes for compute functions
/// @{
/// @}

#include "arrow/compute/api_aggregate.h"  // IWYU pragma: export
#include "arrow/compute/api_scalar.h"     // IWYU pragma: export
#include "arrow/compute/api_vector.h"     // IWYU pragma: export
#include "arrow/compute/cast.h"           // IWYU pragma: export
#include "arrow/compute/function.h"       // IWYU pragma: export
#include "arrow/compute/function_options.h"  // IWYU pragma: export
#include "arrow/compute/kernel.h"         // IWYU pragma: export
#include "arrow/compute/registry.h"       // IWYU pragma: export
#include "arrow/datum.h"                  // IWYU pragma: export

#include "arrow/compute/expression.h"  // IWYU pragma: export

/// \defgroup execnode-row Utilities for working with data in a row-major format
/// @{
/// @}

#include "arrow/compute/row/grouper.h"  // IWYU pragma: export

/// \defgroup acero-internals Acero internals, useful for those extending Acero
/// @{
/// @}

#include "arrow/compute/exec.h"  // IWYU pragma: export
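As a brief aside (not part of the upload), this umbrella header is typically the only compute include a caller needs: besides the eager convenience functions it re-exports the registry and execution machinery, so any registered kernel can also be invoked by name. A hedged sketch, assuming the standard arrow::compute::CallFunction entry point and the "negate" kernel name:

// Hypothetical sketch: invoke a registered compute function by name through
// CallFunction (declared in exec.h, which the umbrella header re-exports).
#include "arrow/api.h"
#include "arrow/compute/api.h"

arrow::Result<arrow::Datum> NegateByName(const std::shared_ptr<arrow::Array>& values) {
  // The function name is looked up in the default function registry.
  return arrow::compute::CallFunction("negate", {arrow::Datum(values)});
}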
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h
ADDED
@@ -0,0 +1,466 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// Eager evaluation convenience APIs for invoking common functions, including
// necessary memory allocations

#pragma once

#include <vector>

#include "arrow/compute/function_options.h"
#include "arrow/datum.h"
#include "arrow/result.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Array;

namespace compute {

class ExecContext;

// ----------------------------------------------------------------------
// Aggregate functions

/// \addtogroup compute-concrete-options
/// @{

/// \brief Control general scalar aggregate kernel behavior
///
/// By default, null values are ignored (skip_nulls = true).
class ARROW_EXPORT ScalarAggregateOptions : public FunctionOptions {
 public:
  explicit ScalarAggregateOptions(bool skip_nulls = true, uint32_t min_count = 1);
  static constexpr char const kTypeName[] = "ScalarAggregateOptions";
  static ScalarAggregateOptions Defaults() { return ScalarAggregateOptions{}; }

  /// If true (the default), null values are ignored. Otherwise, if any value is null,
  /// emit null.
  bool skip_nulls;
  /// If less than this many non-null values are observed, emit null.
  uint32_t min_count;
};

/// \brief Control count aggregate kernel behavior.
///
/// By default, only non-null values are counted.
class ARROW_EXPORT CountOptions : public FunctionOptions {
 public:
  enum CountMode {
    /// Count only non-null values.
    ONLY_VALID = 0,
    /// Count only null values.
    ONLY_NULL,
    /// Count both non-null and null values.
    ALL,
  };
  explicit CountOptions(CountMode mode = CountMode::ONLY_VALID);
  static constexpr char const kTypeName[] = "CountOptions";
  static CountOptions Defaults() { return CountOptions{}; }

  CountMode mode;
};

/// \brief Control Mode kernel behavior
///
/// Returns top-n common values and counts.
/// By default, returns the most common value and count.
class ARROW_EXPORT ModeOptions : public FunctionOptions {
 public:
  explicit ModeOptions(int64_t n = 1, bool skip_nulls = true, uint32_t min_count = 0);
  static constexpr char const kTypeName[] = "ModeOptions";
  static ModeOptions Defaults() { return ModeOptions{}; }

  int64_t n = 1;
  /// If true (the default), null values are ignored. Otherwise, if any value is null,
  /// emit null.
  bool skip_nulls;
  /// If less than this many non-null values are observed, emit null.
  uint32_t min_count;
};

/// \brief Control Delta Degrees of Freedom (ddof) of Variance and Stddev kernel
///
/// The divisor used in calculations is N - ddof, where N is the number of elements.
/// By default, ddof is zero, and population variance or stddev is returned.
class ARROW_EXPORT VarianceOptions : public FunctionOptions {
 public:
  explicit VarianceOptions(int ddof = 0, bool skip_nulls = true, uint32_t min_count = 0);
  static constexpr char const kTypeName[] = "VarianceOptions";
  static VarianceOptions Defaults() { return VarianceOptions{}; }

  int ddof = 0;
  /// If true (the default), null values are ignored. Otherwise, if any value is null,
  /// emit null.
  bool skip_nulls;
  /// If less than this many non-null values are observed, emit null.
  uint32_t min_count;
};

/// \brief Control Quantile kernel behavior
///
/// By default, returns the median value.
class ARROW_EXPORT QuantileOptions : public FunctionOptions {
 public:
  /// Interpolation method to use when quantile lies between two data points
  enum Interpolation {
    LINEAR = 0,
    LOWER,
    HIGHER,
    NEAREST,
    MIDPOINT,
  };

  explicit QuantileOptions(double q = 0.5, enum Interpolation interpolation = LINEAR,
                           bool skip_nulls = true, uint32_t min_count = 0);

  explicit QuantileOptions(std::vector<double> q,
                           enum Interpolation interpolation = LINEAR,
                           bool skip_nulls = true, uint32_t min_count = 0);

  static constexpr char const kTypeName[] = "QuantileOptions";
  static QuantileOptions Defaults() { return QuantileOptions{}; }

  /// probability level of quantile must be between 0 and 1 inclusive
  std::vector<double> q;
  enum Interpolation interpolation;
  /// If true (the default), null values are ignored. Otherwise, if any value is null,
  /// emit null.
  bool skip_nulls;
  /// If less than this many non-null values are observed, emit null.
  uint32_t min_count;
};

/// \brief Control TDigest approximate quantile kernel behavior
///
/// By default, returns the median value.
class ARROW_EXPORT TDigestOptions : public FunctionOptions {
 public:
  explicit TDigestOptions(double q = 0.5, uint32_t delta = 100,
                          uint32_t buffer_size = 500, bool skip_nulls = true,
                          uint32_t min_count = 0);
  explicit TDigestOptions(std::vector<double> q, uint32_t delta = 100,
                          uint32_t buffer_size = 500, bool skip_nulls = true,
                          uint32_t min_count = 0);
  static constexpr char const kTypeName[] = "TDigestOptions";
  static TDigestOptions Defaults() { return TDigestOptions{}; }

  /// probability level of quantile must be between 0 and 1 inclusive
  std::vector<double> q;
  /// compression parameter, default 100
  uint32_t delta;
  /// input buffer size, default 500
  uint32_t buffer_size;
  /// If true (the default), null values are ignored. Otherwise, if any value is null,
  /// emit null.
  bool skip_nulls;
  /// If less than this many non-null values are observed, emit null.
  uint32_t min_count;
};

/// \brief Control Index kernel behavior
class ARROW_EXPORT IndexOptions : public FunctionOptions {
 public:
  explicit IndexOptions(std::shared_ptr<Scalar> value);
  // Default constructor for serialization
  IndexOptions();
  static constexpr char const kTypeName[] = "IndexOptions";

  std::shared_ptr<Scalar> value;
};

/// \brief Configure a grouped aggregation
struct ARROW_EXPORT Aggregate {
  Aggregate() = default;

  Aggregate(std::string function, std::shared_ptr<FunctionOptions> options,
            std::vector<FieldRef> target, std::string name = "")
      : function(std::move(function)),
        options(std::move(options)),
        target(std::move(target)),
        name(std::move(name)) {}

  Aggregate(std::string function, std::shared_ptr<FunctionOptions> options,
            FieldRef target, std::string name = "")
      : Aggregate(std::move(function), std::move(options),
                  std::vector<FieldRef>{std::move(target)}, std::move(name)) {}

  Aggregate(std::string function, FieldRef target, std::string name)
      : Aggregate(std::move(function), /*options=*/NULLPTR,
                  std::vector<FieldRef>{std::move(target)}, std::move(name)) {}

  Aggregate(std::string function, std::string name)
      : Aggregate(std::move(function), /*options=*/NULLPTR,
                  /*target=*/std::vector<FieldRef>{}, std::move(name)) {}

  /// the name of the aggregation function
  std::string function;

  /// options for the aggregation function
  std::shared_ptr<FunctionOptions> options;

  /// zero or more fields to which aggregations will be applied
  std::vector<FieldRef> target;

  /// optional output field name for aggregations
  std::string name;
};

/// @}

/// \brief Count values in an array.
///
/// \param[in] options counting options, see CountOptions for more information
/// \param[in] datum to count
/// \param[in] ctx the function execution context, optional
/// \return out resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Count(const Datum& datum,
                    const CountOptions& options = CountOptions::Defaults(),
                    ExecContext* ctx = NULLPTR);

/// \brief Compute the mean of a numeric array.
///
/// \param[in] value datum to compute the mean, expecting Array
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed mean as a DoubleScalar
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Mean(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Compute the product of values of a numeric array.
///
/// \param[in] value datum to compute product of, expecting Array or ChunkedArray
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed sum as a Scalar
///
/// \since 6.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Product(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Sum values of a numeric array.
///
/// \param[in] value datum to sum, expecting Array or ChunkedArray
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed sum as a Scalar
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Sum(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Calculate the first value of an array
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed first as Scalar
///
/// \since 13.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> First(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Calculate the last value of an array
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed last as a Scalar
///
/// \since 13.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Last(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Calculate the min / max of a numeric array
///
/// This function returns both the min and max as a struct scalar, with type
/// struct<min: T, max: T>, where T is the input type
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as a struct<min: T, max: T> scalar
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> MinMax(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Test whether any element in a boolean array evaluates to true.
///
/// This function returns true if any of the elements in the array evaluates
/// to true and false otherwise. Null values are ignored by default.
/// If null values are taken into account by setting ScalarAggregateOptions
/// parameter skip_nulls = false then Kleene logic is used.
/// See KleeneOr for more details on Kleene logic.
///
/// \param[in] value input datum, expecting a boolean array
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as a BooleanScalar
///
/// \since 3.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Any(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Test whether all elements in a boolean array evaluate to true.
///
/// This function returns true if all of the elements in the array evaluate
/// to true and false otherwise. Null values are ignored by default.
/// If null values are taken into account by setting ScalarAggregateOptions
/// parameter skip_nulls = false then Kleene logic is used.
/// See KleeneAnd for more details on Kleene logic.
///
/// \param[in] value input datum, expecting a boolean array
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as a BooleanScalar

/// \since 3.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> All(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Calculate the modal (most common) value of a numeric array
///
/// This function returns top-n most common values and number of times they occur as
/// an array of `struct<mode: T, count: int64>`, where T is the input type.
/// Values with larger counts are returned before smaller ones.
/// If there are more than one values with same count, smaller value is returned first.
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see ModeOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as an array of struct<mode: T, count: int64>
///
/// \since 2.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Mode(const Datum& value,
                   const ModeOptions& options = ModeOptions::Defaults(),
                   ExecContext* ctx = NULLPTR);

/// \brief Calculate the standard deviation of a numeric array
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see VarianceOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed standard deviation as a DoubleScalar
///
/// \since 2.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Stddev(const Datum& value,
                     const VarianceOptions& options = VarianceOptions::Defaults(),
                     ExecContext* ctx = NULLPTR);

/// \brief Calculate the variance of a numeric array
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see VarianceOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed variance as a DoubleScalar
///
/// \since 2.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Variance(const Datum& value,
                       const VarianceOptions& options = VarianceOptions::Defaults(),
                       ExecContext* ctx = NULLPTR);

/// \brief Calculate the quantiles of a numeric array
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see QuantileOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as an array
///
/// \since 4.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Quantile(const Datum& value,
                       const QuantileOptions& options = QuantileOptions::Defaults(),
                       ExecContext* ctx = NULLPTR);

/// \brief Calculate the approximate quantiles of a numeric array with T-Digest algorithm
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see TDigestOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as an array
///
/// \since 4.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> TDigest(const Datum& value,
                      const TDigestOptions& options = TDigestOptions::Defaults(),
                      ExecContext* ctx = NULLPTR);

/// \brief Find the first index of a value in an array.
///
/// \param[in] value The array to search.
/// \param[in] options The array to search for. See IndexOptions.
/// \param[in] ctx the function execution context, optional
/// \return out a Scalar containing the index (or -1 if not found).
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Index(const Datum& value, const IndexOptions& options,
                    ExecContext* ctx = NULLPTR);

}  // namespace compute
}  // namespace arrow
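A short usage sketch (not part of the uploaded header) for the eager aggregate API declared above, assuming the usual builder classes and Result/Status macros pulled in by arrow/api.h. It contrasts the default null handling of ScalarAggregateOptions with the strict skip_nulls=false variant, and shows the struct<min, max> scalar returned by MinMax:

// Hypothetical usage sketch for the eager aggregate API declared above.
#include <iostream>
#include "arrow/api.h"
#include "arrow/compute/api_aggregate.h"

arrow::Status AggregateDemo() {
  arrow::DoubleBuilder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1.5, 2.5, 4.0}));
  ARROW_RETURN_NOT_OK(builder.AppendNull());
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> values, builder.Finish());

  // Nulls are skipped by default...
  ARROW_ASSIGN_OR_RAISE(arrow::Datum sum, arrow::compute::Sum(values));
  // ...but with skip_nulls = false any null in the input yields a null result.
  arrow::compute::ScalarAggregateOptions strict(/*skip_nulls=*/false);
  ARROW_ASSIGN_OR_RAISE(arrow::Datum strict_sum, arrow::compute::Sum(values, strict));

  // MinMax returns a single struct<min: T, max: T> scalar.
  ARROW_ASSIGN_OR_RAISE(arrow::Datum min_max, arrow::compute::MinMax(values));

  std::cout << sum.scalar()->ToString() << "\n"
            << strict_sum.scalar()->ToString() << "\n"
            << min_max.scalar()->ToString() << std::endl;
  return arrow::Status::OK();
}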
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_scalar.h
ADDED
@@ -0,0 +1,1717 @@
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
// Eager evaluation convenience APIs for invoking common functions, including
|
19 |
+
// necessary memory allocations
|
20 |
+
|
21 |
+
#pragma once
|
22 |
+
|
23 |
+
#include <optional>
|
24 |
+
#include <string>
|
25 |
+
#include <utility>
|
26 |
+
|
27 |
+
#include "arrow/compute/function_options.h"
|
28 |
+
#include "arrow/compute/type_fwd.h"
|
29 |
+
#include "arrow/datum.h"
|
30 |
+
#include "arrow/result.h"
|
31 |
+
#include "arrow/util/macros.h"
|
32 |
+
#include "arrow/util/visibility.h"
|
33 |
+
|
34 |
+
namespace arrow {
|
35 |
+
namespace compute {
|
36 |
+
|
37 |
+
/// \addtogroup compute-concrete-options
|
38 |
+
///
|
39 |
+
/// @{
|
40 |
+
|
41 |
+
class ARROW_EXPORT ArithmeticOptions : public FunctionOptions {
|
42 |
+
public:
|
43 |
+
explicit ArithmeticOptions(bool check_overflow = false);
|
44 |
+
static constexpr char const kTypeName[] = "ArithmeticOptions";
|
45 |
+
bool check_overflow;
|
46 |
+
};
|
47 |
+
|
48 |
+
class ARROW_EXPORT ElementWiseAggregateOptions : public FunctionOptions {
|
49 |
+
public:
|
50 |
+
explicit ElementWiseAggregateOptions(bool skip_nulls = true);
|
51 |
+
static constexpr char const kTypeName[] = "ElementWiseAggregateOptions";
|
52 |
+
static ElementWiseAggregateOptions Defaults() { return ElementWiseAggregateOptions{}; }
|
53 |
+
bool skip_nulls;
|
54 |
+
};
|
55 |
+
|
56 |
+
/// Rounding and tie-breaking modes for round compute functions.
|
57 |
+
/// Additional details and examples are provided in compute.rst.
|
58 |
+
enum class RoundMode : int8_t {
|
59 |
+
/// Round to nearest integer less than or equal in magnitude (aka "floor")
|
60 |
+
DOWN,
|
61 |
+
/// Round to nearest integer greater than or equal in magnitude (aka "ceil")
|
62 |
+
UP,
|
63 |
+
/// Get the integral part without fractional digits (aka "trunc")
|
64 |
+
TOWARDS_ZERO,
|
65 |
+
/// Round negative values with DOWN rule
|
66 |
+
/// and positive values with UP rule (aka "away from zero")
|
67 |
+
TOWARDS_INFINITY,
|
68 |
+
/// Round ties with DOWN rule (also called "round half towards negative infinity")
|
69 |
+
HALF_DOWN,
|
70 |
+
/// Round ties with UP rule (also called "round half towards positive infinity")
|
71 |
+
HALF_UP,
|
72 |
+
/// Round ties with TOWARDS_ZERO rule (also called "round half away from infinity")
|
73 |
+
HALF_TOWARDS_ZERO,
|
74 |
+
/// Round ties with TOWARDS_INFINITY rule (also called "round half away from zero")
|
75 |
+
HALF_TOWARDS_INFINITY,
|
76 |
+
/// Round ties to nearest even integer
|
77 |
+
HALF_TO_EVEN,
|
78 |
+
/// Round ties to nearest odd integer
|
79 |
+
HALF_TO_ODD,
|
80 |
+
};
|
81 |
+
|
82 |
+
class ARROW_EXPORT RoundOptions : public FunctionOptions {
|
83 |
+
public:
|
84 |
+
explicit RoundOptions(int64_t ndigits = 0,
|
85 |
+
RoundMode round_mode = RoundMode::HALF_TO_EVEN);
|
86 |
+
static constexpr char const kTypeName[] = "RoundOptions";
|
87 |
+
static RoundOptions Defaults() { return RoundOptions(); }
|
88 |
+
/// Rounding precision (number of digits to round to)
|
89 |
+
int64_t ndigits;
|
90 |
+
/// Rounding and tie-breaking mode
|
91 |
+
RoundMode round_mode;
|
92 |
+
};
|
93 |
+
|
94 |
+
class ARROW_EXPORT RoundBinaryOptions : public FunctionOptions {
|
95 |
+
public:
|
96 |
+
explicit RoundBinaryOptions(RoundMode round_mode = RoundMode::HALF_TO_EVEN);
|
97 |
+
static constexpr char const kTypeName[] = "RoundBinaryOptions";
|
98 |
+
static RoundBinaryOptions Defaults() { return RoundBinaryOptions(); }
|
99 |
+
/// Rounding and tie-breaking mode
|
100 |
+
RoundMode round_mode;
|
101 |
+
};
|
102 |
+
|
103 |
+
enum class CalendarUnit : int8_t {
|
104 |
+
NANOSECOND,
|
105 |
+
MICROSECOND,
|
106 |
+
MILLISECOND,
|
107 |
+
SECOND,
|
108 |
+
MINUTE,
|
109 |
+
HOUR,
|
110 |
+
DAY,
|
111 |
+
WEEK,
|
112 |
+
MONTH,
|
113 |
+
QUARTER,
|
114 |
+
YEAR
|
115 |
+
};
|
116 |
+
|
117 |
+
class ARROW_EXPORT RoundTemporalOptions : public FunctionOptions {
|
118 |
+
public:
|
119 |
+
explicit RoundTemporalOptions(int multiple = 1, CalendarUnit unit = CalendarUnit::DAY,
|
120 |
+
bool week_starts_monday = true,
|
121 |
+
bool ceil_is_strictly_greater = false,
|
122 |
+
bool calendar_based_origin = false);
|
123 |
+
static constexpr char const kTypeName[] = "RoundTemporalOptions";
|
124 |
+
static RoundTemporalOptions Defaults() { return RoundTemporalOptions(); }
|
125 |
+
|
126 |
+
/// Number of units to round to
|
127 |
+
int multiple;
|
128 |
+
/// The unit used for rounding of time
|
129 |
+
CalendarUnit unit;
|
130 |
+
/// What day does the week start with (Monday=true, Sunday=false)
|
131 |
+
bool week_starts_monday;
|
132 |
+
/// Enable this flag to return a rounded value that is strictly greater than the input.
|
133 |
+
/// For example: ceiling 1970-01-01T00:00:00 to 3 hours would yield 1970-01-01T03:00:00
|
134 |
+
/// if set to true and 1970-01-01T00:00:00 if set to false.
|
135 |
+
/// This applies for ceiling only.
|
136 |
+
bool ceil_is_strictly_greater;
|
137 |
+
/// By default time is rounded to a multiple of units since 1970-01-01T00:00:00.
|
138 |
+
/// By setting calendar_based_origin to true, time will be rounded to a number
|
139 |
+
/// of units since the last greater calendar unit.
|
140 |
+
/// For example: rounding to a multiple of days since the beginning of the month or
|
141 |
+
/// to hours since the beginning of the day.
|
142 |
+
/// Exceptions: week and quarter are not used as greater units, therefore days will
|
143 |
+
/// will be rounded to the beginning of the month not week. Greater unit of week
|
144 |
+
/// is year.
|
145 |
+
/// Note that ceiling and rounding might change sorting order of an array near greater
|
146 |
+
/// unit change. For example rounding YYYY-mm-dd 23:00:00 to 5 hours will ceil and
|
147 |
+
/// round to YYYY-mm-dd+1 01:00:00 and floor to YYYY-mm-dd 20:00:00. On the other hand
|
148 |
+
/// YYYY-mm-dd+1 00:00:00 will ceil, round and floor to YYYY-mm-dd+1 00:00:00. This
|
149 |
+
/// can break the order of an already ordered array.
|
150 |
+
bool calendar_based_origin;
|
151 |
+
};
|
152 |
+
|
153 |
+
class ARROW_EXPORT RoundToMultipleOptions : public FunctionOptions {
|
154 |
+
public:
|
155 |
+
explicit RoundToMultipleOptions(double multiple = 1.0,
|
156 |
+
RoundMode round_mode = RoundMode::HALF_TO_EVEN);
|
157 |
+
explicit RoundToMultipleOptions(std::shared_ptr<Scalar> multiple,
|
158 |
+
RoundMode round_mode = RoundMode::HALF_TO_EVEN);
|
159 |
+
static constexpr char const kTypeName[] = "RoundToMultipleOptions";
|
160 |
+
static RoundToMultipleOptions Defaults() { return RoundToMultipleOptions(); }
|
161 |
+
/// Rounding scale (multiple to round to).
|
162 |
+
///
|
163 |
+
/// Should be a positive numeric scalar of a type compatible with the
|
164 |
+
/// argument to be rounded. The cast kernel is used to convert the rounding
|
165 |
+
/// multiple to match the result type.
|
166 |
+
std::shared_ptr<Scalar> multiple;
|
167 |
+
/// Rounding and tie-breaking mode
|
168 |
+
RoundMode round_mode;
|
169 |
+
};
|
170 |
+
|
171 |
+
/// Options for var_args_join.
|
172 |
+
class ARROW_EXPORT JoinOptions : public FunctionOptions {
|
173 |
+
public:
|
174 |
+
/// How to handle null values. (A null separator always results in a null output.)
|
175 |
+
enum NullHandlingBehavior {
|
176 |
+
/// A null in any input results in a null in the output.
|
177 |
+
EMIT_NULL,
|
178 |
+
/// Nulls in inputs are skipped.
|
179 |
+
SKIP,
|
180 |
+
/// Nulls in inputs are replaced with the replacement string.
|
181 |
+
REPLACE,
|
182 |
+
};
|
183 |
+
explicit JoinOptions(NullHandlingBehavior null_handling = EMIT_NULL,
|
184 |
+
std::string null_replacement = "");
|
185 |
+
static constexpr char const kTypeName[] = "JoinOptions";
|
186 |
+
static JoinOptions Defaults() { return JoinOptions(); }
|
187 |
+
NullHandlingBehavior null_handling;
|
188 |
+
std::string null_replacement;
|
189 |
+
};
|
190 |
+
|
191 |
+
class ARROW_EXPORT MatchSubstringOptions : public FunctionOptions {
|
192 |
+
public:
|
193 |
+
explicit MatchSubstringOptions(std::string pattern, bool ignore_case = false);
|
194 |
+
MatchSubstringOptions();
|
195 |
+
static constexpr char const kTypeName[] = "MatchSubstringOptions";
|
196 |
+
|
197 |
+
/// The exact substring (or regex, depending on kernel) to look for inside input values.
|
198 |
+
std::string pattern;
|
199 |
+
/// Whether to perform a case-insensitive match.
|
200 |
+
bool ignore_case;
|
201 |
+
};
|
202 |
+
|
203 |
+
class ARROW_EXPORT SplitOptions : public FunctionOptions {
|
204 |
+
public:
|
205 |
+
explicit SplitOptions(int64_t max_splits = -1, bool reverse = false);
|
206 |
+
static constexpr char const kTypeName[] = "SplitOptions";
|
207 |
+
|
208 |
+
/// Maximum number of splits allowed, or unlimited when -1
|
209 |
+
int64_t max_splits;
|
210 |
+
/// Start splitting from the end of the string (only relevant when max_splits != -1)
|
211 |
+
bool reverse;
|
212 |
+
};
|
213 |
+
|
214 |
+
class ARROW_EXPORT SplitPatternOptions : public FunctionOptions {
|
215 |
+
public:
|
216 |
+
explicit SplitPatternOptions(std::string pattern, int64_t max_splits = -1,
|
217 |
+
bool reverse = false);
|
218 |
+
SplitPatternOptions();
|
219 |
+
static constexpr char const kTypeName[] = "SplitPatternOptions";
|
220 |
+
|
221 |
+
/// The exact substring to split on.
|
222 |
+
std::string pattern;
|
223 |
+
/// Maximum number of splits allowed, or unlimited when -1
|
224 |
+
int64_t max_splits;
|
225 |
+
/// Start splitting from the end of the string (only relevant when max_splits != -1)
|
226 |
+
bool reverse;
|
227 |
+
};
|
228 |
+
|
229 |
+
class ARROW_EXPORT ReplaceSliceOptions : public FunctionOptions {
|
230 |
+
public:
|
231 |
+
explicit ReplaceSliceOptions(int64_t start, int64_t stop, std::string replacement);
|
232 |
+
ReplaceSliceOptions();
|
233 |
+
static constexpr char const kTypeName[] = "ReplaceSliceOptions";
|
234 |
+
|
235 |
+
/// Index to start slicing at
|
236 |
+
int64_t start;
|
237 |
+
/// Index to stop slicing at
|
238 |
+
int64_t stop;
|
239 |
+
/// String to replace the slice with
|
240 |
+
std::string replacement;
|
241 |
+
};
|
242 |
+
|
243 |
+
class ARROW_EXPORT ReplaceSubstringOptions : public FunctionOptions {
|
244 |
+
public:
|
245 |
+
explicit ReplaceSubstringOptions(std::string pattern, std::string replacement,
|
246 |
+
int64_t max_replacements = -1);
|
247 |
+
ReplaceSubstringOptions();
|
248 |
+
static constexpr char const kTypeName[] = "ReplaceSubstringOptions";
|
249 |
+
|
250 |
+
/// Pattern to match, literal, or regular expression depending on which kernel is used
|
251 |
+
std::string pattern;
|
252 |
+
/// String to replace the pattern with
|
253 |
+
std::string replacement;
|
254 |
+
/// Max number of substrings to replace (-1 means unbounded)
|
255 |
+
int64_t max_replacements;
|
256 |
+
};
|
257 |
+
|
258 |
+
class ARROW_EXPORT ExtractRegexOptions : public FunctionOptions {
|
259 |
+
public:
|
260 |
+
explicit ExtractRegexOptions(std::string pattern);
|
261 |
+
ExtractRegexOptions();
|
262 |
+
static constexpr char const kTypeName[] = "ExtractRegexOptions";
|
263 |
+
|
264 |
+
/// Regular expression with named capture fields
|
265 |
+
std::string pattern;
|
266 |
+
};
|
267 |
+
|
268 |
+
/// Options for IsIn and IndexIn functions
|
269 |
+
class ARROW_EXPORT SetLookupOptions : public FunctionOptions {
|
270 |
+
public:
|
271 |
+
/// How to handle null values.
|
272 |
+
enum NullMatchingBehavior {
|
273 |
+
/// MATCH, any null in `value_set` is successfully matched in
|
274 |
+
/// the input.
|
275 |
+
MATCH,
|
276 |
+
/// SKIP, any null in `value_set` is ignored and nulls in the input
|
277 |
+
/// produce null (IndexIn) or false (IsIn) values in the output.
|
278 |
+
SKIP,
|
279 |
+
/// EMIT_NULL, any null in `value_set` is ignored and nulls in the
|
280 |
+
/// input produce null (IndexIn and IsIn) values in the output.
|
281 |
+
EMIT_NULL,
|
282 |
+
/// INCONCLUSIVE, null values are regarded as unknown values, which is
|
283 |
+
/// sql-compatible. nulls in the input produce null (IndexIn and IsIn)
|
284 |
+
/// values in the output. Besides, if `value_set` contains a null,
|
285 |
+
/// non-null unmatched values in the input also produce null values
|
286 |
+
/// (IndexIn and IsIn) in the output.
|
287 |
+
INCONCLUSIVE
|
288 |
+
};
|
289 |
+
|
290 |
+
explicit SetLookupOptions(Datum value_set, NullMatchingBehavior = MATCH);
|
291 |
+
SetLookupOptions();
|
292 |
+
|
293 |
+
// DEPRECATED(will be removed after removing of skip_nulls)
|
294 |
+
explicit SetLookupOptions(Datum value_set, bool skip_nulls);
|
295 |
+
|
296 |
+
static constexpr char const kTypeName[] = "SetLookupOptions";
|
297 |
+
|
298 |
+
/// The set of values to look up input values into.
|
299 |
+
Datum value_set;
|
300 |
+
|
301 |
+
NullMatchingBehavior null_matching_behavior;
|
302 |
+
|
303 |
+
// DEPRECATED(will be removed after removing of skip_nulls)
|
304 |
+
NullMatchingBehavior GetNullMatchingBehavior() const;
|
305 |
+
|
306 |
+
// DEPRECATED(use null_matching_behavior instead)
|
307 |
+
/// Whether nulls in `value_set` count for lookup.
|
308 |
+
///
|
309 |
+
/// If true, any null in `value_set` is ignored and nulls in the input
|
310 |
+
/// produce null (IndexIn) or false (IsIn) values in the output.
|
311 |
+
/// If false, any null in `value_set` is successfully matched in
|
312 |
+
/// the input.
|
313 |
+
std::optional<bool> skip_nulls;
|
314 |
+
};
|
315 |
+
|
316 |
+
/// Options for struct_field function
|
317 |
+
class ARROW_EXPORT StructFieldOptions : public FunctionOptions {
|
318 |
+
public:
|
319 |
+
explicit StructFieldOptions(std::vector<int> indices);
|
320 |
+
explicit StructFieldOptions(std::initializer_list<int>);
|
321 |
+
explicit StructFieldOptions(FieldRef field_ref);
|
322 |
+
StructFieldOptions();
|
323 |
+
static constexpr char const kTypeName[] = "StructFieldOptions";
|
324 |
+
|
325 |
+
/// The FieldRef specifying what to extract from struct or union.
|
326 |
+
FieldRef field_ref;
|
327 |
+
};
|
328 |
+
|
329 |
+
class ARROW_EXPORT StrptimeOptions : public FunctionOptions {
|
330 |
+
public:
|
331 |
+
explicit StrptimeOptions(std::string format, TimeUnit::type unit,
|
332 |
+
bool error_is_null = false);
|
333 |
+
StrptimeOptions();
|
334 |
+
static constexpr char const kTypeName[] = "StrptimeOptions";
|
335 |
+
|
336 |
+
/// The desired format string.
|
337 |
+
std::string format;
|
338 |
+
/// The desired time resolution
|
339 |
+
TimeUnit::type unit;
|
340 |
+
/// Return null on parsing errors if true or raise if false
|
341 |
+
bool error_is_null;
|
342 |
+
};
|
343 |
+
|
344 |
+
class ARROW_EXPORT StrftimeOptions : public FunctionOptions {
|
345 |
+
public:
|
346 |
+
explicit StrftimeOptions(std::string format, std::string locale = "C");
|
347 |
+
StrftimeOptions();
|
348 |
+
|
349 |
+
static constexpr char const kTypeName[] = "StrftimeOptions";
|
350 |
+
|
351 |
+
static constexpr const char* kDefaultFormat = "%Y-%m-%dT%H:%M:%S";
|
352 |
+
|
353 |
+
/// The desired format string.
|
354 |
+
std::string format;
|
355 |
+
/// The desired output locale string.
|
356 |
+
std::string locale;
|
357 |
+
};
|
358 |
+
|
359 |
+
class ARROW_EXPORT PadOptions : public FunctionOptions {
|
360 |
+
public:
|
361 |
+
explicit PadOptions(int64_t width, std::string padding = " ");
|
362 |
+
PadOptions();
|
363 |
+
static constexpr char const kTypeName[] = "PadOptions";
|
364 |
+
|
365 |
+
/// The desired string length.
|
366 |
+
int64_t width;
|
367 |
+
/// What to pad the string with. Should be one codepoint (Unicode)/byte (ASCII).
|
368 |
+
std::string padding;
|
369 |
+
};
|
370 |
+
|
371 |
+
class ARROW_EXPORT TrimOptions : public FunctionOptions {
|
372 |
+
public:
|
373 |
+
explicit TrimOptions(std::string characters);
|
374 |
+
TrimOptions();
|
375 |
+
static constexpr char const kTypeName[] = "TrimOptions";
|
376 |
+
|
377 |
+
/// The individual characters to be trimmed from the string.
|
378 |
+
std::string characters;
|
379 |
+
};
|
380 |
+
|
381 |
+
class ARROW_EXPORT SliceOptions : public FunctionOptions {
|
382 |
+
public:
|
383 |
+
explicit SliceOptions(int64_t start, int64_t stop = std::numeric_limits<int64_t>::max(),
|
384 |
+
int64_t step = 1);
|
385 |
+
SliceOptions();
|
386 |
+
static constexpr char const kTypeName[] = "SliceOptions";
|
387 |
+
int64_t start, stop, step;
|
388 |
+
};
|
389 |
+
|
390 |
+
class ARROW_EXPORT ListSliceOptions : public FunctionOptions {
|
391 |
+
public:
|
392 |
+
explicit ListSliceOptions(int64_t start, std::optional<int64_t> stop = std::nullopt,
|
393 |
+
int64_t step = 1,
|
394 |
+
std::optional<bool> return_fixed_size_list = std::nullopt);
|
395 |
+
ListSliceOptions();
|
396 |
+
static constexpr char const kTypeName[] = "ListSliceOptions";
|
397 |
+
/// The start of list slicing.
|
398 |
+
int64_t start;
|
399 |
+
/// Optional stop of list slicing. If not set, then slice to end. (NotImplemented)
|
400 |
+
std::optional<int64_t> stop;
|
401 |
+
/// Slicing step
|
402 |
+
int64_t step;
|
403 |
+
// Whether to return a FixedSizeListArray. If true _and_ stop is after
|
404 |
+
// a list element's length, nulls will be appended to create the requested slice size.
|
405 |
+
// Default of `nullopt` will return whatever type it got in.
|
406 |
+
std::optional<bool> return_fixed_size_list;
|
407 |
+
};
|
408 |
+
|
409 |
+
class ARROW_EXPORT NullOptions : public FunctionOptions {
|
410 |
+
public:
|
411 |
+
explicit NullOptions(bool nan_is_null = false);
|
412 |
+
static constexpr char const kTypeName[] = "NullOptions";
|
413 |
+
static NullOptions Defaults() { return NullOptions{}; }
|
414 |
+
|
415 |
+
bool nan_is_null;
|
416 |
+
};
|
417 |
+
|
418 |
+
enum CompareOperator : int8_t {
|
419 |
+
EQUAL,
|
420 |
+
NOT_EQUAL,
|
421 |
+
GREATER,
|
422 |
+
GREATER_EQUAL,
|
423 |
+
LESS,
|
424 |
+
LESS_EQUAL,
|
425 |
+
};
|
426 |
+
|
427 |
+
struct ARROW_EXPORT CompareOptions {
|
428 |
+
explicit CompareOptions(CompareOperator op) : op(op) {}
|
429 |
+
CompareOptions() : CompareOptions(CompareOperator::EQUAL) {}
|
430 |
+
enum CompareOperator op;
|
431 |
+
};
|
432 |
+
|
433 |
+
class ARROW_EXPORT MakeStructOptions : public FunctionOptions {
|
434 |
+
public:
|
435 |
+
MakeStructOptions(std::vector<std::string> n, std::vector<bool> r,
|
436 |
+
std::vector<std::shared_ptr<const KeyValueMetadata>> m);
|
437 |
+
explicit MakeStructOptions(std::vector<std::string> n);
|
438 |
+
MakeStructOptions();
|
439 |
+
static constexpr char const kTypeName[] = "MakeStructOptions";
|
440 |
+
|
441 |
+
/// Names for wrapped columns
|
442 |
+
std::vector<std::string> field_names;
|
443 |
+
|
444 |
+
/// Nullability bits for wrapped columns
|
445 |
+
std::vector<bool> field_nullability;
|
446 |
+
|
447 |
+
/// Metadata attached to wrapped columns
|
448 |
+
std::vector<std::shared_ptr<const KeyValueMetadata>> field_metadata;
|
449 |
+
};
|
450 |
+
|
451 |
+
struct ARROW_EXPORT DayOfWeekOptions : public FunctionOptions {
 public:
  explicit DayOfWeekOptions(bool count_from_zero = true, uint32_t week_start = 1);
  static constexpr char const kTypeName[] = "DayOfWeekOptions";
  static DayOfWeekOptions Defaults() { return DayOfWeekOptions(); }

  /// Number days from 0 if true and from 1 if false
  bool count_from_zero;
  /// What day does the week start with (Monday=1, Sunday=7).
  /// The numbering is unaffected by the count_from_zero parameter.
  uint32_t week_start;
};

/// Used to control timestamp timezone conversion and handling ambiguous/nonexistent
/// times.
struct ARROW_EXPORT AssumeTimezoneOptions : public FunctionOptions {
 public:
  /// \brief How to interpret ambiguous local times that can be interpreted as
  /// multiple instants (normally two) due to DST shifts.
  ///
  /// AMBIGUOUS_EARLIEST emits the earliest instant amongst possible interpretations.
  /// AMBIGUOUS_LATEST emits the latest instant amongst possible interpretations.
  enum Ambiguous { AMBIGUOUS_RAISE, AMBIGUOUS_EARLIEST, AMBIGUOUS_LATEST };

  /// \brief How to handle local times that do not exist due to DST shifts.
  ///
  /// NONEXISTENT_EARLIEST emits the instant "just before" the DST shift instant
  /// in the given timestamp precision (for example, for a nanoseconds precision
  /// timestamp, this is one nanosecond before the DST shift instant).
  /// NONEXISTENT_LATEST emits the DST shift instant.
  enum Nonexistent { NONEXISTENT_RAISE, NONEXISTENT_EARLIEST, NONEXISTENT_LATEST };

  explicit AssumeTimezoneOptions(std::string timezone,
                                 Ambiguous ambiguous = AMBIGUOUS_RAISE,
                                 Nonexistent nonexistent = NONEXISTENT_RAISE);
  AssumeTimezoneOptions();
  static constexpr char const kTypeName[] = "AssumeTimezoneOptions";

  /// Timezone to convert timestamps from
  std::string timezone;

  /// How to interpret ambiguous local times (due to DST shifts)
  Ambiguous ambiguous;
  /// How to interpret nonexistent local times (due to DST shifts)
  Nonexistent nonexistent;
};

struct ARROW_EXPORT WeekOptions : public FunctionOptions {
 public:
  explicit WeekOptions(bool week_starts_monday = true, bool count_from_zero = false,
                       bool first_week_is_fully_in_year = false);
  static constexpr char const kTypeName[] = "WeekOptions";
  static WeekOptions Defaults() { return WeekOptions{}; }
  static WeekOptions ISODefaults() {
    return WeekOptions{/*week_starts_monday*/ true,
                       /*count_from_zero=*/false,
                       /*first_week_is_fully_in_year=*/false};
  }
  static WeekOptions USDefaults() {
    return WeekOptions{/*week_starts_monday*/ false,
                       /*count_from_zero=*/false,
                       /*first_week_is_fully_in_year=*/false};
  }

  /// What day does the week start with (Monday=true, Sunday=false)
  bool week_starts_monday;
  /// Dates from current year that fall into last ISO week of the previous year return
  /// 0 if true and 52 or 53 if false.
  bool count_from_zero;
  /// Must the first week be fully in January (true), or is a week that begins on
  /// December 29, 30, or 31 considered to be the first week of the new year (false)?
  bool first_week_is_fully_in_year;
};

struct ARROW_EXPORT Utf8NormalizeOptions : public FunctionOptions {
 public:
  enum Form { NFC, NFKC, NFD, NFKD };

  explicit Utf8NormalizeOptions(Form form = NFC);
  static Utf8NormalizeOptions Defaults() { return Utf8NormalizeOptions(); }
  static constexpr char const kTypeName[] = "Utf8NormalizeOptions";

  /// The Unicode normalization form to apply
  Form form;
};
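
// Illustrative sketch (editor's addition, not part of the upstream header): applying
// Unicode NFKC normalization to a string array through the generic CallFunction entry
// point. The Datum `strings` is an assumed input; "utf8_normalize" is the compute
// function that consumes Utf8NormalizeOptions.
//
//   arrow::compute::Utf8NormalizeOptions norm_opts(
//       arrow::compute::Utf8NormalizeOptions::NFKC);
//   ARROW_ASSIGN_OR_RAISE(
//       arrow::Datum normalized,
//       arrow::compute::CallFunction("utf8_normalize", {strings}, &norm_opts));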

class ARROW_EXPORT RandomOptions : public FunctionOptions {
 public:
  enum Initializer { SystemRandom, Seed };

  static RandomOptions FromSystemRandom() { return RandomOptions{SystemRandom, 0}; }
  static RandomOptions FromSeed(uint64_t seed) { return RandomOptions{Seed, seed}; }

  RandomOptions(Initializer initializer, uint64_t seed);
  RandomOptions();
  static constexpr char const kTypeName[] = "RandomOptions";
  static RandomOptions Defaults() { return RandomOptions(); }

  /// The type of initialization for random number generation - system or provided seed.
  Initializer initializer;
  /// The seed value used to initialize the random number generation.
  uint64_t seed;
};

/// Options for map_lookup function
class ARROW_EXPORT MapLookupOptions : public FunctionOptions {
 public:
  enum Occurrence {
    /// Return the first matching value
    FIRST,
    /// Return the last matching value
    LAST,
    /// Return all matching values
    ALL
  };

  explicit MapLookupOptions(std::shared_ptr<Scalar> query_key, Occurrence occurrence);
  MapLookupOptions();

  constexpr static char const kTypeName[] = "MapLookupOptions";

  /// The key to lookup in the map
  std::shared_ptr<Scalar> query_key;

  /// Whether to return the first, last, or all matching values
  Occurrence occurrence;
};

/// @}

/// \brief Get the absolute value of a value.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value transformed
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise absolute value
ARROW_EXPORT
Result<Datum> AbsoluteValue(const Datum& arg,
                            ArithmeticOptions options = ArithmeticOptions(),
                            ExecContext* ctx = NULLPTR);

/// \brief Add two values together. Array values must be the same length. If
/// either addend is null the result will be null.
///
/// \param[in] left the first addend
/// \param[in] right the second addend
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise sum
ARROW_EXPORT
Result<Datum> Add(const Datum& left, const Datum& right,
                  ArithmeticOptions options = ArithmeticOptions(),
                  ExecContext* ctx = NULLPTR);

/// \brief Subtract two values. Array values must be the same length. If the
/// minuend or subtrahend is null the result will be null.
///
/// \param[in] left the value subtracted from (minuend)
/// \param[in] right the value by which the minuend is reduced (subtrahend)
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise difference
ARROW_EXPORT
Result<Datum> Subtract(const Datum& left, const Datum& right,
                       ArithmeticOptions options = ArithmeticOptions(),
                       ExecContext* ctx = NULLPTR);

/// \brief Multiply two values. Array values must be the same length. If either
/// factor is null the result will be null.
///
/// \param[in] left the first factor
/// \param[in] right the second factor
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise product
ARROW_EXPORT
Result<Datum> Multiply(const Datum& left, const Datum& right,
                       ArithmeticOptions options = ArithmeticOptions(),
                       ExecContext* ctx = NULLPTR);

/// \brief Divide two values. Array values must be the same length. If either
/// argument is null the result will be null. For integer types, if there is
/// a zero divisor, an error will be raised.
///
/// \param[in] left the dividend
/// \param[in] right the divisor
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise quotient
ARROW_EXPORT
Result<Datum> Divide(const Datum& left, const Datum& right,
                     ArithmeticOptions options = ArithmeticOptions(),
                     ExecContext* ctx = NULLPTR);
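
// Illustrative sketch (editor's addition, not part of the upstream header): element-wise
// arithmetic with overflow checking enabled. `a` and `b` are assumed numeric Datums of
// equal length.
//
//   arrow::compute::ArithmeticOptions checked(/*check_overflow=*/true);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum sum, arrow::compute::Add(a, b, checked));
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum quot, arrow::compute::Divide(a, b, checked));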

/// \brief Negate values.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value negated
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise negation
ARROW_EXPORT
Result<Datum> Negate(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                     ExecContext* ctx = NULLPTR);

/// \brief Raise the values of base array to the power of the exponent array values.
/// Array values must be the same length. If either base or exponent is null the result
/// will be null.
///
/// \param[in] left the base
/// \param[in] right the exponent
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise base value raised to the power of exponent
ARROW_EXPORT
Result<Datum> Power(const Datum& left, const Datum& right,
                    ArithmeticOptions options = ArithmeticOptions(),
                    ExecContext* ctx = NULLPTR);

/// \brief Raise Euler's number to the power of specified exponent, element-wise.
/// If the exponent value is null the result will be null.
///
/// \param[in] arg the exponent
/// \param[in] ctx the function execution context, optional
/// \return the element-wise Euler's number raised to the power of exponent
ARROW_EXPORT
Result<Datum> Exp(const Datum& arg, ExecContext* ctx = NULLPTR);

/// \brief Left shift the left array by the right array. Array values must be the
/// same length. If either operand is null, the result will be null.
///
/// \param[in] left the value to shift
/// \param[in] right the value to shift by
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise left value shifted left by the right value
ARROW_EXPORT
Result<Datum> ShiftLeft(const Datum& left, const Datum& right,
                        ArithmeticOptions options = ArithmeticOptions(),
                        ExecContext* ctx = NULLPTR);

/// \brief Right shift the left array by the right array. Array values must be the
/// same length. If either operand is null, the result will be null. Performs a
/// logical shift for unsigned values, and an arithmetic shift for signed values.
///
/// \param[in] left the value to shift
/// \param[in] right the value to shift by
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise left value shifted right by the right value
ARROW_EXPORT
Result<Datum> ShiftRight(const Datum& left, const Datum& right,
                         ArithmeticOptions options = ArithmeticOptions(),
                         ExecContext* ctx = NULLPTR);

/// \brief Compute the sine of the array values.
/// \param[in] arg The values to compute the sine for.
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise sine of the values
ARROW_EXPORT
Result<Datum> Sin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                  ExecContext* ctx = NULLPTR);

/// \brief Compute the cosine of the array values.
/// \param[in] arg The values to compute the cosine for.
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise cosine of the values
ARROW_EXPORT
Result<Datum> Cos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                  ExecContext* ctx = NULLPTR);

/// \brief Compute the inverse sine (arcsine) of the array values.
/// \param[in] arg The values to compute the inverse sine for.
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise inverse sine of the values
ARROW_EXPORT
Result<Datum> Asin(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                   ExecContext* ctx = NULLPTR);

/// \brief Compute the inverse cosine (arccosine) of the array values.
/// \param[in] arg The values to compute the inverse cosine for.
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise inverse cosine of the values
ARROW_EXPORT
Result<Datum> Acos(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                   ExecContext* ctx = NULLPTR);

/// \brief Compute the tangent of the array values.
/// \param[in] arg The values to compute the tangent for.
/// \param[in] options arithmetic options (enable/disable overflow checking), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise tangent of the values
ARROW_EXPORT
Result<Datum> Tan(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                  ExecContext* ctx = NULLPTR);

/// \brief Compute the inverse tangent (arctangent) of the array values.
/// \param[in] arg The values to compute the inverse tangent for.
/// \param[in] ctx the function execution context, optional
/// \return the elementwise inverse tangent of the values
ARROW_EXPORT
Result<Datum> Atan(const Datum& arg, ExecContext* ctx = NULLPTR);

/// \brief Compute the inverse tangent (arctangent) of y/x, using the
/// argument signs to determine the correct quadrant.
/// \param[in] y The y-values to compute the inverse tangent for.
/// \param[in] x The x-values to compute the inverse tangent for.
/// \param[in] ctx the function execution context, optional
/// \return the elementwise inverse tangent of the values
ARROW_EXPORT
Result<Datum> Atan2(const Datum& y, const Datum& x, ExecContext* ctx = NULLPTR);

/// \brief Get the natural log of a value.
///
/// If argument is null the result will be null.
///
/// \param[in] arg The values to compute the logarithm for.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise natural log
ARROW_EXPORT
Result<Datum> Ln(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                 ExecContext* ctx = NULLPTR);

/// \brief Get the log base 10 of a value.
///
/// If argument is null the result will be null.
///
/// \param[in] arg The values to compute the logarithm for.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise log base 10
ARROW_EXPORT
Result<Datum> Log10(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                    ExecContext* ctx = NULLPTR);

/// \brief Get the log base 2 of a value.
///
/// If argument is null the result will be null.
///
/// \param[in] arg The values to compute the logarithm for.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise log base 2
ARROW_EXPORT
Result<Datum> Log2(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                   ExecContext* ctx = NULLPTR);

/// \brief Get the natural log of (1 + value).
///
/// If argument is null the result will be null.
/// This function may be more accurate than Log(1 + value) for values close to zero.
///
/// \param[in] arg The values to compute the logarithm for.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise natural log
ARROW_EXPORT
Result<Datum> Log1p(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                    ExecContext* ctx = NULLPTR);

/// \brief Get the log of a value to the given base.
///
/// If argument is null the result will be null.
///
/// \param[in] arg The values to compute the logarithm for.
/// \param[in] base The given base.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise log to the given base
ARROW_EXPORT
Result<Datum> Logb(const Datum& arg, const Datum& base,
                   ArithmeticOptions options = ArithmeticOptions(),
                   ExecContext* ctx = NULLPTR);

/// \brief Get the square-root of a value.
///
/// If argument is null the result will be null.
///
/// \param[in] arg The values to compute the square-root for.
/// \param[in] options arithmetic options (overflow handling), optional
/// \param[in] ctx the function execution context, optional
/// \return the elementwise square-root
ARROW_EXPORT
Result<Datum> Sqrt(const Datum& arg, ArithmeticOptions options = ArithmeticOptions(),
                   ExecContext* ctx = NULLPTR);

/// \brief Round to the nearest integer less than or equal in magnitude to the
/// argument.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value to round
/// \param[in] ctx the function execution context, optional
/// \return the rounded value
ARROW_EXPORT
Result<Datum> Floor(const Datum& arg, ExecContext* ctx = NULLPTR);

/// \brief Round to the nearest integer greater than or equal in magnitude to the
/// argument.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value to round
/// \param[in] ctx the function execution context, optional
/// \return the rounded value
ARROW_EXPORT
Result<Datum> Ceil(const Datum& arg, ExecContext* ctx = NULLPTR);

/// \brief Get the integral part without fractional digits.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value to truncate
/// \param[in] ctx the function execution context, optional
/// \return the truncated value
ARROW_EXPORT
Result<Datum> Trunc(const Datum& arg, ExecContext* ctx = NULLPTR);

/// \brief Find the element-wise maximum of any number of arrays or scalars.
/// Array values must be the same length.
///
/// \param[in] args arrays or scalars to operate on.
/// \param[in] options options for handling nulls, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise maximum
ARROW_EXPORT
Result<Datum> MaxElementWise(
    const std::vector<Datum>& args,
    ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Find the element-wise minimum of any number of arrays or scalars.
/// Array values must be the same length.
///
/// \param[in] args arrays or scalars to operate on.
/// \param[in] options options for handling nulls, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise minimum
ARROW_EXPORT
Result<Datum> MinElementWise(
    const std::vector<Datum>& args,
    ElementWiseAggregateOptions options = ElementWiseAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);
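
// Illustrative sketch (editor's addition, not part of the upstream header): element-wise
// maximum of several inputs while ignoring nulls. `a`, `b` and `c` are assumed numeric
// Datums of equal length.
//
//   arrow::compute::ElementWiseAggregateOptions keep(/*skip_nulls=*/true);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum max_abc,
//                         arrow::compute::MaxElementWise({a, b, c}, keep));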

/// \brief Get the sign of a value. Array values can be of arbitrary length. If argument
/// is null the result will be null.
///
/// \param[in] arg the value to extract sign from
/// \param[in] ctx the function execution context, optional
/// \return the element-wise sign function
ARROW_EXPORT
Result<Datum> Sign(const Datum& arg, ExecContext* ctx = NULLPTR);

/// \brief Round a value to a given precision.
///
/// If arg is null the result will be null.
///
/// \param[in] arg the value to be rounded
/// \param[in] options rounding options (rounding mode and number of digits), optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
ARROW_EXPORT
Result<Datum> Round(const Datum& arg, RoundOptions options = RoundOptions::Defaults(),
                    ExecContext* ctx = NULLPTR);

/// \brief Round a value to a given precision.
///
/// If arg1 is null the result will be null.
/// If arg2 is null then the result will be null. If arg2 is negative, then the rounding
/// place will be shifted to the left (thus -1 would correspond to rounding to the nearest
/// ten). If positive, the rounding place will shift to the right (and +1 would
/// correspond to rounding to the nearest tenth).
///
/// \param[in] arg1 the value to be rounded
/// \param[in] arg2 the number of significant digits to round to
/// \param[in] options rounding options, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
ARROW_EXPORT
Result<Datum> RoundBinary(const Datum& arg1, const Datum& arg2,
                          RoundBinaryOptions options = RoundBinaryOptions::Defaults(),
                          ExecContext* ctx = NULLPTR);

/// \brief Round a value to a given multiple.
///
/// If argument is null the result will be null.
///
/// \param[in] arg the value to round
/// \param[in] options rounding options (rounding mode and multiple), optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
ARROW_EXPORT
Result<Datum> RoundToMultiple(
    const Datum& arg, RoundToMultipleOptions options = RoundToMultipleOptions::Defaults(),
    ExecContext* ctx = NULLPTR);
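
// Illustrative sketch (editor's addition, not part of the upstream header): rounding a
// float64 Datum `x` to two decimal places, then snapping it to the nearest 0.25.
//
//   arrow::compute::RoundOptions two_digits(/*ndigits=*/2,
//                                           arrow::compute::RoundMode::HALF_TO_EVEN);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum rounded, arrow::compute::Round(x, two_digits));
//
//   arrow::compute::RoundToMultipleOptions quarter(/*multiple=*/0.25);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum snapped,
//                         arrow::compute::RoundToMultiple(x, quarter));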

/// \brief Ceil a temporal value to a given frequency
///
/// If argument is null the result will be null.
///
/// \param[in] arg the temporal value to ceil
/// \param[in] options temporal rounding options, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
///
/// \since 7.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> CeilTemporal(
    const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Floor a temporal value to a given frequency
///
/// If argument is null the result will be null.
///
/// \param[in] arg the temporal value to floor
/// \param[in] options temporal rounding options, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
///
/// \since 7.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> FloorTemporal(
    const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Round a temporal value to a given frequency
///
/// If argument is null the result will be null.
///
/// \param[in] arg the temporal value to round
/// \param[in] options temporal rounding options, optional
/// \param[in] ctx the function execution context, optional
/// \return the element-wise rounded value
///
/// \since 7.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> RoundTemporal(
    const Datum& arg, RoundTemporalOptions options = RoundTemporalOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Invert the values of a boolean datum
/// \param[in] value datum to invert
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Invert(const Datum& value, ExecContext* ctx = NULLPTR);

/// \brief Element-wise AND of two boolean datums which always propagates nulls
/// (null and false is null).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> And(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);

/// \brief Element-wise AND of two boolean datums with a Kleene truth table
/// (null and false is false).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> KleeneAnd(const Datum& left, const Datum& right,
                        ExecContext* ctx = NULLPTR);

/// \brief Element-wise OR of two boolean datums which always propagates nulls
/// (null or true is null).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Or(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);

/// \brief Element-wise OR of two boolean datums with a Kleene truth table
/// (null or true is true).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> KleeneOr(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);

/// \brief Element-wise XOR of two boolean datums
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Xor(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);

/// \brief Element-wise AND NOT of two boolean datums which always propagates nulls
/// (null and not true is null).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 3.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> AndNot(const Datum& left, const Datum& right, ExecContext* ctx = NULLPTR);

/// \brief Element-wise AND NOT of two boolean datums with a Kleene truth table
/// (false and not null is false, null and not true is false).
///
/// \param[in] left left operand
/// \param[in] right right operand
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 3.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> KleeneAndNot(const Datum& left, const Datum& right,
                           ExecContext* ctx = NULLPTR);
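
// Illustrative sketch (editor's addition, not part of the upstream header): the
// difference between the null-propagating and the Kleene boolean kernels. `lhs` and
// `rhs` are assumed boolean Datums.
//
//   // null AND false -> null (nulls always propagate)
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum strict, arrow::compute::And(lhs, rhs));
//   // null AND false -> false (three-valued Kleene logic)
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum kleene, arrow::compute::KleeneAnd(lhs, rhs));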

/// \brief IsIn returns true for each element of `values` that is contained in
/// `value_set`
///
/// Behaviour of nulls is governed by SetLookupOptions::skip_nulls.
///
/// \param[in] values array-like input to look up in value_set
/// \param[in] options SetLookupOptions
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IsIn(const Datum& values, const SetLookupOptions& options,
                   ExecContext* ctx = NULLPTR);
ARROW_EXPORT
Result<Datum> IsIn(const Datum& values, const Datum& value_set,
                   ExecContext* ctx = NULLPTR);

/// \brief IndexIn examines each slot in the values against a value_set array.
/// If the value is not found in value_set, null will be output.
/// If found, the index of occurrence within value_set (ignoring duplicates)
/// will be output.
///
/// For example given values = [99, 42, 3, null] and
/// value_set = [3, 3, 99], the output will be = [2, null, 0, null]
///
/// Behaviour of nulls is governed by SetLookupOptions::skip_nulls.
///
/// \param[in] values array-like input
/// \param[in] options SetLookupOptions
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IndexIn(const Datum& values, const SetLookupOptions& options,
                      ExecContext* ctx = NULLPTR);
ARROW_EXPORT
Result<Datum> IndexIn(const Datum& values, const Datum& value_set,
                      ExecContext* ctx = NULLPTR);
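
// Illustrative sketch (editor's addition, not part of the upstream header): membership
// and position lookup against a small value set. `values` and `value_set` are assumed
// Datums of a compatible type.
//
//   arrow::compute::SetLookupOptions in_set(value_set, /*skip_nulls=*/true);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum mask, arrow::compute::IsIn(values, in_set));
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum positions,
//                         arrow::compute::IndexIn(values, in_set));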

/// \brief IsValid returns true for each element of `values` that is not null,
/// false otherwise
///
/// \param[in] values input to examine for validity
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IsValid(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief IsNull returns true for each element of `values` that is null,
/// false otherwise
///
/// \param[in] values input to examine for nullity
/// \param[in] options NullOptions
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IsNull(const Datum& values, NullOptions options = NullOptions::Defaults(),
                     ExecContext* ctx = NULLPTR);

/// \brief IsNan returns true for each element of `values` that is NaN,
/// false otherwise
///
/// \param[in] values input to look for NaN
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 3.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IsNan(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief IfElse returns elements chosen from `left` or `right`
/// depending on `cond`. `null` values in `cond` will be promoted to the result
///
/// \param[in] cond `Boolean` condition Scalar/ Array
/// \param[in] left Scalar/ Array
/// \param[in] right Scalar/ Array
/// \param[in] ctx the function execution context, optional
///
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IfElse(const Datum& cond, const Datum& left, const Datum& right,
                     ExecContext* ctx = NULLPTR);

/// \brief CaseWhen behaves like a switch/case or if-else if-else statement: for
/// each row, select the first value for which the corresponding condition is
/// true, or (if given) select the 'else' value, else emit null. Note that a
/// null condition is the same as false.
///
/// \param[in] cond Conditions (Boolean)
/// \param[in] cases Values (any type), along with an optional 'else' value.
/// \param[in] ctx the function execution context, optional
///
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> CaseWhen(const Datum& cond, const std::vector<Datum>& cases,
                       ExecContext* ctx = NULLPTR);
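
// Illustrative sketch (editor's addition, not part of the upstream header): choosing
// between two inputs row by row based on a boolean condition. `cond`, `if_true` and
// `if_false` are assumed Datums of compatible length and type.
//
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum chosen,
//                         arrow::compute::IfElse(cond, if_true, if_false));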

/// \brief Year returns year for each element of `values`
///
/// \param[in] values input to extract year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Year(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief IsLeapYear returns if a year is a leap year for each element of `values`
///
/// \param[in] values input to extract leap year indicator from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> IsLeapYear(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Month returns month for each element of `values`.
/// Month is encoded as January=1, December=12
///
/// \param[in] values input to extract month from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Month(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Day returns day number for each element of `values`
///
/// \param[in] values input to extract day from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Day(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief YearMonthDay returns a struct containing the Year, Month and Day value for
/// each element of `values`.
///
/// \param[in] values input to extract (year, month, day) struct from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 7.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> YearMonthDay(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief DayOfWeek returns number of the day of the week value for each element of
/// `values`.
///
/// By default week starts on Monday denoted by 0 and ends on Sunday denoted
/// by 6. Start day of the week (Monday=1, Sunday=7) and numbering base (0 or 1) can be
/// set using DayOfWeekOptions
///
/// \param[in] values input to extract number of the day of the week from
/// \param[in] options for setting start of the week and day numbering
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> DayOfWeek(const Datum& values,
                                     DayOfWeekOptions options = DayOfWeekOptions(),
                                     ExecContext* ctx = NULLPTR);
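
// Illustrative sketch (editor's addition, not part of the upstream header): extracting
// the day of week from an assumed timestamp Datum `ts`, numbered 1..7 with Sunday
// treated as the first day of the week.
//
//   arrow::compute::DayOfWeekOptions us_style(/*count_from_zero=*/false,
//                                             /*week_start=*/7);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum dow, arrow::compute::DayOfWeek(ts, us_style));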

/// \brief DayOfYear returns number of day of the year for each element of `values`.
/// January 1st maps to day number 1, February 1st to 32, etc.
///
/// \param[in] values input to extract number of day of the year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> DayOfYear(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief ISOYear returns ISO year number for each element of `values`.
/// First week of an ISO year has the majority (4 or more) of its days in January.
///
/// \param[in] values input to extract ISO year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> ISOYear(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief USYear returns US epidemiological year number for each element of `values`.
/// First week of US epidemiological year has the majority (4 or more) of its
/// days in January. Last week of US epidemiological year has the year's last
/// Wednesday in it. US epidemiological week starts on Sunday.
///
/// \param[in] values input to extract US epidemiological year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> USYear(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief ISOWeek returns ISO week of year number for each element of `values`.
/// First ISO week has the majority (4 or more) of its days in January.
/// ISO week starts on Monday. Year can have 52 or 53 weeks.
/// Week numbering starts with 1.
///
/// \param[in] values input to extract ISO week of year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> ISOWeek(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief USWeek returns US week of year number for each element of `values`.
/// First US week has the majority (4 or more) of its days in January.
/// US week starts on Sunday. Year can have 52 or 53 weeks.
/// Week numbering starts with 1.
///
/// \param[in] values input to extract US week of year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 6.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> USWeek(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Week returns week of year number for each element of `values`.
/// First ISO week has the majority (4 or more) of its days in January.
/// Year can have 52 or 53 weeks. Week numbering can start with 0 or 1
/// depending on WeekOptions.count_from_zero.
///
/// \param[in] values input to extract week of year from
/// \param[in] options for setting numbering start
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 6.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> Week(const Datum& values, WeekOptions options = WeekOptions(),
                                ExecContext* ctx = NULLPTR);

/// \brief ISOCalendar returns a (ISO year, ISO week, ISO day of week) struct for
/// each element of `values`.
/// ISO week starts on Monday denoted by 1 and ends on Sunday denoted by 7.
///
/// \param[in] values input to extract ISO calendar struct from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> ISOCalendar(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Quarter returns the quarter of year number for each element of `values`
/// First quarter maps to 1 and fourth quarter maps to 4.
///
/// \param[in] values input to extract quarter of year from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> Quarter(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Hour returns hour value for each element of `values`
///
/// \param[in] values input to extract hour from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Hour(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Minute returns minutes value for each element of `values`
///
/// \param[in] values input to extract minutes from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Minute(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Second returns seconds value for each element of `values`
///
/// \param[in] values input to extract seconds from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Second(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Millisecond returns number of milliseconds since the last full second
/// for each element of `values`
///
/// \param[in] values input to extract milliseconds from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Millisecond(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Microsecond returns number of microseconds since the last full millisecond
/// for each element of `values`
///
/// \param[in] values input to extract microseconds from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Microsecond(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Nanosecond returns number of nanoseconds since the last full microsecond
/// for each element of `values`
///
/// \param[in] values input to extract nanoseconds from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Nanosecond(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Subsecond returns the fraction of second elapsed since last full second
/// as a float for each element of `values`
///
/// \param[in] values input to extract subsecond from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> Subsecond(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Format timestamps according to a format string
///
/// Return formatted time strings according to the format string
/// `StrftimeOptions::format` and to the locale specifier `StrftimeOptions::locale`.
///
/// \param[in] values input timestamps
/// \param[in] options for setting format string and locale
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 6.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> Strftime(const Datum& values, StrftimeOptions options,
                                    ExecContext* ctx = NULLPTR);

/// \brief Parse timestamps according to a format string
///
/// Return parsed timestamps according to the format string
/// `StrptimeOptions::format` at time resolution `StrptimeOptions::unit`. Parse errors
/// are raised depending on the `StrptimeOptions::error_is_null` setting.
///
/// \param[in] values input strings
/// \param[in] options for setting format string, unit and error_is_null
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> Strptime(const Datum& values, StrptimeOptions options,
                                    ExecContext* ctx = NULLPTR);
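
// Illustrative sketch (editor's addition, not part of the upstream header): parsing ISO
// date strings into second-resolution timestamps and formatting them back. `dates` is
// an assumed string Datum.
//
//   arrow::compute::StrptimeOptions parse("%Y-%m-%d", arrow::TimeUnit::SECOND,
//                                         /*error_is_null=*/true);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum ts, arrow::compute::Strptime(dates, parse));
//
//   arrow::compute::StrftimeOptions fmt("%Y-%m-%d");
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum text, arrow::compute::Strftime(ts, fmt));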

/// \brief Converts timestamps from local timestamp without a timezone to a timestamp with
/// timezone, interpreting the local timestamp as being in the specified timezone for each
/// element of `values`
///
/// \param[in] values input to convert
/// \param[in] options for setting source timezone, exception and ambiguous timestamp
/// handling.
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 6.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> AssumeTimezone(const Datum& values,
                                          AssumeTimezoneOptions options,
                                          ExecContext* ctx = NULLPTR);
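
// Illustrative sketch (editor's addition, not part of the upstream header): attaching a
// timezone to naive timestamps while resolving DST-ambiguous local times to the earlier
// instant. `naive_ts` is an assumed timezone-less timestamp Datum.
//
//   arrow::compute::AssumeTimezoneOptions tz(
//       "Europe/Brussels",
//       arrow::compute::AssumeTimezoneOptions::AMBIGUOUS_EARLIEST,
//       arrow::compute::AssumeTimezoneOptions::NONEXISTENT_LATEST);
//   ARROW_ASSIGN_OR_RAISE(arrow::Datum zoned,
//                         arrow::compute::AssumeTimezone(naive_ts, tz));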

/// \brief IsDaylightSavings extracts if currently observing daylight savings for each
/// element of `values`
///
/// \param[in] values input to extract daylight savings indicator from
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> IsDaylightSavings(const Datum& values,
                                             ExecContext* ctx = NULLPTR);

/// \brief LocalTimestamp converts timestamp to timezone naive local timestamp
///
/// \param[in] values input to convert to local time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 12.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> LocalTimestamp(const Datum& values,
                                          ExecContext* ctx = NULLPTR);

/// \brief Years Between finds the number of years between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> YearsBetween(const Datum& left, const Datum& right,
                                        ExecContext* ctx = NULLPTR);

/// \brief Quarters Between finds the number of quarters between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> QuartersBetween(const Datum& left, const Datum& right,
                                           ExecContext* ctx = NULLPTR);

/// \brief Months Between finds the number of months between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MonthsBetween(const Datum& left, const Datum& right,
                                         ExecContext* ctx = NULLPTR);

/// \brief Weeks Between finds the number of weeks between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> WeeksBetween(const Datum& left, const Datum& right,
                                        ExecContext* ctx = NULLPTR);

/// \brief Month Day Nano Between finds the number of months, days, and nanoseconds
/// between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MonthDayNanoBetween(const Datum& left, const Datum& right,
                                               ExecContext* ctx = NULLPTR);

/// \brief DayTime Between finds the number of days and milliseconds between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> DayTimeBetween(const Datum& left, const Datum& right,
                                          ExecContext* ctx = NULLPTR);

/// \brief Days Between finds the number of days between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> DaysBetween(const Datum& left, const Datum& right,
                                       ExecContext* ctx = NULLPTR);

/// \brief Hours Between finds the number of hours between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> HoursBetween(const Datum& left, const Datum& right,
                                        ExecContext* ctx = NULLPTR);

/// \brief Minutes Between finds the number of minutes between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MinutesBetween(const Datum& left, const Datum& right,
                                          ExecContext* ctx = NULLPTR);

/// \brief Seconds Between finds the number of seconds between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> SecondsBetween(const Datum& left, const Datum& right,
                                          ExecContext* ctx = NULLPTR);

/// \brief Milliseconds Between finds the number of milliseconds between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MillisecondsBetween(const Datum& left, const Datum& right,
                                               ExecContext* ctx = NULLPTR);

/// \brief Microseconds Between finds the number of microseconds between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MicrosecondsBetween(const Datum& left, const Datum& right,
                                               ExecContext* ctx = NULLPTR);

/// \brief Nanoseconds Between finds the number of nanoseconds between two values
///
/// \param[in] left input treated as the start time
/// \param[in] right input treated as the end time
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> NanosecondsBetween(const Datum& left, const Datum& right,
                                              ExecContext* ctx = NULLPTR);

/// \brief Finds either the FIRST, LAST, or ALL items with a key that matches the given
/// query key in a map.
///
/// Returns an array of items for FIRST and LAST, and an array of list of items for ALL.
///
/// \param[in] map to look in
/// \param[in] options to pass a query key and choose which matching keys to return
/// (FIRST, LAST or ALL)
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 8.0.0
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MapLookup(const Datum& map, MapLookupOptions options,
                                     ExecContext* ctx = NULLPTR);
|
1716 |
+
} // namespace compute
|
1717 |
+
} // namespace arrow
|
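Aside (not part of the added header): all of the temporal "Between" helpers above share the same call shape, so one sketch covers them. The snippet below builds two second-resolution timestamp arrays with the standard TimestampBuilder and asks for the whole-hour difference; the wrapper name ExampleHoursBetween and the sample values are invented for illustration.

#include <memory>
#include "arrow/api.h"
#include "arrow/compute/api_scalar.h"

// Sketch: whole-hour difference between two timestamp arrays.
arrow::Result<arrow::Datum> ExampleHoursBetween() {
  auto ts_type = arrow::timestamp(arrow::TimeUnit::SECOND);
  arrow::TimestampBuilder start_builder(ts_type, arrow::default_memory_pool());
  arrow::TimestampBuilder end_builder(ts_type, arrow::default_memory_pool());
  // Seconds since the Unix epoch for two start/end pairs.
  ARROW_RETURN_NOT_OK(start_builder.AppendValues({0, 3600}));
  ARROW_RETURN_NOT_OK(end_builder.AppendValues({7200, 5400}));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> start, start_builder.Finish());
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> end, end_builder.Finish());
  // Expected output: [2, 0] whole hours from start to end.
  return arrow::compute::HoursBetween(start, end);
}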
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_vector.h
ADDED
@@ -0,0 +1,697 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <utility>

#include "arrow/compute/function_options.h"
#include "arrow/compute/ordering.h"
#include "arrow/result.h"
#include "arrow/type_fwd.h"

namespace arrow {
namespace compute {

class ExecContext;

/// \addtogroup compute-concrete-options
/// @{

class ARROW_EXPORT FilterOptions : public FunctionOptions {
 public:
  /// Configure the action taken when a slot of the selection mask is null
  enum NullSelectionBehavior {
    /// The corresponding filtered value will be removed in the output.
    DROP,
    /// The corresponding filtered value will be null in the output.
    EMIT_NULL,
  };

  explicit FilterOptions(NullSelectionBehavior null_selection = DROP);
  static constexpr char const kTypeName[] = "FilterOptions";
  static FilterOptions Defaults() { return FilterOptions(); }

  NullSelectionBehavior null_selection_behavior = DROP;
};

class ARROW_EXPORT TakeOptions : public FunctionOptions {
 public:
  explicit TakeOptions(bool boundscheck = true);
  static constexpr char const kTypeName[] = "TakeOptions";
  static TakeOptions BoundsCheck() { return TakeOptions(true); }
  static TakeOptions NoBoundsCheck() { return TakeOptions(false); }
  static TakeOptions Defaults() { return BoundsCheck(); }

  bool boundscheck = true;
};

/// \brief Options for the dictionary encode function
class ARROW_EXPORT DictionaryEncodeOptions : public FunctionOptions {
 public:
  /// Configure how null values will be encoded
  enum NullEncodingBehavior {
    /// The null value will be added to the dictionary with a proper index.
    ENCODE,
    /// The null value will be masked in the indices array.
    MASK
  };

  explicit DictionaryEncodeOptions(NullEncodingBehavior null_encoding = MASK);
  static constexpr char const kTypeName[] = "DictionaryEncodeOptions";
  static DictionaryEncodeOptions Defaults() { return DictionaryEncodeOptions(); }

  NullEncodingBehavior null_encoding_behavior = MASK;
};

/// \brief Options for the run-end encode function
class ARROW_EXPORT RunEndEncodeOptions : public FunctionOptions {
 public:
  explicit RunEndEncodeOptions(std::shared_ptr<DataType> run_end_type = int32());
  static constexpr char const kTypeName[] = "RunEndEncodeOptions";
  static RunEndEncodeOptions Defaults() { return RunEndEncodeOptions(); }

  std::shared_ptr<DataType> run_end_type;
};

class ARROW_EXPORT ArraySortOptions : public FunctionOptions {
 public:
  explicit ArraySortOptions(SortOrder order = SortOrder::Ascending,
                            NullPlacement null_placement = NullPlacement::AtEnd);
  static constexpr char const kTypeName[] = "ArraySortOptions";
  static ArraySortOptions Defaults() { return ArraySortOptions(); }

  /// Sorting order
  SortOrder order;
  /// Whether nulls and NaNs are placed at the start or at the end
  NullPlacement null_placement;
};

class ARROW_EXPORT SortOptions : public FunctionOptions {
 public:
  explicit SortOptions(std::vector<SortKey> sort_keys = {},
                       NullPlacement null_placement = NullPlacement::AtEnd);
  explicit SortOptions(const Ordering& ordering);
  static constexpr char const kTypeName[] = "SortOptions";
  static SortOptions Defaults() { return SortOptions(); }
  /// Convenience constructor to create an ordering from SortOptions
  ///
  /// Note: Both classes contain the exact same information. However,
  /// sort_options should only be used in a "function options" context while Ordering
  /// is used more generally.
  Ordering AsOrdering() && { return Ordering(std::move(sort_keys), null_placement); }
  Ordering AsOrdering() const& { return Ordering(sort_keys, null_placement); }

  /// Column key(s) to order by and how to order by these sort keys.
  std::vector<SortKey> sort_keys;
  /// Whether nulls and NaNs are placed at the start or at the end
  NullPlacement null_placement;
};

/// \brief SelectK options
class ARROW_EXPORT SelectKOptions : public FunctionOptions {
 public:
  explicit SelectKOptions(int64_t k = -1, std::vector<SortKey> sort_keys = {});
  static constexpr char const kTypeName[] = "SelectKOptions";
  static SelectKOptions Defaults() { return SelectKOptions(); }

  static SelectKOptions TopKDefault(int64_t k, std::vector<std::string> key_names = {}) {
    std::vector<SortKey> keys;
    for (const auto& name : key_names) {
      keys.emplace_back(SortKey(name, SortOrder::Descending));
    }
    if (key_names.empty()) {
      keys.emplace_back(SortKey("not-used", SortOrder::Descending));
    }
    return SelectKOptions{k, keys};
  }
  static SelectKOptions BottomKDefault(int64_t k,
                                       std::vector<std::string> key_names = {}) {
    std::vector<SortKey> keys;
    for (const auto& name : key_names) {
      keys.emplace_back(SortKey(name, SortOrder::Ascending));
    }
    if (key_names.empty()) {
      keys.emplace_back(SortKey("not-used", SortOrder::Ascending));
    }
    return SelectKOptions{k, keys};
  }

  /// The number of `k` elements to keep.
  int64_t k;
  /// Column key(s) to order by and how to order by these sort keys.
  std::vector<SortKey> sort_keys;
};

/// \brief Rank options
class ARROW_EXPORT RankOptions : public FunctionOptions {
 public:
  /// Configure how ties between equal values are handled
  enum Tiebreaker {
    /// Ties get the smallest possible rank in sorted order.
    Min,
    /// Ties get the largest possible rank in sorted order.
    Max,
    /// Ranks are assigned in order of when ties appear in the input.
    /// This ensures the ranks are a stable permutation of the input.
    First,
    /// The ranks span a dense [1, M] interval where M is the number
    /// of distinct values in the input.
    Dense
  };

  explicit RankOptions(std::vector<SortKey> sort_keys = {},
                       NullPlacement null_placement = NullPlacement::AtEnd,
                       Tiebreaker tiebreaker = RankOptions::First);
  /// Convenience constructor for array inputs
  explicit RankOptions(SortOrder order,
                       NullPlacement null_placement = NullPlacement::AtEnd,
                       Tiebreaker tiebreaker = RankOptions::First)
      : RankOptions({SortKey("", order)}, null_placement, tiebreaker) {}

  static constexpr char const kTypeName[] = "RankOptions";
  static RankOptions Defaults() { return RankOptions(); }

  /// Column key(s) to order by and how to order by these sort keys.
  std::vector<SortKey> sort_keys;
  /// Whether nulls and NaNs are placed at the start or at the end
  NullPlacement null_placement;
  /// Tiebreaker for dealing with equal values in ranks
  Tiebreaker tiebreaker;
};

/// \brief Partitioning options for NthToIndices
class ARROW_EXPORT PartitionNthOptions : public FunctionOptions {
 public:
  explicit PartitionNthOptions(int64_t pivot,
                               NullPlacement null_placement = NullPlacement::AtEnd);
  PartitionNthOptions() : PartitionNthOptions(0) {}
  static constexpr char const kTypeName[] = "PartitionNthOptions";

  /// The index into the equivalent sorted array of the partition pivot element.
  int64_t pivot;
  /// Whether nulls and NaNs are partitioned at the start or at the end
  NullPlacement null_placement;
};

/// \brief Options for cumulative functions
/// \note Also aliased as CumulativeSumOptions for backward compatibility
class ARROW_EXPORT CumulativeOptions : public FunctionOptions {
 public:
  explicit CumulativeOptions(bool skip_nulls = false);
  explicit CumulativeOptions(double start, bool skip_nulls = false);
  explicit CumulativeOptions(std::shared_ptr<Scalar> start, bool skip_nulls = false);
  static constexpr char const kTypeName[] = "CumulativeOptions";
  static CumulativeOptions Defaults() { return CumulativeOptions(); }

  /// Optional starting value for cumulative operation computation, default depends on the
  /// operation and input type.
  /// - sum: 0
  /// - prod: 1
  /// - min: maximum of the input type
  /// - max: minimum of the input type
  /// - mean: start is ignored because it has no meaning for mean
  std::optional<std::shared_ptr<Scalar>> start;

  /// If true, nulls in the input are ignored and produce a corresponding null output.
  /// When false, the first null encountered is propagated through the remaining output.
  bool skip_nulls = false;
};
using CumulativeSumOptions = CumulativeOptions;  // For backward compatibility

/// \brief Options for pairwise functions
class ARROW_EXPORT PairwiseOptions : public FunctionOptions {
 public:
  explicit PairwiseOptions(int64_t periods = 1);
  static constexpr char const kTypeName[] = "PairwiseOptions";
  static PairwiseOptions Defaults() { return PairwiseOptions(); }

  /// Periods to shift for applying the binary operation, accepts negative values.
  int64_t periods = 1;
};

/// @}

/// \brief Filter with a boolean selection filter
///
/// The output will be populated with values from the input at positions
/// where the selection filter is not 0. Nulls in the filter will be handled
/// based on options.null_selection_behavior.
///
/// For example given values = ["a", "b", "c", null, "e", "f"] and
/// filter = [0, 1, 1, 0, null, 1], the output will be
/// (null_selection_behavior == DROP)      = ["b", "c", "f"]
/// (null_selection_behavior == EMIT_NULL) = ["b", "c", null, "f"]
///
/// \param[in] values array to filter
/// \param[in] filter indicates which values should be filtered out
/// \param[in] options configures null_selection_behavior
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
ARROW_EXPORT
Result<Datum> Filter(const Datum& values, const Datum& filter,
                     const FilterOptions& options = FilterOptions::Defaults(),
                     ExecContext* ctx = NULLPTR);
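Aside (not part of the added header): a minimal sketch of calling the Filter declaration above. The helper name FilterExample is invented; it assumes values and mask are an array and a same-length boolean array, and drops slots where the mask is null.

#include <memory>
#include "arrow/api.h"
#include "arrow/compute/api_vector.h"

// Sketch: keep only the values selected by a boolean mask, dropping null mask slots.
arrow::Result<arrow::Datum> FilterExample(const std::shared_ptr<arrow::Array>& values,
                                          const std::shared_ptr<arrow::Array>& mask) {
  arrow::compute::FilterOptions options(arrow::compute::FilterOptions::DROP);
  return arrow::compute::Filter(values, mask, options);
}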

namespace internal {

// These internal functions are implemented in kernels/vector_selection.cc

/// \brief Return the number of selected indices in the boolean filter
///
/// \param filter a plain or run-end encoded boolean array with or without nulls
/// \param null_selection how to handle nulls in the filter
ARROW_EXPORT
int64_t GetFilterOutputSize(const ArraySpan& filter,
                            FilterOptions::NullSelectionBehavior null_selection);

/// \brief Compute uint64 selection indices for use with Take given a boolean
/// filter
///
/// \param filter a plain or run-end encoded boolean array with or without nulls
/// \param null_selection how to handle nulls in the filter
ARROW_EXPORT
Result<std::shared_ptr<ArrayData>> GetTakeIndices(
    const ArraySpan& filter, FilterOptions::NullSelectionBehavior null_selection,
    MemoryPool* memory_pool = default_memory_pool());

}  // namespace internal

/// \brief ReplaceWithMask replaces each value in the array corresponding
/// to a true value in the mask with the next element from `replacements`.
///
/// \param[in] values Array input to replace
/// \param[in] mask Array or Scalar of Boolean mask values
/// \param[in] replacements The replacement values to draw from. There must
/// be as many replacement values as true values in the mask.
/// \param[in] ctx the function execution context, optional
///
/// \return the resulting datum
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> ReplaceWithMask(const Datum& values, const Datum& mask,
                              const Datum& replacements, ExecContext* ctx = NULLPTR);

/// \brief FillNullForward fills null values in forward direction
///
/// The output array will be of the same type as the input values
/// array, with replaced null values in forward direction.
///
/// For example given values = ["a", "b", "c", null, null, "f"],
/// the output will be = ["a", "b", "c", "c", "c", "f"]
///
/// \param[in] values datum from which to take
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
ARROW_EXPORT
Result<Datum> FillNullForward(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief FillNullBackward fills null values in backward direction
///
/// The output array will be of the same type as the input values
/// array, with replaced null values in backward direction.
///
/// For example given values = ["a", "b", "c", null, null, "f"],
/// the output will be = ["a", "b", "c", "f", "f", "f"]
///
/// \param[in] values datum from which to take
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
ARROW_EXPORT
Result<Datum> FillNullBackward(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief Take from an array of values at indices in another array
///
/// The output array will be of the same type as the input values
/// array, with elements taken from the values array at the given
/// indices. If an index is null then the taken element will be null.
///
/// For example given values = ["a", "b", "c", null, "e", "f"] and
/// indices = [2, 1, null, 3], the output will be
/// = [values[2], values[1], null, values[3]]
/// = ["c", "b", null, null]
///
/// \param[in] values datum from which to take
/// \param[in] indices which values to take
/// \param[in] options options
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
ARROW_EXPORT
Result<Datum> Take(const Datum& values, const Datum& indices,
                   const TakeOptions& options = TakeOptions::Defaults(),
                   ExecContext* ctx = NULLPTR);

/// \brief Take with Array inputs and output
ARROW_EXPORT
Result<std::shared_ptr<Array>> Take(const Array& values, const Array& indices,
                                    const TakeOptions& options = TakeOptions::Defaults(),
                                    ExecContext* ctx = NULLPTR);
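Aside (not part of the added header): the Array overload of Take above yields an Array directly rather than a Datum. A sketch, with the invented helper name TakeExample:

#include <memory>
#include "arrow/api.h"
#include "arrow/compute/api_vector.h"

// Sketch: gather values at the given index positions, with bounds checking enabled.
arrow::Result<std::shared_ptr<arrow::Array>> TakeExample(
    const std::shared_ptr<arrow::Array>& values,
    const std::shared_ptr<arrow::Array>& indices) {
  return arrow::compute::Take(*values, *indices,
                              arrow::compute::TakeOptions::BoundsCheck());
}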

/// \brief Drop Null from an array of values
///
/// The output array will be of the same type as the input values
/// array, with elements taken from the values array without nulls.
///
/// For example given values = ["a", "b", "c", null, "e", "f"],
/// the output will be = ["a", "b", "c", "e", "f"]
///
/// \param[in] values datum from which to take
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
ARROW_EXPORT
Result<Datum> DropNull(const Datum& values, ExecContext* ctx = NULLPTR);

/// \brief DropNull with Array inputs and output
ARROW_EXPORT
Result<std::shared_ptr<Array>> DropNull(const Array& values, ExecContext* ctx = NULLPTR);

/// \brief Return indices that partition an array around n-th sorted element.
///
/// Find index of n-th (0 based) smallest value and perform indirect
/// partition of an array around that element. Output indices[0 ~ n-1]
/// holds values no greater than n-th element, and indices[n+1 ~ end]
/// holds values no less than n-th element. Elements in each partition
/// are not sorted. Nulls will be partitioned to the end of the output.
/// Output is not guaranteed to be stable.
///
/// \param[in] values array to be partitioned
/// \param[in] n pivot array around sorted n-th element
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would partition an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> NthToIndices(const Array& values, int64_t n,
                                            ExecContext* ctx = NULLPTR);

/// \brief Return indices that partition an array around n-th sorted element.
///
/// This overload takes a PartitionNthOptions specifying the pivot index
/// and the null handling.
///
/// \param[in] values array to be partitioned
/// \param[in] options options including pivot index and null handling
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would partition an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> NthToIndices(const Array& values,
                                            const PartitionNthOptions& options,
                                            ExecContext* ctx = NULLPTR);

/// \brief Return indices that would select the first `k` elements.
///
/// Perform an indirect sort of the datum, keeping only the first `k` elements. The output
/// array will contain indices such that the item indicated by the k-th index will be in
/// the position it would be if the datum were sorted by `options.sort_keys`. However,
/// indices of null values will not be part of the output. The sort is not guaranteed to
/// be stable.
///
/// \param[in] datum datum to be partitioned
/// \param[in] options options
/// \param[in] ctx the function execution context, optional
/// \return a datum with the same schema as the input
ARROW_EXPORT
Result<std::shared_ptr<Array>> SelectKUnstable(const Datum& datum,
                                               const SelectKOptions& options,
                                               ExecContext* ctx = NULLPTR);

/// \brief Return the indices that would sort an array.
///
/// Perform an indirect sort of array. The output array will contain
/// indices that would sort an array, which would be the same length
/// as input. Nulls will be stably partitioned to the end of the output
/// regardless of order.
///
/// For example given array = [null, 1, 3.3, null, 2, 5.3] and order
/// = SortOrder::DESCENDING, the output will be [5, 2, 4, 1, 0, 3].
///
/// \param[in] array array to sort
/// \param[in] order ascending or descending
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would sort an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> SortIndices(const Array& array,
                                           SortOrder order = SortOrder::Ascending,
                                           ExecContext* ctx = NULLPTR);

/// \brief Return the indices that would sort an array.
///
/// This overload takes an ArraySortOptions specifying the sort order
/// and the null handling.
///
/// \param[in] array array to sort
/// \param[in] options options including sort order and null handling
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would sort an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> SortIndices(const Array& array,
                                           const ArraySortOptions& options,
                                           ExecContext* ctx = NULLPTR);

/// \brief Return the indices that would sort a chunked array.
///
/// Perform an indirect sort of chunked array. The output array will
/// contain indices that would sort a chunked array, which would be
/// the same length as input. Nulls will be stably partitioned to the
/// end of the output regardless of order.
///
/// For example given chunked_array = [[null, 1], [3.3], [null, 2, 5.3]]
/// and order = SortOrder::DESCENDING, the output will be [5, 2, 4, 1, 0, 3].
///
/// \param[in] chunked_array chunked array to sort
/// \param[in] order ascending or descending
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would sort an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
                                           SortOrder order = SortOrder::Ascending,
                                           ExecContext* ctx = NULLPTR);

/// \brief Return the indices that would sort a chunked array.
///
/// This overload takes an ArraySortOptions specifying the sort order
/// and the null handling.
///
/// \param[in] chunked_array chunked array to sort
/// \param[in] options options including sort order and null handling
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would sort an array
ARROW_EXPORT
Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
                                           const ArraySortOptions& options,
                                           ExecContext* ctx = NULLPTR);

/// \brief Return the indices that would sort an input in the
/// specified order. Input is one of array, chunked array, record batch
/// or table.
///
/// Perform an indirect sort of input. The output array will contain
/// indices that would sort an input, which would be the same length
/// as input. Nulls will be stably partitioned to the start or to the end
/// of the output depending on SortOptions::null_placement.
///
/// For example given input (table) = {
/// "column1": [[null,   1], [   3, null, 2, 1]],
/// "column2": [[   5], [3,   null, null, 5, 5]],
/// } and options = {
/// {"column1", SortOrder::Ascending},
/// {"column2", SortOrder::Descending},
/// }, the output will be [5, 1, 4, 2, 0, 3].
///
/// \param[in] datum array, chunked array, record batch or table to sort
/// \param[in] options options
/// \param[in] ctx the function execution context, optional
/// \return offsets indices that would sort a table
ARROW_EXPORT
Result<std::shared_ptr<Array>> SortIndices(const Datum& datum, const SortOptions& options,
                                           ExecContext* ctx = NULLPTR);
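Aside (not part of the added header): a sketch of the ArraySortOptions overload of SortIndices declared above, producing a descending permutation with nulls placed at the end; the helper name SortIndicesExample is invented.

#include <memory>
#include "arrow/api.h"
#include "arrow/compute/api_vector.h"

// Sketch: indices that would sort the array in descending order, nulls last.
arrow::Result<std::shared_ptr<arrow::Array>> SortIndicesExample(
    const std::shared_ptr<arrow::Array>& array) {
  arrow::compute::ArraySortOptions options(arrow::compute::SortOrder::Descending,
                                           arrow::compute::NullPlacement::AtEnd);
  return arrow::compute::SortIndices(*array, options);
}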

/// \brief Compute unique elements from an array-like object
///
/// Note if a null occurs in the input it will NOT be included in the output.
///
/// \param[in] datum array-like input
/// \param[in] ctx the function execution context, optional
/// \return result as Array
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<std::shared_ptr<Array>> Unique(const Datum& datum, ExecContext* ctx = NULLPTR);

// Constants for accessing the output of ValueCounts
ARROW_EXPORT extern const char kValuesFieldName[];
ARROW_EXPORT extern const char kCountsFieldName[];
ARROW_EXPORT extern const int32_t kValuesFieldIndex;
ARROW_EXPORT extern const int32_t kCountsFieldIndex;

/// \brief Return counts of unique elements from an array-like object.
///
/// Note that the counts do not include counts for nulls in the array. These can be
/// obtained separately from metadata.
///
/// For floating point arrays there is no attempt to normalize -0.0, 0.0 and NaN values
/// which can lead to unexpected results if the input Array has these values.
///
/// \param[in] value array-like input
/// \param[in] ctx the function execution context, optional
/// \return counts An array of <input type "Values", int64_t "Counts"> structs.
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<std::shared_ptr<StructArray>> ValueCounts(const Datum& value,
                                                 ExecContext* ctx = NULLPTR);

/// \brief Dictionary-encode values in an array-like object
///
/// Any nulls encountered in the dictionary will be handled according to the
/// specified null encoding behavior.
///
/// For example, given values ["a", "b", null, "a", null] the output will be
/// (null_encoding == ENCODE) Indices: [0, 1, 2, 0, 2] / Dict: ["a", "b", null]
/// (null_encoding == MASK) Indices: [0, 1, null, 0, null] / Dict: ["a", "b"]
///
/// If the input is already dictionary encoded this function is a no-op unless
/// it needs to modify the null_encoding (TODO)
///
/// \param[in] data array-like input
/// \param[in] ctx the function execution context, optional
/// \param[in] options configures null encoding behavior
/// \return result with same shape and type as input
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> DictionaryEncode(
    const Datum& data,
    const DictionaryEncodeOptions& options = DictionaryEncodeOptions::Defaults(),
    ExecContext* ctx = NULLPTR);
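Aside (not part of the added header): a sketch of DictionaryEncode with the MASK null-encoding behavior documented above; the helper name DictionaryEncodeExample is invented.

#include <memory>
#include "arrow/api.h"
#include "arrow/compute/api_vector.h"

// Sketch: dictionary-encode an array, leaving nulls out of the dictionary by
// masking them in the indices.
arrow::Result<arrow::Datum> DictionaryEncodeExample(
    const std::shared_ptr<arrow::Array>& values) {
  arrow::compute::DictionaryEncodeOptions options(
      arrow::compute::DictionaryEncodeOptions::MASK);
  return arrow::compute::DictionaryEncode(values, options);
}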

/// \brief Run-end-encode values in an array-like object
///
/// The returned run-end encoded type uses the same value type of the input and
/// run-end type defined in the options.
///
/// \param[in] value array-like input
/// \param[in] options configures encoding behavior
/// \param[in] ctx the function execution context, optional
/// \return result with same shape but run-end encoded
///
/// \since 12.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> RunEndEncode(
    const Datum& value,
    const RunEndEncodeOptions& options = RunEndEncodeOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Decode a Run-End Encoded array to a plain array
///
/// The output data type is the same as the values array type of run-end encoded
/// input.
///
/// \param[in] value run-end-encoded input
/// \param[in] ctx the function execution context, optional
/// \return plain array resulting from decoding the run-end encoded input
///
/// \since 12.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> RunEndDecode(const Datum& value, ExecContext* ctx = NULLPTR);

/// \brief Compute the cumulative sum of an array-like object
///
/// \param[in] values array-like input
/// \param[in] options configures cumulative sum behavior
/// \param[in] check_overflow whether to check for overflow, if true, return Invalid
/// status on overflow, otherwise wrap around on overflow
/// \param[in] ctx the function execution context, optional
ARROW_EXPORT
Result<Datum> CumulativeSum(
    const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
    bool check_overflow = false, ExecContext* ctx = NULLPTR);

/// \brief Compute the cumulative product of an array-like object
///
/// \param[in] values array-like input
/// \param[in] options configures cumulative prod behavior
/// \param[in] check_overflow whether to check for overflow, if true, return Invalid
/// status on overflow, otherwise wrap around on overflow
/// \param[in] ctx the function execution context, optional
ARROW_EXPORT
Result<Datum> CumulativeProd(
    const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
    bool check_overflow = false, ExecContext* ctx = NULLPTR);

/// \brief Compute the cumulative max of an array-like object
///
/// \param[in] values array-like input
/// \param[in] options configures cumulative max behavior
/// \param[in] ctx the function execution context, optional
ARROW_EXPORT
Result<Datum> CumulativeMax(
    const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Compute the cumulative min of an array-like object
///
/// \param[in] values array-like input
/// \param[in] options configures cumulative min behavior
/// \param[in] ctx the function execution context, optional
ARROW_EXPORT
Result<Datum> CumulativeMin(
    const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Compute the cumulative mean of an array-like object
///
/// \param[in] values array-like input
/// \param[in] options configures cumulative mean behavior, `start` is ignored
/// \param[in] ctx the function execution context, optional
ARROW_EXPORT
Result<Datum> CumulativeMean(
    const Datum& values, const CumulativeOptions& options = CumulativeOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Return the first order difference of an array.
///
/// Computes the first order difference of an array, i.e.
///   output[i] = input[i] - input[i - p]  if i >= p
///   output[i] = null                     otherwise
/// where p is the period. For example, with p = 1,
///   Diff([1, 4, 9, 10, 15]) = [null, 3, 5, 1, 5].
/// With p = 2,
///   Diff([1, 4, 9, 10, 15]) = [null, null, 8, 6, 6]
/// p can also be negative, in which case the diff is computed in
/// the opposite direction.
/// \param[in] array array input
/// \param[in] options options, specifying overflow behavior and period
/// \param[in] check_overflow whether to return error on overflow
/// \param[in] ctx the function execution context, optional
/// \return result as array
ARROW_EXPORT
Result<std::shared_ptr<Array>> PairwiseDiff(const Array& array,
                                            const PairwiseOptions& options,
                                            bool check_overflow = false,
                                            ExecContext* ctx = NULLPTR);

}  // namespace compute
}  // namespace arrow
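Aside (not part of the added header): the cumulative helpers above all take a CumulativeOptions. Below is a sketch of a running sum with an explicit start value and overflow checking, using the invented helper name CumulativeSumExample.

#include <memory>
#include "arrow/api.h"
#include "arrow/compute/api_vector.h"

// Sketch: running total starting at 10; the first null propagates through the
// remaining output because skip_nulls is false.
arrow::Result<arrow::Datum> CumulativeSumExample(
    const std::shared_ptr<arrow::Array>& values) {
  arrow::compute::CumulativeOptions options(/*start=*/10.0, /*skip_nulls=*/false);
  return arrow::compute::CumulativeSum(values, options, /*check_overflow=*/true);
}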
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/cast.h
ADDED
@@ -0,0 +1,134 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <string>
#include <vector>

#include "arrow/compute/function.h"
#include "arrow/compute/function_options.h"
#include "arrow/compute/type_fwd.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/type.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Array;

namespace compute {

class ExecContext;

/// \addtogroup compute-concrete-options
/// @{

class ARROW_EXPORT CastOptions : public FunctionOptions {
 public:
  explicit CastOptions(bool safe = true);

  static constexpr char const kTypeName[] = "CastOptions";
  static CastOptions Safe(TypeHolder to_type = {}) {
    CastOptions safe(true);
    safe.to_type = std::move(to_type);
    return safe;
  }

  static CastOptions Unsafe(TypeHolder to_type = {}) {
    CastOptions unsafe(false);
    unsafe.to_type = std::move(to_type);
    return unsafe;
  }

  // Type being cast to. May be passed separately to the eager function
  // compute::Cast
  TypeHolder to_type;

  bool allow_int_overflow;
  bool allow_time_truncate;
  bool allow_time_overflow;
  bool allow_decimal_truncate;
  bool allow_float_truncate;
  // Indicate if conversions from Binary/FixedSizeBinary to string must
  // validate the utf8 payload.
  bool allow_invalid_utf8;

  /// true if the safety options all match CastOptions::Safe
  ///
  /// Note, if this returns false it does not mean is_unsafe will return true
  bool is_safe() const;
  /// true if the safety options all match CastOptions::Unsafe
  ///
  /// Note, if this returns false it does not mean is_safe will return true
  bool is_unsafe() const;
};

/// @}

/// \brief Return true if a cast function is defined
ARROW_EXPORT
bool CanCast(const DataType& from_type, const DataType& to_type);

// ----------------------------------------------------------------------
// Convenience invocation APIs for a number of kernels

/// \brief Cast from one array type to another
/// \param[in] value array to cast
/// \param[in] to_type type to cast to
/// \param[in] options casting options
/// \param[in] ctx the function execution context, optional
/// \return the resulting array
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<std::shared_ptr<Array>> Cast(const Array& value, const TypeHolder& to_type,
                                    const CastOptions& options = CastOptions::Safe(),
                                    ExecContext* ctx = NULLPTR);

/// \brief Cast from one array type to another
/// \param[in] value array to cast
/// \param[in] options casting options. The "to_type" field must be populated
/// \param[in] ctx the function execution context, optional
/// \return the resulting array
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Cast(const Datum& value, const CastOptions& options,
                   ExecContext* ctx = NULLPTR);

/// \brief Cast from one value to another
/// \param[in] value datum to cast
/// \param[in] to_type type to cast to
/// \param[in] options casting options
/// \param[in] ctx the function execution context, optional
/// \return the resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Cast(const Datum& value, const TypeHolder& to_type,
                   const CastOptions& options = CastOptions::Safe(),
                   ExecContext* ctx = NULLPTR);

}  // namespace compute
}  // namespace arrow
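Aside (not part of the added header): a sketch of the Array overload of Cast declared above, converting an integer array to float64 with the Safe defaults; the helper name CastExample is invented.

#include <memory>
#include "arrow/api.h"
#include "arrow/compute/cast.h"

// Sketch: checked (safe) cast of an integer array to float64.
arrow::Result<std::shared_ptr<arrow::Array>> CastExample(
    const std::shared_ptr<arrow::Array>& ints) {
  return arrow::compute::Cast(*ints, arrow::float64(),
                              arrow::compute::CastOptions::Safe());
}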
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/exec.h
ADDED
@@ -0,0 +1,489 @@
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
// NOTE: API is EXPERIMENTAL and will change without going through a
|
19 |
+
// deprecation cycle
|
20 |
+
|
21 |
+
#pragma once
|
22 |
+
|
23 |
+
#include <atomic>
|
24 |
+
#include <cstdint>
|
25 |
+
#include <limits>
|
26 |
+
#include <memory>
|
27 |
+
#include <optional>
|
28 |
+
#include <string>
|
29 |
+
#include <utility>
|
30 |
+
#include <vector>
|
31 |
+
|
32 |
+
#include "arrow/array/data.h"
|
33 |
+
#include "arrow/compute/expression.h"
|
34 |
+
#include "arrow/compute/type_fwd.h"
|
35 |
+
#include "arrow/datum.h"
|
36 |
+
#include "arrow/result.h"
|
37 |
+
#include "arrow/type_fwd.h"
|
38 |
+
#include "arrow/util/macros.h"
|
39 |
+
#include "arrow/util/type_fwd.h"
|
40 |
+
#include "arrow/util/visibility.h"
|
41 |
+
|
42 |
+
namespace arrow {
|
43 |
+
namespace compute {
|
44 |
+
|
45 |
+
// It seems like 64K might be a good default chunksize to use for execution
|
46 |
+
// based on the experience of other query processing systems. The current
|
47 |
+
// default is not to chunk contiguous arrays, though, but this may change in
|
48 |
+
// the future once parallel execution is implemented
|
49 |
+
static constexpr int64_t kDefaultExecChunksize = UINT16_MAX;
|
50 |
+
|
51 |
+
/// \brief Context for expression-global variables and options used by
|
52 |
+
/// function evaluation
|
53 |
+
class ARROW_EXPORT ExecContext {
|
54 |
+
public:
|
55 |
+
// If no function registry passed, the default is used.
|
56 |
+
explicit ExecContext(MemoryPool* pool = default_memory_pool(),
|
57 |
+
::arrow::internal::Executor* executor = NULLPTR,
|
58 |
+
FunctionRegistry* func_registry = NULLPTR);
|
59 |
+
|
60 |
+
/// \brief The MemoryPool used for allocations, default is
|
61 |
+
/// default_memory_pool().
|
62 |
+
MemoryPool* memory_pool() const { return pool_; }
|
63 |
+
|
64 |
+
const ::arrow::internal::CpuInfo* cpu_info() const;
|
65 |
+
|
66 |
+
/// \brief An Executor which may be used to parallelize execution.
|
67 |
+
::arrow::internal::Executor* executor() const { return executor_; }
|
68 |
+
|
69 |
+
/// \brief The FunctionRegistry for looking up functions by name and
|
70 |
+
/// selecting kernels for execution. Defaults to the library-global function
|
71 |
+
/// registry provided by GetFunctionRegistry.
|
72 |
+
FunctionRegistry* func_registry() const { return func_registry_; }
|
73 |
+
|
74 |
+
// \brief Set maximum length unit of work for kernel execution. Larger
|
75 |
+
// contiguous array inputs will be split into smaller chunks, and, if
|
76 |
+
// possible and enabled, processed in parallel. The default chunksize is
|
77 |
+
// INT64_MAX, so contiguous arrays are not split.
|
78 |
+
void set_exec_chunksize(int64_t chunksize) { exec_chunksize_ = chunksize; }
|
79 |
+
|
80 |
+
// \brief Maximum length for ExecBatch data chunks processed by
|
81 |
+
// kernels. Contiguous array inputs with longer length will be split into
|
82 |
+
// smaller chunks.
|
83 |
+
int64_t exec_chunksize() const { return exec_chunksize_; }
|
84 |
+
|
85 |
+
/// \brief Set whether to use multiple threads for function execution. This
|
86 |
+
/// is not yet used.
|
87 |
+
void set_use_threads(bool use_threads = true) { use_threads_ = use_threads; }
|
88 |
+
|
89 |
+
/// \brief If true, then utilize multiple threads where relevant for function
|
90 |
+
/// execution. This is not yet used.
|
91 |
+
bool use_threads() const { return use_threads_; }
|
92 |
+
|
93 |
+
// Set the preallocation strategy for kernel execution as it relates to
|
94 |
+
// chunked execution. For chunked execution, whether via ChunkedArray inputs
|
95 |
+
// or splitting larger Array arguments into smaller pieces, contiguous
|
96 |
+
// allocation (if permitted by the kernel) will allocate one large array to
|
97 |
+
// write output into yielding it to the caller at the end. If this option is
|
98 |
+
// set to off, then preallocations will be performed independently for each
|
99 |
+
// chunk of execution
|
100 |
+
//
|
101 |
+
// TODO: At some point we might want the limit the size of contiguous
|
102 |
+
// preallocations. For example, even if the exec_chunksize is 64K or less, we
|
103 |
+
// might limit contiguous allocations to 1M records, say.
|
104 |
+
void set_preallocate_contiguous(bool preallocate) {
|
105 |
+
preallocate_contiguous_ = preallocate;
|
106 |
+
}
|
107 |
+
|
108 |
+
/// \brief If contiguous preallocations should be used when doing chunked
|
109 |
+
/// execution as specified by exec_chunksize(). See
|
110 |
+
/// set_preallocate_contiguous() for more information.
|
111 |
+
bool preallocate_contiguous() const { return preallocate_contiguous_; }
|
112 |
+
|
113 |
+
private:
|
114 |
+
MemoryPool* pool_;
|
115 |
+
::arrow::internal::Executor* executor_;
|
116 |
+
FunctionRegistry* func_registry_;
|
117 |
+
int64_t exec_chunksize_ = std::numeric_limits<int64_t>::max();
|
118 |
+
bool preallocate_contiguous_ = true;
|
119 |
+
bool use_threads_ = true;
|
120 |
+
};
|
121 |
+
|
122 |
+
// TODO: Consider standardizing on uint16 selection vectors and only use them
|
123 |
+
// when we can ensure that each value is 64K length or smaller
|
124 |
+
|
125 |
+
/// \brief Container for an array of value selection indices that were
|
126 |
+
/// materialized from a filter.
|
127 |
+
///
|
128 |
+
/// Columnar query engines (see e.g. [1]) have found that rather than
|
129 |
+
/// materializing filtered data, the filter can instead be converted to an
|
130 |
+
/// array of the "on" indices and then "fusing" these indices in operator
|
131 |
+
/// implementations. This is especially relevant for aggregations but also
|
132 |
+
/// applies to scalar operations.
|
133 |
+
///
|
134 |
+
/// We are not yet using this so this is mostly a placeholder for now.
|
135 |
+
///
|
136 |
+
/// [1]: http://cidrdb.org/cidr2005/papers/P19.pdf
|
137 |
+
class ARROW_EXPORT SelectionVector {
|
138 |
+
public:
|
139 |
+
explicit SelectionVector(std::shared_ptr<ArrayData> data);
|
140 |
+
|
141 |
+
explicit SelectionVector(const Array& arr);
|
142 |
+
|
143 |
+
/// \brief Create SelectionVector from boolean mask
|
144 |
+
static Result<std::shared_ptr<SelectionVector>> FromMask(const BooleanArray& arr);
|
145 |
+
|
146 |
+
const int32_t* indices() const { return indices_; }
|
147 |
+
int32_t length() const;
|
148 |
+
|
149 |
+
private:
|
150 |
+
std::shared_ptr<ArrayData> data_;
|
151 |
+
const int32_t* indices_;
|
152 |
+
};
|
153 |
+
|
154 |
+
/// An index to represent that a batch does not belong to an ordered stream
|
155 |
+
constexpr int64_t kUnsequencedIndex = -1;
|
156 |
+
|
157 |
+
/// \brief A unit of work for kernel execution. It contains a collection of
|
158 |
+
/// Array and Scalar values and an optional SelectionVector indicating that
|
159 |
+
/// there is an unmaterialized filter that either must be materialized, or (if
|
160 |
+
/// the kernel supports it) pushed down into the kernel implementation.
|
161 |
+
///
|
162 |
+
/// ExecBatch is semantically similar to RecordBatch in that in a SQL context
|
163 |
+
/// it represents a collection of records, but constant "columns" are
|
164 |
+
/// represented by Scalar values rather than having to be converted into arrays
|
165 |
+
/// with repeated values.
|
166 |
+
///
|
167 |
+
/// TODO: Datum uses arrow/util/variant.h which may be a bit heavier-weight
|
168 |
+
/// than is desirable for this class. Microbenchmarks would help determine for
/// sure. See ARROW-8928.

/// \addtogroup acero-internals
/// @{

struct ARROW_EXPORT ExecBatch {
  ExecBatch() = default;
  ExecBatch(std::vector<Datum> values, int64_t length)
      : values(std::move(values)), length(length) {}

  explicit ExecBatch(const RecordBatch& batch);

  /// \brief Infer the ExecBatch length from values.
  static Result<int64_t> InferLength(const std::vector<Datum>& values);

  /// Creates an ExecBatch with length-validation.
  ///
  /// If any value is given, then all values must have a common length. If the given
  /// length is negative, then the length of the ExecBatch is set to this common length,
  /// or to 1 if no values are given. Otherwise, the given length must equal the common
  /// length, if any value is given.
  static Result<ExecBatch> Make(std::vector<Datum> values, int64_t length = -1);

  Result<std::shared_ptr<RecordBatch>> ToRecordBatch(
      std::shared_ptr<Schema> schema, MemoryPool* pool = default_memory_pool()) const;

  /// The values representing positional arguments to be passed to a kernel's
  /// exec function for processing.
  std::vector<Datum> values;

  /// A deferred filter represented as an array of indices into the values.
  ///
  /// For example, the filter [true, true, false, true] would be represented as
  /// the selection vector [0, 1, 3]. When the selection vector is set,
  /// ExecBatch::length is equal to the length of this array.
  std::shared_ptr<SelectionVector> selection_vector;

  /// A predicate Expression guaranteed to evaluate to true for all rows in this batch.
  Expression guarantee = literal(true);

  /// The semantic length of the ExecBatch. When the values are all scalars,
  /// the length should be set to 1 for non-aggregate kernels; otherwise the
  /// length is taken from the array values, except when there is a selection
  /// vector. When there is a selection vector set, the length of the batch is
  /// the length of the selection. Aggregate kernels can have an ExecBatch
  /// formed by projecting just the partition columns from a batch, in which
  /// case it would have scalar rows with length greater than 1.
  ///
  /// If the array values are of length 0 then the length is 0 regardless of
  /// whether any values are Scalar.
  int64_t length = 0;

  /// \brief Index of this batch in a sorted stream of batches
  ///
  /// This index must be strictly monotonic starting at 0 without gaps, or
  /// it can be set to kUnsequencedIndex if there is no meaningful order.
  int64_t index = kUnsequencedIndex;

  /// \brief The sum of bytes in each buffer referenced by the batch
  ///
  /// Note: Scalars are not counted.
  /// Note: Some values may reference only part of a buffer, for
  ///       example, an array with an offset. The actual data
  ///       visible to this batch will be smaller than the total
  ///       buffer size in this case.
  int64_t TotalBufferSize() const;

  /// \brief Return the value at the i-th index
  template <typename index_type>
  inline const Datum& operator[](index_type i) const {
    return values[i];
  }

  bool Equals(const ExecBatch& other) const;

  /// \brief A convenience for the number of values / arguments.
  int num_values() const { return static_cast<int>(values.size()); }

  ExecBatch Slice(int64_t offset, int64_t length) const;

  Result<ExecBatch> SelectValues(const std::vector<int>& ids) const;

  /// \brief A convenience for returning the types from the batch.
  std::vector<TypeHolder> GetTypes() const {
    std::vector<TypeHolder> result;
    for (const auto& value : this->values) {
      result.emplace_back(value.type());
    }
    return result;
  }

  std::string ToString() const;
};

inline bool operator==(const ExecBatch& l, const ExecBatch& r) { return l.Equals(r); }
inline bool operator!=(const ExecBatch& l, const ExecBatch& r) { return !l.Equals(r); }

ARROW_EXPORT void PrintTo(const ExecBatch&, std::ostream*);

/// @}
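// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream header): one plausible way to
// assemble an ExecBatch from two equal-length arrays. ExecBatch::Make() infers
// and validates the common length; arrow::Datum converts implicitly from
// std::shared_ptr<Array>. The helper name and the arrays `a` and `b` are
// assumptions for illustration only.
//
//   arrow::Result<arrow::compute::ExecBatch> MakeExampleBatch(
//       const std::shared_ptr<arrow::Array>& a,
//       const std::shared_ptr<arrow::Array>& b) {
//     // Length is inferred from the values and checked for consistency.
//     return arrow::compute::ExecBatch::Make({arrow::Datum(a), arrow::Datum(b)});
//   }
// ---------------------------------------------------------------------------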
/// \defgroup compute-internals Utilities for calling functions, useful for those
/// extending the function registry
///
/// @{

struct ExecValue {
  ArraySpan array = {};
  const Scalar* scalar = NULLPTR;

  ExecValue(Scalar* scalar)  // NOLINT implicit conversion
      : scalar(scalar) {}

  ExecValue(ArraySpan array)  // NOLINT implicit conversion
      : array(std::move(array)) {}

  ExecValue(const ArrayData& array) {  // NOLINT implicit conversion
    this->array.SetMembers(array);
  }

  ExecValue() = default;
  ExecValue(const ExecValue& other) = default;
  ExecValue& operator=(const ExecValue& other) = default;
  ExecValue(ExecValue&& other) = default;
  ExecValue& operator=(ExecValue&& other) = default;

  int64_t length() const { return this->is_array() ? this->array.length : 1; }

  bool is_array() const { return this->scalar == NULLPTR; }
  bool is_scalar() const { return !this->is_array(); }

  void SetArray(const ArrayData& array) {
    this->array.SetMembers(array);
    this->scalar = NULLPTR;
  }

  void SetScalar(const Scalar* scalar) { this->scalar = scalar; }

  template <typename ExactType>
  const ExactType& scalar_as() const {
    return ::arrow::internal::checked_cast<const ExactType&>(*this->scalar);
  }

  /// XXX: here temporarily for compatibility with datum, see
  /// e.g. MakeStructExec in scalar_nested.cc
  int64_t null_count() const {
    if (this->is_array()) {
      return this->array.GetNullCount();
    } else {
      return this->scalar->is_valid ? 0 : 1;
    }
  }

  const DataType* type() const {
    if (this->is_array()) {
      return array.type;
    } else {
      return scalar->type.get();
    }
  }
};
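// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream header): how kernel-level code
// typically branches on an ExecValue's storage. The helper name is made up for
// illustration; ArraySpan::length and GetNullCount() are used as declared above.
//
//   int64_t CountValidRows(const arrow::compute::ExecValue& value) {
//     if (value.is_array()) {
//       return value.array.length - value.array.GetNullCount();
//     }
//     // A scalar counts as a single row, valid or not.
//     return value.scalar->is_valid ? 1 : 0;
//   }
// ---------------------------------------------------------------------------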
struct ARROW_EXPORT ExecResult {
  // The default value of the variant is ArraySpan
  std::variant<ArraySpan, std::shared_ptr<ArrayData>> value;

  int64_t length() const {
    if (this->is_array_span()) {
      return this->array_span()->length;
    } else {
      return this->array_data()->length;
    }
  }

  const DataType* type() const {
    if (this->is_array_span()) {
      return this->array_span()->type;
    } else {
      return this->array_data()->type.get();
    }
  }

  const ArraySpan* array_span() const { return &std::get<ArraySpan>(this->value); }
  ArraySpan* array_span_mutable() { return &std::get<ArraySpan>(this->value); }

  bool is_array_span() const { return this->value.index() == 0; }

  const std::shared_ptr<ArrayData>& array_data() const {
    return std::get<std::shared_ptr<ArrayData>>(this->value);
  }
  ArrayData* array_data_mutable() {
    return std::get<std::shared_ptr<ArrayData>>(this->value).get();
  }

  bool is_array_data() const { return this->value.index() == 1; }
};

/// \brief A "lightweight" column batch object which contains no
/// std::shared_ptr objects and does not have any memory ownership
/// semantics. Can represent a view onto an "owning" ExecBatch.
struct ARROW_EXPORT ExecSpan {
  ExecSpan() = default;
  ExecSpan(const ExecSpan& other) = default;
  ExecSpan& operator=(const ExecSpan& other) = default;
  ExecSpan(ExecSpan&& other) = default;
  ExecSpan& operator=(ExecSpan&& other) = default;

  explicit ExecSpan(std::vector<ExecValue> values, int64_t length)
      : length(length), values(std::move(values)) {}

  explicit ExecSpan(const ExecBatch& batch) {
    this->length = batch.length;
    this->values.resize(batch.values.size());
    for (size_t i = 0; i < batch.values.size(); ++i) {
      const Datum& in_value = batch[i];
      ExecValue* out_value = &this->values[i];
      if (in_value.is_array()) {
        out_value->SetArray(*in_value.array());
      } else {
        out_value->SetScalar(in_value.scalar().get());
      }
    }
  }

  /// \brief Return the value at the i-th index
  template <typename index_type>
  inline const ExecValue& operator[](index_type i) const {
    return values[i];
  }

  /// \brief A convenience for the number of values / arguments.
  int num_values() const { return static_cast<int>(values.size()); }

  std::vector<TypeHolder> GetTypes() const {
    std::vector<TypeHolder> result;
    for (const auto& value : this->values) {
      result.emplace_back(value.type());
    }
    return result;
  }

  ExecBatch ToExecBatch() const {
    ExecBatch result;
    result.length = this->length;
    for (const ExecValue& value : this->values) {
      if (value.is_array()) {
        result.values.push_back(value.array.ToArrayData());
      } else {
        result.values.push_back(value.scalar->GetSharedPtr());
      }
    }
    return result;
  }

  int64_t length = 0;
  std::vector<ExecValue> values;
};
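// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream header): ExecSpan as a non-owning
// view. The source ExecBatch must outlive the span; ToExecBatch() copies the
// values back into owning Datums when shared ownership is needed again. The
// function name is hypothetical.
//
//   void ProcessAsView(const arrow::compute::ExecBatch& batch) {
//     arrow::compute::ExecSpan span(batch);  // no buffers are copied
//     // ... hand `span` to kernel-level code while `batch` stays alive ...
//     arrow::compute::ExecBatch owned = span.ToExecBatch();  // re-owning copy
//     (void)owned;
//   }
// ---------------------------------------------------------------------------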
/// \defgroup compute-call-function One-shot calls to compute functions
///
/// @{

/// \brief One-shot invoker for all types of functions.
///
/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs,
/// and wrapping of outputs.
ARROW_EXPORT
Result<Datum> CallFunction(const std::string& func_name, const std::vector<Datum>& args,
                           const FunctionOptions* options, ExecContext* ctx = NULLPTR);

/// \brief Variant of CallFunction which uses a function's default options.
///
/// NB: Some functions require FunctionOptions be provided.
ARROW_EXPORT
Result<Datum> CallFunction(const std::string& func_name, const std::vector<Datum>& args,
                           ExecContext* ctx = NULLPTR);

/// \brief One-shot invoker for all types of functions.
///
/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs,
/// and wrapping of outputs.
ARROW_EXPORT
Result<Datum> CallFunction(const std::string& func_name, const ExecBatch& batch,
                           const FunctionOptions* options, ExecContext* ctx = NULLPTR);

/// \brief Variant of CallFunction which uses a function's default options.
///
/// NB: Some functions require FunctionOptions be provided.
ARROW_EXPORT
Result<Datum> CallFunction(const std::string& func_name, const ExecBatch& batch,
                           ExecContext* ctx = NULLPTR);

/// @}
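// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream header): a one-shot call through
// CallFunction using a function's default options. "add" is a function name
// registered in the default registry; the Datums may hold Arrays, ChunkedArrays
// or Scalars. The wrapper name is hypothetical.
//
//   arrow::Result<arrow::Datum> AddColumns(const arrow::Datum& lhs,
//                                          const arrow::Datum& rhs) {
//     return arrow::compute::CallFunction("add", {lhs, rhs});
//   }
// ---------------------------------------------------------------------------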
/// \defgroup compute-function-executor One-shot calls to obtain function executors
///
/// @{

/// \brief One-shot executor provider for all types of functions.
///
/// This function creates and initializes a `FunctionExecutor` appropriate
/// for the given function name, input types and function options.
ARROW_EXPORT
Result<std::shared_ptr<FunctionExecutor>> GetFunctionExecutor(
    const std::string& func_name, std::vector<TypeHolder> in_types,
    const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR);

/// \brief One-shot executor provider for all types of functions.
///
/// This function creates and initializes a `FunctionExecutor` appropriate
/// for the given function name, input types (taken from the Datum arguments)
/// and function options.
ARROW_EXPORT
Result<std::shared_ptr<FunctionExecutor>> GetFunctionExecutor(
    const std::string& func_name, const std::vector<Datum>& args,
    const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR);

/// @}
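// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream header): reusing one executor for
// many batches of identical input types, instead of re-dispatching per call.
// Error handling is abbreviated with ARROW_ASSIGN_OR_RAISE, which requires the
// enclosing function to return arrow::Status or arrow::Result; the function
// name and the batching scheme are assumptions for illustration.
//
//   arrow::Status AddManyBatches(
//       const std::vector<std::vector<arrow::Datum>>& batches) {
//     ARROW_ASSIGN_OR_RAISE(
//         auto executor,
//         arrow::compute::GetFunctionExecutor("add",
//                                             {arrow::int64(), arrow::int64()}));
//     for (const auto& args : batches) {
//       ARROW_ASSIGN_OR_RAISE(arrow::Datum out, executor->Execute(args));
//       // ... use `out` ...
//     }
//     return arrow::Status::OK();
//   }
// ---------------------------------------------------------------------------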

}  // namespace compute
}  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h
ADDED
@@ -0,0 +1,295 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// This API is EXPERIMENTAL.

#pragma once

#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>

#include "arrow/compute/type_fwd.h"
#include "arrow/datum.h"
#include "arrow/type_fwd.h"
#include "arrow/util/small_vector.h"

namespace arrow {
namespace compute {

/// \defgroup expression-core Expressions to describe data transformations
///
/// @{

/// An unbound expression which maps a single Datum to another Datum.
/// An expression is one of
/// - A literal Datum.
/// - A reference to a single (potentially nested) field of the input Datum.
/// - A call to a compute function, with arguments specified by other Expressions.
class ARROW_EXPORT Expression {
 public:
  struct Call {
    std::string function_name;
    std::vector<Expression> arguments;
    std::shared_ptr<FunctionOptions> options;
    // Cached hash value
    size_t hash;

    // post-Bind properties:
    std::shared_ptr<Function> function;
    const Kernel* kernel = NULLPTR;
    std::shared_ptr<KernelState> kernel_state;
    TypeHolder type;

    void ComputeHash();
  };

  std::string ToString() const;
  bool Equals(const Expression& other) const;
  size_t hash() const;
  struct Hash {
    size_t operator()(const Expression& expr) const { return expr.hash(); }
  };

  /// Bind this expression to the given input type, looking up Kernels and field types.
  /// Some expression simplification may be performed and implicit casts will be inserted.
  /// Any state necessary for execution will be initialized and returned.
  Result<Expression> Bind(const TypeHolder& in, ExecContext* = NULLPTR) const;
  Result<Expression> Bind(const Schema& in_schema, ExecContext* = NULLPTR) const;

  // XXX someday
  // Clone all KernelState in this bound expression. If any function referenced by this
  // expression has mutable KernelState, it is not safe to execute or apply simplification
  // passes to it (or copies of it!) from multiple threads. Cloning state produces new
  // KernelStates where necessary to ensure that Expressions may be manipulated safely
  // on multiple threads.
  // Result<ExpressionState> CloneState() const;
  // Status SetState(ExpressionState);

  /// Return true if all of an expression's field references have explicit types
  /// and all of its functions' kernels are looked up.
  bool IsBound() const;

  /// Return true if this expression is composed only of Scalar literals, field
  /// references, and calls to ScalarFunctions.
  bool IsScalarExpression() const;

  /// Return true if this expression is literal and entirely null.
  bool IsNullLiteral() const;

  /// Return true if this expression could evaluate to true. Will return true for any
  /// unbound or non-boolean Expressions. IsSatisfiable does not (currently) do any
  /// canonicalization or simplification of the expression, so even Expressions
  /// which are unsatisfiable may spuriously return `true` here. This function is
  /// intended for use in predicate pushdown where a filter expression is simplified
  /// by a guarantee, so it assumes that trying to simplify again would be redundant.
  bool IsSatisfiable() const;

  // XXX someday
  // Result<PipelineGraph> GetPipelines();

  bool is_valid() const { return impl_ != NULLPTR; }

  /// Access a Call or return nullptr if this expression is not a call
  const Call* call() const;
  /// Access a Datum or return nullptr if this expression is not a literal
  const Datum* literal() const;
  /// Access a FieldRef or return nullptr if this expression is not a field_ref
  const FieldRef* field_ref() const;

  /// The type to which this expression will evaluate
  const DataType* type() const;
  // XXX someday
  // NullGeneralization::type nullable() const;

  struct Parameter {
    FieldRef ref;

    // post-bind properties
    TypeHolder type;
    ::arrow::internal::SmallVector<int, 2> indices;
  };
  const Parameter* parameter() const;

  Expression() = default;
  explicit Expression(Call call);
  explicit Expression(Datum literal);
  explicit Expression(Parameter parameter);

 private:
  using Impl = std::variant<Datum, Parameter, Call>;
  std::shared_ptr<Impl> impl_;

  ARROW_FRIEND_EXPORT friend bool Identical(const Expression& l, const Expression& r);
};

inline bool operator==(const Expression& l, const Expression& r) { return l.Equals(r); }
inline bool operator!=(const Expression& l, const Expression& r) { return !l.Equals(r); }

ARROW_EXPORT void PrintTo(const Expression&, std::ostream*);

// Factories

ARROW_EXPORT
Expression literal(Datum lit);

template <typename Arg>
Expression literal(Arg&& arg) {
  return literal(Datum(std::forward<Arg>(arg)));
}

ARROW_EXPORT
Expression field_ref(FieldRef ref);

ARROW_EXPORT
Expression call(std::string function, std::vector<Expression> arguments,
                std::shared_ptr<FunctionOptions> options = NULLPTR);

template <typename Options, typename = typename std::enable_if<
                                std::is_base_of<FunctionOptions, Options>::value>::type>
Expression call(std::string function, std::vector<Expression> arguments,
                Options options) {
  return call(std::move(function), std::move(arguments),
              std::make_shared<Options>(std::move(options)));
}
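// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream header): composing an unbound
// expression from the factories above and binding it to a schema. The schema
// is assumed to contain a numeric field named "x"; Bind() inserts any implicit
// casts and looks up the kernels needed for execution. The function name is
// hypothetical.
//
//   arrow::Result<arrow::compute::Expression> BindXPlus3(const arrow::Schema& schema) {
//     namespace cp = arrow::compute;
//     cp::Expression expr = cp::call("add", {cp::field_ref("x"), cp::literal(3)});
//     return expr.Bind(schema);
//   }
// ---------------------------------------------------------------------------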
/// Assemble a list of all fields referenced by an Expression at any depth.
ARROW_EXPORT
std::vector<FieldRef> FieldsInExpression(const Expression&);

/// Check if the expression references any fields.
ARROW_EXPORT
bool ExpressionHasFieldRefs(const Expression&);

struct ARROW_EXPORT KnownFieldValues;

/// Assemble a mapping from field references to known values. This derives known values
/// from "equal" and "is_null" Expressions referencing a field and a literal.
ARROW_EXPORT
Result<KnownFieldValues> ExtractKnownFieldValues(
    const Expression& guaranteed_true_predicate);

/// @}

/// \defgroup expression-passes Functions for modification of Expressions
///
/// @{
///
/// These transform bound expressions. Some transforms utilize a guarantee, which is
/// provided as an Expression which is guaranteed to evaluate to true. The
/// guaranteed_true_predicate need not be bound, but canonicalization is currently
/// deferred to producers of guarantees. For example, in order to be recognized as a
/// guarantee on a field value, an Expression must be a call to "equal" with field_ref
/// LHS and literal RHS. Flipping the arguments, "is_in" with a one-long value_set, or
/// other semantically identical Expressions will not be recognized.

/// Weak canonicalization which establishes guarantees for subsequent passes. Even
/// equivalent Expressions may result in different canonicalized expressions.
/// TODO this could be a strong canonicalization
ARROW_EXPORT
Result<Expression> Canonicalize(Expression, ExecContext* = NULLPTR);

/// Simplify Expressions based on literal arguments (for example, add(null, x) will
/// always be null, so replace the call with a null literal). Includes early evaluation
/// of all calls whose arguments are entirely literal.
ARROW_EXPORT
Result<Expression> FoldConstants(Expression);

/// Simplify Expressions by replacing with known values of the fields which it references.
ARROW_EXPORT
Result<Expression> ReplaceFieldsWithKnownValues(const KnownFieldValues& known_values,
                                                Expression);

/// Simplify an expression by replacing subexpressions based on a guarantee:
/// a boolean expression which is guaranteed to evaluate to `true`. For example, this is
/// used to remove redundant function calls from a filter expression or to replace a
/// reference to a constant-value field with a literal.
ARROW_EXPORT
Result<Expression> SimplifyWithGuarantee(Expression,
                                         const Expression& guaranteed_true_predicate);
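// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream header): simplifying a bound
// filter under a partition guarantee. Note the guarantee follows the shape
// described above ("equal" with field_ref LHS and literal RHS). The schema is
// assumed to contain an integer field "year"; the wrapper name is hypothetical.
//
//   arrow::Result<arrow::compute::Expression> SimplifyYearFilter(
//       const arrow::Schema& schema) {
//     namespace cp = arrow::compute;
//     cp::Expression filter = cp::greater(cp::field_ref("year"), cp::literal(2020));
//     ARROW_ASSIGN_OR_RAISE(filter, filter.Bind(schema));
//     // Under the guarantee year == 2024, the filter reduces to literal(true).
//     return cp::SimplifyWithGuarantee(
//         filter, cp::equal(cp::field_ref("year"), cp::literal(2024)));
//   }
// ---------------------------------------------------------------------------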
/// Replace all named field refs (e.g. "x" or "x.y") with field paths (e.g. [0] or [1,3])
///
/// This isn't usually needed and does not offer any simplification by itself. However,
/// it can be useful to normalize an expression to paths to make it simpler to work with.
ARROW_EXPORT Result<Expression> RemoveNamedRefs(Expression expression);

/// @}

// Execution

/// Create an ExecBatch suitable for passing to ExecuteScalarExpression() from a
/// RecordBatch which may have missing or incorrectly ordered columns.
/// Missing fields will be replaced with null scalars.
ARROW_EXPORT Result<ExecBatch> MakeExecBatch(const Schema& full_schema,
                                             const Datum& partial,
                                             Expression guarantee = literal(true));

/// Execute a scalar expression against the provided state and input ExecBatch. This
/// expression must be bound.
ARROW_EXPORT
Result<Datum> ExecuteScalarExpression(const Expression&, const ExecBatch& input,
                                      ExecContext* = NULLPTR);

/// Convenience function for invoking against a RecordBatch
ARROW_EXPORT
Result<Datum> ExecuteScalarExpression(const Expression&, const Schema& full_schema,
                                      const Datum& partial_input, ExecContext* = NULLPTR);
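// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream header): evaluating a bound scalar
// expression against a RecordBatch by way of MakeExecBatch(). `expr` is assumed
// to already be bound against the same schema; the wrapper name is hypothetical.
//
//   arrow::Result<arrow::Datum> EvaluateOnBatch(
//       const arrow::compute::Expression& expr, const arrow::Schema& schema,
//       const std::shared_ptr<arrow::RecordBatch>& batch) {
//     ARROW_ASSIGN_OR_RAISE(arrow::compute::ExecBatch input,
//                           arrow::compute::MakeExecBatch(schema, batch));
//     return arrow::compute::ExecuteScalarExpression(expr, input);
//   }
// ---------------------------------------------------------------------------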
// Serialization

ARROW_EXPORT
Result<std::shared_ptr<Buffer>> Serialize(const Expression&);

ARROW_EXPORT
Result<Expression> Deserialize(std::shared_ptr<Buffer>);

/// \defgroup expression-convenience Helpers for convenient expression creation
///
/// @{

ARROW_EXPORT Expression project(std::vector<Expression> values,
                                std::vector<std::string> names);

ARROW_EXPORT Expression equal(Expression lhs, Expression rhs);

ARROW_EXPORT Expression not_equal(Expression lhs, Expression rhs);

ARROW_EXPORT Expression less(Expression lhs, Expression rhs);

ARROW_EXPORT Expression less_equal(Expression lhs, Expression rhs);

ARROW_EXPORT Expression greater(Expression lhs, Expression rhs);

ARROW_EXPORT Expression greater_equal(Expression lhs, Expression rhs);

ARROW_EXPORT Expression is_null(Expression lhs, bool nan_is_null = false);

ARROW_EXPORT Expression is_valid(Expression lhs);

ARROW_EXPORT Expression and_(Expression lhs, Expression rhs);
ARROW_EXPORT Expression and_(const std::vector<Expression>&);
ARROW_EXPORT Expression or_(Expression lhs, Expression rhs);
ARROW_EXPORT Expression or_(const std::vector<Expression>&);
ARROW_EXPORT Expression not_(Expression operand);

/// @}

}  // namespace compute
}  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function.h
ADDED
@@ -0,0 +1,394 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// NOTE: API is EXPERIMENTAL and will change without going through a
// deprecation cycle.

#pragma once

#include <string>
#include <utility>
#include <vector>

#include "arrow/compute/kernel.h"
#include "arrow/compute/type_fwd.h"
#include "arrow/datum.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/util/compare.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace compute {

/// \addtogroup compute-functions
/// @{

/// \brief Contains the number of required arguments for the function.
///
/// Naming conventions taken from https://en.wikipedia.org/wiki/Arity.
struct ARROW_EXPORT Arity {
  /// \brief A function taking no arguments
  static Arity Nullary() { return Arity(0, false); }

  /// \brief A function taking 1 argument
  static Arity Unary() { return Arity(1, false); }

  /// \brief A function taking 2 arguments
  static Arity Binary() { return Arity(2, false); }

  /// \brief A function taking 3 arguments
  static Arity Ternary() { return Arity(3, false); }

  /// \brief A function taking a variable number of arguments
  ///
  /// \param[in] min_args the minimum number of arguments required when
  /// invoking the function
  static Arity VarArgs(int min_args = 0) { return Arity(min_args, true); }

  // NOTE: the 0-argument form (default constructor) is required for Cython
  explicit Arity(int num_args = 0, bool is_varargs = false)
      : num_args(num_args), is_varargs(is_varargs) {}

  /// The number of required arguments (or the minimum number for varargs
  /// functions).
  int num_args;

  /// If true, then num_args is the minimum number of required arguments.
  bool is_varargs = false;
};

struct ARROW_EXPORT FunctionDoc {
  /// \brief A one-line summary of the function, using a verb.
  ///
  /// For example, "Add two numeric arrays or scalars".
  std::string summary;

  /// \brief A detailed description of the function, meant to follow the summary.
  std::string description;

  /// \brief Symbolic names (identifiers) for the function arguments.
  ///
  /// Some bindings may use this to generate nicer function signatures.
  std::vector<std::string> arg_names;

  // TODO add argument descriptions?

  /// \brief Name of the options class, if any.
  std::string options_class;

  /// \brief Whether options are required for function execution
  ///
  /// If false, then either the function does not have an options class
  /// or there is a usable default options value.
  bool options_required;

  FunctionDoc() = default;

  FunctionDoc(std::string summary, std::string description,
              std::vector<std::string> arg_names, std::string options_class = "",
              bool options_required = false)
      : summary(std::move(summary)),
        description(std::move(description)),
        arg_names(std::move(arg_names)),
        options_class(std::move(options_class)),
        options_required(options_required) {}

  static const FunctionDoc& Empty();
};

/// \brief An executor of a function with a preconfigured kernel
class ARROW_EXPORT FunctionExecutor {
 public:
  virtual ~FunctionExecutor() = default;
  /// \brief Initialize or re-initialize the preconfigured kernel
  ///
  /// This method may be called zero or more times. Depending on how
  /// the FunctionExecutor was obtained, it may already have been initialized.
  virtual Status Init(const FunctionOptions* options = NULLPTR,
                      ExecContext* exec_ctx = NULLPTR) = 0;
  /// \brief Execute the preconfigured kernel with arguments that must fit it
  ///
  /// The method requires the arguments be castable to the preconfigured types.
  ///
  /// \param[in] args Arguments to execute the function on
  /// \param[in] length Length of arguments batch or -1 to default it. If the
  /// function has no parameters, this determines the batch length, defaulting
  /// to 0. Otherwise, if the function is scalar, this must equal the argument
  /// batch's inferred length or be -1 to default to it. This is ignored for
  /// vector functions.
  virtual Result<Datum> Execute(const std::vector<Datum>& args, int64_t length = -1) = 0;
};

/// \brief Base class for compute functions. Function implementations contain a
/// collection of "kernels" which are implementations of the function for
/// specific argument types. Selecting a viable kernel for executing a function
/// is referred to as "dispatching".
class ARROW_EXPORT Function {
 public:
  /// \brief The kind of function, which indicates in what contexts it is
  /// valid for use.
  enum Kind {
    /// A function that performs scalar data operations on whole arrays of
    /// data. Can generally process Array or Scalar values. The size of the
    /// output will be the same as the size (or broadcasted size, in the case
    /// of mixing Array and Scalar inputs) of the input.
    SCALAR,

    /// A function with array input and output whose behavior depends on the
    /// values of the entire arrays passed, rather than the value of each scalar
    /// value.
    VECTOR,

    /// A function that computes scalar summary statistics from array input.
    SCALAR_AGGREGATE,

    /// A function that computes grouped summary statistics from array input
    /// and an array of group identifiers.
    HASH_AGGREGATE,

    /// A function that dispatches to other functions and does not contain its
    /// own kernels.
    META
  };

  virtual ~Function() = default;

  /// \brief The name of the kernel. The registry enforces uniqueness of names.
  const std::string& name() const { return name_; }

  /// \brief The kind of kernel, which indicates in what contexts it is valid
  /// for use.
  Function::Kind kind() const { return kind_; }

  /// \brief Contains the number of arguments the function requires, or if the
  /// function accepts variable numbers of arguments.
  const Arity& arity() const { return arity_; }

  /// \brief Return the function documentation
  const FunctionDoc& doc() const { return doc_; }

  /// \brief Returns the number of registered kernels for this function.
  virtual int num_kernels() const = 0;

  /// \brief Return a kernel that can execute the function given the exact
  /// argument types (without implicit type casts).
  ///
  /// NB: This function is overridden in CastFunction.
  virtual Result<const Kernel*> DispatchExact(const std::vector<TypeHolder>& types) const;

  /// \brief Return a best-match kernel that can execute the function given the argument
  /// types, after implicit casts are applied.
  ///
  /// \param[in,out] values Argument types. An element may be modified to
  /// indicate that the returned kernel only approximately matches the input
  /// value descriptors; callers are responsible for casting inputs to the type
  /// required by the kernel.
  virtual Result<const Kernel*> DispatchBest(std::vector<TypeHolder>* values) const;

  /// \brief Get a function executor with a best-matching kernel
  ///
  /// The returned executor will by default work with the default FunctionOptions
  /// and KernelContext. If you want to change that, call `FunctionExecutor::Init`.
  virtual Result<std::shared_ptr<FunctionExecutor>> GetBestExecutor(
      std::vector<TypeHolder> inputs) const;

  /// \brief Execute the function eagerly with the passed input arguments with
  /// kernel dispatch, batch iteration, and memory allocation details taken
  /// care of.
  ///
  /// If the `options` pointer is null, then `default_options()` will be used.
  ///
  /// This function can be overridden in subclasses.
  virtual Result<Datum> Execute(const std::vector<Datum>& args,
                                const FunctionOptions* options, ExecContext* ctx) const;

  virtual Result<Datum> Execute(const ExecBatch& batch, const FunctionOptions* options,
                                ExecContext* ctx) const;

  /// \brief Returns the default options for this function.
  ///
  /// Whatever option semantics a Function has, implementations must guarantee
  /// that default_options() is valid to pass to Execute as options.
  const FunctionOptions* default_options() const { return default_options_; }

  virtual Status Validate() const;

 protected:
  Function(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc,
           const FunctionOptions* default_options)
      : name_(std::move(name)),
        kind_(kind),
        arity_(arity),
        doc_(std::move(doc)),
        default_options_(default_options) {}

  Status CheckArity(size_t num_args) const;

  std::string name_;
  Function::Kind kind_;
  Arity arity_;
  const FunctionDoc doc_;
  const FunctionOptions* default_options_ = NULLPTR;
};
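// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream header): looking a Function up in
// the default registry (declared in arrow/compute/registry.h) and executing it
// eagerly. Passing null options falls back to default_options(); passing a
// null ExecContext selects a default context. The wrapper name is hypothetical.
//
//   arrow::Result<arrow::Datum> ExecuteAdd(const arrow::Datum& lhs,
//                                          const arrow::Datum& rhs) {
//     ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::compute::Function> func,
//                           arrow::compute::GetFunctionRegistry()->GetFunction("add"));
//     return func->Execute({lhs, rhs}, /*options=*/NULLPTR, /*ctx=*/NULLPTR);
//   }
// ---------------------------------------------------------------------------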
namespace detail {

template <typename KernelType>
class FunctionImpl : public Function {
 public:
  /// \brief Return pointers to current-available kernels for inspection
  std::vector<const KernelType*> kernels() const {
    std::vector<const KernelType*> result;
    for (const auto& kernel : kernels_) {
      result.push_back(&kernel);
    }
    return result;
  }

  int num_kernels() const override { return static_cast<int>(kernels_.size()); }

 protected:
  FunctionImpl(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc,
               const FunctionOptions* default_options)
      : Function(std::move(name), kind, arity, std::move(doc), default_options) {}

  std::vector<KernelType> kernels_;
};

/// \brief Look up a kernel in a function. If no Kernel is found, nullptr is returned.
ARROW_EXPORT
const Kernel* DispatchExactImpl(const Function* func, const std::vector<TypeHolder>&);

/// \brief Return an error message if no Kernel is found.
ARROW_EXPORT
Status NoMatchingKernel(const Function* func, const std::vector<TypeHolder>&);

}  // namespace detail

/// \brief A function that executes elementwise operations on arrays or
/// scalars, and therefore whose results generally do not depend on the order
/// of the values in the arguments. Accepts and returns arrays that are all of
/// the same size. These functions roughly correspond to the functions used in
/// SQL expressions.
class ARROW_EXPORT ScalarFunction : public detail::FunctionImpl<ScalarKernel> {
 public:
  using KernelType = ScalarKernel;

  ScalarFunction(std::string name, const Arity& arity, FunctionDoc doc,
                 const FunctionOptions* default_options = NULLPTR)
      : detail::FunctionImpl<ScalarKernel>(std::move(name), Function::SCALAR, arity,
                                           std::move(doc), default_options) {}

  /// \brief Add a kernel with given input/output types, no required state
  /// initialization, preallocation for fixed-width types, and default null
  /// handling (intersect validity bitmaps of inputs).
  Status AddKernel(std::vector<InputType> in_types, OutputType out_type,
                   ArrayKernelExec exec, KernelInit init = NULLPTR);

  /// \brief Add a kernel (function implementation). Returns error if the
  /// kernel's signature does not match the function's arity.
  Status AddKernel(ScalarKernel kernel);
};
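// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream header): defining and registering
// a unary scalar function. `ExampleCopyExec` is a hypothetical callable with
// the ArrayKernelExec signature
//   Status(KernelContext*, const ExecSpan&, ExecResult*)
// implemented elsewhere; AddFunction() is declared in arrow/compute/registry.h.
//
//   arrow::Status RegisterExampleCopy() {
//     namespace cp = arrow::compute;
//     auto func = std::make_shared<cp::ScalarFunction>(
//         "example_copy", cp::Arity::Unary(),
//         cp::FunctionDoc("Copy the input", "Returns its argument unchanged.", {"arg"}));
//     ARROW_RETURN_NOT_OK(func->AddKernel({cp::InputType(arrow::int32())},
//                                         arrow::int32(), ExampleCopyExec));
//     return cp::GetFunctionRegistry()->AddFunction(std::move(func));
//   }
// ---------------------------------------------------------------------------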
/// \brief A function that executes general array operations that may yield
/// outputs of different sizes or have results that depend on the whole array
/// contents. These functions roughly correspond to the functions found in
/// non-SQL array languages like APL and its derivatives.
class ARROW_EXPORT VectorFunction : public detail::FunctionImpl<VectorKernel> {
 public:
  using KernelType = VectorKernel;

  VectorFunction(std::string name, const Arity& arity, FunctionDoc doc,
                 const FunctionOptions* default_options = NULLPTR)
      : detail::FunctionImpl<VectorKernel>(std::move(name), Function::VECTOR, arity,
                                           std::move(doc), default_options) {}

  /// \brief Add a simple kernel with given input/output types, no required
  /// state initialization, no data preallocation, and no preallocation of the
  /// validity bitmap.
  Status AddKernel(std::vector<InputType> in_types, OutputType out_type,
                   ArrayKernelExec exec, KernelInit init = NULLPTR);

  /// \brief Add a kernel (function implementation). Returns error if the
  /// kernel's signature does not match the function's arity.
  Status AddKernel(VectorKernel kernel);
};

class ARROW_EXPORT ScalarAggregateFunction
    : public detail::FunctionImpl<ScalarAggregateKernel> {
 public:
  using KernelType = ScalarAggregateKernel;

  ScalarAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc,
                          const FunctionOptions* default_options = NULLPTR)
      : detail::FunctionImpl<ScalarAggregateKernel>(std::move(name),
                                                    Function::SCALAR_AGGREGATE, arity,
                                                    std::move(doc), default_options) {}

  /// \brief Add a kernel (function implementation). Returns error if the
  /// kernel's signature does not match the function's arity.
  Status AddKernel(ScalarAggregateKernel kernel);
};

class ARROW_EXPORT HashAggregateFunction
    : public detail::FunctionImpl<HashAggregateKernel> {
 public:
  using KernelType = HashAggregateKernel;

  HashAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc,
                        const FunctionOptions* default_options = NULLPTR)
      : detail::FunctionImpl<HashAggregateKernel>(std::move(name),
                                                  Function::HASH_AGGREGATE, arity,
                                                  std::move(doc), default_options) {}

  /// \brief Add a kernel (function implementation). Returns error if the
  /// kernel's signature does not match the function's arity.
  Status AddKernel(HashAggregateKernel kernel);
};

/// \brief A function that dispatches to other functions. Must implement
/// MetaFunction::ExecuteImpl.
///
/// For Array, ChunkedArray, and Scalar Datum kinds, may rely on the execution
/// of concrete Function types, but must handle other Datum kinds on its own.
class ARROW_EXPORT MetaFunction : public Function {
 public:
  int num_kernels() const override { return 0; }

  Result<Datum> Execute(const std::vector<Datum>& args, const FunctionOptions* options,
                        ExecContext* ctx) const override;

  Result<Datum> Execute(const ExecBatch& batch, const FunctionOptions* options,
                        ExecContext* ctx) const override;

 protected:
  virtual Result<Datum> ExecuteImpl(const std::vector<Datum>& args,
                                    const FunctionOptions* options,
                                    ExecContext* ctx) const = 0;

  MetaFunction(std::string name, const Arity& arity, FunctionDoc doc,
               const FunctionOptions* default_options = NULLPTR)
      : Function(std::move(name), Function::META, arity, std::move(doc),
                 default_options) {}
};

/// @}

}  // namespace compute
}  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/function_options.h
ADDED
@@ -0,0 +1,81 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// NOTE: API is EXPERIMENTAL and will change without going through a
// deprecation cycle.

#pragma once

#include "arrow/compute/type_fwd.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/type_fwd.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace compute {

/// \addtogroup compute-functions
/// @{

/// \brief Extension point for defining options outside libarrow (but
/// still within this project).
class ARROW_EXPORT FunctionOptionsType {
 public:
  virtual ~FunctionOptionsType() = default;

  virtual const char* type_name() const = 0;
  virtual std::string Stringify(const FunctionOptions&) const = 0;
  virtual bool Compare(const FunctionOptions&, const FunctionOptions&) const = 0;
  virtual Result<std::shared_ptr<Buffer>> Serialize(const FunctionOptions&) const;
  virtual Result<std::unique_ptr<FunctionOptions>> Deserialize(
      const Buffer& buffer) const;
  virtual std::unique_ptr<FunctionOptions> Copy(const FunctionOptions&) const = 0;
};

/// \brief Base class for specifying options configuring a function's behavior,
/// such as error handling.
class ARROW_EXPORT FunctionOptions : public util::EqualityComparable<FunctionOptions> {
 public:
  virtual ~FunctionOptions() = default;

  const FunctionOptionsType* options_type() const { return options_type_; }
  const char* type_name() const { return options_type()->type_name(); }

  bool Equals(const FunctionOptions& other) const;
  std::string ToString() const;
  std::unique_ptr<FunctionOptions> Copy() const;
  /// \brief Serialize an options struct to a buffer.
  Result<std::shared_ptr<Buffer>> Serialize() const;
  /// \brief Deserialize an options struct from a buffer.
  /// Note: this will only look for `type_name` in the default FunctionRegistry;
  /// to use a custom FunctionRegistry, look up the FunctionOptionsType, then
  /// call FunctionOptionsType::Deserialize().
  static Result<std::unique_ptr<FunctionOptions>> Deserialize(
      const std::string& type_name, const Buffer& buffer);

 protected:
  explicit FunctionOptions(const FunctionOptionsType* type) : options_type_(type) {}
  const FunctionOptionsType* options_type_;
};
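// ---------------------------------------------------------------------------
// Editorial sketch (not part of the upstream header): the base interface that
// every concrete options class (declared in the api_*.h headers) exposes.
// `opts` is assumed to be any FunctionOptions reference obtained elsewhere;
// the helper name is hypothetical.
//
//   void InspectOptions(const arrow::compute::FunctionOptions& opts) {
//     std::string repr = opts.ToString();                    // human-readable
//     std::unique_ptr<arrow::compute::FunctionOptions> copy = opts.Copy();
//     bool same = copy->Equals(opts);                        // true for a fresh copy
//     (void)repr;
//     (void)same;
//   }
// ---------------------------------------------------------------------------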
ARROW_EXPORT void PrintTo(const FunctionOptions&, std::ostream*);

/// @}

}  // namespace compute
}  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h
ADDED
@@ -0,0 +1,752 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// NOTE: API is EXPERIMENTAL and will change without going through a
// deprecation cycle

#pragma once

#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "arrow/buffer.h"
#include "arrow/compute/exec.h"
#include "arrow/datum.h"
#include "arrow/memory_pool.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/type.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

// macOS defines PREALLOCATE as a preprocessor macro in the header sys/vnode.h.
// No other BSD seems to do so. The name is used as an identifier in the
// MemAllocation enum.
#if defined(__APPLE__) && defined(PREALLOCATE)
#undef PREALLOCATE
#endif

namespace arrow {
namespace compute {

class FunctionOptions;

/// \brief Base class for opaque kernel-specific state. For example, if there
/// is some kind of initialization required.
struct ARROW_EXPORT KernelState {
  virtual ~KernelState() = default;
};

/// \brief Context/state for the execution of a particular kernel.
class ARROW_EXPORT KernelContext {
 public:
  // Can pass optional backreference; not used consistently for the
  // moment but will be made so in the future
  explicit KernelContext(ExecContext* exec_ctx, const Kernel* kernel = NULLPTR)
      : exec_ctx_(exec_ctx), kernel_(kernel) {}

  /// \brief Allocate a buffer from the context's memory pool. The contents are
  /// not initialized.
  Result<std::shared_ptr<ResizableBuffer>> Allocate(int64_t nbytes);

  /// \brief Allocate a buffer for a bitmap from the context's memory pool. Like
  /// Allocate, the contents of the buffer are not initialized, but the last
  /// byte is preemptively zeroed to help avoid ASAN or valgrind issues.
  Result<std::shared_ptr<ResizableBuffer>> AllocateBitmap(int64_t num_bits);

  /// \brief Assign the active KernelState to be utilized for each stage of
  /// kernel execution. Ownership and memory lifetime of the KernelState must
  /// be minded separately.
  void SetState(KernelState* state) { state_ = state; }

  // Set the kernel that is being invoked, since some kernel
  // implementations will examine the kernel state.
  void SetKernel(const Kernel* kernel) { kernel_ = kernel; }

  KernelState* state() { return state_; }

  /// \brief Configuration related to function execution that is to be shared
  /// across multiple kernels.
  ExecContext* exec_context() { return exec_ctx_; }

  /// \brief The memory pool to use for allocations. For now, it uses the
  /// MemoryPool contained in the ExecContext used to create the KernelContext.
  MemoryPool* memory_pool() { return exec_ctx_->memory_pool(); }

  const Kernel* kernel() const { return kernel_; }

 private:
  ExecContext* exec_ctx_;
  KernelState* state_ = NULLPTR;
  const Kernel* kernel_ = NULLPTR;
};

/// \brief A type-checking interface to permit customizable validation rules
/// for use with InputType and KernelSignature. This is for scenarios where the
/// acceptance is not an exact type instance, such as a TIMESTAMP type for a
/// specific TimeUnit, but permitting any time zone.
struct ARROW_EXPORT TypeMatcher {
  virtual ~TypeMatcher() = default;

  /// \brief Return true if this matcher accepts the data type.
  virtual bool Matches(const DataType& type) const = 0;

  /// \brief A human-interpretable string representation of what the type
  /// matcher checks for, usable when printing KernelSignature or formatting
  /// error messages.
  virtual std::string ToString() const = 0;

  /// \brief Return true if this TypeMatcher contains the same matching rule as
  /// the other. Currently depends on RTTI.
  virtual bool Equals(const TypeMatcher& other) const = 0;
};

namespace match {

/// \brief Match any DataType instance having the same DataType::id.
ARROW_EXPORT std::shared_ptr<TypeMatcher> SameTypeId(Type::type type_id);

/// \brief Match any TimestampType instance having the same unit, but the time
/// zones can be different.
ARROW_EXPORT std::shared_ptr<TypeMatcher> TimestampTypeUnit(TimeUnit::type unit);
ARROW_EXPORT std::shared_ptr<TypeMatcher> Time32TypeUnit(TimeUnit::type unit);
ARROW_EXPORT std::shared_ptr<TypeMatcher> Time64TypeUnit(TimeUnit::type unit);
ARROW_EXPORT std::shared_ptr<TypeMatcher> DurationTypeUnit(TimeUnit::type unit);

// \brief Match any integer type
ARROW_EXPORT std::shared_ptr<TypeMatcher> Integer();

// Match types using 32-bit varbinary representation
ARROW_EXPORT std::shared_ptr<TypeMatcher> BinaryLike();

// Match types using 64-bit varbinary representation
ARROW_EXPORT std::shared_ptr<TypeMatcher> LargeBinaryLike();

// Match any fixed binary type
ARROW_EXPORT std::shared_ptr<TypeMatcher> FixedSizeBinaryLike();

// \brief Match any primitive type (boolean or any type representable as a C
// Type)
ARROW_EXPORT std::shared_ptr<TypeMatcher> Primitive();

// \brief Match any integer type that can be used as run-end in run-end encoded
// arrays
ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndInteger();

/// \brief Match run-end encoded types that use any valid run-end type and
/// encode specific value types
///
/// @param[in] value_type_matcher a matcher that is applied to the values field
ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(
    std::shared_ptr<TypeMatcher> value_type_matcher);

/// \brief Match run-end encoded types that use any valid run-end type and
/// encode specific value types
///
/// @param[in] value_type_id a type id that the type of the values field should match
ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(Type::type value_type_id);

/// \brief Match run-end encoded types that encode specific run-end and value types
///
/// @param[in] run_end_type_matcher a matcher that is applied to the run_ends field
/// @param[in] value_type_matcher a matcher that is applied to the values field
ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(
    std::shared_ptr<TypeMatcher> run_end_type_matcher,
    std::shared_ptr<TypeMatcher> value_type_matcher);

}  // namespace match
/// \brief An object used for type-checking arguments to be passed to a kernel
|
178 |
+
/// and stored in a KernelSignature. The type-checking rule can be supplied
|
179 |
+
/// either with an exact DataType instance or a custom TypeMatcher.
|
180 |
+
class ARROW_EXPORT InputType {
|
181 |
+
public:
|
182 |
+
/// \brief The kind of type-checking rule that the InputType contains.
|
183 |
+
enum Kind {
|
184 |
+
/// \brief Accept any value type.
|
185 |
+
ANY_TYPE,
|
186 |
+
|
187 |
+
/// \brief A fixed arrow::DataType and will only exact match having this
|
188 |
+
/// exact type (e.g. same TimestampType unit, same decimal scale and
|
189 |
+
/// precision, or same nested child types).
|
190 |
+
EXACT_TYPE,
|
191 |
+
|
192 |
+
/// \brief Uses a TypeMatcher implementation to check the type.
|
193 |
+
USE_TYPE_MATCHER
|
194 |
+
};
|
195 |
+
|
196 |
+
/// \brief Accept any value type
|
197 |
+
InputType() : kind_(ANY_TYPE) {}
|
198 |
+
|
199 |
+
/// \brief Accept an exact value type.
|
200 |
+
InputType(std::shared_ptr<DataType> type) // NOLINT implicit construction
|
201 |
+
: kind_(EXACT_TYPE), type_(std::move(type)) {}
|
202 |
+
|
203 |
+
/// \brief Use the passed TypeMatcher to type check.
|
204 |
+
InputType(std::shared_ptr<TypeMatcher> type_matcher) // NOLINT implicit construction
|
205 |
+
: kind_(USE_TYPE_MATCHER), type_matcher_(std::move(type_matcher)) {}
|
206 |
+
|
207 |
+
/// \brief Match any type with the given Type::type. Uses a TypeMatcher for
|
208 |
+
/// its implementation.
|
209 |
+
InputType(Type::type type_id) // NOLINT implicit construction
|
210 |
+
: InputType(match::SameTypeId(type_id)) {}
|
211 |
+
|
212 |
+
InputType(const InputType& other) { CopyInto(other); }
|
213 |
+
|
214 |
+
void operator=(const InputType& other) { CopyInto(other); }
|
215 |
+
|
216 |
+
InputType(InputType&& other) { MoveInto(std::forward<InputType>(other)); }
|
217 |
+
|
218 |
+
void operator=(InputType&& other) { MoveInto(std::forward<InputType>(other)); }
|
219 |
+
|
220 |
+
// \brief Match any input (array, scalar of any type)
|
221 |
+
static InputType Any() { return InputType(); }
|
222 |
+
|
223 |
+
/// \brief Return true if this input type matches the same type cases as the
|
224 |
+
/// other.
|
225 |
+
bool Equals(const InputType& other) const;
|
226 |
+
|
227 |
+
bool operator==(const InputType& other) const { return this->Equals(other); }
|
228 |
+
|
229 |
+
bool operator!=(const InputType& other) const { return !(*this == other); }
|
230 |
+
|
231 |
+
/// \brief Return hash code.
|
232 |
+
size_t Hash() const;
|
233 |
+
|
234 |
+
/// \brief Render a human-readable string representation.
|
235 |
+
std::string ToString() const;
|
236 |
+
|
237 |
+
/// \brief Return true if the Datum matches this argument kind in
|
238 |
+
/// type (and only allows scalar or array-like Datums).
|
239 |
+
bool Matches(const Datum& value) const;
|
240 |
+
|
241 |
+
/// \brief Return true if the type matches this InputType
|
242 |
+
bool Matches(const DataType& type) const;
|
243 |
+
|
244 |
+
/// \brief The type matching rule that this InputType uses.
|
245 |
+
Kind kind() const { return kind_; }
|
246 |
+
|
247 |
+
/// \brief For InputType::EXACT_TYPE kind, the exact type that this InputType
|
248 |
+
/// must match. Otherwise this function should not be used and will assert in
|
249 |
+
/// debug builds.
|
250 |
+
const std::shared_ptr<DataType>& type() const;
|
251 |
+
|
252 |
+
/// \brief For InputType::USE_TYPE_MATCHER, the TypeMatcher to be used for
|
253 |
+
/// checking the type of a value. Otherwise this function should not be used
|
254 |
+
/// and will assert in debug builds.
|
255 |
+
const TypeMatcher& type_matcher() const;
|
256 |
+
|
257 |
+
private:
|
258 |
+
void CopyInto(const InputType& other) {
|
259 |
+
this->kind_ = other.kind_;
|
260 |
+
this->type_ = other.type_;
|
261 |
+
this->type_matcher_ = other.type_matcher_;
|
262 |
+
}
|
263 |
+
|
264 |
+
void MoveInto(InputType&& other) {
|
265 |
+
this->kind_ = other.kind_;
|
266 |
+
this->type_ = std::move(other.type_);
|
267 |
+
this->type_matcher_ = std::move(other.type_matcher_);
|
268 |
+
}
|
269 |
+
|
270 |
+
Kind kind_;
|
271 |
+
|
272 |
+
// For EXACT_TYPE Kind
|
273 |
+
std::shared_ptr<DataType> type_;
|
274 |
+
|
275 |
+
// For USE_TYPE_MATCHER Kind
|
276 |
+
std::shared_ptr<TypeMatcher> type_matcher_;
|
277 |
+
};
|
278 |
+
|
279 |
+
/// \brief Container to capture both exact and input-dependent output types.
|
280 |
+
class ARROW_EXPORT OutputType {
|
281 |
+
public:
|
282 |
+
/// \brief An enum indicating whether the value type is an invariant fixed
|
283 |
+
/// value or one that's computed by a kernel-defined resolver function.
|
284 |
+
enum ResolveKind { FIXED, COMPUTED };
|
285 |
+
|
286 |
+
/// Type resolution function. Given input types, return output type. This
|
287 |
+
/// function MAY may use the kernel state to decide the output type based on
|
288 |
+
/// the FunctionOptions.
|
289 |
+
///
|
290 |
+
/// This function SHOULD _not_ be used to check for arity, that is to be
|
291 |
+
/// performed one or more layers above.
|
292 |
+
using Resolver =
|
293 |
+
std::function<Result<TypeHolder>(KernelContext*, const std::vector<TypeHolder>&)>;
|
294 |
+
|
295 |
+
/// \brief Output an exact type
|
296 |
+
OutputType(std::shared_ptr<DataType> type) // NOLINT implicit construction
|
297 |
+
: kind_(FIXED), type_(std::move(type)) {}
|
298 |
+
|
299 |
+
/// \brief Output a computed type depending on actual input types
|
300 |
+
template <typename Fn>
|
301 |
+
OutputType(Fn resolver) // NOLINT implicit construction
|
302 |
+
: kind_(COMPUTED), resolver_(std::move(resolver)) {}
|
303 |
+
|
304 |
+
OutputType(const OutputType& other) {
|
305 |
+
this->kind_ = other.kind_;
|
306 |
+
this->type_ = other.type_;
|
307 |
+
this->resolver_ = other.resolver_;
|
308 |
+
}
|
309 |
+
|
310 |
+
OutputType(OutputType&& other) {
|
311 |
+
this->kind_ = other.kind_;
|
312 |
+
this->type_ = std::move(other.type_);
|
313 |
+
this->resolver_ = other.resolver_;
|
314 |
+
}
|
315 |
+
|
316 |
+
OutputType& operator=(const OutputType&) = default;
|
317 |
+
OutputType& operator=(OutputType&&) = default;
|
318 |
+
|
319 |
+
/// \brief Return the type of the expected output value of the kernel given
|
320 |
+
/// the input argument types. The resolver may make use of state information
|
321 |
+
/// kept in the KernelContext.
|
322 |
+
Result<TypeHolder> Resolve(KernelContext* ctx,
|
323 |
+
const std::vector<TypeHolder>& args) const;
|
324 |
+
|
325 |
+
/// \brief The exact output value type for the FIXED kind.
|
326 |
+
const std::shared_ptr<DataType>& type() const;
|
327 |
+
|
328 |
+
/// \brief For use with COMPUTED resolution strategy. It may be more
|
329 |
+
/// convenient to invoke this with OutputType::Resolve returned from this
|
330 |
+
/// method.
|
331 |
+
const Resolver& resolver() const;
|
332 |
+
|
333 |
+
/// \brief Render a human-readable string representation.
|
334 |
+
std::string ToString() const;
|
335 |
+
|
336 |
+
/// \brief Return the kind of type resolution of this output type, whether
|
337 |
+
/// fixed/invariant or computed by a resolver.
|
338 |
+
ResolveKind kind() const { return kind_; }
|
339 |
+
|
340 |
+
private:
|
341 |
+
ResolveKind kind_;
|
342 |
+
|
343 |
+
// For FIXED resolution
|
344 |
+
std::shared_ptr<DataType> type_;
|
345 |
+
|
346 |
+
// For COMPUTED resolution
|
347 |
+
Resolver resolver_ = NULLPTR;
|
348 |
+
};
|
349 |
+
|
350 |
+
/// \brief Holds the input types and output type of the kernel.
|
351 |
+
///
|
352 |
+
/// VarArgs functions with minimum N arguments should pass up to N input types to be
|
353 |
+
/// used to validate the input types of a function invocation. The first N-1 types
|
354 |
+
/// will be matched against the first N-1 arguments, and the last type will be
|
355 |
+
/// matched against the remaining arguments.
|
356 |
+
class ARROW_EXPORT KernelSignature {
|
357 |
+
public:
|
358 |
+
KernelSignature(std::vector<InputType> in_types, OutputType out_type,
|
359 |
+
bool is_varargs = false);
|
360 |
+
|
361 |
+
/// \brief Convenience ctor since make_shared can be awkward
|
362 |
+
static std::shared_ptr<KernelSignature> Make(std::vector<InputType> in_types,
|
363 |
+
OutputType out_type,
|
364 |
+
bool is_varargs = false);
|
365 |
+
|
366 |
+
/// \brief Return true if the signature if compatible with the list of input
|
367 |
+
/// value descriptors.
|
368 |
+
bool MatchesInputs(const std::vector<TypeHolder>& types) const;
|
369 |
+
|
370 |
+
/// \brief Returns true if the input types of each signature are
|
371 |
+
/// equal. Well-formed functions should have a deterministic output type
|
372 |
+
/// given input types, but currently it is the responsibility of the
|
373 |
+
/// developer to ensure this.
|
374 |
+
bool Equals(const KernelSignature& other) const;
|
375 |
+
|
376 |
+
bool operator==(const KernelSignature& other) const { return this->Equals(other); }
|
377 |
+
|
378 |
+
bool operator!=(const KernelSignature& other) const { return !(*this == other); }
|
379 |
+
|
380 |
+
/// \brief Compute a hash code for the signature
|
381 |
+
size_t Hash() const;
|
382 |
+
|
383 |
+
/// \brief The input types for the kernel. For VarArgs functions, this should
|
384 |
+
/// generally contain a single validator to use for validating all of the
|
385 |
+
/// function arguments.
|
386 |
+
const std::vector<InputType>& in_types() const { return in_types_; }
|
387 |
+
|
388 |
+
/// \brief The output type for the kernel. Use Resolve to return the
|
389 |
+
/// exact output given input argument types, since many kernels'
|
390 |
+
/// output types depend on their input types (or their type
|
391 |
+
/// metadata).
|
392 |
+
const OutputType& out_type() const { return out_type_; }
|
393 |
+
|
394 |
+
/// \brief Render a human-readable string representation
|
395 |
+
std::string ToString() const;
|
396 |
+
|
397 |
+
bool is_varargs() const { return is_varargs_; }
|
398 |
+
|
399 |
+
private:
|
400 |
+
std::vector<InputType> in_types_;
|
401 |
+
OutputType out_type_;
|
402 |
+
bool is_varargs_;
|
403 |
+
|
404 |
+
// For caching the hash code after it's computed the first time
|
405 |
+
mutable uint64_t hash_code_;
|
406 |
+
};
|
407 |
+
|
408 |
+
/// \brief A function may contain multiple variants of a kernel for a given
|
409 |
+
/// type combination for different SIMD levels. Based on the active system's
|
410 |
+
/// CPU info or the user's preferences, we can elect to use one over the other.
|
411 |
+
struct SimdLevel {
|
412 |
+
enum type { NONE = 0, SSE4_2, AVX, AVX2, AVX512, NEON, MAX };
|
413 |
+
};
|
414 |
+
|
415 |
+
/// \brief The strategy to use for propagating or otherwise populating the
|
416 |
+
/// validity bitmap of a kernel output.
|
417 |
+
struct NullHandling {
|
418 |
+
enum type {
|
419 |
+
/// Compute the output validity bitmap by intersecting the validity bitmaps
|
420 |
+
/// of the arguments using bitwise-and operations. This means that values
|
421 |
+
/// in the output are valid/non-null only if the corresponding values in
|
422 |
+
/// all input arguments were valid/non-null. Kernel generally need not
|
423 |
+
/// touch the bitmap thereafter, but a kernel's exec function is permitted
|
424 |
+
/// to alter the bitmap after the null intersection is computed if it needs
|
425 |
+
/// to.
|
426 |
+
INTERSECTION,
|
427 |
+
|
428 |
+
/// Kernel expects a pre-allocated buffer to write the result bitmap
|
429 |
+
/// into. The preallocated memory is not zeroed (except for the last byte),
|
430 |
+
/// so the kernel should ensure to completely populate the bitmap.
|
431 |
+
COMPUTED_PREALLOCATE,
|
432 |
+
|
433 |
+
/// Kernel allocates and sets the validity bitmap of the output.
|
434 |
+
COMPUTED_NO_PREALLOCATE,
|
435 |
+
|
436 |
+
/// Kernel output is never null and a validity bitmap does not need to be
|
437 |
+
/// allocated.
|
438 |
+
OUTPUT_NOT_NULL
|
439 |
+
};
|
440 |
+
};
|
441 |
+
|
442 |
+
/// \brief The preference for memory preallocation of fixed-width type outputs
|
443 |
+
/// in kernel execution.
|
444 |
+
struct MemAllocation {
|
445 |
+
enum type {
|
446 |
+
// For data types that support pre-allocation (i.e. fixed-width), the
|
447 |
+
// kernel expects to be provided a pre-allocated data buffer to write
|
448 |
+
// into. Non-fixed-width types must always allocate their own data
|
449 |
+
// buffers. The allocation made for the same length as the execution batch,
|
450 |
+
// so vector kernels yielding differently sized output should not use this.
|
451 |
+
//
|
452 |
+
// It is valid for the data to not be preallocated but the validity bitmap
|
453 |
+
// is (or is computed using the intersection/bitwise-and method).
|
454 |
+
//
|
455 |
+
// For variable-size output types like BinaryType or StringType, or for
|
456 |
+
// nested types, this option has no effect.
|
457 |
+
PREALLOCATE,
|
458 |
+
|
459 |
+
// The kernel is responsible for allocating its own data buffer for
|
460 |
+
// fixed-width type outputs.
|
461 |
+
NO_PREALLOCATE
|
462 |
+
};
|
463 |
+
};
|
464 |
+
|
465 |
+
struct Kernel;
|
466 |
+
|
467 |
+
/// \brief Arguments to pass to an KernelInit function. A struct is used to help
|
468 |
+
/// avoid API breakage should the arguments passed need to be expanded.
|
469 |
+
struct KernelInitArgs {
|
470 |
+
/// \brief A pointer to the kernel being initialized. The init function may
|
471 |
+
/// depend on the kernel's KernelSignature or other data contained there.
|
472 |
+
const Kernel* kernel;
|
473 |
+
|
474 |
+
/// \brief The types of the input arguments that the kernel is
|
475 |
+
/// about to be executed against.
|
476 |
+
const std::vector<TypeHolder>& inputs;
|
477 |
+
|
478 |
+
/// \brief Opaque options specific to this kernel. May be nullptr for functions
|
479 |
+
/// that do not require options.
|
480 |
+
const FunctionOptions* options;
|
481 |
+
};
|
482 |
+
|
483 |
+
/// \brief Common initializer function for all kernel types.
|
484 |
+
using KernelInit = std::function<Result<std::unique_ptr<KernelState>>(
|
485 |
+
KernelContext*, const KernelInitArgs&)>;
|
486 |
+
|
487 |
+
/// \brief Base type for kernels. Contains the function signature and
|
488 |
+
/// optionally the state initialization function, along with some common
|
489 |
+
/// attributes
|
490 |
+
struct ARROW_EXPORT Kernel {
|
491 |
+
Kernel() = default;
|
492 |
+
|
493 |
+
Kernel(std::shared_ptr<KernelSignature> sig, KernelInit init)
|
494 |
+
: signature(std::move(sig)), init(std::move(init)) {}
|
495 |
+
|
496 |
+
Kernel(std::vector<InputType> in_types, OutputType out_type, KernelInit init)
|
497 |
+
: Kernel(KernelSignature::Make(std::move(in_types), std::move(out_type)),
|
498 |
+
std::move(init)) {}
|
499 |
+
|
500 |
+
/// \brief The "signature" of the kernel containing the InputType input
|
501 |
+
/// argument validators and OutputType output type resolver.
|
502 |
+
std::shared_ptr<KernelSignature> signature;
|
503 |
+
|
504 |
+
/// \brief Create a new KernelState for invocations of this kernel, e.g. to
|
505 |
+
/// set up any options or state relevant for execution.
|
506 |
+
KernelInit init;
|
507 |
+
|
508 |
+
/// \brief Create a vector of new KernelState for invocations of this kernel.
|
509 |
+
static Status InitAll(KernelContext*, const KernelInitArgs&,
|
510 |
+
std::vector<std::unique_ptr<KernelState>>*);
|
511 |
+
|
512 |
+
/// \brief Indicates whether execution can benefit from parallelization
|
513 |
+
/// (splitting large chunks into smaller chunks and using multiple
|
514 |
+
/// threads). Some kernels may not support parallel execution at
|
515 |
+
/// all. Synchronization and concurrency-related issues are currently the
|
516 |
+
/// responsibility of the Kernel's implementation.
|
517 |
+
bool parallelizable = true;
|
518 |
+
|
519 |
+
/// \brief Indicates the level of SIMD instruction support in the host CPU is
|
520 |
+
/// required to use the function. The intention is for functions to be able to
|
521 |
+
/// contain multiple kernels with the same signature but different levels of SIMD,
|
522 |
+
/// so that the most optimized kernel supported on a host's processor can be chosen.
|
523 |
+
SimdLevel::type simd_level = SimdLevel::NONE;
|
524 |
+
|
525 |
+
// Additional kernel-specific data
|
526 |
+
std::shared_ptr<KernelState> data;
|
527 |
+
};
|
528 |
+
|
529 |
+
/// \brief The scalar kernel execution API that must be implemented for SCALAR
|
530 |
+
/// kernel types. This includes both stateless and stateful kernels. Kernels
|
531 |
+
/// depending on some execution state access that state via subclasses of
|
532 |
+
/// KernelState set on the KernelContext object. Implementations should
|
533 |
+
/// endeavor to write into pre-allocated memory if they are able, though for
|
534 |
+
/// some kernels (e.g. in cases when a builder like StringBuilder) must be
|
535 |
+
/// employed this may not be possible.
|
536 |
+
using ArrayKernelExec = Status (*)(KernelContext*, const ExecSpan&, ExecResult*);
|
537 |
+
|
538 |
+
/// \brief Kernel data structure for implementations of ScalarFunction. In
|
539 |
+
/// addition to the members found in Kernel, contains the null handling
|
540 |
+
/// and memory pre-allocation preferences.
|
541 |
+
struct ARROW_EXPORT ScalarKernel : public Kernel {
|
542 |
+
ScalarKernel() = default;
|
543 |
+
|
544 |
+
ScalarKernel(std::shared_ptr<KernelSignature> sig, ArrayKernelExec exec,
|
545 |
+
KernelInit init = NULLPTR)
|
546 |
+
: Kernel(std::move(sig), init), exec(exec) {}
|
547 |
+
|
548 |
+
ScalarKernel(std::vector<InputType> in_types, OutputType out_type, ArrayKernelExec exec,
|
549 |
+
KernelInit init = NULLPTR)
|
550 |
+
: Kernel(std::move(in_types), std::move(out_type), std::move(init)), exec(exec) {}
|
551 |
+
|
552 |
+
/// \brief Perform a single invocation of this kernel. Depending on the
|
553 |
+
/// implementation, it may only write into preallocated memory, while in some
|
554 |
+
/// cases it will allocate its own memory. Any required state is managed
|
555 |
+
/// through the KernelContext.
|
556 |
+
ArrayKernelExec exec;
|
557 |
+
|
558 |
+
/// \brief Writing execution results into larger contiguous allocations
|
559 |
+
/// requires that the kernel be able to write into sliced output ArrayData*,
|
560 |
+
/// including sliced output validity bitmaps. Some kernel implementations may
|
561 |
+
/// not be able to do this, so setting this to false disables this
|
562 |
+
/// functionality.
|
563 |
+
bool can_write_into_slices = true;
|
564 |
+
|
565 |
+
// For scalar functions preallocated data and intersecting arg validity
|
566 |
+
// bitmaps is a reasonable default
|
567 |
+
NullHandling::type null_handling = NullHandling::INTERSECTION;
|
568 |
+
MemAllocation::type mem_allocation = MemAllocation::PREALLOCATE;
|
569 |
+
};
|
570 |
+
|
571 |
+
// ----------------------------------------------------------------------
|
572 |
+
// VectorKernel (for VectorFunction)
|
573 |
+
|
574 |
+
/// \brief Kernel data structure for implementations of VectorFunction. In
|
575 |
+
/// contains an optional finalizer function, the null handling and memory
|
576 |
+
/// pre-allocation preferences (which have different defaults from
|
577 |
+
/// ScalarKernel), and some other execution-related options.
|
578 |
+
struct ARROW_EXPORT VectorKernel : public Kernel {
|
579 |
+
/// \brief See VectorKernel::finalize member for usage
|
580 |
+
using FinalizeFunc = std::function<Status(KernelContext*, std::vector<Datum>*)>;
|
581 |
+
|
582 |
+
/// \brief Function for executing a stateful VectorKernel against a
|
583 |
+
/// ChunkedArray input. Does not need to be defined for all VectorKernels
|
584 |
+
using ChunkedExec = Status (*)(KernelContext*, const ExecBatch&, Datum* out);
|
585 |
+
|
586 |
+
VectorKernel() = default;
|
587 |
+
|
588 |
+
VectorKernel(std::vector<InputType> in_types, OutputType out_type, ArrayKernelExec exec,
|
589 |
+
KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR)
|
590 |
+
: Kernel(std::move(in_types), std::move(out_type), std::move(init)),
|
591 |
+
exec(exec),
|
592 |
+
finalize(std::move(finalize)) {}
|
593 |
+
|
594 |
+
VectorKernel(std::shared_ptr<KernelSignature> sig, ArrayKernelExec exec,
|
595 |
+
KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR)
|
596 |
+
: Kernel(std::move(sig), std::move(init)),
|
597 |
+
exec(exec),
|
598 |
+
finalize(std::move(finalize)) {}
|
599 |
+
|
600 |
+
/// \brief Perform a single invocation of this kernel. Any required state is
|
601 |
+
/// managed through the KernelContext.
|
602 |
+
ArrayKernelExec exec;
|
603 |
+
|
604 |
+
/// \brief Execute the kernel on a ChunkedArray. Does not need to be defined
|
605 |
+
ChunkedExec exec_chunked = NULLPTR;
|
606 |
+
|
607 |
+
/// \brief For VectorKernel, convert intermediate results into finalized
|
608 |
+
/// results. Mutates input argument. Some kernels may accumulate state
|
609 |
+
/// (example: hashing-related functions) through processing chunked inputs, and
|
610 |
+
/// then need to attach some accumulated state to each of the outputs of
|
611 |
+
/// processing each chunk of data.
|
612 |
+
FinalizeFunc finalize;
|
613 |
+
|
614 |
+
/// Since vector kernels generally are implemented rather differently from
|
615 |
+
/// scalar/elementwise kernels (and they may not even yield arrays of the same
|
616 |
+
/// size), so we make the developer opt-in to any memory preallocation rather
|
617 |
+
/// than having to turn it off.
|
618 |
+
NullHandling::type null_handling = NullHandling::COMPUTED_NO_PREALLOCATE;
|
619 |
+
MemAllocation::type mem_allocation = MemAllocation::NO_PREALLOCATE;
|
620 |
+
|
621 |
+
/// \brief Writing execution results into larger contiguous allocations
|
622 |
+
/// requires that the kernel be able to write into sliced output ArrayData*,
|
623 |
+
/// including sliced output validity bitmaps. Some kernel implementations may
|
624 |
+
/// not be able to do this, so setting this to false disables this
|
625 |
+
/// functionality.
|
626 |
+
bool can_write_into_slices = true;
|
627 |
+
|
628 |
+
/// Some vector kernels can do chunkwise execution using ExecSpanIterator,
|
629 |
+
/// in some cases accumulating some state. Other kernels (like Take) need to
|
630 |
+
/// be passed whole arrays and don't work on ChunkedArray inputs
|
631 |
+
bool can_execute_chunkwise = true;
|
632 |
+
|
633 |
+
/// Some kernels (like unique and value_counts) yield non-chunked output from
|
634 |
+
/// chunked-array inputs. This option controls how the results are boxed when
|
635 |
+
/// returned from ExecVectorFunction
|
636 |
+
///
|
637 |
+
/// true -> ChunkedArray
|
638 |
+
/// false -> Array
|
639 |
+
bool output_chunked = true;
|
640 |
+
};
|
641 |
+
|
642 |
+
// ----------------------------------------------------------------------
|
643 |
+
// ScalarAggregateKernel (for ScalarAggregateFunction)
|
644 |
+
|
645 |
+
using ScalarAggregateConsume = Status (*)(KernelContext*, const ExecSpan&);
|
646 |
+
using ScalarAggregateMerge = Status (*)(KernelContext*, KernelState&&, KernelState*);
|
647 |
+
// Finalize returns Datum to permit multiple return values
|
648 |
+
using ScalarAggregateFinalize = Status (*)(KernelContext*, Datum*);
|
649 |
+
|
650 |
+
/// \brief Kernel data structure for implementations of
|
651 |
+
/// ScalarAggregateFunction. The four necessary components of an aggregation
|
652 |
+
/// kernel are the init, consume, merge, and finalize functions.
|
653 |
+
///
|
654 |
+
/// * init: creates a new KernelState for a kernel.
|
655 |
+
/// * consume: processes an ExecSpan and updates the KernelState found in the
|
656 |
+
/// KernelContext.
|
657 |
+
/// * merge: combines one KernelState with another.
|
658 |
+
/// * finalize: produces the end result of the aggregation using the
|
659 |
+
/// KernelState in the KernelContext.
|
660 |
+
struct ARROW_EXPORT ScalarAggregateKernel : public Kernel {
|
661 |
+
ScalarAggregateKernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
|
662 |
+
ScalarAggregateConsume consume, ScalarAggregateMerge merge,
|
663 |
+
ScalarAggregateFinalize finalize, const bool ordered)
|
664 |
+
: Kernel(std::move(sig), std::move(init)),
|
665 |
+
consume(consume),
|
666 |
+
merge(merge),
|
667 |
+
finalize(finalize),
|
668 |
+
ordered(ordered) {}
|
669 |
+
|
670 |
+
ScalarAggregateKernel(std::vector<InputType> in_types, OutputType out_type,
|
671 |
+
KernelInit init, ScalarAggregateConsume consume,
|
672 |
+
ScalarAggregateMerge merge, ScalarAggregateFinalize finalize,
|
673 |
+
const bool ordered)
|
674 |
+
: ScalarAggregateKernel(
|
675 |
+
KernelSignature::Make(std::move(in_types), std::move(out_type)),
|
676 |
+
std::move(init), consume, merge, finalize, ordered) {}
|
677 |
+
|
678 |
+
/// \brief Merge a vector of KernelStates into a single KernelState.
|
679 |
+
/// The merged state will be returned and will be set on the KernelContext.
|
680 |
+
static Result<std::unique_ptr<KernelState>> MergeAll(
|
681 |
+
const ScalarAggregateKernel* kernel, KernelContext* ctx,
|
682 |
+
std::vector<std::unique_ptr<KernelState>> states);
|
683 |
+
|
684 |
+
ScalarAggregateConsume consume;
|
685 |
+
ScalarAggregateMerge merge;
|
686 |
+
ScalarAggregateFinalize finalize;
|
687 |
+
/// \brief Whether this kernel requires ordering
|
688 |
+
/// Some aggregations, such as, "first", requires some kind of input order. The
|
689 |
+
/// order can be implicit, e.g., the order of the input data, or explicit, e.g.
|
690 |
+
/// the ordering specified with a window aggregation.
|
691 |
+
/// The caller of the aggregate kernel is responsible for passing data in some
|
692 |
+
/// defined order to the kernel. The flag here is a way for the kernel to tell
|
693 |
+
/// the caller that data passed to the kernel must be defined in some order.
|
694 |
+
bool ordered = false;
|
695 |
+
};
|
696 |
+
|
697 |
+
// ----------------------------------------------------------------------
|
698 |
+
// HashAggregateKernel (for HashAggregateFunction)
|
699 |
+
|
700 |
+
using HashAggregateResize = Status (*)(KernelContext*, int64_t);
|
701 |
+
using HashAggregateConsume = Status (*)(KernelContext*, const ExecSpan&);
|
702 |
+
using HashAggregateMerge = Status (*)(KernelContext*, KernelState&&, const ArrayData&);
|
703 |
+
|
704 |
+
// Finalize returns Datum to permit multiple return values
|
705 |
+
using HashAggregateFinalize = Status (*)(KernelContext*, Datum*);
|
706 |
+
|
707 |
+
/// \brief Kernel data structure for implementations of
|
708 |
+
/// HashAggregateFunction. The four necessary components of an aggregation
|
709 |
+
/// kernel are the init, consume, merge, and finalize functions.
|
710 |
+
///
|
711 |
+
/// * init: creates a new KernelState for a kernel.
|
712 |
+
/// * resize: ensure that the KernelState can accommodate the specified number of groups.
|
713 |
+
/// * consume: processes an ExecSpan (which includes the argument as well
|
714 |
+
/// as an array of group identifiers) and updates the KernelState found in the
|
715 |
+
/// KernelContext.
|
716 |
+
/// * merge: combines one KernelState with another.
|
717 |
+
/// * finalize: produces the end result of the aggregation using the
|
718 |
+
/// KernelState in the KernelContext.
|
719 |
+
struct ARROW_EXPORT HashAggregateKernel : public Kernel {
|
720 |
+
HashAggregateKernel() = default;
|
721 |
+
|
722 |
+
HashAggregateKernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
|
723 |
+
HashAggregateResize resize, HashAggregateConsume consume,
|
724 |
+
HashAggregateMerge merge, HashAggregateFinalize finalize,
|
725 |
+
const bool ordered)
|
726 |
+
: Kernel(std::move(sig), std::move(init)),
|
727 |
+
resize(resize),
|
728 |
+
consume(consume),
|
729 |
+
merge(merge),
|
730 |
+
finalize(finalize),
|
731 |
+
ordered(ordered) {}
|
732 |
+
|
733 |
+
HashAggregateKernel(std::vector<InputType> in_types, OutputType out_type,
|
734 |
+
KernelInit init, HashAggregateConsume consume,
|
735 |
+
HashAggregateResize resize, HashAggregateMerge merge,
|
736 |
+
HashAggregateFinalize finalize, const bool ordered)
|
737 |
+
: HashAggregateKernel(
|
738 |
+
KernelSignature::Make(std::move(in_types), std::move(out_type)),
|
739 |
+
std::move(init), resize, consume, merge, finalize, ordered) {}
|
740 |
+
|
741 |
+
HashAggregateResize resize;
|
742 |
+
HashAggregateConsume consume;
|
743 |
+
HashAggregateMerge merge;
|
744 |
+
HashAggregateFinalize finalize;
|
745 |
+
/// @brief whether the summarizer requires ordering
|
746 |
+
/// This is similar to ScalarAggregateKernel. See ScalarAggregateKernel
|
747 |
+
/// for detailed doc of this variable.
|
748 |
+
bool ordered = false;
|
749 |
+
};
|
750 |
+
|
751 |
+
} // namespace compute
|
752 |
+
} // namespace arrow
|
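The declarations above are enough to assemble a kernel by hand. The sketch below is illustrative only and is not part of the diff: it assumes the companion arrow/compute/exec.h header from this same upload for ExecSpan and ExecResult, and the exec body (forwarding its int32 input unchanged) is a hypothetical stand-in rather than any kernel Arrow itself registers.

// Illustrative sketch -- not part of the diff above.
#include "arrow/compute/kernel.h"

namespace {

// A no-op exec that reuses the first argument's buffers as the output
// (zero-copy). ExecSpan/ExecResult come from arrow/compute/exec.h.
arrow::Status IdentityExec(arrow::compute::KernelContext*,
                           const arrow::compute::ExecSpan& batch,
                           arrow::compute::ExecResult* out) {
  out->value = batch[0].array.ToArrayData();
  return arrow::Status::OK();
}

}  // namespace

// Accepts any int32 array, resolves the output type to int32, and skips data
// preallocation since the exec allocates nothing new.
arrow::compute::ScalarKernel MakeIdentityKernel() {
  using namespace arrow::compute;
  auto sig = KernelSignature::Make({InputType(arrow::Type::INT32)},
                                   OutputType(arrow::int32()));
  ScalarKernel kernel(std::move(sig), IdentityExec);
  kernel.null_handling = NullHandling::INTERSECTION;
  kernel.mem_allocation = MemAllocation::NO_PREALLOCATE;
  return kernel;
}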
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/key_hash.h
ADDED
@@ -0,0 +1,223 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#if defined(ARROW_HAVE_RUNTIME_AVX2)
#include <immintrin.h>
#endif

#include <cstdint>

#include "arrow/compute/light_array.h"
#include "arrow/compute/util.h"

namespace arrow {
namespace compute {

// Forward declarations only needed for making test functions a friend of the classes in
// this file.
//
enum class BloomFilterBuildStrategy;

// Implementations are based on xxh3 32-bit algorithm description from:
// https://github.com/Cyan4973/xxHash/blob/dev/doc/xxhash_spec.md
//
class ARROW_EXPORT Hashing32 {
  friend class TestVectorHash;
  template <typename T>
  friend void TestBloomLargeHashHelper(int64_t, int64_t, const std::vector<uint64_t>&,
                                       int64_t, int, T*);
  friend void TestBloomSmall(BloomFilterBuildStrategy, int64_t, int, bool, bool);

 public:
  static void HashMultiColumn(const std::vector<KeyColumnArray>& cols, LightContext* ctx,
                              uint32_t* out_hash);

  static Status HashBatch(const ExecBatch& key_batch, uint32_t* hashes,
                          std::vector<KeyColumnArray>& column_arrays,
                          int64_t hardware_flags, util::TempVectorStack* temp_stack,
                          int64_t start_row, int64_t num_rows);

  static void HashFixed(int64_t hardware_flags, bool combine_hashes, uint32_t num_keys,
                        uint64_t key_length, const uint8_t* keys, uint32_t* hashes,
                        uint32_t* temp_hashes_for_combine);

 private:
  static const uint32_t PRIME32_1 = 0x9E3779B1;
  static const uint32_t PRIME32_2 = 0x85EBCA77;
  static const uint32_t PRIME32_3 = 0xC2B2AE3D;
  static const uint32_t PRIME32_4 = 0x27D4EB2F;
  static const uint32_t PRIME32_5 = 0x165667B1;
  static const uint32_t kCombineConst = 0x9e3779b9UL;
  static const int64_t kStripeSize = 4 * sizeof(uint32_t);

  static void HashVarLen(int64_t hardware_flags, bool combine_hashes, uint32_t num_rows,
                         const uint32_t* offsets, const uint8_t* concatenated_keys,
                         uint32_t* hashes, uint32_t* temp_hashes_for_combine);

  static void HashVarLen(int64_t hardware_flags, bool combine_hashes, uint32_t num_rows,
                         const uint64_t* offsets, const uint8_t* concatenated_keys,
                         uint32_t* hashes, uint32_t* temp_hashes_for_combine);

  static inline uint32_t Avalanche(uint32_t acc) {
    acc ^= (acc >> 15);
    acc *= PRIME32_2;
    acc ^= (acc >> 13);
    acc *= PRIME32_3;
    acc ^= (acc >> 16);
    return acc;
  }
  static inline uint32_t Round(uint32_t acc, uint32_t input);
  static inline uint32_t CombineAccumulators(uint32_t acc1, uint32_t acc2, uint32_t acc3,
                                             uint32_t acc4);
  static inline uint32_t CombineHashesImp(uint32_t previous_hash, uint32_t hash) {
    uint32_t next_hash = previous_hash ^ (hash + kCombineConst + (previous_hash << 6) +
                                          (previous_hash >> 2));
    return next_hash;
  }
  static inline void ProcessFullStripes(uint64_t num_stripes, const uint8_t* key,
                                        uint32_t* out_acc1, uint32_t* out_acc2,
                                        uint32_t* out_acc3, uint32_t* out_acc4);
  static inline void ProcessLastStripe(uint32_t mask1, uint32_t mask2, uint32_t mask3,
                                       uint32_t mask4, const uint8_t* last_stripe,
                                       uint32_t* acc1, uint32_t* acc2, uint32_t* acc3,
                                       uint32_t* acc4);
  static inline void StripeMask(int i, uint32_t* mask1, uint32_t* mask2, uint32_t* mask3,
                                uint32_t* mask4);
  template <bool T_COMBINE_HASHES>
  static void HashFixedLenImp(uint32_t num_rows, uint64_t key_length, const uint8_t* keys,
                              uint32_t* hashes);
  template <typename T, bool T_COMBINE_HASHES>
  static void HashVarLenImp(uint32_t num_rows, const T* offsets,
                            const uint8_t* concatenated_keys, uint32_t* hashes);
  template <bool T_COMBINE_HASHES>
  static void HashBitImp(int64_t bit_offset, uint32_t num_keys, const uint8_t* keys,
                         uint32_t* hashes);
  static void HashBit(bool combine_hashes, int64_t bit_offset, uint32_t num_keys,
                      const uint8_t* keys, uint32_t* hashes);
  template <bool T_COMBINE_HASHES, typename T>
  static void HashIntImp(uint32_t num_keys, const T* keys, uint32_t* hashes);
  static void HashInt(bool combine_hashes, uint32_t num_keys, uint64_t key_length,
                      const uint8_t* keys, uint32_t* hashes);

#if defined(ARROW_HAVE_RUNTIME_AVX2)
  static inline __m256i Avalanche_avx2(__m256i hash);
  static inline __m256i CombineHashesImp_avx2(__m256i previous_hash, __m256i hash);
  template <bool T_COMBINE_HASHES>
  static void AvalancheAll_avx2(uint32_t num_rows, uint32_t* hashes,
                                const uint32_t* hashes_temp_for_combine);
  static inline __m256i Round_avx2(__m256i acc, __m256i input);
  static inline uint64_t CombineAccumulators_avx2(__m256i acc);
  static inline __m256i StripeMask_avx2(int i, int j);
  template <bool two_equal_lengths>
  static inline __m256i ProcessStripes_avx2(int64_t num_stripes_A, int64_t num_stripes_B,
                                            __m256i mask_last_stripe, const uint8_t* keys,
                                            int64_t offset_A, int64_t offset_B);
  template <bool T_COMBINE_HASHES>
  static uint32_t HashFixedLenImp_avx2(uint32_t num_rows, uint64_t key_length,
                                       const uint8_t* keys, uint32_t* hashes,
                                       uint32_t* hashes_temp_for_combine);
  static uint32_t HashFixedLen_avx2(bool combine_hashes, uint32_t num_rows,
                                    uint64_t key_length, const uint8_t* keys,
                                    uint32_t* hashes, uint32_t* hashes_temp_for_combine);
  template <typename T, bool T_COMBINE_HASHES>
  static uint32_t HashVarLenImp_avx2(uint32_t num_rows, const T* offsets,
                                     const uint8_t* concatenated_keys, uint32_t* hashes,
                                     uint32_t* hashes_temp_for_combine);
  static uint32_t HashVarLen_avx2(bool combine_hashes, uint32_t num_rows,
                                  const uint32_t* offsets,
                                  const uint8_t* concatenated_keys, uint32_t* hashes,
                                  uint32_t* hashes_temp_for_combine);
  static uint32_t HashVarLen_avx2(bool combine_hashes, uint32_t num_rows,
                                  const uint64_t* offsets,
                                  const uint8_t* concatenated_keys, uint32_t* hashes,
                                  uint32_t* hashes_temp_for_combine);
#endif
};

class ARROW_EXPORT Hashing64 {
  friend class TestVectorHash;
  template <typename T>
  friend void TestBloomLargeHashHelper(int64_t, int64_t, const std::vector<uint64_t>&,
                                       int64_t, int, T*);
  friend void TestBloomSmall(BloomFilterBuildStrategy, int64_t, int, bool, bool);

 public:
  static void HashMultiColumn(const std::vector<KeyColumnArray>& cols, LightContext* ctx,
                              uint64_t* hashes);

  static Status HashBatch(const ExecBatch& key_batch, uint64_t* hashes,
                          std::vector<KeyColumnArray>& column_arrays,
                          int64_t hardware_flags, util::TempVectorStack* temp_stack,
                          int64_t start_row, int64_t num_rows);

  static void HashFixed(bool combine_hashes, uint32_t num_keys, uint64_t key_length,
                        const uint8_t* keys, uint64_t* hashes);

 private:
  static const uint64_t PRIME64_1 = 0x9E3779B185EBCA87ULL;
  static const uint64_t PRIME64_2 = 0xC2B2AE3D27D4EB4FULL;
  static const uint64_t PRIME64_3 = 0x165667B19E3779F9ULL;
  static const uint64_t PRIME64_4 = 0x85EBCA77C2B2AE63ULL;
  static const uint64_t PRIME64_5 = 0x27D4EB2F165667C5ULL;
  static const uint32_t kCombineConst = 0x9e3779b9UL;
  static const int64_t kStripeSize = 4 * sizeof(uint64_t);

  static void HashVarLen(bool combine_hashes, uint32_t num_rows, const uint32_t* offsets,
                         const uint8_t* concatenated_keys, uint64_t* hashes);

  static void HashVarLen(bool combine_hashes, uint32_t num_rows, const uint64_t* offsets,
                         const uint8_t* concatenated_keys, uint64_t* hashes);

  static inline uint64_t Avalanche(uint64_t acc);
  static inline uint64_t Round(uint64_t acc, uint64_t input);
  static inline uint64_t CombineAccumulators(uint64_t acc1, uint64_t acc2, uint64_t acc3,
                                             uint64_t acc4);
  static inline uint64_t CombineHashesImp(uint64_t previous_hash, uint64_t hash) {
    uint64_t next_hash = previous_hash ^ (hash + kCombineConst + (previous_hash << 6) +
                                          (previous_hash >> 2));
    return next_hash;
  }
  static inline void ProcessFullStripes(uint64_t num_stripes, const uint8_t* key,
                                        uint64_t* out_acc1, uint64_t* out_acc2,
                                        uint64_t* out_acc3, uint64_t* out_acc4);
  static inline void ProcessLastStripe(uint64_t mask1, uint64_t mask2, uint64_t mask3,
                                       uint64_t mask4, const uint8_t* last_stripe,
                                       uint64_t* acc1, uint64_t* acc2, uint64_t* acc3,
                                       uint64_t* acc4);
  static inline void StripeMask(int i, uint64_t* mask1, uint64_t* mask2, uint64_t* mask3,
                                uint64_t* mask4);
  template <bool T_COMBINE_HASHES>
  static void HashFixedLenImp(uint32_t num_rows, uint64_t key_length, const uint8_t* keys,
                              uint64_t* hashes);
  template <typename T, bool T_COMBINE_HASHES>
  static void HashVarLenImp(uint32_t num_rows, const T* offsets,
                            const uint8_t* concatenated_keys, uint64_t* hashes);
  template <bool T_COMBINE_HASHES>
  static void HashBitImp(int64_t bit_offset, uint32_t num_keys, const uint8_t* keys,
                         uint64_t* hashes);
  static void HashBit(bool combine_hashes, int64_t bit_offset, uint32_t num_keys,
                      const uint8_t* keys, uint64_t* hashes);
  template <bool T_COMBINE_HASHES, typename T>
  static void HashIntImp(uint32_t num_keys, const T* keys, uint64_t* hashes);
  static void HashInt(bool combine_hashes, uint32_t num_keys, uint64_t key_length,
                      const uint8_t* keys, uint64_t* hashes);
};

}  // namespace compute
}  // namespace arrow
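A minimal usage sketch, not part of the diff: it hashes a contiguous buffer of fixed-width 8-byte keys with Hashing32::HashFixed as declared above. Passing hardware_flags = 0 to force the portable (non-SIMD) code path and passing a null temp_hashes_for_combine when combine_hashes is false are assumptions about the implementation, not documented guarantees.

// Hypothetical usage sketch -- not part of the diff above.
#include <cstdint>
#include <vector>

#include "arrow/compute/key_hash.h"

std::vector<uint32_t> HashFixedWidthRows(const std::vector<uint64_t>& rows) {
  std::vector<uint32_t> hashes(rows.size());
  arrow::compute::Hashing32::HashFixed(
      /*hardware_flags=*/0,      // assumed to select the portable code path
      /*combine_hashes=*/false,  // first (only) column: overwrite, don't combine
      static_cast<uint32_t>(rows.size()),
      /*key_length=*/sizeof(uint64_t),
      reinterpret_cast<const uint8_t*>(rows.data()), hashes.data(),
      /*temp_hashes_for_combine=*/nullptr);  // assumed unused when not combining
  return hashes;
}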
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/key_map.h
ADDED
@@ -0,0 +1,288 @@
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
#pragma once
|
19 |
+
|
20 |
+
#include <cassert>
|
21 |
+
#include <functional>
|
22 |
+
|
23 |
+
#include "arrow/compute/util.h"
|
24 |
+
#include "arrow/result.h"
|
25 |
+
#include "arrow/status.h"
|
26 |
+
#include "arrow/type_fwd.h"
|
27 |
+
|
28 |
+
namespace arrow {
|
29 |
+
namespace compute {
|
30 |
+
|
31 |
+
// SwissTable is a variant of a hash table implementation.
|
32 |
+
// This implementation is vectorized, that is: main interface methods take arrays of input
|
33 |
+
// values and output arrays of result values.
|
34 |
+
//
|
35 |
+
// A detailed explanation of this data structure (including concepts such as blocks,
|
36 |
+
// slots, stamps) and operations provided by this class is given in the document:
|
37 |
+
// arrow/compute/exec/doc/key_map.md.
|
38 |
+
//
|
39 |
+
class ARROW_EXPORT SwissTable {
|
40 |
+
friend class SwissTableMerge;
|
41 |
+
|
42 |
+
public:
|
43 |
+
SwissTable() = default;
|
44 |
+
~SwissTable() { cleanup(); }
|
45 |
+
|
46 |
+
using EqualImpl =
|
47 |
+
std::function<void(int num_keys, const uint16_t* selection /* may be null */,
|
48 |
+
const uint32_t* group_ids, uint32_t* out_num_keys_mismatch,
|
49 |
+
uint16_t* out_selection_mismatch, void* callback_ctx)>;
|
50 |
+
using AppendImpl =
|
51 |
+
std::function<Status(int num_keys, const uint16_t* selection, void* callback_ctx)>;
|
52 |
+
|
53 |
+
Status init(int64_t hardware_flags, MemoryPool* pool, int log_blocks = 0,
|
54 |
+
bool no_hash_array = false);
|
55 |
+
|
56 |
+
void cleanup();
|
57 |
+
|
58 |
+
void early_filter(const int num_keys, const uint32_t* hashes,
|
59 |
+
uint8_t* out_match_bitvector, uint8_t* out_local_slots) const;
|
60 |
+
|
61 |
+
void find(const int num_keys, const uint32_t* hashes, uint8_t* inout_match_bitvector,
|
62 |
+
const uint8_t* local_slots, uint32_t* out_group_ids,
|
63 |
+
util::TempVectorStack* temp_stack, const EqualImpl& equal_impl,
|
64 |
+
void* callback_ctx) const;
|
65 |
+
|
66 |
+
Status map_new_keys(uint32_t num_ids, uint16_t* ids, const uint32_t* hashes,
|
67 |
+
uint32_t* group_ids, util::TempVectorStack* temp_stack,
|
68 |
+
const EqualImpl& equal_impl, const AppendImpl& append_impl,
|
69 |
+
void* callback_ctx);
|
70 |
+
|
71 |
+
int minibatch_size() const { return 1 << log_minibatch_; }
|
72 |
+
|
73 |
+
uint32_t num_inserted() const { return num_inserted_; }
|
74 |
+
|
75 |
+
int64_t hardware_flags() const { return hardware_flags_; }
|
76 |
+
|
77 |
+
MemoryPool* pool() const { return pool_; }
|
78 |
+
|
79 |
+
int log_blocks() const { return log_blocks_; }
|
80 |
+
|
81 |
+
void num_inserted(uint32_t i) { num_inserted_ = i; }
|
82 |
+
|
83 |
+
uint8_t* blocks() const { return blocks_->mutable_data(); }
|
84 |
+
|
85 |
+
uint32_t* hashes() const {
|
86 |
+
return reinterpret_cast<uint32_t*>(hashes_->mutable_data());
|
87 |
+
}
|
88 |
+
|
89 |
+
/// \brief Extract group id for a given slot in a given block.
|
90 |
+
///
|
91 |
+
inline uint64_t extract_group_id(const uint8_t* block_ptr, int slot,
|
92 |
+
uint64_t group_id_mask) const;
|
93 |
+
|
94 |
+
inline void insert_into_empty_slot(uint32_t slot_id, uint32_t hash, uint32_t group_id);
|
95 |
+
|
96 |
+
static int num_groupid_bits_from_log_blocks(int log_blocks) {
|
97 |
+
int required_bits = log_blocks + 3;
|
98 |
+
return required_bits <= 8 ? 8
|
99 |
+
: required_bits <= 16 ? 16
|
100 |
+
: required_bits <= 32 ? 32
|
101 |
+
: 64;
|
102 |
+
}
|
103 |
+
|
104 |
+
// Use 32-bit hash for now
|
105 |
+
static constexpr int bits_hash_ = 32;
|
106 |
+
|
107 |
+
private:
|
108 |
+
// Lookup helpers
|
109 |
+
|
110 |
+
/// \brief Scan bytes in block in reverse and stop as soon
|
111 |
+
/// as a position of interest is found.
|
112 |
+
///
|
113 |
+
/// Positions of interest:
|
114 |
+
/// a) slot with a matching stamp is encountered,
|
115 |
+
/// b) first empty slot is encountered,
|
116 |
+
/// c) we reach the end of the block.
|
117 |
+
///
|
118 |
+
/// Optionally an index of the first slot to start the search from can be specified.
|
119 |
+
/// In this case slots before it will be ignored.
|
120 |
+
///
|
121 |
+
/// \param[in] block 8 byte block of hash table
|
122 |
+
/// \param[in] stamp 7 bits of hash used as a stamp
|
123 |
+
/// \param[in] start_slot Index of the first slot in the block to start search from. We
|
124 |
+
/// assume that this index always points to a non-empty slot, equivalently
|
125 |
+
/// that it comes before any empty slots. (Used only by one template
|
126 |
+
/// variant.)
|
127 |
+
/// \param[out] out_slot index corresponding to the discovered position of interest (8
|
128 |
+
/// represents end of block).
|
129 |
+
/// \param[out] out_match_found an integer flag (0 or 1) indicating if we reached an
|
130 |
+
/// empty slot (0) or not (1). Therefore 1 can mean that either actual match was found
|
131 |
+
/// (case a) above) or we reached the end of full block (case b) above).
|
132 |
+
///
|
133 |
+
template <bool use_start_slot>
|
134 |
+
inline void search_block(uint64_t block, int stamp, int start_slot, int* out_slot,
|
135 |
+
int* out_match_found) const;
|
136 |
+
|
137 |
+
void extract_group_ids(const int num_keys, const uint16_t* optional_selection,
|
138 |
+
const uint32_t* hashes, const uint8_t* local_slots,
|
139 |
+
uint32_t* out_group_ids) const;
|
140 |
+
|
141 |
+
template <typename T, bool use_selection>
|
142 |
+
void extract_group_ids_imp(const int num_keys, const uint16_t* selection,
|
143 |
+
const uint32_t* hashes, const uint8_t* local_slots,
|
144 |
+
uint32_t* out_group_ids, int elements_offset,
|
145 |
+
int element_multiplier) const;
|
146 |
+
|
147 |
+
inline uint64_t next_slot_to_visit(uint64_t block_index, int slot,
|
148 |
+
int match_found) const;
|
149 |
+
|
150 |
+
inline uint64_t num_groups_for_resize() const;
|
151 |
+
|
152 |
+
inline uint64_t wrap_global_slot_id(uint64_t global_slot_id) const;
|
153 |
+
|
154 |
+
void init_slot_ids(const int num_keys, const uint16_t* selection,
|
155 |
+
const uint32_t* hashes, const uint8_t* local_slots,
|
156 |
+
const uint8_t* match_bitvector, uint32_t* out_slot_ids) const;
|
157 |
+
|
158 |
+
void init_slot_ids_for_new_keys(uint32_t num_ids, const uint16_t* ids,
|
159 |
+
const uint32_t* hashes, uint32_t* slot_ids) const;
|
160 |
+
|
161 |
+
// Quickly filter out keys that have no matches based only on hash value and the
|
162 |
+
// corresponding starting 64-bit block of slot status bytes. May return false positives.
|
163 |
+
//
|
164 |
+
void early_filter_imp(const int num_keys, const uint32_t* hashes,
|
165 |
+
uint8_t* out_match_bitvector, uint8_t* out_local_slots) const;
|
166 |
+
#if defined(ARROW_HAVE_RUNTIME_AVX2) && defined(ARROW_HAVE_RUNTIME_BMI2)
+  // The functions below use BMI2 instructions, be careful before calling!
+  int early_filter_imp_avx2_x8(const int num_hashes, const uint32_t* hashes,
+                               uint8_t* out_match_bitvector,
+                               uint8_t* out_local_slots) const;
+  int early_filter_imp_avx2_x32(const int num_hashes, const uint32_t* hashes,
+                                uint8_t* out_match_bitvector,
+                                uint8_t* out_local_slots) const;
+  int extract_group_ids_avx2(const int num_keys, const uint32_t* hashes,
+                             const uint8_t* local_slots, uint32_t* out_group_ids,
+                             int byte_offset, int byte_multiplier, int byte_size) const;
+#endif
+
+  void run_comparisons(const int num_keys, const uint16_t* optional_selection_ids,
+                       const uint8_t* optional_selection_bitvector,
+                       const uint32_t* groupids, int* out_num_not_equal,
+                       uint16_t* out_not_equal_selection, const EqualImpl& equal_impl,
+                       void* callback_ctx) const;
+
+  inline bool find_next_stamp_match(const uint32_t hash, const uint32_t in_slot_id,
+                                    uint32_t* out_slot_id, uint32_t* out_group_id) const;
+
+  // Slow processing of input keys in the most generic case.
+  // Handles inserting new keys.
+  // Preexisting keys will be handled correctly, although the intended use is for this
+  // call to follow a call to find() method, which would only pass on new keys that were
+  // not present in the hash table.
+  //
+  Status map_new_keys_helper(const uint32_t* hashes, uint32_t* inout_num_selected,
+                             uint16_t* inout_selection, bool* out_need_resize,
+                             uint32_t* out_group_ids, uint32_t* out_next_slot_ids,
+                             util::TempVectorStack* temp_stack,
+                             const EqualImpl& equal_impl, const AppendImpl& append_impl,
+                             void* callback_ctx);
+
+  // Resize small hash tables when 50% full (up to 8KB).
+  // Resize large hash tables when 75% full.
+  Status grow_double();
+
+  // Number of hash bits stored in slots in a block.
+  // The highest bits of hash determine block id.
+  // The next set of highest bits is a "stamp" stored in a slot in a block.
+  static constexpr int bits_stamp_ = 7;
+
+  // Padding bytes added at the end of buffers for ease of SIMD access
+  static constexpr int padding_ = 64;
+
+  int log_minibatch_;
+  // Base 2 log of the number of blocks
+  int log_blocks_ = 0;
+  // Number of keys inserted into hash table
+  uint32_t num_inserted_ = 0;
+
+  // Data for blocks.
+  // Each block has 8 status bytes for 8 slots, followed by 8 bit packed group ids for
+  // these slots. In 8B status word, the order of bytes is reversed. Group ids are in
+  // normal order. There is 64B padding at the end.
+  //
+  // 0 byte - 7 bucket | 1. byte - 6 bucket | ...
+  // ---------------------------------------------------
+  // | Empty bit* | Empty bit |
+  // ---------------------------------------------------
+  // | 7-bit hash | 7-bit hash |
+  // ---------------------------------------------------
+  // * Empty bucket has value 0x80. Non-empty bucket has highest bit set to 0.
+  //
+  std::shared_ptr<Buffer> blocks_;
+
+  // Array of hashes of values inserted into slots.
+  // Undefined if the corresponding slot is empty.
+  // There is 64B padding at the end.
+  std::shared_ptr<Buffer> hashes_;
+
+  int64_t hardware_flags_;
+  MemoryPool* pool_;
+};
+
+uint64_t SwissTable::extract_group_id(const uint8_t* block_ptr, int slot,
+                                      uint64_t group_id_mask) const {
+  // Group id values for all 8 slots in the block are bit-packed and follow the status
+  // bytes. We assume here that the number of bits is rounded up to 8, 16, 32 or 64. In
+  // that case we can extract group id using aligned 64-bit word access.
+  int num_group_id_bits = static_cast<int>(ARROW_POPCOUNT64(group_id_mask));
+  assert(num_group_id_bits == 8 || num_group_id_bits == 16 || num_group_id_bits == 32 ||
+         num_group_id_bits == 64);
+
+  int bit_offset = slot * num_group_id_bits;
+  const uint64_t* group_id_bytes =
+      reinterpret_cast<const uint64_t*>(block_ptr) + 1 + (bit_offset >> 6);
+  uint64_t group_id = (*group_id_bytes >> (bit_offset & 63)) & group_id_mask;
+
+  return group_id;
+}
+
+void SwissTable::insert_into_empty_slot(uint32_t slot_id, uint32_t hash,
+                                        uint32_t group_id) {
+  const uint64_t num_groupid_bits = num_groupid_bits_from_log_blocks(log_blocks_);
+
+  // We assume here that the number of bits is rounded up to 8, 16, 32 or 64.
+  // In that case we can insert group id value using aligned 64-bit word access.
+  assert(num_groupid_bits == 8 || num_groupid_bits == 16 || num_groupid_bits == 32 ||
+         num_groupid_bits == 64);
+
+  const uint64_t num_block_bytes = (8 + num_groupid_bits);
+  constexpr uint64_t stamp_mask = 0x7f;
+
+  int start_slot = (slot_id & 7);
+  int stamp =
+      static_cast<int>((hash >> (bits_hash_ - log_blocks_ - bits_stamp_)) & stamp_mask);
+  uint64_t block_id = slot_id >> 3;
+  uint8_t* blockbase = blocks_->mutable_data() + num_block_bytes * block_id;
+
+  blockbase[7 - start_slot] = static_cast<uint8_t>(stamp);
+  int groupid_bit_offset = static_cast<int>(start_slot * num_groupid_bits);
+
+  // Block status bytes should start at an address aligned to 8 bytes
+  assert((reinterpret_cast<uint64_t>(blockbase) & 7) == 0);
+  uint64_t* ptr = reinterpret_cast<uint64_t*>(blockbase) + 1 + (groupid_bit_offset >> 6);
+  *ptr |= (static_cast<uint64_t>(group_id) << (groupid_bit_offset & 63));
+}
+
+}  // namespace compute
+}  // namespace arrow
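Editor's note: the block below is not part of the diff. It is a minimal standalone sketch of the bit-packing arithmetic that extract_group_id and insert_into_empty_slot above rely on: group ids for the 8 slots of a block are packed into 64-bit words that directly follow the 8-byte status word. The helper names (pack_group_id, unpack_group_id) are hypothetical and only for illustration.

#include <cassert>
#include <cstdint>
#include <iostream>

// Pack `group_id` for `slot` (0..7) into the words following the 8-byte status word.
void pack_group_id(uint64_t* block, int slot, int num_bits, uint64_t group_id) {
  assert(num_bits == 8 || num_bits == 16 || num_bits == 32 || num_bits == 64);
  int bit_offset = slot * num_bits;
  // block[0] is the status word; packed group ids start at block[1].
  block[1 + (bit_offset >> 6)] |= group_id << (bit_offset & 63);
}

uint64_t unpack_group_id(const uint64_t* block, int slot, int num_bits) {
  uint64_t mask = (num_bits == 64) ? ~uint64_t{0} : ((uint64_t{1} << num_bits) - 1);
  int bit_offset = slot * num_bits;
  return (block[1 + (bit_offset >> 6)] >> (bit_offset & 63)) & mask;
}

int main() {
  uint64_t block[1 + 8] = {};  // status word + room for 8 x 64-bit group ids
  pack_group_id(block, /*slot=*/5, /*num_bits=*/16, /*group_id=*/42);
  std::cout << unpack_group_id(block, /*slot=*/5, /*num_bits=*/16) << std::endl;  // 42
  return 0;
}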
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/light_array.h
ADDED
@@ -0,0 +1,451 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+
+#include "arrow/array.h"
+#include "arrow/compute/exec.h"
+#include "arrow/compute/util.h"
+#include "arrow/type.h"
+#include "arrow/util/cpu_info.h"
+#include "arrow/util/logging.h"
+
+/// This file contains lightweight containers for Arrow buffers. These containers
+/// make compromises in terms of strong ownership and the range of data types supported
+/// in order to gain performance and reduced overhead.
+
+namespace arrow {
+namespace compute {
+
+/// \brief Context needed by various execution engine operations
+///
+/// In the execution engine this context is provided by either the node or the
+/// plan and the context exists for the lifetime of the plan. Defining this here
+/// allows us to take advantage of these resources without coupling the logic with
+/// the execution engine.
+struct LightContext {
+  bool has_avx2() const { return (hardware_flags & arrow::internal::CpuInfo::AVX2) > 0; }
+  int64_t hardware_flags;
+  util::TempVectorStack* stack;
+};
+
+/// \brief Description of the layout of a "key" column
+///
+/// A "key" column is a non-nested, non-union column.
+/// Every key column has either 0 (null), 2 (e.g. int32) or 3 (e.g. string) buffers
+/// and no children.
+///
+/// This metadata object is a zero-allocation analogue of arrow::DataType
+struct ARROW_EXPORT KeyColumnMetadata {
+  KeyColumnMetadata() = default;
+  KeyColumnMetadata(bool is_fixed_length_in, uint32_t fixed_length_in,
+                    bool is_null_type_in = false)
+      : is_fixed_length(is_fixed_length_in),
+        is_null_type(is_null_type_in),
+        fixed_length(fixed_length_in) {}
+  /// \brief True if the column is not a varying-length binary type
+  ///
+  /// If this is true the column will have a validity buffer and
+  /// a data buffer and the third buffer will be unused.
+  bool is_fixed_length;
+  /// \brief True if this column is the null type
+  bool is_null_type;
+  /// \brief The number of bytes for each item
+  ///
+  /// Zero has a special meaning, indicating a bit vector with one bit per value if it
+  /// isn't a null type column.
+  ///
+  /// For a varying-length binary column this represents the number of bytes per offset.
+  uint32_t fixed_length;
+};
+
+/// \brief A lightweight view into a "key" array
+///
+/// A "key" column is a non-nested, non-union column \see KeyColumnMetadata
+///
+/// This metadata object is a zero-allocation analogue of arrow::ArrayData
+class ARROW_EXPORT KeyColumnArray {
+ public:
+  /// \brief Create an uninitialized KeyColumnArray
+  KeyColumnArray() = default;
+  /// \brief Create a read-only view from buffers
+  ///
+  /// This is a view only and does not take ownership of the buffers. The lifetime
+  /// of the buffers must exceed the lifetime of this view
+  KeyColumnArray(const KeyColumnMetadata& metadata, int64_t length,
+                 const uint8_t* validity_buffer, const uint8_t* fixed_length_buffer,
+                 const uint8_t* var_length_buffer, int bit_offset_validity = 0,
+                 int bit_offset_fixed = 0);
+  /// \brief Create a mutable view from buffers
+  ///
+  /// This is a view only and does not take ownership of the buffers. The lifetime
+  /// of the buffers must exceed the lifetime of this view
+  KeyColumnArray(const KeyColumnMetadata& metadata, int64_t length,
+                 uint8_t* validity_buffer, uint8_t* fixed_length_buffer,
+                 uint8_t* var_length_buffer, int bit_offset_validity = 0,
+                 int bit_offset_fixed = 0);
+  /// \brief Create a sliced view of `this`
+  ///
+  /// The number of rows used in offset must be divisible by 8
+  /// in order to not split bit vectors within a single byte.
+  KeyColumnArray Slice(int64_t offset, int64_t length) const;
+  /// \brief Create a copy of `this` with a buffer from `other`
+  ///
+  /// The copy will be identical to `this` except the buffer at buffer_id_to_replace
+  /// will be replaced by the corresponding buffer in `other`.
+  KeyColumnArray WithBufferFrom(const KeyColumnArray& other,
+                                int buffer_id_to_replace) const;
+
+  /// \brief Create a copy of `this` with new metadata
+  KeyColumnArray WithMetadata(const KeyColumnMetadata& metadata) const;
+
+  // Constants used for accessing buffers using data() and mutable_data().
+  static constexpr int kValidityBuffer = 0;
+  static constexpr int kFixedLengthBuffer = 1;
+  static constexpr int kVariableLengthBuffer = 2;
+
+  /// \brief Return one of the underlying mutable buffers
+  uint8_t* mutable_data(int i) {
+    ARROW_DCHECK(i >= 0 && i < kMaxBuffers);
+    return mutable_buffers_[i];
+  }
+  /// \brief Return one of the underlying read-only buffers
+  const uint8_t* data(int i) const {
+    ARROW_DCHECK(i >= 0 && i < kMaxBuffers);
+    return buffers_[i];
+  }
+  /// \brief Return a mutable version of the offsets buffer
+  ///
+  /// Only valid if this is a view into a varbinary type
+  uint32_t* mutable_offsets() {
+    DCHECK(!metadata_.is_fixed_length);
+    DCHECK_EQ(metadata_.fixed_length, sizeof(uint32_t));
+    return reinterpret_cast<uint32_t*>(mutable_data(kFixedLengthBuffer));
+  }
+  /// \brief Return a read-only version of the offsets buffer
+  ///
+  /// Only valid if this is a view into a varbinary type
+  const uint32_t* offsets() const {
+    DCHECK(!metadata_.is_fixed_length);
+    DCHECK_EQ(metadata_.fixed_length, sizeof(uint32_t));
+    return reinterpret_cast<const uint32_t*>(data(kFixedLengthBuffer));
+  }
+  /// \brief Return a mutable version of the large-offsets buffer
+  ///
+  /// Only valid if this is a view into a large varbinary type
+  uint64_t* mutable_large_offsets() {
+    DCHECK(!metadata_.is_fixed_length);
+    DCHECK_EQ(metadata_.fixed_length, sizeof(uint64_t));
+    return reinterpret_cast<uint64_t*>(mutable_data(kFixedLengthBuffer));
+  }
+  /// \brief Return a read-only version of the large-offsets buffer
+  ///
+  /// Only valid if this is a view into a large varbinary type
+  const uint64_t* large_offsets() const {
+    DCHECK(!metadata_.is_fixed_length);
+    DCHECK_EQ(metadata_.fixed_length, sizeof(uint64_t));
+    return reinterpret_cast<const uint64_t*>(data(kFixedLengthBuffer));
+  }
+  /// \brief Return the type metadata
+  const KeyColumnMetadata& metadata() const { return metadata_; }
+  /// \brief Return the length (in rows) of the array
+  int64_t length() const { return length_; }
+  /// \brief Return the bit offset into the corresponding vector
+  ///
+  /// if i == 1 then this must be a bool array
+  int bit_offset(int i) const {
+    ARROW_DCHECK(i >= 0 && i < kMaxBuffers);
+    return bit_offset_[i];
+  }
+
+ private:
+  static constexpr int kMaxBuffers = 3;
+  const uint8_t* buffers_[kMaxBuffers];
+  uint8_t* mutable_buffers_[kMaxBuffers];
+  KeyColumnMetadata metadata_;
+  int64_t length_;
+  // Starting bit offset within the first byte (between 0 and 7)
+  // to be used when accessing buffers that store bit vectors.
+  int bit_offset_[kMaxBuffers - 1];
+
+  bool is_bool_type() const {
+    return metadata_.is_fixed_length && metadata_.fixed_length == 0 &&
+           !metadata_.is_null_type;
+  }
+
+  bool is_fixed_width_types() const {
+    return metadata_.is_fixed_length && metadata_.fixed_length != 0 &&
+           !metadata_.is_null_type;
+  }
+
+  bool is_binary_type() const {
+    return !metadata_.is_fixed_length && metadata_.fixed_length == sizeof(uint32_t) &&
+           !metadata_.is_null_type;
+  }
+
+  bool is_large_binary_type() const {
+    return !metadata_.is_fixed_length && metadata_.fixed_length == sizeof(uint64_t) &&
+           !metadata_.is_null_type;
+  }
+
+  bool is_null_type() const {
+    return metadata_.is_fixed_length && metadata_.fixed_length == 0 &&
+           metadata_.is_null_type;
+  }
+};
+
+/// \brief Create KeyColumnMetadata from a DataType
+///
+/// If `type` is a dictionary type then this will return the KeyColumnMetadata for
+/// the indices type
+///
+/// This should only be called on "key" columns. Calling this with
+/// a non-key column will return Status::TypeError.
+ARROW_EXPORT Result<KeyColumnMetadata> ColumnMetadataFromDataType(
+    const std::shared_ptr<DataType>& type);
+
+/// \brief Create KeyColumnArray from ArrayData
+///
+/// If `type` is a dictionary type then this will return the KeyColumnArray for
+/// the indices array
+///
+/// The caller should ensure this is only called on "key" columns.
+/// \see ColumnMetadataFromDataType for details
+ARROW_EXPORT Result<KeyColumnArray> ColumnArrayFromArrayData(
+    const std::shared_ptr<ArrayData>& array_data, int64_t start_row, int64_t num_rows);
+
+/// \brief Create KeyColumnArray from ArrayData and KeyColumnMetadata
+///
+/// If `type` is a dictionary type then this will return the KeyColumnArray for
+/// the indices array
+///
+/// The caller should ensure this is only called on "key" columns.
+/// \see ColumnMetadataFromDataType for details
+ARROW_EXPORT KeyColumnArray ColumnArrayFromArrayDataAndMetadata(
+    const std::shared_ptr<ArrayData>& array_data, const KeyColumnMetadata& metadata,
+    int64_t start_row, int64_t num_rows);
+
+/// \brief Create KeyColumnMetadata instances from an ExecBatch
+///
+/// column_metadatas will be resized to fit
+///
+/// All columns in `batch` must be eligible "key" columns and have an array shape
+/// \see ColumnMetadataFromDataType for more details
+ARROW_EXPORT Status ColumnMetadatasFromExecBatch(
+    const ExecBatch& batch, std::vector<KeyColumnMetadata>* column_metadatas);
+
+/// \brief Create KeyColumnArray instances from a slice of an ExecBatch
+///
+/// column_arrays will be resized to fit
+///
+/// All columns in `batch` must be eligible "key" columns and have an array shape
+/// \see ColumnArrayFromArrayData for more details
+ARROW_EXPORT Status ColumnArraysFromExecBatch(const ExecBatch& batch, int64_t start_row,
+                                              int64_t num_rows,
+                                              std::vector<KeyColumnArray>* column_arrays);
+
+/// \brief Create KeyColumnArray instances from an ExecBatch
+///
+/// column_arrays will be resized to fit
+///
+/// All columns in `batch` must be eligible "key" columns and have an array shape
+/// \see ColumnArrayFromArrayData for more details
+ARROW_EXPORT Status ColumnArraysFromExecBatch(const ExecBatch& batch,
+                                              std::vector<KeyColumnArray>* column_arrays);
+
+/// A lightweight resizable array for "key" columns
+///
+/// Unlike KeyColumnArray this instance owns its buffers
+///
+/// Resizing is handled by arrow::ResizableBuffer and a doubling approach is
+/// used so that resizes will always grow up to the next power of 2
+class ARROW_EXPORT ResizableArrayData {
+ public:
+  /// \brief Create an uninitialized instance
+  ///
+  /// Init must be called before calling any other operations
+  ResizableArrayData()
+      : log_num_rows_min_(0),
+        pool_(NULLPTR),
+        num_rows_(0),
+        num_rows_allocated_(0),
+        var_len_buf_size_(0) {}
+
+  ~ResizableArrayData() { Clear(true); }
+
+  /// \brief Initialize the array
+  /// \param data_type The data type this array is holding data for.
+  /// \param pool The pool to make allocations on
+  /// \param log_num_rows_min All resize operations will allocate at least enough
+  ///                         space for (1 << log_num_rows_min) rows
+  void Init(const std::shared_ptr<DataType>& data_type, MemoryPool* pool,
+            int log_num_rows_min);
+
+  /// \brief Resets the array back to an empty state
+  /// \param release_buffers If true then allocated memory is released and the
+  ///                        next resize operation will have to reallocate memory
+  void Clear(bool release_buffers);
+
+  /// \brief Resize the fixed length buffers
+  ///
+  /// The buffers will be resized to hold at least `num_rows_new` rows of data
+  Status ResizeFixedLengthBuffers(int num_rows_new);
+
+  /// \brief Resize the varying length buffer if this array is a variable binary type
+  ///
+  /// This must be called after offsets have been populated and the buffer will be
+  /// resized to hold at least as much data as the offsets require
+  ///
+  /// Does nothing if the array is not a variable binary type
+  Status ResizeVaryingLengthBuffer();
+
+  /// \brief The current length (in rows) of the array
+  int num_rows() const { return num_rows_; }
+
+  /// \brief A non-owning view into this array
+  KeyColumnArray column_array() const;
+
+  /// \brief A lightweight descriptor of the data held by this array
+  Result<KeyColumnMetadata> column_metadata() const {
+    return ColumnMetadataFromDataType(data_type_);
+  }
+
+  /// \brief Convert the data to an arrow::ArrayData
+  ///
+  /// This is a zero copy operation and the created ArrayData will reference the
+  /// buffers held by this instance.
+  std::shared_ptr<ArrayData> array_data() const;
+
+  // Constants used for accessing buffers using mutable_data().
+  static constexpr int kValidityBuffer = 0;
+  static constexpr int kFixedLengthBuffer = 1;
+  static constexpr int kVariableLengthBuffer = 2;
+
+  /// \brief A raw pointer to the requested buffer
+  ///
+  /// If i is 0 (kValidityBuffer) then this returns the validity buffer
+  /// If i is 1 (kFixedLengthBuffer) then this returns the buffer used for values (if this
+  /// is a fixed length data type) or offsets (if this is a variable binary type)
+  /// If i is 2 (kVariableLengthBuffer) then this returns the buffer used for variable
+  /// length binary data
+  uint8_t* mutable_data(int i) { return buffers_[i]->mutable_data(); }
+
+ private:
+  static constexpr int64_t kNumPaddingBytes = 64;
+  int log_num_rows_min_;
+  std::shared_ptr<DataType> data_type_;
+  MemoryPool* pool_;
+  int num_rows_;
+  int num_rows_allocated_;
+  int64_t var_len_buf_size_;
+  static constexpr int kMaxBuffers = 3;
+  std::shared_ptr<ResizableBuffer> buffers_[kMaxBuffers];
+};
+
+/// \brief A builder to concatenate batches of data into a larger batch
+///
+/// Will only store num_rows_max() rows
+class ARROW_EXPORT ExecBatchBuilder {
+ public:
+  /// \brief Add rows from `source` into `target` column
+  ///
+  /// If `target` is uninitialized or cleared it will be initialized to use
+  /// the given pool.
+  static Status AppendSelected(const std::shared_ptr<ArrayData>& source,
+                               ResizableArrayData* target, int num_rows_to_append,
+                               const uint16_t* row_ids, MemoryPool* pool);
+
+  /// \brief Add nulls into `target` column
+  ///
+  /// If `target` is uninitialized or cleared it will be initialized to use
+  /// the given pool.
+  static Status AppendNulls(const std::shared_ptr<DataType>& type,
+                            ResizableArrayData& target, int num_rows_to_append,
+                            MemoryPool* pool);
+
+  /// \brief Add selected rows from `batch`
+  ///
+  /// If `col_ids` is null then `num_cols` should be less than batch.num_values() and
+  /// the first `num_cols` columns of batch will be appended.
+  ///
+  /// All columns in `batch` must have array shape
+  Status AppendSelected(MemoryPool* pool, const ExecBatch& batch, int num_rows_to_append,
+                        const uint16_t* row_ids, int num_cols,
+                        const int* col_ids = NULLPTR);
+
+  /// \brief Add all-null rows
+  Status AppendNulls(MemoryPool* pool,
+                     const std::vector<std::shared_ptr<DataType>>& types,
+                     int num_rows_to_append);
+
+  /// \brief Create an ExecBatch with the data that has been appended so far
+  /// and clear this builder to be used again
+  ///
+  /// Should only be called if num_rows() returns non-zero.
+  ExecBatch Flush();
+
+  int num_rows() const { return values_.empty() ? 0 : values_[0].num_rows(); }
+
+  static int num_rows_max() { return 1 << kLogNumRows; }
+
+ private:
+  static constexpr int kLogNumRows = 15;
+
+  // Calculate how many rows to skip from the tail of the
+  // sequence of selected rows, such that the total size of skipped rows is at
+  // least equal to the size specified by the caller.
+  //
+  // Skipping of the tail rows
+  // is used to allow for faster processing by the caller of remaining rows
+  // without checking buffer bounds (useful with SIMD or fixed size memory loads
+  // and stores).
+  //
+  // The sequence of row_ids provided must be non-decreasing. In case of consecutive rows
+  // with the same row id, they are skipped all at once because they occupy the same
+  // space.
+  //
+  static int NumRowsToSkip(const std::shared_ptr<ArrayData>& column, int num_rows,
+                           const uint16_t* row_ids, int num_tail_bytes_to_skip);
+
+  // The supplied lambda will be called for each row in the given list of rows.
+  // The arguments given to it will be:
+  // - index of a row (within the set of selected rows),
+  // - pointer to the value,
+  // - byte length of the value.
+  //
+  // The information about nulls (validity bitmap) is not used in this call and
+  // has to be processed separately.
+  //
+  template <class PROCESS_VALUE_FN>
+  static void Visit(const std::shared_ptr<ArrayData>& column, int num_rows,
+                    const uint16_t* row_ids, PROCESS_VALUE_FN process_value_fn);
+
+  template <bool OUTPUT_BYTE_ALIGNED>
+  static void CollectBitsImp(const uint8_t* input_bits, int64_t input_bits_offset,
+                             uint8_t* output_bits, int64_t output_bits_offset,
+                             int num_rows, const uint16_t* row_ids);
+  static void CollectBits(const uint8_t* input_bits, int64_t input_bits_offset,
+                          uint8_t* output_bits, int64_t output_bits_offset, int num_rows,
+                          const uint16_t* row_ids);
+
+  std::vector<ResizableArrayData> values_;
+};
+
+}  // namespace compute
+}  // namespace arrow
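Editor's note: the block below is not part of the diff. It is a minimal usage sketch based only on the declarations in light_array.h above, assuming an Arrow build is available; the specific values and variable names are illustrative.

#include <iostream>

#include "arrow/array.h"
#include "arrow/builder.h"
#include "arrow/compute/light_array.h"

int main() {
  using arrow::compute::ColumnArrayFromArrayData;
  using arrow::compute::ColumnMetadataFromDataType;

  // Build a small int32 array; its ArrayData is what the helpers below consume.
  std::shared_ptr<arrow::Array> arr;
  arrow::Int32Builder builder;
  (void)builder.AppendValues({1, 2, 3, 4});
  (void)builder.Finish(&arr);

  // Zero-allocation descriptor of the column layout (fixed length, 4 bytes per value).
  auto metadata = ColumnMetadataFromDataType(arrow::int32()).ValueOrDie();
  std::cout << "fixed_length=" << metadata.fixed_length << std::endl;

  // Non-owning view over rows [0, 4) of the array's buffers.
  auto view =
      ColumnArrayFromArrayData(arr->data(), /*start_row=*/0, /*num_rows=*/4).ValueOrDie();
  std::cout << "rows=" << view.length() << std::endl;
  return 0;
}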
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/ordering.h
ADDED
@@ -0,0 +1,120 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "arrow/type.h"
+#include "arrow/util/compare.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace compute {
+
+enum class SortOrder {
+  /// Arrange values in increasing order
+  Ascending,
+  /// Arrange values in decreasing order
+  Descending,
+};
+
+enum class NullPlacement {
+  /// Place nulls and NaNs before any non-null values.
+  /// NaNs will come after nulls.
+  AtStart,
+  /// Place nulls and NaNs after any non-null values.
+  /// NaNs will come before nulls.
+  AtEnd,
+};
+
+/// \brief One sort key for PartitionNthIndices (TODO) and SortIndices
+class ARROW_EXPORT SortKey : public util::EqualityComparable<SortKey> {
+ public:
+  explicit SortKey(FieldRef target, SortOrder order = SortOrder::Ascending)
+      : target(std::move(target)), order(order) {}
+
+  bool Equals(const SortKey& other) const;
+  std::string ToString() const;
+
+  /// A FieldRef targeting the sort column.
+  FieldRef target;
+  /// How to order by this sort key.
+  SortOrder order;
+};
+
+class ARROW_EXPORT Ordering : public util::EqualityComparable<Ordering> {
+ public:
+  Ordering(std::vector<SortKey> sort_keys,
+           NullPlacement null_placement = NullPlacement::AtStart)
+      : sort_keys_(std::move(sort_keys)), null_placement_(null_placement) {}
+  /// true if data ordered by other is also ordered by this
+  ///
+  /// For example, if data is ordered by [a, b, c] then it is also ordered
+  /// by [a, b] but not by [b, c] or [a, b, c, d].
+  ///
+  /// [a, b].IsSuborderOf([a, b, c]) - true
+  /// [a, b, c].IsSuborderOf([a, b, c]) - true
+  /// [b, c].IsSuborderOf([a, b, c]) - false
+  /// [a, b, c, d].IsSuborderOf([a, b, c]) - false
+  ///
+  /// The implicit ordering is not a suborder of any other ordering and
+  /// no other ordering is a suborder of it. The implicit ordering is not a
+  /// suborder of itself.
+  ///
+  /// The unordered ordering is a suborder of all other orderings but no
+  /// other ordering is a suborder of it. The unordered ordering is a suborder
+  /// of itself.
+  ///
+  /// The unordered ordering is a suborder of the implicit ordering.
+  bool IsSuborderOf(const Ordering& other) const;
+
+  bool Equals(const Ordering& other) const;
+  std::string ToString() const;
+
+  bool is_implicit() const { return is_implicit_; }
+  bool is_unordered() const { return !is_implicit_ && sort_keys_.empty(); }
+
+  const std::vector<SortKey>& sort_keys() const { return sort_keys_; }
+  NullPlacement null_placement() const { return null_placement_; }
+
+  static const Ordering& Implicit() {
+    static const Ordering kImplicit(true);
+    return kImplicit;
+  }
+
+  static const Ordering& Unordered() {
+    static const Ordering kUnordered(false);
+    // It is also possible to get an unordered ordering by passing in an empty vector
+    // using the normal constructor. This is ok and useful when ordering comes from user
+    // input.
+    return kUnordered;
+  }
+
+ private:
+  explicit Ordering(bool is_implicit)
+      : null_placement_(NullPlacement::AtStart), is_implicit_(is_implicit) {}
+  /// Column key(s) to order by and how to order by these sort keys.
+  std::vector<SortKey> sort_keys_;
+  /// Whether nulls and NaNs are placed at the start or at the end
+  NullPlacement null_placement_;
+  bool is_implicit_ = false;
+};
+
+}  // namespace compute
+}  // namespace arrow
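Editor's note: the block below is not part of the diff. It is an illustrative sketch exercising the SortKey and Ordering declarations above; the field names "a" and "b" are arbitrary examples.

#include <iostream>

#include "arrow/compute/ordering.h"

int main() {
  using arrow::compute::Ordering;
  using arrow::compute::SortKey;
  using arrow::compute::SortOrder;

  // Data sorted by [a, b] satisfies any consumer that only needs ordering by [a].
  Ordering by_a({SortKey(arrow::FieldRef("a"), SortOrder::Ascending)});
  Ordering by_a_b({SortKey(arrow::FieldRef("a"), SortOrder::Ascending),
                   SortKey(arrow::FieldRef("b"), SortOrder::Descending)});

  std::cout << std::boolalpha << by_a.IsSuborderOf(by_a_b) << std::endl;  // true
  std::cout << std::boolalpha << by_a_b.IsSuborderOf(by_a) << std::endl;  // false
  return 0;
}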
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/registry.h
ADDED
@@ -0,0 +1,121 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// NOTE: API is EXPERIMENTAL and will change without going through a
+// deprecation cycle
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace compute {
+
+class Function;
+class FunctionOptionsType;
+
+/// \brief A mutable central function registry for built-in functions as well
+/// as user-defined functions. Functions are implementations of
+/// arrow::compute::Function.
+///
+/// Generally, each function contains kernels which are implementations of a
+/// function for a specific argument signature. After looking up a function in
+/// the registry, one can either execute it eagerly with Function::Execute or
+/// use one of the function's dispatch methods to pick a suitable kernel for
+/// lower-level function execution.
+class ARROW_EXPORT FunctionRegistry {
+ public:
+  ~FunctionRegistry();
+
+  /// \brief Construct a new registry.
+  ///
+  /// Most users only need to use the global registry.
+  static std::unique_ptr<FunctionRegistry> Make();
+
+  /// \brief Construct a new nested registry with the given parent.
+  ///
+  /// Most users only need to use the global registry. The returned registry never changes
+  /// its parent, even when an operation allows overwriting.
+  static std::unique_ptr<FunctionRegistry> Make(FunctionRegistry* parent);
+
+  /// \brief Check whether a new function can be added to the registry.
+  ///
+  /// \returns Status::KeyError if a function with the same name is already registered.
+  Status CanAddFunction(std::shared_ptr<Function> function, bool allow_overwrite = false);
+
+  /// \brief Add a new function to the registry.
+  ///
+  /// \returns Status::KeyError if a function with the same name is already registered.
+  Status AddFunction(std::shared_ptr<Function> function, bool allow_overwrite = false);
+
+  /// \brief Check whether an alias can be added for the given function name.
+  ///
+  /// \returns Status::KeyError if the function with the given name is not registered.
+  Status CanAddAlias(const std::string& target_name, const std::string& source_name);
+
+  /// \brief Add alias for the given function name.
+  ///
+  /// \returns Status::KeyError if the function with the given name is not registered.
+  Status AddAlias(const std::string& target_name, const std::string& source_name);
+
+  /// \brief Check whether a new function options type can be added to the registry.
+  ///
+  /// \return Status::KeyError if a function options type with the same name is already
+  /// registered.
+  Status CanAddFunctionOptionsType(const FunctionOptionsType* options_type,
+                                   bool allow_overwrite = false);
+
+  /// \brief Add a new function options type to the registry.
+  ///
+  /// \returns Status::KeyError if a function options type with the same name is already
+  /// registered.
+  Status AddFunctionOptionsType(const FunctionOptionsType* options_type,
+                                bool allow_overwrite = false);
+
+  /// \brief Retrieve a function by name from the registry.
+  Result<std::shared_ptr<Function>> GetFunction(const std::string& name) const;
+
+  /// \brief Return vector of all entry names in the registry.
+  ///
+  /// Helpful for displaying a manifest of available functions.
+  std::vector<std::string> GetFunctionNames() const;
+
+  /// \brief Retrieve a function options type by name from the registry.
+  Result<const FunctionOptionsType*> GetFunctionOptionsType(
+      const std::string& name) const;
+
+  /// \brief The number of currently registered functions.
+  int num_functions() const;
+
+ private:
+  FunctionRegistry();
+
+  // Use PIMPL pattern to not have std::unordered_map here
+  class FunctionRegistryImpl;
+  std::unique_ptr<FunctionRegistryImpl> impl_;
+
+  explicit FunctionRegistry(FunctionRegistryImpl* impl);
+};
+
+}  // namespace compute
+}  // namespace arrow
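Editor's note: the block below is not part of the diff. It is an illustrative sketch of looking up a compute function through the registry API declared above; it assumes the built-in "add" function is registered in the process-global registry (GetFunctionRegistry is declared in type_fwd.h further down).

#include <iostream>

#include "arrow/compute/registry.h"
#include "arrow/compute/type_fwd.h"

int main() {
  arrow::compute::FunctionRegistry* registry = arrow::compute::GetFunctionRegistry();
  std::cout << "registered functions: " << registry->num_functions() << std::endl;

  // GetFunction returns Result<std::shared_ptr<Function>>; a missing name yields KeyError.
  auto maybe_add = registry->GetFunction("add");
  if (maybe_add.ok()) {
    std::cout << "found 'add'" << std::endl;
  } else {
    std::cout << maybe_add.status().ToString() << std::endl;
  }
  return 0;
}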
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/row/grouper.h
ADDED
@@ -0,0 +1,184 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "arrow/compute/kernel.h"
+#include "arrow/datum.h"
+#include "arrow/result.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+namespace compute {
+
+/// \brief A segment
+/// A segment group is a chunk of continuous rows that have the same segment key. (For
+/// example, in ordered time series processing, segment key can be "date", and a segment
+/// group can be all the rows that belong to the same date.) A segment group can span
+/// across multiple exec batches. A segment is a chunk of continuous rows that has the
+/// same segment key within a given batch. When a segment group spans across batches, it
+/// will have multiple segments. A segment never spans across batches. The segment data
+/// structure only makes sense when used along with an exec batch.
+struct ARROW_EXPORT Segment {
+  /// \brief the offset into the batch where the segment starts
+  int64_t offset;
+  /// \brief the length of the segment
+  int64_t length;
+  /// \brief whether the segment may be extended by a next one
+  bool is_open;
+  /// \brief whether the segment extends a preceding one
+  bool extends;
+};
+
+inline bool operator==(const Segment& segment1, const Segment& segment2) {
+  return segment1.offset == segment2.offset && segment1.length == segment2.length &&
+         segment1.is_open == segment2.is_open && segment1.extends == segment2.extends;
+}
+inline bool operator!=(const Segment& segment1, const Segment& segment2) {
+  return !(segment1 == segment2);
+}
+
+/// \brief a helper class to divide a batch into segments of equal values
+///
+/// For example, given a batch with two columns:
+///
+/// A A
+/// A A
+/// A B
+/// A B
+/// A A
+///
+/// Then the batch could be divided into 3 segments. The first would be rows 0 & 1,
+/// the second would be rows 2 & 3, and the third would be row 4.
+///
+/// Further, a segmenter keeps track of the last value seen. This allows it to calculate
+/// segments which span batches. In our above example the last batch we emit would set
+/// the "open" flag, which indicates whether the segment may extend into the next batch.
+///
+/// If the next call to the segmenter starts with `A A` then that segment would set the
+/// "extends" flag, which indicates whether the segment continues the last open batch.
+class ARROW_EXPORT RowSegmenter {
+ public:
+  virtual ~RowSegmenter() = default;
+
+  /// \brief Construct a Segmenter which segments on the specified key types
+  ///
+  /// \param[in] key_types the specified key types
+  /// \param[in] nullable_keys whether values of the specified keys may be null
+  /// \param[in] ctx the execution context to use
+  static Result<std::unique_ptr<RowSegmenter>> Make(
+      const std::vector<TypeHolder>& key_types, bool nullable_keys, ExecContext* ctx);
+
+  /// \brief Return the key types of this segmenter
+  virtual const std::vector<TypeHolder>& key_types() const = 0;
+
+  /// \brief Reset this segmenter
+  ///
+  /// A segmenter normally extends (see `Segment`) a segment from one batch to the next.
+  /// If segment-extension is undesirable, for example when each batch is processed
+  /// independently, then `Reset` should be invoked before processing the next batch.
+  virtual Status Reset() = 0;
+
+  /// \brief Get the next segment for the given batch starting from the given offset
+  virtual Result<Segment> GetNextSegment(const ExecSpan& batch, int64_t offset) = 0;
+};
+
+/// Consumes batches of keys and yields batches of the group ids.
+class ARROW_EXPORT Grouper {
+ public:
+  virtual ~Grouper() = default;
+
+  /// Construct a Grouper which receives the specified key types
+  static Result<std::unique_ptr<Grouper>> Make(const std::vector<TypeHolder>& key_types,
+                                               ExecContext* ctx = default_exec_context());
+
+  /// Consume a batch of keys, producing the corresponding group ids as an integer array,
+  /// over a slice defined by an offset and length, which defaults to the batch length.
+  /// Currently only uint32 indices will be produced, eventually the bit width will only
+  /// be as wide as necessary.
+  virtual Result<Datum> Consume(const ExecSpan& batch, int64_t offset = 0,
+                                int64_t length = -1) = 0;
+
+  /// Get current unique keys. May be called multiple times.
+  virtual Result<ExecBatch> GetUniques() = 0;
+
+  /// Get the current number of groups.
+  virtual uint32_t num_groups() const = 0;
+
+  /// \brief Assemble lists of indices of identical elements.
+  ///
+  /// \param[in] ids An unsigned, all-valid integral array which will be
+  /// used as grouping criteria.
+  /// \param[in] num_groups An upper bound for the elements of ids
+  /// \param[in] ctx Execution context to use during the operation
+  /// \return A num_groups-long ListArray where the slot at i contains a
+  /// list of indices where i appears in ids.
+  ///
+  /// MakeGroupings([
+  ///   2,
+  ///   2,
+  ///   5,
+  ///   5,
+  ///   2,
+  ///   3
+  /// ], 8) == [
+  ///   [],
+  ///   [],
+  ///   [0, 1, 4],
+  ///   [5],
+  ///   [],
+  ///   [2, 3],
+  ///   [],
+  ///   []
+  /// ]
+  static Result<std::shared_ptr<ListArray>> MakeGroupings(
+      const UInt32Array& ids, uint32_t num_groups,
+      ExecContext* ctx = default_exec_context());
+
+  /// \brief Produce a ListArray whose slots are selections of `array` which correspond to
+  /// the provided groupings.
+  ///
+  /// For example,
+  /// ApplyGroupings([
+  ///   [],
+  ///   [],
+  ///   [0, 1, 4],
+  ///   [5],
+  ///   [],
+  ///   [2, 3],
+  ///   [],
+  ///   []
+  /// ], [2, 2, 5, 5, 2, 3]) == [
+  ///   [],
+  ///   [],
+  ///   [2, 2, 2],
+  ///   [3],
+  ///   [],
+  ///   [5, 5],
+  ///   [],
+  ///   []
+  /// ]
+  static Result<std::shared_ptr<ListArray>> ApplyGroupings(
+      const ListArray& groupings, const Array& array,
+      ExecContext* ctx = default_exec_context());
+};
+
+}  // namespace compute
+}  // namespace arrow
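Editor's note: the block below is not part of the diff. It is an illustrative sketch of the Grouper API declared above, mapping a single int32 key column to dense group ids; the sample data is arbitrary and the sketch assumes an Arrow build is available.

#include <iostream>

#include "arrow/array.h"
#include "arrow/builder.h"
#include "arrow/compute/exec.h"
#include "arrow/compute/row/grouper.h"

int main() {
  using arrow::compute::ExecBatch;
  using arrow::compute::ExecSpan;
  using arrow::compute::Grouper;

  // Key column: [5, 5, 7, 5] -> expected group ids [0, 0, 1, 0].
  std::shared_ptr<arrow::Array> keys;
  arrow::Int32Builder builder;
  (void)builder.AppendValues({5, 5, 7, 5});
  (void)builder.Finish(&keys);

  auto grouper = Grouper::Make({arrow::int32()}).ValueOrDie();

  ExecBatch batch({keys}, keys->length());
  auto group_ids = grouper->Consume(ExecSpan(batch)).ValueOrDie();

  std::cout << "num_groups=" << grouper->num_groups() << std::endl;  // 2
  std::cout << group_ids.make_array()->ToString() << std::endl;
  return 0;
}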
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/type_fwd.h
ADDED
@@ -0,0 +1,58 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+struct Datum;
+struct TypeHolder;
+
+namespace compute {
+
+class Function;
+class ScalarAggregateFunction;
+class FunctionExecutor;
+class FunctionOptions;
+class FunctionRegistry;
+
+/// \brief Return the process-global function registry.
+// Defined in registry.cc
+ARROW_EXPORT FunctionRegistry* GetFunctionRegistry();
+
+class CastOptions;
+
+struct ExecBatch;
+class ExecContext;
+class KernelContext;
+
+struct Kernel;
+struct ScalarKernel;
+struct ScalarAggregateKernel;
+struct VectorKernel;
+
+struct KernelState;
+
+class Expression;
+
+ARROW_EXPORT ExecContext* default_exec_context();
+ARROW_EXPORT ExecContext* threaded_exec_context();
+
+}  // namespace compute
+}  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h
ADDED
@@ -0,0 +1,290 @@
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
#pragma once
|
19 |
+
|
20 |
+
#include <atomic>
|
21 |
+
#include <cstdint>
|
22 |
+
#include <optional>
|
23 |
+
#include <thread>
|
24 |
+
#include <unordered_map>
|
25 |
+
#include <vector>
|
26 |
+
|
27 |
+
#include "arrow/buffer.h"
|
28 |
+
#include "arrow/compute/expression.h"
|
29 |
+
#include "arrow/compute/type_fwd.h"
|
30 |
+
#include "arrow/memory_pool.h"
|
31 |
+
#include "arrow/result.h"
|
32 |
+
#include "arrow/status.h"
|
33 |
+
#include "arrow/util/bit_util.h"
|
34 |
+
#include "arrow/util/cpu_info.h"
|
35 |
+
#include "arrow/util/mutex.h"
|
36 |
+
#include "arrow/util/thread_pool.h"
|
37 |
+
#include "arrow/util/type_fwd.h"
|
38 |
+
|
39 |
+
#if defined(__clang__) || defined(__GNUC__)
|
40 |
+
#define BYTESWAP(x) __builtin_bswap64(x)
|
41 |
+
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((-n) & 31)))
|
42 |
+
#define ROTL64(x, n) (((x) << (n)) | ((x) >> ((-n) & 63)))
|
43 |
+
#define PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
|
44 |
+
#elif defined(_MSC_VER)
|
45 |
+
#include <intrin.h>
|
46 |
+
#define BYTESWAP(x) _byteswap_uint64(x)
|
47 |
+
#define ROTL(x, n) _rotl((x), (n))
|
48 |
+
#define ROTL64(x, n) _rotl64((x), (n))
|
49 |
+
#if defined(_M_X64) || defined(_M_I86)
|
50 |
+
#include <mmintrin.h> // https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx
|
51 |
+
#define PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
|
52 |
+
#else
|
53 |
+
#define PREFETCH(ptr) (void)(ptr) /* disabled */
|
54 |
+
#endif
|
55 |
+
#endif
|
56 |
+
|
57 |
+
namespace arrow {
|
58 |
+
namespace util {
|
59 |
+
|
60 |
+
// Some platforms typedef int64_t as long int instead of long long int,
|
61 |
+
// which breaks the _mm256_i64gather_epi64 and _mm256_i32gather_epi64 intrinsics
|
62 |
+
// which need long long.
|
63 |
+
// We use the cast to the type below in these intrinsics to make the code
|
64 |
+
// compile in all cases.
|
65 |
+
//
|
66 |
+
using int64_for_gather_t = const long long int; // NOLINT runtime-int
|
67 |
+
|
68 |
+
// All MiniBatch... classes use TempVectorStack for vector allocations and can
|
69 |
+
// only work with vectors up to 1024 elements.
|
70 |
+
//
|
71 |
+
// They should only be allocated on the stack to guarantee the right sequence
|
72 |
+
// of allocation and deallocation of vectors from TempVectorStack.
|
73 |
+
//
|
74 |
+
class MiniBatch {
|
75 |
+
public:
|
76 |
+
static constexpr int kLogMiniBatchLength = 10;
|
77 |
+
static constexpr int kMiniBatchLength = 1 << kLogMiniBatchLength;
|
78 |
+
};
|
79 |
+
|
80 |
+
/// Storage used to allocate temporary vectors of a batch size.
|
81 |
+
/// Temporary vectors should resemble allocating temporary variables on the stack
|
82 |
+
/// but in the context of vectorized processing where we need to store a vector of
|
83 |
+
/// temporaries instead of a single value.
|
84 |
+
class ARROW_EXPORT TempVectorStack {
|
85 |
+
template <typename>
|
86 |
+
friend class TempVectorHolder;
|
87 |
+
|
88 |
+
public:
|
89 |
+
Status Init(MemoryPool* pool, int64_t size) {
|
90 |
+
num_vectors_ = 0;
|
91 |
+
top_ = 0;
|
92 |
+
buffer_size_ = PaddedAllocationSize(size) + kPadding + 2 * sizeof(uint64_t);
|
93 |
+
ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateResizableBuffer(size, pool));
|
94 |
+
// Ensure later operations don't accidentally read uninitialized memory.
|
95 |
+
std::memset(buffer->mutable_data(), 0xFF, size);
|
96 |
+
buffer_ = std::move(buffer);
|
97 |
+
return Status::OK();
|
98 |
+
}
|
99 |
+
|
100 |
+
private:
|
101 |
+
int64_t PaddedAllocationSize(int64_t num_bytes) {
|
102 |
+
// Round up allocation size to multiple of 8 bytes
|
103 |
+
// to avoid returning temp vectors with unaligned address.
|
104 |
+
//
|
105 |
+
// Also add padding at the end to facilitate loads and stores
|
106 |
+
// using SIMD when number of vector elements is not divisible
|
107 |
+
// by the number of SIMD lanes.
|
108 |
+
//
|
109 |
+
return ::arrow::bit_util::RoundUp(num_bytes, sizeof(int64_t)) + kPadding;
|
110 |
+
}
|
111 |
+
void alloc(uint32_t num_bytes, uint8_t** data, int* id);
|
112 |
+
void release(int id, uint32_t num_bytes);
|
113 |
+
static constexpr uint64_t kGuard1 = 0x3141592653589793ULL;
|
114 |
+
static constexpr uint64_t kGuard2 = 0x0577215664901532ULL;
|
115 |
+
static constexpr int64_t kPadding = 64;
|
116 |
+
int num_vectors_;
|
117 |
+
int64_t top_;
|
118 |
+
std::unique_ptr<Buffer> buffer_;
|
119 |
+
int64_t buffer_size_;
|
120 |
+
};
|
121 |
+
|
122 |
+
template <typename T>
|
123 |
+
class TempVectorHolder {
|
124 |
+
friend class TempVectorStack;
|
125 |
+
|
126 |
+
public:
|
127 |
+
~TempVectorHolder() { stack_->release(id_, num_elements_ * sizeof(T)); }
|
128 |
+
T* mutable_data() { return reinterpret_cast<T*>(data_); }
|
129 |
+
TempVectorHolder(TempVectorStack* stack, uint32_t num_elements) {
|
130 |
+
stack_ = stack;
|
131 |
+
num_elements_ = num_elements;
|
132 |
+
stack_->alloc(num_elements * sizeof(T), &data_, &id_);
|
133 |
+
}
|
134 |
+
|
135 |
+
private:
|
136 |
+
TempVectorStack* stack_;
|
137 |
+
uint8_t* data_;
|
138 |
+
int id_;
|
139 |
+
uint32_t num_elements_;
|
140 |
+
};
|
141 |
+
|
namespace bit_util {

ARROW_EXPORT void bits_to_indexes(int bit_to_search, int64_t hardware_flags,
                                  const int num_bits, const uint8_t* bits,
                                  int* num_indexes, uint16_t* indexes,
                                  int bit_offset = 0);

ARROW_EXPORT void bits_filter_indexes(int bit_to_search, int64_t hardware_flags,
                                      const int num_bits, const uint8_t* bits,
                                      const uint16_t* input_indexes, int* num_indexes,
                                      uint16_t* indexes, int bit_offset = 0);

// Input and output indexes may be pointing to the same data (in-place filtering).
ARROW_EXPORT void bits_split_indexes(int64_t hardware_flags, const int num_bits,
                                     const uint8_t* bits, int* num_indexes_bit0,
                                     uint16_t* indexes_bit0, uint16_t* indexes_bit1,
                                     int bit_offset = 0);

// Bit 1 is replaced with byte 0xFF.
ARROW_EXPORT void bits_to_bytes(int64_t hardware_flags, const int num_bits,
                                const uint8_t* bits, uint8_t* bytes, int bit_offset = 0);

// Return highest bit of each byte.
ARROW_EXPORT void bytes_to_bits(int64_t hardware_flags, const int num_bits,
                                const uint8_t* bytes, uint8_t* bits, int bit_offset = 0);

ARROW_EXPORT bool are_all_bytes_zero(int64_t hardware_flags, const uint8_t* bytes,
                                     uint32_t num_bytes);

#if defined(ARROW_HAVE_RUNTIME_AVX2) && defined(ARROW_HAVE_RUNTIME_BMI2)
// The functions below use BMI2 instructions, be careful before calling!

namespace avx2 {
ARROW_EXPORT void bits_filter_indexes_avx2(int bit_to_search, const int num_bits,
                                           const uint8_t* bits,
                                           const uint16_t* input_indexes,
                                           int* num_indexes, uint16_t* indexes);
ARROW_EXPORT void bits_to_indexes_avx2(int bit_to_search, const int num_bits,
                                       const uint8_t* bits, int* num_indexes,
                                       uint16_t* indexes, uint16_t base_index = 0);
ARROW_EXPORT void bits_to_bytes_avx2(const int num_bits, const uint8_t* bits,
                                     uint8_t* bytes);
ARROW_EXPORT void bytes_to_bits_avx2(const int num_bits, const uint8_t* bytes,
                                     uint8_t* bits);
ARROW_EXPORT bool are_all_bytes_zero_avx2(const uint8_t* bytes, uint32_t num_bytes);
}  // namespace avx2

#endif

}  // namespace bit_util
}  // namespace util

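// Editor's illustrative sketch (not part of the original header): expanding a
// packed bitmap into one byte per value with bits_to_bytes. Passing 0 for
// hardware_flags is assumed here to select the portable (non-SIMD) code path;
// inside Arrow the flags normally come from the runtime CPU-feature detection.
inline void ExampleBitsToBytes() {
  const uint8_t bits[2] = {0b10110001, 0b00000101};  // 16 packed bits
  uint8_t bytes[16];
  util::bit_util::bits_to_bytes(/*hardware_flags=*/0, /*num_bits=*/16, bits, bytes);
  // bytes[i] is now 0xFF where bit i was set and 0x00 where it was not.
}
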
namespace compute {

/// Modify an Expression with pre-order and post-order visitation.
/// `pre` will be invoked on each Expression. `pre` will visit Calls before their
/// arguments, `post_call` will visit Calls (and no other Expressions) after their
/// arguments. Visitors should return the Identical expression to indicate no change; this
/// will prevent unnecessary construction in the common case where a modification is not
/// possible/necessary/...
///
/// If an argument was modified, `post_call` visits a reconstructed Call with the modified
/// arguments but also receives a pointer to the unmodified Expression as a second
/// argument. If no arguments were modified the unmodified Expression* will be nullptr.
template <typename PreVisit, typename PostVisitCall>
Result<Expression> ModifyExpression(Expression expr, const PreVisit& pre,
                                    const PostVisitCall& post_call) {
  ARROW_ASSIGN_OR_RAISE(expr, Result<Expression>(pre(std::move(expr))));

  auto call = expr.call();
  if (!call) return expr;

  bool at_least_one_modified = false;
  std::vector<Expression> modified_arguments;

  for (size_t i = 0; i < call->arguments.size(); ++i) {
    ARROW_ASSIGN_OR_RAISE(auto modified_argument,
                          ModifyExpression(call->arguments[i], pre, post_call));

    if (Identical(modified_argument, call->arguments[i])) {
      continue;
    }

    if (!at_least_one_modified) {
      modified_arguments = call->arguments;
      at_least_one_modified = true;
    }

    modified_arguments[i] = std::move(modified_argument);
  }

  if (at_least_one_modified) {
    // reconstruct the call expression with the modified arguments
    auto modified_call = *call;
    modified_call.arguments = std::move(modified_arguments);
    return post_call(Expression(std::move(modified_call)), &expr);
  }

  return post_call(std::move(expr), NULLPTR);
}

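// Editor's illustrative sketch (not part of the original header): a no-op use of
// ModifyExpression that simply visits every sub-expression. Returning the
// expression unchanged from both visitors signals "no modification", so the
// original Expression comes back without any reconstruction of Call nodes.
inline Result<Expression> ExampleVisitExpression(Expression expr) {
  return ModifyExpression(
      std::move(expr),
      /*pre=*/[](Expression e) { return e; },
      /*post_call=*/[](Expression e, const Expression*) { return e; });
}
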
// Helper class to calculate the modified number of rows to process using SIMD.
//
// Some array elements at the end will be skipped in order to avoid buffer
// overrun, when doing memory loads and stores using a larger word size than a
// single array element.
//
class TailSkipForSIMD {
 public:
  static int64_t FixBitAccess(int num_bytes_accessed_together, int64_t num_rows,
                              int bit_offset) {
    int64_t num_bytes = bit_util::BytesForBits(num_rows + bit_offset);
    int64_t num_bytes_safe =
        std::max(static_cast<int64_t>(0LL), num_bytes - num_bytes_accessed_together + 1);
    int64_t num_rows_safe =
        std::max(static_cast<int64_t>(0LL), 8 * num_bytes_safe - bit_offset);
    return std::min(num_rows_safe, num_rows);
  }
  static int64_t FixBinaryAccess(int num_bytes_accessed_together, int64_t num_rows,
                                 int64_t length) {
    int64_t num_rows_to_skip = bit_util::CeilDiv(length, num_bytes_accessed_together);
    int64_t num_rows_safe =
        std::max(static_cast<int64_t>(0LL), num_rows - num_rows_to_skip);
    return num_rows_safe;
  }
  static int64_t FixVarBinaryAccess(int num_bytes_accessed_together, int64_t num_rows,
                                    const uint32_t* offsets) {
    // Do not process rows that could read past the end of the buffer using N
    // byte loads/stores.
    //
    int64_t num_rows_safe = num_rows;
    while (num_rows_safe > 0 &&
           offsets[num_rows_safe] + num_bytes_accessed_together > offsets[num_rows]) {
      --num_rows_safe;
    }
    return num_rows_safe;
  }
  static int FixSelection(int64_t num_rows_safe, int num_selected,
                          const uint16_t* selection) {
    int num_selected_safe = num_selected;
    while (num_selected_safe > 0 && selection[num_selected_safe - 1] >= num_rows_safe) {
      --num_selected_safe;
    }
    return num_selected_safe;
  }
};

}  // namespace compute
}  // namespace arrow
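// Editor's illustrative sketch (not part of the headers above): using
// TailSkipForSIMD::FixBitAccess to trim the number of rows a SIMD kernel may
// process when it loads the validity bitmap 8 bytes at a time, leaving the last
// partial word to a scalar tail loop. The include path is assumed to match the
// header shown above.
#include <cstdint>
#include "arrow/compute/util.h"

int64_t SafeRowsForBitmapKernel(int64_t num_rows, int bit_offset) {
  // Rows [0, safe_rows) may be handled with 8-byte loads of the bitmap;
  // rows [safe_rows, num_rows) must fall back to element-wise processing.
  return arrow::compute::TailSkipForSIMD::FixBitAccess(
      /*num_bytes_accessed_together=*/8, num_rows, bit_offset);
}
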
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h
ADDED
@@ -0,0 +1,275 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

/// Logic for automatically determining the structure of a multi-file
/// dataset, with optional partition discovery according to the available
/// partitioning schemes.

// This API is EXPERIMENTAL.

#pragma once

#include <memory>
#include <string>
#include <variant>
#include <vector>

#include "arrow/dataset/partition.h"
#include "arrow/dataset/type_fwd.h"
#include "arrow/dataset/visibility.h"
#include "arrow/filesystem/type_fwd.h"
#include "arrow/result.h"
#include "arrow/util/macros.h"

namespace arrow {
namespace dataset {

/// \defgroup dataset-discovery Discovery API
///
/// @{

struct InspectOptions {
  /// See `fragments` property.
  static constexpr int kInspectAllFragments = -1;

  /// Indicate how many fragments should be inspected to infer the unified dataset
  /// schema. Limiting the number of fragments accessed improves the latency of
  /// the discovery process when dealing with a high number of fragments and/or
  /// high latency file systems.
  ///
  /// The default value of `1` inspects the schema of the first (in no particular
  /// order) fragment only. If the dataset has a uniform schema for all fragments,
  /// this default is the optimal value. In order to inspect all fragments and
  /// robustly unify their potentially varying schemas, set this option to
  /// `kInspectAllFragments`. A value of `0` disables inspection of fragments
  /// altogether so only the partitioning schema will be inspected.
  int fragments = 1;

  /// Control how to unify types. By default, types are merged strictly (the
  /// type must match exactly, except nulls can be merged with other types).
  Field::MergeOptions field_merge_options = Field::MergeOptions::Defaults();
};

struct FinishOptions {
  /// Finalize the dataset with this given schema. If the schema is not
  /// provided, infer the schema via Inspect; see the `inspect_options`
  /// property.
  std::shared_ptr<Schema> schema = NULLPTR;

  /// If the schema is not provided, it will be discovered by passing the
  /// following options to `DatasetFactory::Inspect`.
  InspectOptions inspect_options{};

  /// Indicate if the given Schema (when specified) should be validated against
  /// the fragments' schemas. `inspect_options` will control how many fragments
  /// are checked.
  bool validate_fragments = false;
};

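// Editor's illustrative sketch (not part of the original header): typical option
// combinations. The defaults inspect a single fragment; forcing a full inspection
// trades discovery latency for a robustly unified schema.
inline FinishOptions ExampleFinishOptions() {
  InspectOptions inspect;
  inspect.fragments = InspectOptions::kInspectAllFragments;  // unify all fragment schemas

  FinishOptions finish;
  finish.inspect_options = inspect;
  finish.validate_fragments = true;  // check fragments against the final schema
  return finish;
}
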
/// \brief DatasetFactory provides a way to inspect/discover a Dataset's expected
/// schema before materializing said Dataset.
class ARROW_DS_EXPORT DatasetFactory {
 public:
  /// \brief Get the schemas of the Fragments and Partitioning.
  virtual Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
      InspectOptions options) = 0;

  /// \brief Get unified schema for the resulting Dataset.
  Result<std::shared_ptr<Schema>> Inspect(InspectOptions options = {});

  /// \brief Create a Dataset
  Result<std::shared_ptr<Dataset>> Finish();
  /// \brief Create a Dataset with the given schema (see \a FinishOptions::schema)
  Result<std::shared_ptr<Dataset>> Finish(std::shared_ptr<Schema> schema);
  /// \brief Create a Dataset with the given options
  virtual Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) = 0;

  /// \brief Optional root partition for the resulting Dataset.
  const compute::Expression& root_partition() const { return root_partition_; }
  /// \brief Set the root partition for the resulting Dataset.
  Status SetRootPartition(compute::Expression partition) {
    root_partition_ = std::move(partition);
    return Status::OK();
  }

  virtual ~DatasetFactory() = default;

 protected:
  DatasetFactory();

  compute::Expression root_partition_;
};

/// @}

/// \brief A DatasetFactory that combines the results of multiple child
/// DatasetFactory instances.
/// \ingroup dataset-implementations
class ARROW_DS_EXPORT UnionDatasetFactory : public DatasetFactory {
 public:
  static Result<std::shared_ptr<DatasetFactory>> Make(
      std::vector<std::shared_ptr<DatasetFactory>> factories);

  /// \brief Return the list of child DatasetFactory
  const std::vector<std::shared_ptr<DatasetFactory>>& factories() const {
    return factories_;
  }

  /// \brief Get the schemas of the Datasets.
  ///
  /// Instead of applying the options globally, they are applied at each child factory.
  /// This will not respect `options.fragments` exactly, but will respect the
  /// spirit of peeking at the first fragments or at all of them.
  Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
      InspectOptions options) override;

  /// \brief Create a Dataset.
  Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) override;

 protected:
  explicit UnionDatasetFactory(std::vector<std::shared_ptr<DatasetFactory>> factories);

  std::vector<std::shared_ptr<DatasetFactory>> factories_;
};

/// \ingroup dataset-filesystem
struct FileSystemFactoryOptions {
  /// Either an explicit Partitioning or a PartitioningFactory to discover one.
  ///
  /// If a factory is provided, it will be used to infer a schema for partition fields
  /// based on file and directory paths and then construct a Partitioning. The default
  /// is a Partitioning which will yield no partition information.
  ///
  /// The (explicit or discovered) partitioning will be applied to discovered files
  /// and the resulting partition information embedded in the Dataset.
  PartitioningOrFactory partitioning{Partitioning::Default()};

  /// For the purposes of applying the partitioning, paths will be stripped
  /// of the partition_base_dir. Files not matching the partition_base_dir
  /// prefix will be skipped for partition discovery. The ignored files will still
  /// be part of the Dataset, but will not have partition information.
  ///
  /// Example:
  /// partition_base_dir = "/dataset";
  ///
  /// - "/dataset/US/sales.csv" -> "US/sales.csv" will be given to the partitioning
  ///
  /// - "/home/john/late_sales.csv" -> Will be ignored for partition discovery.
  ///
  /// This is useful for a partitioning which parses directory names when ordering
  /// is important, e.g. DirectoryPartitioning.
  std::string partition_base_dir;

  /// Invalid files (via selector or explicitly) will be excluded by checking
  /// with the FileFormat::IsSupported method. This will incur IO for each file
  /// in a serial, single-threaded fashion. Disabling this feature will skip the
  /// IO, but unsupported files may be present in the Dataset
  /// (resulting in an error at scan time).
  bool exclude_invalid_files = false;

  /// When discovering from a Selector (and not from an explicit file list), ignore
  /// files and directories matching any of these prefixes.
  ///
  /// Example (with selector = "/dataset/**"):
  /// selector_ignore_prefixes = {"_", ".DS_STORE"};
  ///
  /// - "/dataset/data.csv" -> not ignored
  /// - "/dataset/_metadata" -> ignored
  /// - "/dataset/.DS_STORE" -> ignored
  /// - "/dataset/_hidden/dat" -> ignored
  /// - "/dataset/nested/.DS_STORE" -> ignored
  std::vector<std::string> selector_ignore_prefixes = {
      ".",
      "_",
  };
};

/// \brief FileSystemDatasetFactory creates a Dataset from a vector of
/// fs::FileInfo or a fs::FileSelector.
/// \ingroup dataset-filesystem
class ARROW_DS_EXPORT FileSystemDatasetFactory : public DatasetFactory {
 public:
  /// \brief Build a FileSystemDatasetFactory from an explicit list of
  /// paths.
  ///
  /// \param[in] filesystem passed to FileSystemDataset
  /// \param[in] paths passed to FileSystemDataset
  /// \param[in] format passed to FileSystemDataset
  /// \param[in] options see FileSystemFactoryOptions for more information.
  static Result<std::shared_ptr<DatasetFactory>> Make(
      std::shared_ptr<fs::FileSystem> filesystem, const std::vector<std::string>& paths,
      std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);

  /// \brief Build a FileSystemDatasetFactory from a fs::FileSelector.
  ///
  /// The selector will expand to a vector of FileInfo. The expansion/crawling
  /// is performed in this function call. Thus, the finalized Dataset is
  /// working with a snapshot of the filesystem.
  ///
  /// If options.partition_base_dir is not provided, it will be overwritten
  /// with selector.base_dir.
  ///
  /// \param[in] filesystem passed to FileSystemDataset
  /// \param[in] selector used to crawl and search files
  /// \param[in] format passed to FileSystemDataset
  /// \param[in] options see FileSystemFactoryOptions for more information.
  static Result<std::shared_ptr<DatasetFactory>> Make(
      std::shared_ptr<fs::FileSystem> filesystem, fs::FileSelector selector,
      std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);

  /// \brief Build a FileSystemDatasetFactory from a URI including filesystem
  /// information.
  ///
  /// \param[in] uri passed to FileSystemDataset
  /// \param[in] format passed to FileSystemDataset
  /// \param[in] options see FileSystemFactoryOptions for more information.
  static Result<std::shared_ptr<DatasetFactory>> Make(std::string uri,
                                                      std::shared_ptr<FileFormat> format,
                                                      FileSystemFactoryOptions options);

  /// \brief Build a FileSystemDatasetFactory from an explicit list of
  /// file information.
  ///
  /// \param[in] filesystem passed to FileSystemDataset
  /// \param[in] files passed to FileSystemDataset
  /// \param[in] format passed to FileSystemDataset
  /// \param[in] options see FileSystemFactoryOptions for more information.
  static Result<std::shared_ptr<DatasetFactory>> Make(
      std::shared_ptr<fs::FileSystem> filesystem, const std::vector<fs::FileInfo>& files,
      std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);

  Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
      InspectOptions options) override;

  Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) override;

 protected:
  FileSystemDatasetFactory(std::vector<fs::FileInfo> files,
                           std::shared_ptr<fs::FileSystem> filesystem,
                           std::shared_ptr<FileFormat> format,
                           FileSystemFactoryOptions options);

  Result<std::shared_ptr<Schema>> PartitionSchema();

  std::vector<fs::FileInfo> files_;
  std::shared_ptr<fs::FileSystem> fs_;
  std::shared_ptr<FileFormat> format_;
  FileSystemFactoryOptions options_;
};

}  // namespace dataset
}  // namespace arrow
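// Editor's illustrative sketch (not part of the headers above): discovering a
// partitioned dataset on the local filesystem with FileSystemDatasetFactory.
// The Parquet format header and HivePartitioning::MakeFactory() helper used
// below are assumptions for the sake of the example; any FileFormat and
// Partitioning (or PartitioningFactory) combination should work the same way.
#include "arrow/dataset/discovery.h"
#include "arrow/dataset/file_parquet.h"  // assumed: ParquetFileFormat
#include "arrow/dataset/partition.h"
#include "arrow/filesystem/localfs.h"

arrow::Result<std::shared_ptr<arrow::dataset::Dataset>> DiscoverDataset() {
  namespace ds = arrow::dataset;
  namespace fs = arrow::fs;

  auto filesystem = std::make_shared<fs::LocalFileSystem>();
  fs::FileSelector selector;
  selector.base_dir = "/dataset";  // placeholder path
  selector.recursive = true;

  ds::FileSystemFactoryOptions options;
  options.partitioning = ds::HivePartitioning::MakeFactory();  // assumed factory helper

  ARROW_ASSIGN_OR_RAISE(
      auto factory,
      ds::FileSystemDatasetFactory::Make(filesystem, selector,
                                         std::make_shared<ds::ParquetFileFormat>(),
                                         std::move(options)));
  // Inspect() unifies the fragment and partitioning schemas; Finish() then
  // materializes the Dataset with that schema.
  ARROW_ASSIGN_OR_RAISE(auto schema, factory->Inspect());
  return factory->Finish(std::move(schema));
}
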
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h
ADDED
@@ -0,0 +1,31 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include "arrow/util/config.h"  // IWYU pragma: export

#include "arrow/filesystem/filesystem.h"  // IWYU pragma: export
#include "arrow/filesystem/hdfs.h"        // IWYU pragma: export
#ifdef ARROW_GCS
#include "arrow/filesystem/gcsfs.h"  // IWYU pragma: export
#endif
#include "arrow/filesystem/localfs.h"  // IWYU pragma: export
#include "arrow/filesystem/mockfs.h"   // IWYU pragma: export
#ifdef ARROW_S3
#include "arrow/filesystem/s3fs.h"  // IWYU pragma: export
#endif
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h
ADDED
@@ -0,0 +1,237 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <string>
#include <vector>

#include "arrow/filesystem/filesystem.h"
#include "arrow/util/macros.h"
#include "arrow/util/uri.h"

namespace Azure::Core::Credentials {
class TokenCredential;
}

namespace Azure::Storage {
class StorageSharedKeyCredential;
}

namespace Azure::Storage::Blobs {
class BlobServiceClient;
}

namespace Azure::Storage::Files::DataLake {
class DataLakeFileSystemClient;
class DataLakeServiceClient;
}  // namespace Azure::Storage::Files::DataLake

namespace arrow::fs {

class TestAzureFileSystem;

/// Options for the AzureFileSystem implementation.
///
/// By default, authentication is handled by the Azure SDK's credential chain
/// which may read from multiple environment variables, such as:
/// - `AZURE_TENANT_ID`
/// - `AZURE_CLIENT_ID`
/// - `AZURE_CLIENT_SECRET`
/// - `AZURE_AUTHORITY_HOST`
/// - `AZURE_CLIENT_CERTIFICATE_PATH`
/// - `AZURE_FEDERATED_TOKEN_FILE`
///
/// Functions are provided for explicit configuration of credentials if that is preferred.
struct ARROW_EXPORT AzureOptions {
  /// \brief The name of the Azure Storage Account being accessed.
  ///
  /// All service URLs will be constructed using this storage account name.
  /// `ConfigureAccountKeyCredential` assumes the user wants to authenticate
  /// this account.
  std::string account_name;

  /// \brief hostname[:port] of the Azure Blob Storage Service.
  ///
  /// If the hostname is a relative domain name (one that starts with a '.'), then storage
  /// account URLs will be constructed by prepending the account name to the hostname.
  /// If the hostname is a fully qualified domain name, then the hostname will be used
  /// as-is and the account name will follow the hostname in the URL path.
  ///
  /// Default: ".blob.core.windows.net"
  std::string blob_storage_authority = ".blob.core.windows.net";

  /// \brief hostname[:port] of the Azure Data Lake Storage Gen 2 Service.
  ///
  /// If the hostname is a relative domain name (one that starts with a '.'), then storage
  /// account URLs will be constructed by prepending the account name to the hostname.
  /// If the hostname is a fully qualified domain name, then the hostname will be used
  /// as-is and the account name will follow the hostname in the URL path.
  ///
  /// Default: ".dfs.core.windows.net"
  std::string dfs_storage_authority = ".dfs.core.windows.net";

  /// \brief Azure Blob Storage connection transport.
  ///
  /// Default: "https"
  std::string blob_storage_scheme = "https";

  /// \brief Azure Data Lake Storage Gen 2 connection transport.
  ///
  /// Default: "https"
  std::string dfs_storage_scheme = "https";

  // TODO(GH-38598): Add support for more auth methods.
  // std::string connection_string;
  // std::string sas_token;

  /// \brief Default metadata for OpenOutputStream.
  ///
  /// This will be ignored if non-empty metadata is passed to OpenOutputStream.
  std::shared_ptr<const KeyValueMetadata> default_metadata;

 private:
  enum class CredentialKind {
    kDefault,
    kAnonymous,
    kStorageSharedKey,
    kClientSecret,
    kManagedIdentity,
    kWorkloadIdentity,
  } credential_kind_ = CredentialKind::kDefault;

  std::shared_ptr<Azure::Storage::StorageSharedKeyCredential>
      storage_shared_key_credential_;
  mutable std::shared_ptr<Azure::Core::Credentials::TokenCredential> token_credential_;

 public:
  AzureOptions();
  ~AzureOptions();

  Status ConfigureDefaultCredential();
  Status ConfigureAnonymousCredential();
  Status ConfigureAccountKeyCredential(const std::string& account_key);
  Status ConfigureClientSecretCredential(const std::string& tenant_id,
                                         const std::string& client_id,
                                         const std::string& client_secret);
  Status ConfigureManagedIdentityCredential(const std::string& client_id = std::string());
  Status ConfigureWorkloadIdentityCredential();

  bool Equals(const AzureOptions& other) const;

  std::string AccountBlobUrl(const std::string& account_name) const;
  std::string AccountDfsUrl(const std::string& account_name) const;

  Result<std::unique_ptr<Azure::Storage::Blobs::BlobServiceClient>>
  MakeBlobServiceClient() const;

  Result<std::unique_ptr<Azure::Storage::Files::DataLake::DataLakeServiceClient>>
  MakeDataLakeServiceClient() const;
};

/// \brief FileSystem implementation backed by Azure Blob Storage (ABS) [1] and
/// Azure Data Lake Storage Gen2 (ADLS Gen2) [2].
///
/// ADLS Gen2 isn't a dedicated service or account type. It's a set of capabilities that
/// support high throughput analytic workloads, built on Azure Blob Storage. All the data
/// ingested via the ADLS Gen2 APIs is persisted as blobs in the storage account.
/// ADLS Gen2 provides filesystem semantics, file-level security, and Hadoop
/// compatibility. ADLS Gen1 exists as a separate object that will be retired on
/// 2024-02-29, and new ADLS accounts use Gen2 instead.
///
/// ADLS Gen2 and Blob APIs can operate on the same data, but there are
/// some limitations [3]. The ones that are relevant to this
/// implementation are listed here:
///
/// - You can't use Blob APIs and ADLS APIs to write to the same instance of a file. If
///   you write to a file by using ADLS APIs then that file's blocks won't be visible
///   to calls to the GetBlockList Blob API. The only exception is when you're
///   overwriting.
/// - When you use the ListBlobs operation without specifying a delimiter, the results
///   include both directories and blobs. If you choose to use a delimiter, use only a
///   forward slash (/) -- the only supported delimiter.
/// - If you use the DeleteBlob API to delete a directory, that directory is deleted only
///   if it's empty. This means that you can't use the Blob API to delete directories
///   recursively.
///
/// [1]: https://azure.microsoft.com/en-us/products/storage/blobs
/// [2]: https://azure.microsoft.com/en-us/products/storage/data-lake-storage
/// [3]:
/// https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-known-issues
class ARROW_EXPORT AzureFileSystem : public FileSystem {
 private:
  class Impl;
  std::unique_ptr<Impl> impl_;

  explicit AzureFileSystem(std::unique_ptr<Impl>&& impl);

  friend class TestAzureFileSystem;
  void ForceCachedHierarchicalNamespaceSupport(int hns_support);

 public:
  ~AzureFileSystem() override = default;

  static Result<std::shared_ptr<AzureFileSystem>> Make(
      const AzureOptions& options, const io::IOContext& = io::default_io_context());

  std::string type_name() const override { return "abfs"; }

  /// Return the original Azure options used when constructing the filesystem
  const AzureOptions& options() const;

  bool Equals(const FileSystem& other) const override;

  Result<FileInfo> GetFileInfo(const std::string& path) override;

  Result<FileInfoVector> GetFileInfo(const FileSelector& select) override;

  Status CreateDir(const std::string& path, bool recursive = true) override;

  Status DeleteDir(const std::string& path) override;

  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;

  Status DeleteRootDirContents() override;

  Status DeleteFile(const std::string& path) override;

  Status Move(const std::string& src, const std::string& dest) override;

  Status CopyFile(const std::string& src, const std::string& dest) override;

  Result<std::shared_ptr<io::InputStream>> OpenInputStream(
      const std::string& path) override;

  Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info) override;

  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const std::string& path) override;

  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const FileInfo& info) override;

  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;

  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
};

}  // namespace arrow::fs
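// Editor's illustrative sketch (not part of the headers above): constructing an
// AzureFileSystem with an explicit account key. The account name and key are
// placeholders; without any Configure* call, the SDK's default credential chain
// (environment variables, managed identity, ...) is used instead.
#include "arrow/filesystem/azurefs.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Result<std::shared_ptr<arrow::fs::AzureFileSystem>> MakeAzureFs() {
  arrow::fs::AzureOptions options;
  options.account_name = "myaccount";  // placeholder
  ARROW_RETURN_NOT_OK(options.ConfigureAccountKeyCredential("mykey"));  // placeholder
  return arrow::fs::AzureFileSystem::Make(options);
}
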
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h
ADDED
@@ -0,0 +1,565 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <chrono>
#include <cstdint>
#include <functional>
#include <iosfwd>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "arrow/filesystem/type_fwd.h"
#include "arrow/io/interfaces.h"
#include "arrow/type_fwd.h"
#include "arrow/util/compare.h"
#include "arrow/util/macros.h"
#include "arrow/util/type_fwd.h"
#include "arrow/util/visibility.h"
#include "arrow/util/windows_fixup.h"

namespace arrow {
namespace fs {

// A system clock time point expressed as a 64-bit (or more) number of
// nanoseconds since the epoch.
using TimePoint =
    std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds>;

ARROW_EXPORT std::string ToString(FileType);

ARROW_EXPORT std::ostream& operator<<(std::ostream& os, FileType);

static const int64_t kNoSize = -1;
static const TimePoint kNoTime = TimePoint(TimePoint::duration(-1));

/// \brief FileSystem entry info
struct ARROW_EXPORT FileInfo : public util::EqualityComparable<FileInfo> {
  FileInfo() = default;
  FileInfo(FileInfo&&) = default;
  FileInfo& operator=(FileInfo&&) = default;
  FileInfo(const FileInfo&) = default;
  FileInfo& operator=(const FileInfo&) = default;

  explicit FileInfo(std::string path, FileType type = FileType::Unknown)
      : path_(std::move(path)), type_(type) {}

  /// The file type
  FileType type() const { return type_; }
  void set_type(FileType type) { type_ = type; }

  /// The full file path in the filesystem
  const std::string& path() const { return path_; }
  void set_path(std::string path) { path_ = std::move(path); }

  /// The file base name (component after the last directory separator)
  std::string base_name() const;

  /// The directory base name (component before the file base name).
  std::string dir_name() const;

  /// The size in bytes, if available
  ///
  /// Only regular files are guaranteed to have a size.
  int64_t size() const { return size_; }
  void set_size(int64_t size) { size_ = size; }

  /// The file extension (excluding the dot)
  std::string extension() const;

  /// The time of last modification, if available
  TimePoint mtime() const { return mtime_; }
  void set_mtime(TimePoint mtime) { mtime_ = mtime; }

  bool IsFile() const { return type_ == FileType::File; }
  bool IsDirectory() const { return type_ == FileType::Directory; }

  bool Equals(const FileInfo& other) const {
    return type() == other.type() && path() == other.path() && size() == other.size() &&
           mtime() == other.mtime();
  }

  std::string ToString() const;

  /// Function object implementing less-than comparison and hashing by
  /// path, to support sorting infos, using them as keys, and other
  /// interactions with the STL.
  struct ByPath {
    bool operator()(const FileInfo& l, const FileInfo& r) const {
      return l.path() < r.path();
    }

    size_t operator()(const FileInfo& i) const {
      return std::hash<std::string>{}(i.path());
    }
  };

 protected:
  std::string path_;
  FileType type_ = FileType::Unknown;
  int64_t size_ = kNoSize;
  TimePoint mtime_ = kNoTime;
};

ARROW_EXPORT std::ostream& operator<<(std::ostream& os, const FileInfo&);

/// \brief File selector for filesystem APIs
struct ARROW_EXPORT FileSelector {
  /// The directory in which to select files.
  /// If the path exists but doesn't point to a directory, this should be an error.
  std::string base_dir;
  /// The behavior if `base_dir` isn't found in the filesystem. If false,
  /// an error is returned. If true, an empty selection is returned.
  bool allow_not_found;
  /// Whether to recurse into subdirectories.
  bool recursive;
  /// The maximum number of subdirectories to recurse into.
  int32_t max_recursion;

  FileSelector() : allow_not_found(false), recursive(false), max_recursion(INT32_MAX) {}
};

/// \brief FileSystem, path pair
struct ARROW_EXPORT FileLocator {
  std::shared_ptr<FileSystem> filesystem;
  std::string path;
};

using FileInfoVector = std::vector<FileInfo>;
using FileInfoGenerator = std::function<Future<FileInfoVector>()>;

}  // namespace fs

template <>
struct IterationTraits<fs::FileInfoVector> {
  static fs::FileInfoVector End() { return {}; }
  static bool IsEnd(const fs::FileInfoVector& val) { return val.empty(); }
};

namespace fs {

/// \brief Abstract file system API
class ARROW_EXPORT FileSystem : public std::enable_shared_from_this<FileSystem> {
 public:
  virtual ~FileSystem();

  virtual std::string type_name() const = 0;

  /// EXPERIMENTAL: The IOContext associated with this filesystem.
  const io::IOContext& io_context() const { return io_context_; }

  /// Normalize path for the given filesystem
  ///
  /// The default implementation of this method is a no-op, but subclasses
  /// may allow normalizing irregular path forms (such as Windows local paths).
  virtual Result<std::string> NormalizePath(std::string path);

  /// \brief Ensure a URI (or path) is compatible with the given filesystem and return the
  /// path
  ///
  /// \param uri_string A URI representing a resource in the given filesystem.
  ///
  /// This method will check to ensure the given filesystem is compatible with the
  /// URI. This can be useful when the user provides both a URI and a filesystem or
  /// when a user provides multiple URIs that should be compatible with the same
  /// filesystem.
  ///
  /// uri_string can be an absolute path instead of a URI. In that case it will ensure
  /// the filesystem (if supplied) is the local filesystem (or some custom filesystem that
  /// is capable of reading local paths) and will normalize the path's file separators.
  ///
  /// Note, this method only checks to ensure the URI scheme is valid. It will not detect
  /// inconsistencies like a mismatching region or endpoint override.
  ///
  /// \return The path inside the filesystem that is indicated by the URI.
  virtual Result<std::string> PathFromUri(const std::string& uri_string) const;

  virtual bool Equals(const FileSystem& other) const = 0;

  virtual bool Equals(const std::shared_ptr<FileSystem>& other) const {
    return Equals(*other);
  }

  /// Get info for the given target.
  ///
  /// Any symlink is automatically dereferenced, recursively.
  /// A nonexistent or unreachable file returns an Ok status and
  /// has a FileType of value NotFound. An error status indicates
  /// a truly exceptional condition (low-level I/O error, etc.).
  virtual Result<FileInfo> GetFileInfo(const std::string& path) = 0;
  /// Same, for many targets at once.
  virtual Result<FileInfoVector> GetFileInfo(const std::vector<std::string>& paths);
  /// Same, according to a selector.
  ///
  /// The selector's base directory will not be part of the results, even if
  /// it exists.
  /// If it doesn't exist, see `FileSelector::allow_not_found`.
  virtual Result<FileInfoVector> GetFileInfo(const FileSelector& select) = 0;

  /// Async version of GetFileInfo
  virtual Future<FileInfoVector> GetFileInfoAsync(const std::vector<std::string>& paths);

  /// Streaming async version of GetFileInfo
  ///
  /// The returned generator is not async-reentrant, i.e. you need to wait for
  /// the returned future to complete before calling the generator again.
  virtual FileInfoGenerator GetFileInfoGenerator(const FileSelector& select);

  /// Create a directory and subdirectories.
  ///
  /// This function succeeds if the directory already exists.
  virtual Status CreateDir(const std::string& path, bool recursive = true) = 0;

  /// Delete a directory and its contents, recursively.
  virtual Status DeleteDir(const std::string& path) = 0;

  /// Delete a directory's contents, recursively.
  ///
  /// Like DeleteDir, but doesn't delete the directory itself.
  /// Passing an empty path ("" or "/") is disallowed, see DeleteRootDirContents.
  virtual Status DeleteDirContents(const std::string& path,
                                   bool missing_dir_ok = false) = 0;

  /// Async version of DeleteDirContents.
  virtual Future<> DeleteDirContentsAsync(const std::string& path,
                                          bool missing_dir_ok = false);

  /// EXPERIMENTAL: Delete the root directory's contents, recursively.
  ///
  /// Implementations may decide to raise an error if this operation is
  /// too dangerous.
  // NOTE: may decide to remove this if it's deemed not useful
  virtual Status DeleteRootDirContents() = 0;

  /// Delete a file.
  virtual Status DeleteFile(const std::string& path) = 0;
  /// Delete many files.
  ///
  /// The default implementation issues individual delete operations in sequence.
  virtual Status DeleteFiles(const std::vector<std::string>& paths);

  /// Move / rename a file or directory.
  ///
  /// If the destination exists:
  /// - if it is a non-empty directory, an error is returned
  /// - otherwise, if it has the same type as the source, it is replaced
  /// - otherwise, behavior is unspecified (implementation-dependent).
  virtual Status Move(const std::string& src, const std::string& dest) = 0;

  /// Copy a file.
  ///
  /// If the destination exists and is a directory, an error is returned.
  /// Otherwise, it is replaced.
  virtual Status CopyFile(const std::string& src, const std::string& dest) = 0;

  /// Open an input stream for sequential reading.
  virtual Result<std::shared_ptr<io::InputStream>> OpenInputStream(
      const std::string& path) = 0;
  /// Open an input stream for sequential reading.
  ///
  /// This override assumes the given FileInfo validly represents the file's
  /// characteristics, and may optimize access depending on them (for example
  /// avoid querying the file size or its existence).
  virtual Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info);

  /// Open an input file for random access reading.
  virtual Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const std::string& path) = 0;
  /// Open an input file for random access reading.
  ///
  /// This override assumes the given FileInfo validly represents the file's
  /// characteristics, and may optimize access depending on them (for example
  /// avoid querying the file size or its existence).
  virtual Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const FileInfo& info);

  /// Async version of OpenInputStream
  virtual Future<std::shared_ptr<io::InputStream>> OpenInputStreamAsync(
      const std::string& path);
  /// Async version of OpenInputStream
  virtual Future<std::shared_ptr<io::InputStream>> OpenInputStreamAsync(
      const FileInfo& info);

  /// Async version of OpenInputFile
  virtual Future<std::shared_ptr<io::RandomAccessFile>> OpenInputFileAsync(
      const std::string& path);
  /// Async version of OpenInputFile
  virtual Future<std::shared_ptr<io::RandomAccessFile>> OpenInputFileAsync(
      const FileInfo& info);

  /// Open an output stream for sequential writing.
  ///
  /// If the target already exists, existing data is truncated.
  virtual Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata) = 0;
  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(const std::string& path);

  /// Open an output stream for appending.
  ///
  /// If the target doesn't exist, a new empty file is created.
  ///
  /// Note: some filesystem implementations do not support efficient appending
  /// to an existing file, in which case this method will return NotImplemented.
  /// Consider writing to multiple files (using e.g. the dataset layer) instead.
  virtual Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata) = 0;
  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(const std::string& path);

 protected:
  explicit FileSystem(io::IOContext io_context = io::default_io_context())
      : io_context_(std::move(io_context)) {}

  io::IOContext io_context_;
  // Whether metadata operations (such as GetFileInfo or OpenInputStream)
  // are cheap enough that the default async variants don't bother with
  // a thread pool.
  bool default_async_is_sync_ = true;
};

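// Editor's illustrative sketch (not part of the original header): listing a
// directory tree through the abstract FileSystem API. The same call works with
// any concrete implementation (local, S3, GCS, Azure, ...).
inline Result<FileInfoVector> ListTree(FileSystem& filesystem,
                                       const std::string& base_dir) {
  FileSelector selector;
  selector.base_dir = base_dir;
  selector.recursive = true;
  selector.allow_not_found = true;  // empty result instead of an error
  return filesystem.GetFileInfo(selector);
}
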
/// \brief A FileSystem implementation that delegates to another
/// implementation after prepending a fixed base path.
///
/// This is useful to expose a logical view of a subtree of a filesystem,
/// for example a directory in a LocalFileSystem.
/// This works on abstract paths, i.e. paths using forward slashes and
/// a single root "/". Windows paths are not guaranteed to work.
/// This makes no security guarantee. For example, symlinks may allow escaping
/// the subtree and accessing other parts of the underlying filesystem.
class ARROW_EXPORT SubTreeFileSystem : public FileSystem {
 public:
  // This constructor may abort if base_path is invalid.
  explicit SubTreeFileSystem(const std::string& base_path,
                             std::shared_ptr<FileSystem> base_fs);
  ~SubTreeFileSystem() override;

  std::string type_name() const override { return "subtree"; }
  std::string base_path() const { return base_path_; }
  std::shared_ptr<FileSystem> base_fs() const { return base_fs_; }

  Result<std::string> NormalizePath(std::string path) override;
  Result<std::string> PathFromUri(const std::string& uri_string) const override;

  bool Equals(const FileSystem& other) const override;

  /// \cond FALSE
  using FileSystem::GetFileInfo;
  /// \endcond
  Result<FileInfo> GetFileInfo(const std::string& path) override;
  Result<FileInfoVector> GetFileInfo(const FileSelector& select) override;

  FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override;

  Status CreateDir(const std::string& path, bool recursive = true) override;

  Status DeleteDir(const std::string& path) override;
  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
  Status DeleteRootDirContents() override;

  Status DeleteFile(const std::string& path) override;

  Status Move(const std::string& src, const std::string& dest) override;

  Status CopyFile(const std::string& src, const std::string& dest) override;

  Result<std::shared_ptr<io::InputStream>> OpenInputStream(
      const std::string& path) override;
  Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info) override;
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const std::string& path) override;
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const FileInfo& info) override;

  Future<std::shared_ptr<io::InputStream>> OpenInputStreamAsync(
      const std::string& path) override;
  Future<std::shared_ptr<io::InputStream>> OpenInputStreamAsync(
      const FileInfo& info) override;
  Future<std::shared_ptr<io::RandomAccessFile>> OpenInputFileAsync(
      const std::string& path) override;
  Future<std::shared_ptr<io::RandomAccessFile>> OpenInputFileAsync(
      const FileInfo& info) override;

  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;

 protected:
  SubTreeFileSystem() {}

  const std::string base_path_;
  std::shared_ptr<FileSystem> base_fs_;

  Result<std::string> PrependBase(const std::string& s) const;
  Result<std::string> PrependBaseNonEmpty(const std::string& s) const;
  Result<std::string> StripBase(const std::string& s) const;
  Status FixInfo(FileInfo* info) const;

  static Result<std::string> NormalizeBasePath(
      std::string base_path, const std::shared_ptr<FileSystem>& base_fs);
};

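// Editor's illustrative sketch (not part of the original header): exposing a
// single directory of another filesystem as its own root via SubTreeFileSystem.
inline std::shared_ptr<FileSystem> ChrootAt(std::shared_ptr<FileSystem> base_fs,
                                            const std::string& base_path) {
  // "some/path" on the returned filesystem maps to base_path + "/some/path"
  // on base_fs.
  return std::make_shared<SubTreeFileSystem>(base_path, std::move(base_fs));
}
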
/// \brief A FileSystem implementation that delegates to another
/// implementation but inserts latencies at various points.
class ARROW_EXPORT SlowFileSystem : public FileSystem {
 public:
  SlowFileSystem(std::shared_ptr<FileSystem> base_fs,
                 std::shared_ptr<io::LatencyGenerator> latencies);
  SlowFileSystem(std::shared_ptr<FileSystem> base_fs, double average_latency);
  SlowFileSystem(std::shared_ptr<FileSystem> base_fs, double average_latency,
                 int32_t seed);

  std::string type_name() const override { return "slow"; }
  bool Equals(const FileSystem& other) const override;
  Result<std::string> PathFromUri(const std::string& uri_string) const override;

  using FileSystem::GetFileInfo;
  Result<FileInfo> GetFileInfo(const std::string& path) override;
  Result<FileInfoVector> GetFileInfo(const FileSelector& select) override;

  Status CreateDir(const std::string& path, bool recursive = true) override;

  Status DeleteDir(const std::string& path) override;
  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
  Status DeleteRootDirContents() override;

  Status DeleteFile(const std::string& path) override;

  Status Move(const std::string& src, const std::string& dest) override;

  Status CopyFile(const std::string& src, const std::string& dest) override;

  Result<std::shared_ptr<io::InputStream>> OpenInputStream(
      const std::string& path) override;
  Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info) override;
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const std::string& path) override;
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const FileInfo& info) override;
  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;

 protected:
  std::shared_ptr<FileSystem> base_fs_;
  std::shared_ptr<io::LatencyGenerator> latencies_;
};

/// \defgroup filesystem-factories Functions for creating FileSystem instances
///
/// @{

/// \brief Create a new FileSystem by URI
///
/// Recognized schemes are "file", "mock", "hdfs", "viewfs", "s3",
/// "gs" and "gcs".
///
/// \param[in] uri a URI-based path, ex: file:///some/local/path
/// \param[out] out_path (optional) Path inside the filesystem.
/// \return out_fs FileSystem instance.
ARROW_EXPORT
Result<std::shared_ptr<FileSystem>> FileSystemFromUri(const std::string& uri,
                                                      std::string* out_path = NULLPTR);

/// \brief Create a new FileSystem by URI with a custom IO context
///
/// Recognized schemes are "file", "mock", "hdfs", "viewfs", "s3",
/// "gs" and "gcs".
///
/// \param[in] uri a URI-based path, ex: file:///some/local/path
/// \param[in] io_context an IOContext which will be associated with the filesystem
/// \param[out] out_path (optional) Path inside the filesystem.
/// \return out_fs FileSystem instance.
ARROW_EXPORT
Result<std::shared_ptr<FileSystem>> FileSystemFromUri(const std::string& uri,
                                                      const io::IOContext& io_context,
                                                      std::string* out_path = NULLPTR);
|
500 |
+
|
501 |
+
/// \brief Create a new FileSystem by URI
|
502 |
+
///
|
503 |
+
/// Same as FileSystemFromUri, but in addition also recognize non-URIs
|
504 |
+
/// and treat them as local filesystem paths. Only absolute local filesystem
|
505 |
+
/// paths are allowed.
|
506 |
+
ARROW_EXPORT
|
507 |
+
Result<std::shared_ptr<FileSystem>> FileSystemFromUriOrPath(
|
508 |
+
const std::string& uri, std::string* out_path = NULLPTR);
|
509 |
+
|
510 |
+
/// \brief Create a new FileSystem by URI with a custom IO context
|
511 |
+
///
|
512 |
+
/// Same as FileSystemFromUri, but in addition also recognize non-URIs
|
513 |
+
/// and treat them as local filesystem paths. Only absolute local filesystem
|
514 |
+
/// paths are allowed.
|
515 |
+
ARROW_EXPORT
|
516 |
+
Result<std::shared_ptr<FileSystem>> FileSystemFromUriOrPath(
|
517 |
+
const std::string& uri, const io::IOContext& io_context,
|
518 |
+
std::string* out_path = NULLPTR);
|
519 |
+
|
520 |
+
/// @}
|
521 |
+
|
522 |
+
/// \brief Copy files, including from one FileSystem to another
|
523 |
+
///
|
524 |
+
/// If a source and destination are resident in the same FileSystem FileSystem::CopyFile
|
525 |
+
/// will be used, otherwise the file will be opened as a stream in both FileSystems and
|
526 |
+
/// chunks copied from the source to the destination. No directories will be created.
|
527 |
+
ARROW_EXPORT
|
528 |
+
Status CopyFiles(const std::vector<FileLocator>& sources,
|
529 |
+
const std::vector<FileLocator>& destinations,
|
530 |
+
const io::IOContext& io_context = io::default_io_context(),
|
531 |
+
int64_t chunk_size = 1024 * 1024, bool use_threads = true);
|
532 |
+
|
533 |
+
/// \brief Copy selected files, including from one FileSystem to another
|
534 |
+
///
|
535 |
+
/// Directories will be created under the destination base directory as needed.
|
536 |
+
ARROW_EXPORT
|
537 |
+
Status CopyFiles(const std::shared_ptr<FileSystem>& source_fs,
|
538 |
+
const FileSelector& source_sel,
|
539 |
+
const std::shared_ptr<FileSystem>& destination_fs,
|
540 |
+
const std::string& destination_base_dir,
|
541 |
+
const io::IOContext& io_context = io::default_io_context(),
|
542 |
+
int64_t chunk_size = 1024 * 1024, bool use_threads = true);
|
543 |
+
|
544 |
+
struct FileSystemGlobalOptions {
|
545 |
+
/// Path to a single PEM file holding all TLS CA certificates
|
546 |
+
///
|
547 |
+
/// If empty, the underlying TLS library's defaults will be used.
|
548 |
+
std::string tls_ca_file_path;
|
549 |
+
|
550 |
+
/// Path to a directory holding TLS CA certificates in individual PEM files
|
551 |
+
/// named along the OpenSSL "hashed" format.
|
552 |
+
///
|
553 |
+
/// If empty, the underlying TLS library's defaults will be used.
|
554 |
+
std::string tls_ca_dir_path;
|
555 |
+
};
|
556 |
+
|
557 |
+
/// EXPERIMENTAL: optional global initialization routine
|
558 |
+
///
|
559 |
+
/// This is for environments (such as manylinux) where the path
|
560 |
+
/// to TLS CA certificates needs to be configured at runtime.
|
561 |
+
ARROW_EXPORT
|
562 |
+
Status Initialize(const FileSystemGlobalOptions& options);
|
563 |
+
|
564 |
+
} // namespace fs
|
565 |
+
} // namespace arrow
|
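
A minimal usage sketch for the factory functions and CopyFiles() declared above (not part of the vendored header; it assumes Arrow C++ is installed and linked, the URI and file names are placeholders, and FileLocator is the small filesystem-plus-path struct declared earlier in this header):

#include <arrow/filesystem/filesystem.h>
#include <iostream>

int main() {
  // Resolve a FileSystem and the path inside it from a URI.
  std::string path;
  auto fs = arrow::fs::FileSystemFromUri("file:///tmp", &path).ValueOrDie();
  std::cout << "type=" << fs->type_name() << " path=" << path << std::endl;

  // Copy one file to another location; the same call works across filesystems.
  arrow::fs::FileLocator src{fs, path + "/a.txt"};
  arrow::fs::FileLocator dest{fs, path + "/b.txt"};
  arrow::Status st = arrow::fs::CopyFiles({src}, {dest});
  if (!st.ok()) {
    std::cerr << st.ToString() << std::endl;
  }
  return 0;
}
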
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h
ADDED
@@ -0,0 +1,247 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <optional>
#include <string>
#include <vector>

#include "arrow/filesystem/filesystem.h"
#include "arrow/util/uri.h"

namespace arrow {
namespace fs {
namespace internal {

// Opaque wrapper for GCS's library credentials to avoid exposing in Arrow headers.
struct GcsCredentialsHolder;

}  // namespace internal

class GcsFileSystem;

/// \brief Container for GCS Credentials and information necessary to recreate them.
class ARROW_EXPORT GcsCredentials {
 public:
  bool Equals(const GcsCredentials& other) const;
  bool anonymous() const { return anonymous_; }
  const std::string& access_token() const { return access_token_; }
  TimePoint expiration() const { return expiration_; }
  const std::string& target_service_account() const { return target_service_account_; }
  const std::string& json_credentials() const { return json_credentials_; }
  const std::shared_ptr<internal::GcsCredentialsHolder>& holder() const {
    return holder_;
  }

 private:
  GcsCredentials() = default;
  bool anonymous_ = false;
  std::string access_token_;
  TimePoint expiration_;
  std::string target_service_account_;
  std::string json_credentials_;
  std::shared_ptr<internal::GcsCredentialsHolder> holder_;
  friend class GcsFileSystem;
  friend struct GcsOptions;
};

/// Options for the GcsFileSystem implementation.
struct ARROW_EXPORT GcsOptions {
  /// \brief Equivalent to GcsOptions::Defaults().
  GcsOptions();
  GcsCredentials credentials;

  std::string endpoint_override;
  std::string scheme;
  /// \brief Location to use for creating buckets.
  std::string default_bucket_location;

  /// \brief If set, used to control the total time allowed for retrying underlying
  /// errors.
  ///
  /// The default policy is to retry for up to 15 minutes.
  std::optional<double> retry_limit_seconds;

  /// \brief Default metadata for OpenOutputStream.
  ///
  /// This will be ignored if non-empty metadata is passed to OpenOutputStream.
  std::shared_ptr<const KeyValueMetadata> default_metadata;

  /// \brief The project to use for creating buckets.
  ///
  /// If not set, the library uses the GOOGLE_CLOUD_PROJECT environment
  /// variable. Most I/O operations do not need a project id, only applications
  /// that create new buckets need a project id.
  std::optional<std::string> project_id;

  bool Equals(const GcsOptions& other) const;

  /// \brief Initialize with Google Default Credentials
  ///
  /// Create options configured to use [Application Default Credentials][aip/4110]. The
  /// details of this mechanism are too involved to describe here, but suffice it to say
  /// that applications can override any defaults using an environment variable
  /// (`GOOGLE_APPLICATION_CREDENTIALS`), and that the defaults work with most Google
  /// Cloud Platform deployment environments (GCE, GKE, Cloud Run, etc.), and that they
  /// have the same behavior as the `gcloud` CLI tool on your workstation.
  ///
  /// \see https://cloud.google.com/docs/authentication
  ///
  /// [aip/4110]: https://google.aip.dev/auth/4110
  static GcsOptions Defaults();

  /// \brief Initialize with anonymous credentials
  static GcsOptions Anonymous();

  /// \brief Initialize with access token
  ///
  /// These credentials are useful when using an out-of-band mechanism to fetch access
  /// tokens. Note that access tokens are time limited; you will need to manually refresh
  /// the tokens created by the out-of-band mechanism.
  static GcsOptions FromAccessToken(const std::string& access_token,
                                    TimePoint expiration);

  /// \brief Initialize with service account impersonation
  ///
  /// Service account impersonation allows one principal (a user or service account) to
  /// impersonate a service account. It requires that the calling principal has the
  /// necessary permissions *on* the service account.
  static GcsOptions FromImpersonatedServiceAccount(
      const GcsCredentials& base_credentials, const std::string& target_service_account);

  /// Creates service account credentials from a JSON object in string form.
  ///
  /// The @p json_object is expected to be in the format described by [aip/4112]. Such an
  /// object contains the identity of a service account, as well as a private key that can
  /// be used to sign tokens, showing the caller was holding the private key.
  ///
  /// In GCP one can create several "keys" for each service account, and these keys are
  /// downloaded as a JSON "key file". The contents of such a file are in the format
  /// required by this function. Remember that key files and their contents should be
  /// treated as any other secret with security implications, think of them as passwords
  /// (because they are!), don't store them or output them where unauthorized persons may
  /// read them.
  ///
  /// Most applications should probably use default credentials, maybe pointing them to a
  /// file with these contents. Using this function may be useful when the json object is
  /// obtained from a Cloud Secret Manager or a similar service.
  ///
  /// [aip/4112]: https://google.aip.dev/auth/4112
  static GcsOptions FromServiceAccountCredentials(const std::string& json_object);

  /// Initialize from URIs such as "gs://bucket/object".
  static Result<GcsOptions> FromUri(const arrow::internal::Uri& uri,
                                    std::string* out_path);
  static Result<GcsOptions> FromUri(const std::string& uri, std::string* out_path);
};

/// \brief GCS-backed FileSystem implementation.
///
/// GCS (Google Cloud Storage - https://cloud.google.com/storage) is a scalable object
/// storage system for any amount of data. The main abstractions in GCS are buckets and
/// objects. A bucket is a namespace for objects, buckets can store any number of objects,
/// tens of millions and even billions is not uncommon. Each object contains a single
/// blob of data, up to 5TiB in size. Buckets are typically configured to keep a single
/// version of each object, but versioning can be enabled. Versioning is important because
/// objects are immutable, once created one cannot append data to the object or modify the
/// object data in any way.
///
/// GCS buckets are in a global namespace, if a Google Cloud customer creates a bucket
/// named `foo` no other customer can create a bucket with the same name. Note that a
/// principal (a user or service account) may only list the buckets they are entitled to,
/// and then only within a project. It is not possible to list "all" the buckets.
///
/// Within each bucket, objects are in a flat namespace. GCS does not have folders or
/// directories. However, following some conventions it is possible to emulate
/// directories. To this end, this class:
///
/// - All buckets are treated as directories at the "root"
/// - Creating a root directory results in a new bucket being created, this may be slower
///   than most GCS operations.
/// - The class creates marker objects for a directory, using a metadata attribute to
///   annotate the file.
/// - GCS can list all the objects with a given prefix, this is used to emulate listing
///   of directories.
/// - In object lists GCS can summarize all the objects with a common prefix as a single
///   entry, this is used to emulate non-recursive lists. Note that GCS list time is
///   proportional to the number of objects in the prefix. Listing recursively takes
///   almost the same time as non-recursive lists.
///
class ARROW_EXPORT GcsFileSystem : public FileSystem {
 public:
  ~GcsFileSystem() override = default;

  std::string type_name() const override;
  const GcsOptions& options() const;

  bool Equals(const FileSystem& other) const override;
  Result<std::string> PathFromUri(const std::string& uri_string) const override;

  Result<FileInfo> GetFileInfo(const std::string& path) override;
  Result<FileInfoVector> GetFileInfo(const FileSelector& select) override;

  Status CreateDir(const std::string& path, bool recursive) override;

  Status DeleteDir(const std::string& path) override;

  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;

  /// This is not implemented in GcsFileSystem, as it would be too dangerous.
  Status DeleteRootDirContents() override;

  Status DeleteFile(const std::string& path) override;

  Status Move(const std::string& src, const std::string& dest) override;

  Status CopyFile(const std::string& src, const std::string& dest) override;

  Result<std::shared_ptr<io::InputStream>> OpenInputStream(
      const std::string& path) override;
  Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info) override;

  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const std::string& path) override;
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const FileInfo& info) override;

  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata) override;

  ARROW_DEPRECATED(
      "Deprecated. "
      "OpenAppendStream is unsupported on the GCS FileSystem.")
  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata) override;

  /// Create a GcsFileSystem instance from the given options.
  // TODO(ARROW-16884): make this return Result for consistency
  static std::shared_ptr<GcsFileSystem> Make(
      const GcsOptions& options, const io::IOContext& = io::default_io_context());

 private:
  explicit GcsFileSystem(const GcsOptions& options, const io::IOContext& io_context);

  class Impl;
  std::shared_ptr<Impl> impl_;
};

}  // namespace fs
}  // namespace arrow
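
A rough usage sketch for the GcsOptions factories and GcsFileSystem::Make() declared above (the bucket location and the bucket/object path are placeholders; anonymous credentials only work against publicly readable buckets):

#include <arrow/filesystem/gcsfs.h>
#include <iostream>

int main() {
  // Anonymous access; switch to GcsOptions::Defaults() to pick up ADC credentials.
  arrow::fs::GcsOptions options = arrow::fs::GcsOptions::Anonymous();
  options.default_bucket_location = "us-east1";  // placeholder location

  std::shared_ptr<arrow::fs::GcsFileSystem> fs = arrow::fs::GcsFileSystem::Make(options);

  // Stat a single object (placeholder path: "<bucket>/<object>").
  auto info = fs->GetFileInfo("some-public-bucket/some-object");
  if (info.ok()) {
    std::cout << info->path() << " size=" << info->size() << std::endl;
  } else {
    std::cerr << info.status().ToString() << std::endl;
  }
  return 0;
}
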
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/hdfs.h
ADDED
@@ -0,0 +1,114 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <string>
#include <vector>

#include "arrow/filesystem/filesystem.h"
#include "arrow/io/hdfs.h"
#include "arrow/util/uri.h"

namespace arrow {
namespace fs {

/// Options for the HDFS implementation.
struct ARROW_EXPORT HdfsOptions {
  HdfsOptions() = default;
  ~HdfsOptions() = default;

  /// Hdfs configuration options, contains host, port, driver
  io::HdfsConnectionConfig connection_config;

  /// Used by Hdfs OpenWritable Interface.
  int32_t buffer_size = 0;
  int16_t replication = 3;
  int64_t default_block_size = 0;

  void ConfigureEndPoint(std::string host, int port);
  void ConfigureReplication(int16_t replication);
  void ConfigureUser(std::string user_name);
  void ConfigureBufferSize(int32_t buffer_size);
  void ConfigureBlockSize(int64_t default_block_size);
  void ConfigureKerberosTicketCachePath(std::string path);
  void ConfigureExtraConf(std::string key, std::string val);

  bool Equals(const HdfsOptions& other) const;

  static Result<HdfsOptions> FromUri(const ::arrow::internal::Uri& uri);
  static Result<HdfsOptions> FromUri(const std::string& uri);
};

/// HDFS-backed FileSystem implementation.
///
/// implementation notes:
/// - This is a wrapper of arrow/io/hdfs, so we can use FileSystem API to handle hdfs.
class ARROW_EXPORT HadoopFileSystem : public FileSystem {
 public:
  ~HadoopFileSystem() override;

  std::string type_name() const override { return "hdfs"; }
  HdfsOptions options() const;
  bool Equals(const FileSystem& other) const override;
  Result<std::string> PathFromUri(const std::string& uri_string) const override;

  /// \cond FALSE
  using FileSystem::GetFileInfo;
  /// \endcond
  Result<FileInfo> GetFileInfo(const std::string& path) override;
  Result<std::vector<FileInfo>> GetFileInfo(const FileSelector& select) override;

  Status CreateDir(const std::string& path, bool recursive = true) override;

  Status DeleteDir(const std::string& path) override;

  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;

  Status DeleteRootDirContents() override;

  Status DeleteFile(const std::string& path) override;

  Status Move(const std::string& src, const std::string& dest) override;

  Status CopyFile(const std::string& src, const std::string& dest) override;

  Result<std::shared_ptr<io::InputStream>> OpenInputStream(
      const std::string& path) override;
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const std::string& path) override;
  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;

  /// Create a HdfsFileSystem instance from the given options.
  static Result<std::shared_ptr<HadoopFileSystem>> Make(
      const HdfsOptions& options, const io::IOContext& = io::default_io_context());

 protected:
  HadoopFileSystem(const HdfsOptions& options, const io::IOContext&);

  class Impl;
  std::unique_ptr<Impl> impl_;
};

}  // namespace fs
}  // namespace arrow
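
A sketch of configuring HdfsOptions and creating a HadoopFileSystem with Make(), assuming a reachable HDFS cluster; the namenode host, port and user name are placeholders:

#include <arrow/filesystem/hdfs.h>
#include <iostream>

int main() {
  arrow::fs::HdfsOptions options;
  options.ConfigureEndPoint("namenode.example.com", 8020);  // placeholder host/port
  options.ConfigureUser("analytics");                       // placeholder user
  options.ConfigureReplication(3);

  auto maybe_fs = arrow::fs::HadoopFileSystem::Make(options);
  if (!maybe_fs.ok()) {
    std::cerr << maybe_fs.status().ToString() << std::endl;
    return 1;
  }
  std::shared_ptr<arrow::fs::HadoopFileSystem> fs = *maybe_fs;
  std::cout << fs->type_name() << std::endl;  // prints "hdfs"
  return 0;
}
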
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/localfs.h
ADDED
@@ -0,0 +1,126 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <string>
#include <vector>

#include "arrow/filesystem/filesystem.h"

namespace arrow {
namespace internal {

class Uri;

}

namespace fs {

/// Options for the LocalFileSystem implementation.
struct ARROW_EXPORT LocalFileSystemOptions {
  static constexpr int32_t kDefaultDirectoryReadahead = 16;
  static constexpr int32_t kDefaultFileInfoBatchSize = 1000;

  /// Whether OpenInputStream and OpenInputFile return a mmap'ed file,
  /// or a regular one.
  bool use_mmap = false;

  /// Options related to `GetFileInfoGenerator` interface.

  /// EXPERIMENTAL: The maximum number of directories processed in parallel
  /// by `GetFileInfoGenerator`.
  int32_t directory_readahead = kDefaultDirectoryReadahead;

  /// EXPERIMENTAL: The maximum number of entries aggregated into each
  /// FileInfoVector chunk by `GetFileInfoGenerator`.
  ///
  /// Since each FileInfo entry needs a separate `stat` system call, a
  /// directory with a very large number of files may take a lot of time to
  /// process entirely. By generating a FileInfoVector after this chunk
  /// size is reached, we ensure FileInfo entries can start being consumed
  /// from the FileInfoGenerator with less initial latency.
  int32_t file_info_batch_size = kDefaultFileInfoBatchSize;

  /// \brief Initialize with defaults
  static LocalFileSystemOptions Defaults();

  bool Equals(const LocalFileSystemOptions& other) const;

  static Result<LocalFileSystemOptions> FromUri(const ::arrow::internal::Uri& uri,
                                                std::string* out_path);
};

/// \brief A FileSystem implementation accessing files on the local machine.
///
/// This class handles only `/`-separated paths. If desired, conversion
/// from Windows backslash-separated paths should be done by the caller.
/// Details such as symlinks are abstracted away (symlinks are always
/// followed, except when deleting an entry).
class ARROW_EXPORT LocalFileSystem : public FileSystem {
 public:
  explicit LocalFileSystem(const io::IOContext& = io::default_io_context());
  explicit LocalFileSystem(const LocalFileSystemOptions&,
                           const io::IOContext& = io::default_io_context());
  ~LocalFileSystem() override;

  std::string type_name() const override { return "local"; }

  Result<std::string> NormalizePath(std::string path) override;
  Result<std::string> PathFromUri(const std::string& uri_string) const override;

  bool Equals(const FileSystem& other) const override;

  LocalFileSystemOptions options() const { return options_; }

  /// \cond FALSE
  using FileSystem::GetFileInfo;
  /// \endcond
  Result<FileInfo> GetFileInfo(const std::string& path) override;
  Result<std::vector<FileInfo>> GetFileInfo(const FileSelector& select) override;
  FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override;

  Status CreateDir(const std::string& path, bool recursive = true) override;

  Status DeleteDir(const std::string& path) override;
  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
  Status DeleteRootDirContents() override;

  Status DeleteFile(const std::string& path) override;

  Status Move(const std::string& src, const std::string& dest) override;

  Status CopyFile(const std::string& src, const std::string& dest) override;

  Result<std::shared_ptr<io::InputStream>> OpenInputStream(
      const std::string& path) override;
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const std::string& path) override;
  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;

 protected:
  LocalFileSystemOptions options_;
};

}  // namespace fs
}  // namespace arrow
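
A short sketch using LocalFileSystemOptions and FileSelector-based listing (the directory is a placeholder; FileSelector comes from filesystem.h):

#include <arrow/filesystem/localfs.h>
#include <iostream>

int main() {
  arrow::fs::LocalFileSystemOptions options = arrow::fs::LocalFileSystemOptions::Defaults();
  options.use_mmap = true;  // memory-map files opened for reading
  arrow::fs::LocalFileSystem fs(options);

  arrow::fs::FileSelector selector;
  selector.base_dir = "/tmp";  // placeholder directory
  selector.recursive = false;

  auto infos = fs.GetFileInfo(selector);
  if (infos.ok()) {
    for (const auto& info : *infos) {
      std::cout << info.path() << std::endl;
    }
  } else {
    std::cerr << infos.status().ToString() << std::endl;
  }
  return 0;
}
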
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/mockfs.h
ADDED
@@ -0,0 +1,133 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <iosfwd>
#include <memory>
#include <string>
#include <string_view>
#include <vector>

#include "arrow/filesystem/filesystem.h"
#include "arrow/util/windows_fixup.h"

namespace arrow {
namespace fs {
namespace internal {

struct MockDirInfo {
  std::string full_path;
  TimePoint mtime;

  bool operator==(const MockDirInfo& other) const {
    return mtime == other.mtime && full_path == other.full_path;
  }

  ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream&, const MockDirInfo&);
};

struct MockFileInfo {
  std::string full_path;
  TimePoint mtime;
  std::string_view data;

  bool operator==(const MockFileInfo& other) const {
    return mtime == other.mtime && full_path == other.full_path && data == other.data;
  }

  ARROW_FRIEND_EXPORT friend std::ostream& operator<<(std::ostream&, const MockFileInfo&);
};

/// A mock FileSystem implementation that holds its contents in memory.
///
/// Useful for validating the FileSystem API, writing a conformance suite,
/// and bootstrapping FileSystem-based APIs.
class ARROW_EXPORT MockFileSystem : public FileSystem {
 public:
  explicit MockFileSystem(TimePoint current_time,
                          const io::IOContext& = io::default_io_context());
  ~MockFileSystem() override;

  std::string type_name() const override { return "mock"; }

  bool Equals(const FileSystem& other) const override;
  Result<std::string> PathFromUri(const std::string& uri_string) const override;

  // XXX It's not very practical to have to explicitly declare inheritance
  // of default overrides.
  using FileSystem::GetFileInfo;
  Result<FileInfo> GetFileInfo(const std::string& path) override;
  Result<std::vector<FileInfo>> GetFileInfo(const FileSelector& select) override;

  Status CreateDir(const std::string& path, bool recursive = true) override;

  Status DeleteDir(const std::string& path) override;
  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
  Status DeleteRootDirContents() override;

  Status DeleteFile(const std::string& path) override;

  Status Move(const std::string& src, const std::string& dest) override;

  Status CopyFile(const std::string& src, const std::string& dest) override;

  Result<std::shared_ptr<io::InputStream>> OpenInputStream(
      const std::string& path) override;
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const std::string& path) override;
  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;

  // Contents-dumping helpers to ease testing.
  // Output is lexicographically-ordered by full path.
  std::vector<MockDirInfo> AllDirs();
  std::vector<MockFileInfo> AllFiles();

  // Create a file with content from a string.
  Status CreateFile(const std::string& path, std::string_view content,
                    bool recursive = true);

  // Create a MockFileSystem out of (empty) FileInfo. The content of every
  // file is empty and of size 0. All directories will be created recursively.
  static Result<std::shared_ptr<FileSystem>> Make(TimePoint current_time,
                                                  const std::vector<FileInfo>& infos);

  class Impl;

 protected:
  std::unique_ptr<Impl> impl_;
};

class ARROW_EXPORT MockAsyncFileSystem : public MockFileSystem {
 public:
  explicit MockAsyncFileSystem(TimePoint current_time,
                               const io::IOContext& io_context = io::default_io_context())
      : MockFileSystem(current_time, io_context) {
    default_async_is_sync_ = false;
  }

  FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override;
};

}  // namespace internal
}  // namespace fs
}  // namespace arrow
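
A small sketch of the in-memory MockFileSystem declared above, which is handy for tests; the file path and contents are arbitrary, and the fixed TimePoint is only for reproducibility:

#include <arrow/filesystem/mockfs.h>
#include <iostream>

int main() {
  arrow::fs::internal::MockFileSystem fs(arrow::fs::TimePoint{});

  // Create a file (parent directories are created because recursive=true).
  arrow::Status st = fs.CreateFile("dir/data.txt", "hello", /*recursive=*/true);
  if (!st.ok()) {
    std::cerr << st.ToString() << std::endl;
    return 1;
  }

  // Dump the in-memory contents.
  for (const auto& file : fs.AllFiles()) {
    std::cout << file.full_path << " (" << file.data.size() << " bytes)" << std::endl;
  }
  return 0;
}
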
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/path_util.h
ADDED
@@ -0,0 +1,174 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>

#include "arrow/type_fwd.h"

namespace arrow {
namespace fs {
namespace internal {

constexpr char kSep = '/';

// Computations on abstract paths (not local paths with system-dependent behaviour).
// Abstract paths are typically used in URIs.

// Split an abstract path into its individual components.
ARROW_EXPORT
std::vector<std::string> SplitAbstractPath(const std::string& path, char sep = kSep);

// Slice the individual components of an abstract path and combine them
//
// If offset or length are negative then an empty string is returned
// If offset is >= the number of components then an empty string is returned
// If offset + length is >= the number of components then length is truncated
ARROW_EXPORT
std::string SliceAbstractPath(const std::string& path, int offset, int length,
                              char sep = kSep);

// Return the extension of the file
ARROW_EXPORT std::string GetAbstractPathExtension(const std::string& s);

// Return the depth (number of components) of an abstract path
//
// Trailing slashes do not count towards depth
// Leading slashes do not count towards depth
//
// The root path ("/") has depth 0
ARROW_EXPORT int GetAbstractPathDepth(std::string_view path);

// Return the parent directory and basename of an abstract path. Both values may be
// empty.
ARROW_EXPORT
std::pair<std::string, std::string> GetAbstractPathParent(const std::string& s);

// Validate the components of an abstract path.
ARROW_EXPORT
Status ValidateAbstractPathParts(const std::vector<std::string>& parts);

// Append a non-empty stem to an abstract path.
ARROW_EXPORT
std::string ConcatAbstractPath(std::string_view base, std::string_view stem);

// Make path relative to base, if it starts with base. Otherwise error out.
ARROW_EXPORT
Result<std::string> MakeAbstractPathRelative(const std::string& base,
                                             const std::string& path);

ARROW_EXPORT
std::string EnsureLeadingSlash(std::string_view s);

ARROW_EXPORT
std::string_view RemoveLeadingSlash(std::string_view s);

ARROW_EXPORT
std::string EnsureTrailingSlash(std::string_view s);

/// \brief remove the forward slash (if any) from the given path
/// \param s the input path
/// \param preserve_root if true, allow a path of just "/" to remain unchanged
ARROW_EXPORT
std::string_view RemoveTrailingSlash(std::string_view s, bool preserve_root = false);

ARROW_EXPORT
Status AssertNoTrailingSlash(std::string_view s);

inline bool HasTrailingSlash(std::string_view s) {
  return !s.empty() && s.back() == kSep;
}

inline bool HasLeadingSlash(std::string_view s) {
  return !s.empty() && s.front() == kSep;
}

ARROW_EXPORT
bool IsAncestorOf(std::string_view ancestor, std::string_view descendant);

ARROW_EXPORT
std::optional<std::string_view> RemoveAncestor(std::string_view ancestor,
                                               std::string_view descendant);

/// Return a vector of ancestors between a base path and a descendant.
/// For example,
///
/// AncestorsFromBasePath("a/b", "a/b/c/d/e") -> ["a/b/c", "a/b/c/d"]
ARROW_EXPORT
std::vector<std::string> AncestorsFromBasePath(std::string_view base_path,
                                               std::string_view descendant);

/// Given a vector of paths of directories which must be created, produce the minimal
/// subset for passing to CreateDir(recursive=true) by removing redundant parent
/// directories
ARROW_EXPORT
std::vector<std::string> MinimalCreateDirSet(std::vector<std::string> dirs);

// Join the components of an abstract path.
template <class StringIt>
std::string JoinAbstractPath(StringIt it, StringIt end, char sep = kSep) {
  std::string path;
  for (; it != end; ++it) {
    if (it->empty()) continue;

    if (!path.empty()) {
      path += sep;
    }
    path += *it;
  }
  return path;
}

template <class StringRange>
std::string JoinAbstractPath(const StringRange& range, char sep = kSep) {
  return JoinAbstractPath(range.begin(), range.end(), sep);
}

/// Convert slashes to backslashes, on all platforms. Mostly useful for testing.
ARROW_EXPORT
std::string ToBackslashes(std::string_view s);

/// Ensure a local path is abstract, by converting backslashes to regular slashes
/// on Windows. Return the path unchanged on other systems.
ARROW_EXPORT
std::string ToSlashes(std::string_view s);

ARROW_EXPORT
bool IsEmptyPath(std::string_view s);

ARROW_EXPORT
bool IsLikelyUri(std::string_view s);

class ARROW_EXPORT Globber {
 public:
  ~Globber();
  explicit Globber(std::string pattern);
  bool Matches(const std::string& path);

 protected:
  struct Impl;
  std::unique_ptr<Impl> impl_;
};

}  // namespace internal
}  // namespace fs
}  // namespace arrow
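
The path helpers above are plain functions in arrow::fs::internal; a brief sketch of how they compose (the path strings are arbitrary and the values shown in comments are the expected results):

#include <arrow/filesystem/path_util.h>
#include <iostream>

int main() {
  namespace pu = arrow::fs::internal;

  std::vector<std::string> parts = pu::SplitAbstractPath("bucket/dir/file.parquet");
  // Re-join the components; empty components are skipped by JoinAbstractPath.
  std::cout << pu::JoinAbstractPath(parts) << std::endl;  // bucket/dir/file.parquet

  std::cout << pu::GetAbstractPathExtension("dir/file.parquet") << std::endl;  // parquet
  std::cout << pu::ConcatAbstractPath("bucket/dir", "file.parquet") << std::endl;
  std::cout << std::boolalpha
            << pu::IsAncestorOf("bucket", "bucket/dir/file.parquet")  // true
            << std::endl;
  return 0;
}
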
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3_test_util.h
ADDED
@@ -0,0 +1,101 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <string>
#include <utility>

#include <gtest/gtest.h>

#include "arrow/filesystem/s3fs.h"
#include "arrow/status.h"
#include "arrow/testing/gtest_util.h"
#include "arrow/testing/util.h"
#include "arrow/util/checked_cast.h"
#include "arrow/util/macros.h"

namespace arrow {
namespace fs {

// A minio test server, managed as a child process

class MinioTestServer {
 public:
  MinioTestServer();
  ~MinioTestServer();

  Status Start();

  Status Stop();

  std::string connect_string() const;

  std::string access_key() const;

  std::string secret_key() const;

 private:
  struct Impl;
  std::unique_ptr<Impl> impl_;
};

// A Minio "environment" that spawns Minio processes in advance, such as
// to hide process launch latencies during testing.

class MinioTestEnvironment : public ::testing::Environment {
 public:
  MinioTestEnvironment();
  ~MinioTestEnvironment();

  void SetUp() override;

  Result<std::shared_ptr<MinioTestServer>> GetOneServer();

 protected:
  struct Impl;
  std::unique_ptr<Impl> impl_;
};

// A global test "environment", to ensure that the S3 API is initialized before
// running unit tests.

class S3Environment : public ::testing::Environment {
 public:
  // We set this environment variable to speed up tests by ensuring
  // DefaultAWSCredentialsProviderChain does not query (inaccessible)
  // EC2 metadata endpoint.
  // This must be done before spawning any Minio child process to avoid any race
  // condition accessing environment variables.
  S3Environment() : ec2_metadata_disabled_guard_("AWS_EC2_METADATA_DISABLED", "true") {}

  void SetUp() override {
    // Change this to increase logging during tests
    S3GlobalOptions options;
    options.log_level = S3LogLevel::Fatal;
    ASSERT_OK(InitializeS3(options));
  }

  void TearDown() override { ASSERT_OK(FinalizeS3()); }

 private:
  EnvVarGuard ec2_metadata_disabled_guard_;
};

}  // namespace fs
}  // namespace arrow
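
A sketch of how a test binary might register these gtest environments; this mirrors standard GoogleTest usage (AddGlobalTestEnvironment takes ownership), and whether a given test suite wires them up exactly this way is an assumption:

#include <gtest/gtest.h>

#include <arrow/filesystem/s3_test_util.h>

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // Initialize the S3 API once, and pre-spawn Minio servers for the tests.
  ::testing::AddGlobalTestEnvironment(new arrow::fs::S3Environment());
  ::testing::AddGlobalTestEnvironment(new arrow::fs::MinioTestEnvironment());
  return RUN_ALL_TESTS();
}
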
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/s3fs.h
ADDED
@@ -0,0 +1,396 @@
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
#pragma once
|
19 |
+
|
20 |
+
#include <memory>
|
21 |
+
#include <string>
|
22 |
+
#include <vector>
|
23 |
+
|
24 |
+
#include "arrow/filesystem/filesystem.h"
|
25 |
+
#include "arrow/util/macros.h"
|
26 |
+
#include "arrow/util/uri.h"
|
27 |
+
|
28 |
+
namespace Aws {
|
29 |
+
namespace Auth {
|
30 |
+
|
31 |
+
class AWSCredentialsProvider;
|
32 |
+
class STSAssumeRoleCredentialsProvider;
|
33 |
+
|
34 |
+
} // namespace Auth
|
35 |
+
namespace STS {
|
36 |
+
class STSClient;
|
37 |
+
}
|
38 |
+
} // namespace Aws
|
39 |
+
|
40 |
+
namespace arrow {
|
41 |
+
namespace fs {
|
42 |
+
|
43 |
+
/// Options for using a proxy for S3
|
44 |
+
struct ARROW_EXPORT S3ProxyOptions {
|
45 |
+
std::string scheme;
|
46 |
+
std::string host;
|
47 |
+
int port = -1;
|
48 |
+
std::string username;
|
49 |
+
std::string password;
|
50 |
+
|
51 |
+
/// Initialize from URI such as http://username:password@host:port
|
52 |
+
/// or http://host:port
|
53 |
+
static Result<S3ProxyOptions> FromUri(const std::string& uri);
|
54 |
+
static Result<S3ProxyOptions> FromUri(const ::arrow::internal::Uri& uri);
|
55 |
+
|
56 |
+
bool Equals(const S3ProxyOptions& other) const;
|
57 |
+
};
|
58 |
+
|
59 |
+
enum class S3CredentialsKind : int8_t {
|
60 |
+
/// Anonymous access (no credentials used)
|
61 |
+
Anonymous,
|
62 |
+
/// Use default AWS credentials, configured through environment variables
|
63 |
+
Default,
|
64 |
+
/// Use explicitly-provided access key pair
|
65 |
+
Explicit,
|
66 |
+
/// Assume role through a role ARN
|
67 |
+
Role,
|
68 |
+
/// Use web identity token to assume role, configured through environment variables
|
69 |
+
WebIdentity
|
70 |
+
};
|
71 |
+
|
72 |
+
/// Pure virtual class for describing custom S3 retry strategies
|
73 |
+
class ARROW_EXPORT S3RetryStrategy {
|
74 |
+
public:
|
75 |
+
virtual ~S3RetryStrategy() = default;
|
76 |
+
|
77 |
+
/// Simple struct where each field corresponds to a field in Aws::Client::AWSError
|
78 |
+
struct AWSErrorDetail {
|
79 |
+
/// Corresponds to AWSError::GetErrorType()
|
80 |
+
int error_type;
|
81 |
+
/// Corresponds to AWSError::GetMessage()
|
82 |
+
std::string message;
|
83 |
+
/// Corresponds to AWSError::GetExceptionName()
|
84 |
+
std::string exception_name;
|
85 |
+
/// Corresponds to AWSError::ShouldRetry()
|
86 |
+
bool should_retry;
|
87 |
+
};
|
88 |
+
/// Returns true if the S3 request resulting in the provided error should be retried.
|
89 |
+
virtual bool ShouldRetry(const AWSErrorDetail& error, int64_t attempted_retries) = 0;
|
90 |
+
/// Returns the time in milliseconds the S3 client should sleep for until retrying.
|
91 |
+
virtual int64_t CalculateDelayBeforeNextRetry(const AWSErrorDetail& error,
|
92 |
+
int64_t attempted_retries) = 0;
|
93 |
+
/// Returns a stock AWS Default retry strategy.
|
94 |
+
static std::shared_ptr<S3RetryStrategy> GetAwsDefaultRetryStrategy(
|
95 |
+
int64_t max_attempts);
|
96 |
+
/// Returns a stock AWS Standard retry strategy.
|
97 |
+
static std::shared_ptr<S3RetryStrategy> GetAwsStandardRetryStrategy(
|
98 |
+
int64_t max_attempts);
|
99 |
+
};

/// Options for the S3FileSystem implementation.
struct ARROW_EXPORT S3Options {
  /// \brief AWS region to connect to.
  ///
  /// If unset, the AWS SDK will choose a default value. The exact algorithm
  /// depends on the SDK version. Before 1.8, the default is hardcoded
  /// to "us-east-1". Since 1.8, several heuristics are used to determine
  /// the region (environment variables, configuration profile, EC2 metadata
  /// server).
  std::string region;

  /// \brief Socket connection timeout, in seconds
  ///
  /// If negative, the AWS SDK default value is used (typically 1 second).
  double connect_timeout = -1;

  /// \brief Socket read timeout on Windows and macOS, in seconds
  ///
  /// If negative, the AWS SDK default value is used (typically 3 seconds).
  /// This option is ignored on non-Windows, non-macOS systems.
  double request_timeout = -1;

  /// If non-empty, override region with a connect string such as "localhost:9000"
  // XXX perhaps instead take a URL like "http://localhost:9000"?
  std::string endpoint_override;
  /// S3 connection transport, default "https"
  std::string scheme = "https";

  /// ARN of role to assume
  std::string role_arn;
  /// Optional identifier for an assumed role session.
  std::string session_name;
  /// Optional external identifier to pass to STS when assuming a role
  std::string external_id;
  /// Frequency (in seconds) to refresh temporary credentials from assumed role
  int load_frequency = 900;

  /// If connection is through a proxy, set options here
  S3ProxyOptions proxy_options;

  /// AWS credentials provider
  std::shared_ptr<Aws::Auth::AWSCredentialsProvider> credentials_provider;

  /// Type of credentials being used. Set along with credentials_provider.
  S3CredentialsKind credentials_kind = S3CredentialsKind::Default;

  /// Whether to use virtual addressing of buckets
  ///
  /// If true, then virtual addressing is always enabled.
  /// If false, then virtual addressing is only enabled if `endpoint_override` is empty.
  ///
  /// This can be used for non-AWS backends that only support virtual hosted-style access.
  bool force_virtual_addressing = false;

  /// Whether OutputStream writes will be issued in the background, without blocking.
  bool background_writes = true;

  /// Whether to allow creation of buckets
  ///
  /// When S3FileSystem creates new buckets, it does not pass any non-default settings.
  /// In AWS S3, the bucket and all objects will not be publicly visible, and there
  /// will be no bucket policies and no resource tags. To have more control over how
  /// buckets are created, use a different API to create them.
  bool allow_bucket_creation = false;

  /// Whether to allow deletion of buckets
  bool allow_bucket_deletion = false;

  /// \brief Default metadata for OpenOutputStream.
  ///
  /// This will be ignored if non-empty metadata is passed to OpenOutputStream.
  std::shared_ptr<const KeyValueMetadata> default_metadata;

  /// Optional retry strategy to determine which error types should be retried, and the
  /// delay between retries.
  std::shared_ptr<S3RetryStrategy> retry_strategy;

  S3Options();

  /// Configure with the default AWS credentials provider chain.
  void ConfigureDefaultCredentials();

  /// Configure with anonymous credentials. This will only let you access public buckets.
  void ConfigureAnonymousCredentials();

  /// Configure with explicit access and secret key.
  void ConfigureAccessKey(const std::string& access_key, const std::string& secret_key,
                          const std::string& session_token = "");

  /// Configure with credentials from an assumed role.
  void ConfigureAssumeRoleCredentials(
      const std::string& role_arn, const std::string& session_name = "",
      const std::string& external_id = "", int load_frequency = 900,
      const std::shared_ptr<Aws::STS::STSClient>& stsClient = NULLPTR);

  /// Configure with credentials from role assumed using a web identity token
  void ConfigureAssumeRoleWithWebIdentityCredentials();

  std::string GetAccessKey() const;
  std::string GetSecretKey() const;
  std::string GetSessionToken() const;

  bool Equals(const S3Options& other) const;

  /// \brief Initialize with default credentials provider chain
  ///
  /// This is recommended if you use the standard AWS environment variables
  /// and/or configuration file.
  static S3Options Defaults();

  /// \brief Initialize with anonymous credentials.
  ///
  /// This will only let you access public buckets.
  static S3Options Anonymous();

  /// \brief Initialize with explicit access and secret key.
  ///
  /// Optionally, a session token may also be provided for temporary credentials
  /// (from STS).
  static S3Options FromAccessKey(const std::string& access_key,
                                 const std::string& secret_key,
                                 const std::string& session_token = "");

  /// \brief Initialize from an assumed role.
  static S3Options FromAssumeRole(
      const std::string& role_arn, const std::string& session_name = "",
      const std::string& external_id = "", int load_frequency = 900,
      const std::shared_ptr<Aws::STS::STSClient>& stsClient = NULLPTR);

  /// \brief Initialize from an assumed role with web-identity.
  /// Uses the AWS SDK which uses environment variables to
  /// generate temporary credentials.
  static S3Options FromAssumeRoleWithWebIdentity();

  static Result<S3Options> FromUri(const ::arrow::internal::Uri& uri,
                                   std::string* out_path = NULLPTR);
  static Result<S3Options> FromUri(const std::string& uri,
                                   std::string* out_path = NULLPTR);
};
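As a quick illustration of how these options are typically assembled, here is a hedged sketch using the factories and fields declared above; the access key, secret key and region values are placeholders.

#include "arrow/filesystem/s3fs.h"

arrow::fs::S3Options MakeExampleOptions() {
  // Explicit credentials; a session token could be passed as the third argument.
  auto options = arrow::fs::S3Options::FromAccessKey("my-access-key", "my-secret-key");
  options.region = "us-west-2";       // placeholder region
  options.scheme = "https";           // default transport
  options.background_writes = true;   // buffered, non-blocking writes
  return options;
}

S3Options::Defaults() or S3Options::FromUri() would be used instead when the standard AWS configuration chain or an "s3://..." URI is the source of truth.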

/// S3-backed FileSystem implementation.
///
/// Some implementation notes:
/// - buckets are special and the operations available on them may be limited
///   or more expensive than desired.
class ARROW_EXPORT S3FileSystem : public FileSystem {
 public:
  ~S3FileSystem() override;

  std::string type_name() const override { return "s3"; }

  /// Return the original S3 options when constructing the filesystem
  S3Options options() const;
  /// Return the actual region this filesystem connects to
  std::string region() const;

  bool Equals(const FileSystem& other) const override;
  Result<std::string> PathFromUri(const std::string& uri_string) const override;

  /// \cond FALSE
  using FileSystem::GetFileInfo;
  /// \endcond
  Result<FileInfo> GetFileInfo(const std::string& path) override;
  Result<std::vector<FileInfo>> GetFileInfo(const FileSelector& select) override;

  FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override;

  Status CreateDir(const std::string& path, bool recursive = true) override;

  Status DeleteDir(const std::string& path) override;
  Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
  Future<> DeleteDirContentsAsync(const std::string& path,
                                  bool missing_dir_ok = false) override;
  Status DeleteRootDirContents() override;

  Status DeleteFile(const std::string& path) override;

  Status Move(const std::string& src, const std::string& dest) override;

  Status CopyFile(const std::string& src, const std::string& dest) override;

  /// Create a sequential input stream for reading from a S3 object.
  ///
  /// NOTE: Reads from the stream will be synchronous and unbuffered.
  /// You may want to wrap the stream in a BufferedInputStream or use
  /// a custom readahead strategy to avoid idle waits.
  Result<std::shared_ptr<io::InputStream>> OpenInputStream(
      const std::string& path) override;
  /// Create a sequential input stream for reading from a S3 object.
  ///
  /// This override avoids a HEAD request by assuming the FileInfo
  /// contains correct information.
  Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info) override;

  /// Create a random access file for reading from a S3 object.
  ///
  /// See OpenInputStream for performance notes.
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const std::string& path) override;
  /// Create a random access file for reading from a S3 object.
  ///
  /// This override avoids a HEAD request by assuming the FileInfo
  /// contains correct information.
  Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
      const FileInfo& info) override;

  /// Create a sequential output stream for writing to a S3 object.
  ///
  /// NOTE: Writes to the stream will be buffered. Depending on
  /// S3Options.background_writes, they can be synchronous or not.
  /// It is recommended to enable background_writes unless you prefer
  /// implementing your own background execution strategy.
  Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;

  Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
      const std::string& path,
      const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;

  /// Create a S3FileSystem instance from the given options.
  static Result<std::shared_ptr<S3FileSystem>> Make(
      const S3Options& options, const io::IOContext& = io::default_io_context());

 protected:
  explicit S3FileSystem(const S3Options& options, const io::IOContext&);

  class Impl;
  std::shared_ptr<Impl> impl_;
};
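A minimal read path built on the class above might look like the following sketch; it assumes InitializeS3() has already been called (see the functions declared below) and uses a placeholder "bucket/key" path.

#include <iostream>
#include "arrow/buffer.h"
#include "arrow/filesystem/s3fs.h"
#include "arrow/io/interfaces.h"
#include "arrow/result.h"

arrow::Status ReadObject(const arrow::fs::S3Options& options) {
  // Build a filesystem from the given options.
  ARROW_ASSIGN_OR_RAISE(auto fs, arrow::fs::S3FileSystem::Make(options));
  // Paths are of the form "bucket/key"; "my-bucket/data.bin" is a placeholder.
  ARROW_ASSIGN_OR_RAISE(auto input, fs->OpenInputStream("my-bucket/data.bin"));
  // Read up to 1024 bytes from the start of the object.
  ARROW_ASSIGN_OR_RAISE(auto buffer, input->Read(1024));
  std::cout << "read " << buffer->size() << " bytes" << std::endl;
  return arrow::Status::OK();
}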

enum class S3LogLevel : int8_t { Off, Fatal, Error, Warn, Info, Debug, Trace };

struct ARROW_EXPORT S3GlobalOptions {
  S3LogLevel log_level;
  /// The number of threads to configure when creating AWS' I/O event loop
  ///
  /// Defaults to 1 as recommended by AWS' doc when the # of connections is
  /// expected to be, at most, in the hundreds
  ///
  /// For more details see Aws::Crt::Io::EventLoopGroup
  int num_event_loop_threads = 1;

  /// \brief Initialize with default options
  ///
  /// For log_level, this method first tries to extract a suitable value from the
  /// environment variable ARROW_S3_LOG_LEVEL.
  static S3GlobalOptions Defaults();
};

/// \brief Initialize the S3 APIs with the specified set of options.
///
/// It is required to call this function at least once before using S3FileSystem.
///
/// Once this function is called you MUST call FinalizeS3 before the end of the
/// application in order to avoid a segmentation fault at shutdown.
ARROW_EXPORT
Status InitializeS3(const S3GlobalOptions& options);

/// \brief Ensure the S3 APIs are initialized, but only if not already done.
///
/// If necessary, this will call InitializeS3() with some default options.
ARROW_EXPORT
Status EnsureS3Initialized();

/// Whether S3 was initialized, and not finalized.
ARROW_EXPORT
bool IsS3Initialized();

/// Whether S3 was finalized.
ARROW_EXPORT
bool IsS3Finalized();

/// \brief Shutdown the S3 APIs.
///
/// This can wait for some S3 concurrent calls to finish so as to avoid
/// race conditions.
/// After this function has been called, all S3 calls will fail with an error.
///
/// Calls to InitializeS3() and FinalizeS3() should be serialized by the
/// application (this also applies to EnsureS3Initialized() and
/// EnsureS3Finalized()).
ARROW_EXPORT
Status FinalizeS3();

/// \brief Ensure the S3 APIs are shutdown, but only if not already done.
///
/// If necessary, this will call FinalizeS3().
ARROW_EXPORT
Status EnsureS3Finalized();

ARROW_EXPORT
Result<std::string> ResolveS3BucketRegion(const std::string& bucket);

} // namespace fs
} // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/test_util.h
ADDED
@@ -0,0 +1,252 @@
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
#pragma once
|
19 |
+
|
20 |
+
#include <chrono>
|
21 |
+
#include <memory>
|
22 |
+
#include <string>
|
23 |
+
#include <vector>
|
24 |
+
|
25 |
+
#include "arrow/filesystem/filesystem.h"
|
26 |
+
#include "arrow/filesystem/mockfs.h"
|
27 |
+
#include "arrow/testing/visibility.h"
|
28 |
+
#include "arrow/util/counting_semaphore.h"
|
29 |
+
|
30 |
+
namespace arrow {
|
31 |
+
namespace fs {
|
32 |
+
|
33 |
+
static constexpr double kTimeSlack = 2.0; // In seconds
|
34 |
+
|
35 |
+
static inline FileInfo File(std::string path) {
|
36 |
+
return FileInfo(std::move(path), FileType::File);
|
37 |
+
}
|
38 |
+
|
39 |
+
static inline FileInfo Dir(std::string path) {
|
40 |
+
return FileInfo(std::move(path), FileType::Directory);
|
41 |
+
}
|
42 |
+
|
43 |
+
// A subclass of MockFileSystem that blocks operations until an unlock method is
|
44 |
+
// called.
|
45 |
+
//
|
46 |
+
// This is intended for testing fine-grained ordering of filesystem operations.
|
47 |
+
//
|
48 |
+
// N.B. Only OpenOutputStream supports gating at the moment but this is simply because
|
49 |
+
// it is all that has been needed so far. Feel free to add support for more methods
|
50 |
+
// as required.
|
51 |
+
class ARROW_TESTING_EXPORT GatedMockFilesystem : public internal::MockFileSystem {
|
52 |
+
public:
|
53 |
+
GatedMockFilesystem(TimePoint current_time,
|
54 |
+
const io::IOContext& = io::default_io_context());
|
55 |
+
~GatedMockFilesystem() override;
|
56 |
+
|
57 |
+
Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
|
58 |
+
const std::string& path,
|
59 |
+
const std::shared_ptr<const KeyValueMetadata>& metadata = {}) override;
|
60 |
+
|
61 |
+
// Wait until at least num_waiters are waiting on OpenOutputStream
|
62 |
+
Status WaitForOpenOutputStream(uint32_t num_waiters);
|
63 |
+
// Unlock `num_waiters` individual calls to OpenOutputStream
|
64 |
+
Status UnlockOpenOutputStream(uint32_t num_waiters);
|
65 |
+
|
66 |
+
private:
|
67 |
+
util::CountingSemaphore open_output_sem_;
|
68 |
+
};
|
69 |
+
|
70 |
+
ARROW_TESTING_EXPORT
|
71 |
+
void CreateFile(FileSystem* fs, const std::string& path, const std::string& data);
|
72 |
+
|
73 |
+
// Sort a vector of FileInfo by lexicographic path order
|
74 |
+
ARROW_TESTING_EXPORT
|
75 |
+
void SortInfos(FileInfoVector* infos);
|
76 |
+
|
77 |
+
// Create a copy of a FileInfo vector sorted by lexicographic path order
|
78 |
+
ARROW_TESTING_EXPORT
|
79 |
+
FileInfoVector SortedInfos(const FileInfoVector& infos);
|
80 |
+
|
81 |
+
ARROW_TESTING_EXPORT
|
82 |
+
void CollectFileInfoGenerator(FileInfoGenerator gen, FileInfoVector* out_infos);
|
83 |
+
|
84 |
+
ARROW_TESTING_EXPORT
|
85 |
+
void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type);
|
86 |
+
|
87 |
+
ARROW_TESTING_EXPORT
|
88 |
+
void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type,
|
89 |
+
TimePoint mtime);
|
90 |
+
|
91 |
+
ARROW_TESTING_EXPORT
|
92 |
+
void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type,
|
93 |
+
TimePoint mtime, int64_t size);
|
94 |
+
|
95 |
+
ARROW_TESTING_EXPORT
|
96 |
+
void AssertFileInfo(const FileInfo& info, const std::string& path, FileType type,
|
97 |
+
int64_t size);
|
98 |
+
|
99 |
+
ARROW_TESTING_EXPORT
|
100 |
+
void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type);
|
101 |
+
|
102 |
+
ARROW_TESTING_EXPORT
|
103 |
+
void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type,
|
104 |
+
TimePoint mtime);
|
105 |
+
|
106 |
+
ARROW_TESTING_EXPORT
|
107 |
+
void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type,
|
108 |
+
TimePoint mtime, int64_t size);
|
109 |
+
|
110 |
+
ARROW_TESTING_EXPORT
|
111 |
+
void AssertFileInfo(FileSystem* fs, const std::string& path, FileType type, int64_t size);
|
112 |
+
|
113 |
+
ARROW_TESTING_EXPORT
|
114 |
+
void AssertFileContents(FileSystem* fs, const std::string& path,
|
115 |
+
const std::string& expected_data);
|
116 |
+
|
117 |
+
template <typename Duration>
|
118 |
+
void AssertDurationBetween(Duration d, double min_secs, double max_secs) {
|
119 |
+
auto seconds = std::chrono::duration_cast<std::chrono::duration<double>>(d);
|
120 |
+
ASSERT_GE(seconds.count(), min_secs);
|
121 |
+
ASSERT_LE(seconds.count(), max_secs);
|
122 |
+
}
|
123 |
+
|
124 |
+
// Generic tests for FileSystem implementations.
|
125 |
+
// To use this class, subclass both from it and ::testing::Test,
|
126 |
+
// implement GetEmptyFileSystem(), and use GENERIC_FS_TEST_FUNCTIONS()
|
127 |
+
// to define the various tests.
|
128 |
+
class ARROW_TESTING_EXPORT GenericFileSystemTest {
|
129 |
+
public:
|
130 |
+
virtual ~GenericFileSystemTest();
|
131 |
+
|
132 |
+
void TestEmpty();
|
133 |
+
void TestNormalizePath();
|
134 |
+
void TestCreateDir();
|
135 |
+
void TestDeleteDir();
|
136 |
+
void TestDeleteDirContents();
|
137 |
+
void TestDeleteRootDirContents();
|
138 |
+
void TestDeleteFile();
|
139 |
+
void TestDeleteFiles();
|
140 |
+
void TestMoveFile();
|
141 |
+
void TestMoveDir();
|
142 |
+
void TestCopyFile();
|
143 |
+
void TestGetFileInfo();
|
144 |
+
void TestGetFileInfoVector();
|
145 |
+
void TestGetFileInfoSelector();
|
146 |
+
void TestGetFileInfoSelectorWithRecursion();
|
147 |
+
void TestGetFileInfoAsync();
|
148 |
+
void TestGetFileInfoGenerator();
|
149 |
+
void TestOpenOutputStream();
|
150 |
+
void TestOpenAppendStream();
|
151 |
+
void TestOpenInputStream();
|
152 |
+
void TestOpenInputStreamWithFileInfo();
|
153 |
+
void TestOpenInputStreamAsync();
|
154 |
+
void TestOpenInputFile();
|
155 |
+
void TestOpenInputFileWithFileInfo();
|
156 |
+
void TestOpenInputFileAsync();
|
157 |
+
void TestSpecialChars();
|
158 |
+
|
159 |
+
protected:
|
160 |
+
// This function should return the filesystem under test.
|
161 |
+
virtual std::shared_ptr<FileSystem> GetEmptyFileSystem() = 0;
|
162 |
+
|
163 |
+
// Override the following functions to specify deviations from expected
|
164 |
+
// filesystem semantics.
|
165 |
+
// - Whether the filesystem may "implicitly" create intermediate directories
|
166 |
+
virtual bool have_implicit_directories() const { return false; }
|
167 |
+
// - Whether the filesystem may allow writing a file "over" a directory
|
168 |
+
virtual bool allow_write_file_over_dir() const { return false; }
|
169 |
+
// - Whether the filesystem allows reading a directory
|
170 |
+
virtual bool allow_read_dir_as_file() const { return false; }
|
171 |
+
// - Whether the filesystem allows moving a directory
|
172 |
+
virtual bool allow_move_dir() const { return true; }
|
173 |
+
// - Whether the filesystem allows moving a directory "over" a non-empty destination
|
174 |
+
virtual bool allow_move_dir_over_non_empty_dir() const { return false; }
|
175 |
+
// - Whether the filesystem allows appending to a file
|
176 |
+
virtual bool allow_append_to_file() const { return true; }
|
177 |
+
// - Whether the filesystem allows appending to a nonexistent file
|
178 |
+
virtual bool allow_append_to_new_file() const { return true; }
|
179 |
+
// - Whether the filesystem supports directory modification times
|
180 |
+
virtual bool have_directory_mtimes() const { return true; }
|
181 |
+
// - Whether some directory tree deletion tests may fail randomly
|
182 |
+
virtual bool have_flaky_directory_tree_deletion() const { return false; }
|
183 |
+
// - Whether the filesystem stores some metadata alongside files
|
184 |
+
virtual bool have_file_metadata() const { return false; }
|
185 |
+
|
186 |
+
void TestEmpty(FileSystem* fs);
|
187 |
+
void TestNormalizePath(FileSystem* fs);
|
188 |
+
void TestCreateDir(FileSystem* fs);
|
189 |
+
void TestDeleteDir(FileSystem* fs);
|
190 |
+
void TestDeleteDirContents(FileSystem* fs);
|
191 |
+
void TestDeleteRootDirContents(FileSystem* fs);
|
192 |
+
void TestDeleteFile(FileSystem* fs);
|
193 |
+
void TestDeleteFiles(FileSystem* fs);
|
194 |
+
void TestMoveFile(FileSystem* fs);
|
195 |
+
void TestMoveDir(FileSystem* fs);
|
196 |
+
void TestCopyFile(FileSystem* fs);
|
197 |
+
void TestGetFileInfo(FileSystem* fs);
|
198 |
+
void TestGetFileInfoVector(FileSystem* fs);
|
199 |
+
void TestGetFileInfoSelector(FileSystem* fs);
|
200 |
+
void TestGetFileInfoSelectorWithRecursion(FileSystem* fs);
|
201 |
+
void TestGetFileInfoAsync(FileSystem* fs);
|
202 |
+
void TestGetFileInfoGenerator(FileSystem* fs);
|
203 |
+
void TestOpenOutputStream(FileSystem* fs);
|
204 |
+
void TestOpenAppendStream(FileSystem* fs);
|
205 |
+
void TestOpenInputStream(FileSystem* fs);
|
206 |
+
void TestOpenInputStreamWithFileInfo(FileSystem* fs);
|
207 |
+
void TestOpenInputStreamAsync(FileSystem* fs);
|
208 |
+
void TestOpenInputFile(FileSystem* fs);
|
209 |
+
void TestOpenInputFileWithFileInfo(FileSystem* fs);
|
210 |
+
void TestOpenInputFileAsync(FileSystem* fs);
|
211 |
+
void TestSpecialChars(FileSystem* fs);
|
212 |
+
};
|
213 |
+
|
214 |
+
#define GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, NAME) \
|
215 |
+
TEST_MACRO(TEST_CLASS, NAME) { this->Test##NAME(); }
|
216 |
+
|
217 |
+
#define GENERIC_FS_TEST_FUNCTIONS_MACROS(TEST_MACRO, TEST_CLASS) \
|
218 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, Empty) \
|
219 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, NormalizePath) \
|
220 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, CreateDir) \
|
221 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteDir) \
|
222 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteDirContents) \
|
223 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteRootDirContents) \
|
224 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteFile) \
|
225 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, DeleteFiles) \
|
226 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, MoveFile) \
|
227 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, MoveDir) \
|
228 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, CopyFile) \
|
229 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfo) \
|
230 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoVector) \
|
231 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoSelector) \
|
232 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoSelectorWithRecursion) \
|
233 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoAsync) \
|
234 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, GetFileInfoGenerator) \
|
235 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenOutputStream) \
|
236 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenAppendStream) \
|
237 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStream) \
|
238 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStreamWithFileInfo) \
|
239 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputStreamAsync) \
|
240 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFile) \
|
241 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFileWithFileInfo) \
|
242 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, OpenInputFileAsync) \
|
243 |
+
GENERIC_FS_TEST_FUNCTION(TEST_MACRO, TEST_CLASS, SpecialChars)
|
244 |
+
|
245 |
+
#define GENERIC_FS_TEST_FUNCTIONS(TEST_CLASS) \
|
246 |
+
GENERIC_FS_TEST_FUNCTIONS_MACROS(TEST_F, TEST_CLASS)
|
247 |
+
|
248 |
+
#define GENERIC_FS_TYPED_TEST_FUNCTIONS(TEST_CLASS) \
|
249 |
+
GENERIC_FS_TEST_FUNCTIONS_MACROS(TYPED_TEST, TEST_CLASS)
|
250 |
+
|
251 |
+
} // namespace fs
|
252 |
+
} // namespace arrow
|
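The GenericFileSystemTest harness declared in test_util.h above is normally wired up roughly as in the sketch below; the test class name is hypothetical and the MockFileSystem constructor arguments are assumed from mockfs.h.

// Hypothetical test suite: subclass both ::testing::Test and GenericFileSystemTest,
// implement GetEmptyFileSystem(), then expand the generic test bodies.
#include <gtest/gtest.h>
#include "arrow/filesystem/mockfs.h"
#include "arrow/filesystem/test_util.h"

class TestMockFSGeneric : public ::testing::Test,
                          public arrow::fs::GenericFileSystemTest {
 protected:
  std::shared_ptr<arrow::fs::FileSystem> GetEmptyFileSystem() override {
    // TimePoint{} supplies an arbitrary fixed "current time" for the mock filesystem.
    return std::make_shared<arrow::fs::internal::MockFileSystem>(arrow::fs::TimePoint{});
  }
};

// Expands to one TEST_F per generic test (Empty, CreateDir, MoveFile, ...).
GENERIC_FS_TEST_FUNCTIONS(TestMockFSGeneric);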
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/type_fwd.h
ADDED
@@ -0,0 +1,52 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>

namespace arrow {
namespace fs {

/// \brief FileSystem entry type
enum class FileType : int8_t {
  /// Entry is not found
  NotFound,
  /// Entry exists but its type is unknown
  ///
  /// This can designate a special file such as a Unix socket or character
  /// device, or Windows NUL / CON / ...
  Unknown,
  /// Entry is a regular file
  File,
  /// Entry is a directory
  Directory
};

struct FileInfo;

struct FileSelector;

class FileSystem;
class SubTreeFileSystem;
class SlowFileSystem;
class LocalFileSystem;
class S3FileSystem;
class GcsFileSystem;

} // namespace fs
} // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h
ADDED
@@ -0,0 +1,150 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Public API for the "Feather" file format, originally created at
// http://github.com/wesm/feather

#pragma once

#include <cstdint>
#include <memory>
#include <string>
#include <vector>

#include "arrow/ipc/options.h"
#include "arrow/type_fwd.h"
#include "arrow/util/compression.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Schema;
class Status;
class Table;

namespace io {

class OutputStream;
class RandomAccessFile;

} // namespace io

namespace ipc {
namespace feather {

static constexpr const int kFeatherV1Version = 2;
static constexpr const int kFeatherV2Version = 3;

// ----------------------------------------------------------------------
// Metadata accessor classes

/// \class Reader
/// \brief An interface for reading columns from Feather files
class ARROW_EXPORT Reader {
 public:
  virtual ~Reader() = default;

  /// \brief Open a Feather file from a RandomAccessFile interface
  ///
  /// \param[in] source a RandomAccessFile instance
  /// \return the table reader
  static Result<std::shared_ptr<Reader>> Open(
      const std::shared_ptr<io::RandomAccessFile>& source);

  /// \brief Open a Feather file from a RandomAccessFile interface
  /// with IPC Read options
  ///
  /// \param[in] source a RandomAccessFile instance
  /// \param[in] options IPC Read options
  /// \return the table reader
  static Result<std::shared_ptr<Reader>> Open(
      const std::shared_ptr<io::RandomAccessFile>& source, const IpcReadOptions& options);

  /// \brief Return the version number of the Feather file
  virtual int version() const = 0;

  virtual std::shared_ptr<Schema> schema() const = 0;

  /// \brief Read all columns from the file as an arrow::Table.
  ///
  /// \param[out] out the returned table
  /// \return Status
  ///
  /// This function is zero-copy if the file source supports zero-copy reads
  virtual Status Read(std::shared_ptr<Table>* out) = 0;

  /// \brief Read only the specified columns from the file as an arrow::Table.
  ///
  /// \param[in] indices the column indices to read
  /// \param[out] out the returned table
  /// \return Status
  ///
  /// This function is zero-copy if the file source supports zero-copy reads
  virtual Status Read(const std::vector<int>& indices, std::shared_ptr<Table>* out) = 0;

  /// \brief Read only the specified columns from the file as an arrow::Table.
  ///
  /// \param[in] names the column names to read
  /// \param[out] out the returned table
  /// \return Status
  ///
  /// This function is zero-copy if the file source supports zero-copy reads
  virtual Status Read(const std::vector<std::string>& names,
                      std::shared_ptr<Table>* out) = 0;
};

struct ARROW_EXPORT WriteProperties {
  static WriteProperties Defaults();

  static WriteProperties DefaultsV1() {
    WriteProperties props = Defaults();
    props.version = kFeatherV1Version;
    return props;
  }

  /// Feather file version number
  ///
  /// version 2: "Feather V1" Apache Arrow <= 0.16.0
  /// version 3: "Feather V2" Apache Arrow > 0.16.0
  int version = kFeatherV2Version;

  // Parameters for Feather V2 only

  /// Number of rows per intra-file chunk. Use smaller chunksize when you need
  /// faster random row access
  int64_t chunksize = 1LL << 16;

  /// Compression type to use. Only UNCOMPRESSED, LZ4_FRAME, and ZSTD are
  /// supported. The default compression returned by Defaults() is LZ4 if the
  /// project is built with support for it, otherwise
  /// UNCOMPRESSED. UNCOMPRESSED is set as the object default here so that if
  /// WriteProperties::Defaults() is not used, the default constructor for
  /// WriteProperties will work regardless of the options used to build the C++
  /// project.
  Compression::type compression = Compression::UNCOMPRESSED;

  /// Compressor-specific compression level
  int compression_level = ::arrow::util::kUseDefaultCompressionLevel;
};

ARROW_EXPORT
Status WriteTable(const Table& table, io::OutputStream* dst,
                  const WriteProperties& properties = WriteProperties::Defaults());

} // namespace feather
} // namespace ipc
} // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/json_simple.h
ADDED
@@ -0,0 +1,71 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Implement a simple JSON representation format for arrays

#pragma once

#include <memory>
#include <string>
#include <string_view>

#include "arrow/status.h"
#include "arrow/type_fwd.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Array;
class DataType;

namespace ipc {
namespace internal {
namespace json {

ARROW_EXPORT
Result<std::shared_ptr<Array>> ArrayFromJSON(const std::shared_ptr<DataType>&,
                                             const std::string& json);

ARROW_EXPORT
Result<std::shared_ptr<Array>> ArrayFromJSON(const std::shared_ptr<DataType>&,
                                             std::string_view json);

ARROW_EXPORT
Result<std::shared_ptr<Array>> ArrayFromJSON(const std::shared_ptr<DataType>&,
                                             const char* json);

ARROW_EXPORT
Status ChunkedArrayFromJSON(const std::shared_ptr<DataType>& type,
                            const std::vector<std::string>& json_strings,
                            std::shared_ptr<ChunkedArray>* out);

ARROW_EXPORT
Status DictArrayFromJSON(const std::shared_ptr<DataType>&, std::string_view indices_json,
                         std::string_view dictionary_json, std::shared_ptr<Array>* out);

ARROW_EXPORT
Status ScalarFromJSON(const std::shared_ptr<DataType>&, std::string_view json,
                      std::shared_ptr<Scalar>* out);

ARROW_EXPORT
Status DictScalarFromJSON(const std::shared_ptr<DataType>&, std::string_view index_json,
                          std::string_view dictionary_json, std::shared_ptr<Scalar>* out);

} // namespace json
} // namespace internal
} // namespace ipc
} // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/message.h
ADDED
@@ -0,0 +1,565 @@
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
// C++ object model and user API for interprocess schema messaging
|
19 |
+
|
20 |
+
#pragma once
|
21 |
+
|
22 |
+
#include <cstdint>
|
23 |
+
#include <functional>
|
24 |
+
#include <memory>
|
25 |
+
#include <string>
|
26 |
+
#include <utility>
|
27 |
+
|
28 |
+
#include "arrow/io/type_fwd.h"
|
29 |
+
#include "arrow/ipc/type_fwd.h"
|
30 |
+
#include "arrow/result.h"
|
31 |
+
#include "arrow/status.h"
|
32 |
+
#include "arrow/type_fwd.h"
|
33 |
+
#include "arrow/util/macros.h"
|
34 |
+
#include "arrow/util/visibility.h"
|
35 |
+
|
36 |
+
namespace arrow {
|
37 |
+
namespace ipc {
|
38 |
+
|
39 |
+
struct IpcWriteOptions;
|
40 |
+
|
41 |
+
// Read interface classes. We do not fully deserialize the flatbuffers so that
|
42 |
+
// individual fields metadata can be retrieved from very large schema without
|
43 |
+
//
|
44 |
+
|
45 |
+
/// \class Message
|
46 |
+
/// \brief An IPC message including metadata and body
|
47 |
+
class ARROW_EXPORT Message {
|
48 |
+
public:
|
49 |
+
/// \brief Construct message, but do not validate
|
50 |
+
///
|
51 |
+
/// Use at your own risk; Message::Open has more metadata validation
|
52 |
+
Message(std::shared_ptr<Buffer> metadata, std::shared_ptr<Buffer> body);
|
53 |
+
|
54 |
+
~Message();
|
55 |
+
|
56 |
+
/// \brief Create and validate a Message instance from two buffers
|
57 |
+
///
|
58 |
+
/// \param[in] metadata a buffer containing the Flatbuffer metadata
|
59 |
+
/// \param[in] body a buffer containing the message body, which may be null
|
60 |
+
/// \return the created message
|
61 |
+
static Result<std::unique_ptr<Message>> Open(std::shared_ptr<Buffer> metadata,
|
62 |
+
std::shared_ptr<Buffer> body);
|
63 |
+
|
64 |
+
/// \brief Read message body and create Message given Flatbuffer metadata
|
65 |
+
/// \param[in] metadata containing a serialized Message flatbuffer
|
66 |
+
/// \param[in] stream an InputStream
|
67 |
+
/// \return the created Message
|
68 |
+
///
|
69 |
+
/// \note If stream supports zero-copy, this is zero-copy
|
70 |
+
static Result<std::unique_ptr<Message>> ReadFrom(std::shared_ptr<Buffer> metadata,
|
71 |
+
io::InputStream* stream);
|
72 |
+
|
73 |
+
/// \brief Read message body from position in file, and create Message given
|
74 |
+
/// the Flatbuffer metadata
|
75 |
+
/// \param[in] offset the position in the file where the message body starts.
|
76 |
+
/// \param[in] metadata containing a serialized Message flatbuffer
|
77 |
+
/// \param[in] file the seekable file interface to read from
|
78 |
+
/// \return the created Message
|
79 |
+
///
|
80 |
+
/// \note If file supports zero-copy, this is zero-copy
|
81 |
+
static Result<std::unique_ptr<Message>> ReadFrom(const int64_t offset,
|
82 |
+
std::shared_ptr<Buffer> metadata,
|
83 |
+
io::RandomAccessFile* file);
|
84 |
+
|
85 |
+
/// \brief Return true if message type and contents are equal
|
86 |
+
///
|
87 |
+
/// \param other another message
|
88 |
+
/// \return true if contents equal
|
89 |
+
bool Equals(const Message& other) const;
|
90 |
+
|
91 |
+
/// \brief the Message metadata
|
92 |
+
///
|
93 |
+
/// \return buffer
|
94 |
+
std::shared_ptr<Buffer> metadata() const;
|
95 |
+
|
96 |
+
/// \brief Custom metadata serialized in metadata Flatbuffer. Returns nullptr
|
97 |
+
/// when none set
|
98 |
+
const std::shared_ptr<const KeyValueMetadata>& custom_metadata() const;
|
99 |
+
|
100 |
+
/// \brief the Message body, if any
|
101 |
+
///
|
102 |
+
/// \return buffer is null if no body
|
103 |
+
std::shared_ptr<Buffer> body() const;
|
104 |
+
|
105 |
+
/// \brief The expected body length according to the metadata, for
|
106 |
+
/// verification purposes
|
107 |
+
int64_t body_length() const;
|
108 |
+
|
109 |
+
/// \brief The Message type
|
110 |
+
MessageType type() const;
|
111 |
+
|
112 |
+
/// \brief The Message metadata version
|
113 |
+
MetadataVersion metadata_version() const;
|
114 |
+
|
115 |
+
const void* header() const;
|
116 |
+
|
117 |
+
/// \brief Write length-prefixed metadata and body to output stream
|
118 |
+
///
|
119 |
+
/// \param[in] file output stream to write to
|
120 |
+
/// \param[in] options IPC writing options including alignment
|
121 |
+
/// \param[out] output_length the number of bytes written
|
122 |
+
/// \return Status
|
123 |
+
Status SerializeTo(io::OutputStream* file, const IpcWriteOptions& options,
|
124 |
+
int64_t* output_length) const;
|
125 |
+
|
126 |
+
/// \brief Return true if the Message metadata passes Flatbuffer validation
|
127 |
+
bool Verify() const;
|
128 |
+
|
129 |
+
/// \brief Whether a given message type needs a body.
|
130 |
+
static bool HasBody(MessageType type) {
|
131 |
+
return type != MessageType::NONE && type != MessageType::SCHEMA;
|
132 |
+
}
|
133 |
+
|
134 |
+
private:
|
135 |
+
// Hide serialization details from user API
|
136 |
+
class MessageImpl;
|
137 |
+
std::unique_ptr<MessageImpl> impl_;
|
138 |
+
|
139 |
+
ARROW_DISALLOW_COPY_AND_ASSIGN(Message);
|
140 |
+
};
|
141 |
+
|
142 |
+
ARROW_EXPORT std::string FormatMessageType(MessageType type);
|
143 |
+
|
144 |
+
/// \class MessageDecoderListener
|
145 |
+
/// \brief An abstract class to listen events from MessageDecoder.
|
146 |
+
///
|
147 |
+
/// This API is EXPERIMENTAL.
|
148 |
+
///
|
149 |
+
/// \since 0.17.0
|
150 |
+
class ARROW_EXPORT MessageDecoderListener {
|
151 |
+
public:
|
152 |
+
virtual ~MessageDecoderListener() = default;
|
153 |
+
|
154 |
+
/// \brief Called when a message is decoded.
|
155 |
+
///
|
156 |
+
/// MessageDecoder calls this method when it decodes a message. This
|
157 |
+
/// method is called multiple times when the target stream has
|
158 |
+
/// multiple messages.
|
159 |
+
///
|
160 |
+
/// \param[in] message a decoded message
|
161 |
+
/// \return Status
|
162 |
+
virtual Status OnMessageDecoded(std::unique_ptr<Message> message) = 0;
|
163 |
+
|
164 |
+
/// \brief Called when the decoder state is changed to
|
165 |
+
/// MessageDecoder::State::INITIAL.
|
166 |
+
///
|
167 |
+
/// The default implementation just returns arrow::Status::OK().
|
168 |
+
///
|
169 |
+
/// \return Status
|
170 |
+
virtual Status OnInitial();
|
171 |
+
|
172 |
+
/// \brief Called when the decoder state is changed to
|
173 |
+
/// MessageDecoder::State::METADATA_LENGTH.
|
174 |
+
///
|
175 |
+
/// The default implementation just returns arrow::Status::OK().
|
176 |
+
///
|
177 |
+
/// \return Status
|
178 |
+
virtual Status OnMetadataLength();
|
179 |
+
|
180 |
+
/// \brief Called when the decoder state is changed to
|
181 |
+
/// MessageDecoder::State::METADATA.
|
182 |
+
///
|
183 |
+
/// The default implementation just returns arrow::Status::OK().
|
184 |
+
///
|
185 |
+
/// \return Status
|
186 |
+
virtual Status OnMetadata();
|
187 |
+
|
188 |
+
/// \brief Called when the decoder state is changed to
|
189 |
+
/// MessageDecoder::State::BODY.
|
190 |
+
///
|
191 |
+
/// The default implementation just returns arrow::Status::OK().
|
192 |
+
///
|
193 |
+
/// \return Status
|
194 |
+
virtual Status OnBody();
|
195 |
+
|
196 |
+
/// \brief Called when the decoder state is changed to
|
197 |
+
/// MessageDecoder::State::EOS.
|
198 |
+
///
|
199 |
+
/// The default implementation just returns arrow::Status::OK().
|
200 |
+
///
|
201 |
+
/// \return Status
|
202 |
+
virtual Status OnEOS();
|
203 |
+
};
|
204 |
+
|
205 |
+
/// \class AssignMessageDecoderListener
|
206 |
+
/// \brief Assign a message decoded by MessageDecoder.
|
207 |
+
///
|
208 |
+
/// This API is EXPERIMENTAL.
|
209 |
+
///
|
210 |
+
/// \since 0.17.0
|
211 |
+
class ARROW_EXPORT AssignMessageDecoderListener : public MessageDecoderListener {
|
212 |
+
public:
|
213 |
+
/// \brief Construct a listener that assigns a decoded message to the
|
214 |
+
/// specified location.
|
215 |
+
///
|
216 |
+
/// \param[in] message a location to store the received message
|
217 |
+
explicit AssignMessageDecoderListener(std::unique_ptr<Message>* message)
|
218 |
+
: message_(message) {}
|
219 |
+
|
220 |
+
virtual ~AssignMessageDecoderListener() = default;
|
221 |
+
|
222 |
+
Status OnMessageDecoded(std::unique_ptr<Message> message) override {
|
223 |
+
*message_ = std::move(message);
|
224 |
+
return Status::OK();
|
225 |
+
}
|
226 |
+
|
227 |
+
private:
|
228 |
+
std::unique_ptr<Message>* message_;
|
229 |
+
|
230 |
+
ARROW_DISALLOW_COPY_AND_ASSIGN(AssignMessageDecoderListener);
|
231 |
+
};
|
232 |
+
|
233 |
+
/// \class MessageDecoder
|
234 |
+
/// \brief Push style message decoder that receives data from user.
|
235 |
+
///
|
236 |
+
/// This API is EXPERIMENTAL.
|
237 |
+
///
|
238 |
+
/// \since 0.17.0
|
239 |
+
class ARROW_EXPORT MessageDecoder {
|
240 |
+
public:
|
241 |
+
/// \brief State for reading a message
|
242 |
+
enum State {
|
243 |
+
/// The initial state. It requires one of the followings as the next data:
|
244 |
+
///
|
245 |
+
/// * int32_t continuation token
|
246 |
+
/// * int32_t end-of-stream mark (== 0)
|
247 |
+
/// * int32_t metadata length (backward compatibility for
|
248 |
+
/// reading old IPC messages produced prior to version 0.15.0
|
249 |
+
INITIAL,
|
250 |
+
|
251 |
+
/// It requires int32_t metadata length.
|
252 |
+
METADATA_LENGTH,
|
253 |
+
|
254 |
+
/// It requires metadata.
|
255 |
+
METADATA,
|
256 |
+
|
257 |
+
/// It requires message body.
|
258 |
+
BODY,
|
259 |
+
|
260 |
+
/// The end-of-stream state. No more data is processed.
|
261 |
+
EOS,
|
262 |
+
};
|
263 |
+
|
264 |
+
/// \brief Construct a message decoder.
|
265 |
+
///
|
266 |
+
/// \param[in] listener a MessageDecoderListener that responds events from
|
267 |
+
/// the decoder
|
268 |
+
/// \param[in] pool an optional MemoryPool to copy metadata on the
|
269 |
+
/// \param[in] skip_body if true the body will be skipped even if the message has a body
|
270 |
+
/// CPU, if required
|
271 |
+
explicit MessageDecoder(std::shared_ptr<MessageDecoderListener> listener,
|
272 |
+
MemoryPool* pool = default_memory_pool(),
|
273 |
+
bool skip_body = false);
|
274 |
+
|
275 |
+
/// \brief Construct a message decoder with the specified state.
|
276 |
+
///
|
277 |
+
/// This is a construct for advanced users that know how to decode
|
278 |
+
/// Message.
|
279 |
+
///
|
280 |
+
/// \param[in] listener a MessageDecoderListener that responds events from
|
281 |
+
/// the decoder
|
282 |
+
/// \param[in] initial_state an initial state of the decode
|
283 |
+
/// \param[in] initial_next_required_size the number of bytes needed
|
284 |
+
/// to run the next action
|
285 |
+
/// \param[in] pool an optional MemoryPool to copy metadata on the
|
286 |
+
/// CPU, if required
|
287 |
+
/// \param[in] skip_body if true the body will be skipped even if the message has a body
|
288 |
+
MessageDecoder(std::shared_ptr<MessageDecoderListener> listener, State initial_state,
|
289 |
+
int64_t initial_next_required_size,
|
290 |
+
MemoryPool* pool = default_memory_pool(), bool skip_body = false);
|
291 |
+
|
292 |
+
virtual ~MessageDecoder();
|
293 |
+
|
294 |
+
/// \brief Feed data to the decoder as a raw data.
|
295 |
+
///
|
296 |
+
/// If the decoder can decode one or more messages by the data, the
|
297 |
+
/// decoder calls listener->OnMessageDecoded() with a decoded
|
298 |
+
/// message multiple times.
|
299 |
+
///
|
300 |
+
/// If the state of the decoder is changed, corresponding callbacks
|
301 |
+
/// on listener is called:
|
302 |
+
///
|
303 |
+
  /// * MessageDecoder::State::INITIAL: listener->OnInitial()
  /// * MessageDecoder::State::METADATA_LENGTH: listener->OnMetadataLength()
  /// * MessageDecoder::State::METADATA: listener->OnMetadata()
  /// * MessageDecoder::State::BODY: listener->OnBody()
  /// * MessageDecoder::State::EOS: listener->OnEOS()
  ///
  /// \param[in] data raw data to be processed. This data isn't
  /// copied. The passed memory must be kept alive through message
  /// processing.
  /// \param[in] size raw data size.
  /// \return Status
  Status Consume(const uint8_t* data, int64_t size);

  /// \brief Feed data to the decoder as a Buffer.
  ///
  /// If the decoder can decode one or more messages from the Buffer,
  /// the decoder calls listener->OnMessageDecoded() once per decoded
  /// message.
  ///
  /// \param[in] buffer a Buffer to be processed.
  /// \return Status
  Status Consume(std::shared_ptr<Buffer> buffer);

  /// \brief Return the number of bytes needed to advance the state of
  /// the decoder.
  ///
  /// This method is provided for users who want to optimize performance.
  /// Normal users don't need to use this method.
  ///
  /// Here is an example usage for normal users:
  ///
  /// ~~~{.cpp}
  /// decoder.Consume(buffer1);
  /// decoder.Consume(buffer2);
  /// decoder.Consume(buffer3);
  /// ~~~
  ///
  /// The decoder has an internal buffer. If the consumed data isn't enough
  /// to advance the state of the decoder, it is appended to the internal
  /// buffer, which causes performance overhead.
  ///
  /// If you pass exactly next_required_size() bytes to each Consume()
  /// call, the decoder doesn't use its internal buffer, which improves
  /// performance.
  ///
  /// Here is an example usage that avoids the internal buffer:
  ///
  /// ~~~{.cpp}
  /// buffer1 = get_data(decoder.next_required_size());
  /// decoder.Consume(buffer1);
  /// buffer2 = get_data(decoder.next_required_size());
  /// decoder.Consume(buffer2);
  /// ~~~
  ///
  /// Users can also use this method to avoid creating small chunks.
  /// The message body must be contiguous data. If users pass small
  /// chunks to the decoder, the decoder needs to concatenate the small
  /// chunks internally, which causes performance overhead.
  ///
  /// Here is an example usage that reduces small chunks:
  ///
  /// ~~~{.cpp}
  /// buffer = AllocateResizableBuffer();
  /// while ((small_chunk = get_data(&small_chunk_size))) {
  ///   auto current_buffer_size = buffer->size();
  ///   buffer->Resize(current_buffer_size + small_chunk_size);
  ///   memcpy(buffer->mutable_data() + current_buffer_size,
  ///          small_chunk,
  ///          small_chunk_size);
  ///   if (buffer->size() < decoder.next_required_size()) {
  ///     continue;
  ///   }
  ///   std::shared_ptr<arrow::Buffer> chunk(buffer.release());
  ///   decoder.Consume(chunk);
  ///   buffer = AllocateResizableBuffer();
  /// }
  /// if (buffer->size() > 0) {
  ///   std::shared_ptr<arrow::Buffer> chunk(buffer.release());
  ///   decoder.Consume(chunk);
  /// }
  /// ~~~
  ///
  /// \return the number of bytes needed to advance the state of the
  /// decoder
  int64_t next_required_size() const;

  /// \brief Return the current state of the decoder.
  ///
  /// This method is provided for users who want to optimize performance.
  /// Normal users don't need to use this method.
  ///
  /// The decoder doesn't need a Buffer to process data in the
  /// MessageDecoder::State::INITIAL and
  /// MessageDecoder::State::METADATA_LENGTH states. Creating a Buffer has
  /// performance overhead. Advanced users can avoid creating a Buffer
  /// by checking the current state of the decoder:
  ///
  /// ~~~{.cpp}
  /// switch (decoder.state()) {
  ///   case MessageDecoder::State::INITIAL:
  ///   case MessageDecoder::State::METADATA_LENGTH:
  ///     {
  ///       uint8_t data[sizeof(int32_t)];
  ///       auto data_size = input->Read(decoder.next_required_size(), data);
  ///       decoder.Consume(data, data_size);
  ///     }
  ///     break;
  ///   default:
  ///     {
  ///       auto buffer = input->Read(decoder.next_required_size());
  ///       decoder.Consume(buffer);
  ///     }
  ///     break;
  /// }
  /// ~~~
  ///
  /// \return the current state
  State state() const;

 private:
  class MessageDecoderImpl;
  std::unique_ptr<MessageDecoderImpl> impl_;

  ARROW_DISALLOW_COPY_AND_ASSIGN(MessageDecoder);
};

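// Illustrative sketch (not part of this header): the next_required_size()-driven
// feeding loop described above. It assumes the decoder was already constructed
// with a listener (see the constructor earlier in this header) and that
// `read_exactly` is a hypothetical callback returning a Buffer of exactly the
// requested size, or null at end of input.

#include <functional>
#include <memory>

#include "arrow/buffer.h"
#include "arrow/ipc/message.h"
#include "arrow/status.h"

// Feed a MessageDecoder without ever touching its internal buffer.
arrow::Status FeedDecoder(
    arrow::ipc::MessageDecoder* decoder,
    const std::function<std::shared_ptr<arrow::Buffer>(int64_t)>& read_exactly) {
  while (true) {
    std::shared_ptr<arrow::Buffer> chunk = read_exactly(decoder->next_required_size());
    if (chunk == nullptr || chunk->size() == 0) break;  // assumed end-of-input signal
    ARROW_RETURN_NOT_OK(decoder->Consume(std::move(chunk)));
  }
  return arrow::Status::OK();
}
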
/// \brief Abstract interface for a sequence of messages
/// \since 0.5.0
class ARROW_EXPORT MessageReader {
 public:
  virtual ~MessageReader() = default;

  /// \brief Create a MessageReader that reads from an InputStream
  static std::unique_ptr<MessageReader> Open(io::InputStream* stream);

  /// \brief Create a MessageReader that reads from an owned InputStream
  static std::unique_ptr<MessageReader> Open(
      const std::shared_ptr<io::InputStream>& owned_stream);

  /// \brief Read the next Message from the interface
  ///
  /// \return an arrow::ipc::Message instance
  virtual Result<std::unique_ptr<Message>> ReadNextMessage() = 0;
};

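// Illustrative sketch (not part of this header): draining a stream with
// MessageReader. It assumes ReadNextMessage() returns a null Message at end of
// stream, which is not guaranteed by this excerpt alone.

#include <memory>

#include "arrow/io/interfaces.h"
#include "arrow/ipc/message.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Status ConsumeAllMessages(arrow::io::InputStream* stream) {
  std::unique_ptr<arrow::ipc::MessageReader> reader =
      arrow::ipc::MessageReader::Open(stream);
  while (true) {
    ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::ipc::Message> message,
                          reader->ReadNextMessage());
    if (message == nullptr) break;  // assumed end-of-stream convention
    // Inspect or dispatch the message here.
  }
  return arrow::Status::OK();
}
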
// The first parameter of the function should be a pointer to the metadata
// (i.e. org::apache::arrow::flatbuf::RecordBatch*)
using FieldsLoaderFunction = std::function<Status(const void*, io::RandomAccessFile*)>;

/// \brief Read an encapsulated RPC message from a position in a file
///
/// Read a length-prefixed message flatbuffer starting at the indicated file
/// offset. If the message has a body with non-zero length, it will also be
/// read.
///
/// The metadata_length includes at least the length prefix and the flatbuffer.
///
/// \param[in] offset the position in the file where the message starts. The
/// first 4 bytes after the offset are the message length
/// \param[in] metadata_length the total number of bytes to read from file
/// \param[in] file the seekable file interface to read from
/// \param[in] fields_loader the function for loading a subset of fields from the given file
/// \return the message read
ARROW_EXPORT
Result<std::unique_ptr<Message>> ReadMessage(
    const int64_t offset, const int32_t metadata_length, io::RandomAccessFile* file,
    const FieldsLoaderFunction& fields_loader = {});

/// \brief Read an encapsulated RPC message from cached buffers
///
/// The buffers should contain an entire message. Partial reads are not handled.
///
/// This method can be used to read just the metadata by passing in a nullptr for the
/// body. The body will then be skipped and the body size will not be validated.
///
/// If the body buffer is provided then it must be the complete body buffer.
///
/// This is similar to Message::Open but performs slightly more validation (e.g. it
/// checks that the metadata length is correct and that the body is the size the
/// metadata expected).
///
/// \param metadata The bytes for the metadata
/// \param body The bytes for the body
/// \return The message represented by the buffers
ARROW_EXPORT Result<std::unique_ptr<Message>> ReadMessage(
    std::shared_ptr<Buffer> metadata, std::shared_ptr<Buffer> body);

ARROW_EXPORT
Future<std::shared_ptr<Message>> ReadMessageAsync(
    const int64_t offset, const int32_t metadata_length, const int64_t body_length,
    io::RandomAccessFile* file, const io::IOContext& context = io::default_io_context());

/// \brief Advance stream to an 8-byte offset if its position is not a multiple
/// of 8 already
/// \param[in] stream an input stream
/// \param[in] alignment the byte multiple for the metadata prefix, usually 8
/// or 64, to ensure the body starts on a multiple of that alignment
/// \return Status
ARROW_EXPORT
Status AlignStream(io::InputStream* stream, int32_t alignment = 8);

/// \brief Advance stream to an 8-byte offset if its position is not a multiple
/// of 8 already
/// \param[in] stream an output stream
/// \param[in] alignment the byte multiple for the metadata prefix, usually 8
/// or 64, to ensure the body starts on a multiple of that alignment
/// \return Status
ARROW_EXPORT
Status AlignStream(io::OutputStream* stream, int32_t alignment = 8);

/// \brief Return an error Status if the file position is not a multiple of the
/// indicated alignment
ARROW_EXPORT
Status CheckAligned(io::FileInterface* stream, int32_t alignment = 8);

/// \brief Read an encapsulated IPC message (metadata and body) from an InputStream
///
/// Returns null if there are not enough bytes available or the
/// message length is 0 (e.g. EOS in a stream)
///
/// \param[in] stream an input stream
/// \param[in] pool an optional MemoryPool to copy metadata on the CPU, if required
/// \return Message
ARROW_EXPORT
Result<std::unique_ptr<Message>> ReadMessage(io::InputStream* stream,
                                             MemoryPool* pool = default_memory_pool());

/// \brief Feed data from an InputStream to a MessageDecoder to decode an
/// encapsulated IPC message (metadata and body)
///
/// This API is EXPERIMENTAL.
///
/// \param[in] decoder a decoder
/// \param[in] stream an input stream
/// \return Status
///
/// \since 0.17.0
ARROW_EXPORT
Status DecodeMessage(MessageDecoder* decoder, io::InputStream* stream);

/// \brief Write an encapsulated IPC message to a stream
///
/// Does not make assumptions about whether the stream is aligned already.
/// Can write the legacy (pre-0.15.0) IPC message format if the option is set.
///
/// The message is laid out as:
///
/// continuation: 0xFFFFFFFF
/// message_size: int32
/// message: const void*
/// padding
///
/// \param[in] message a buffer containing the metadata to write
/// \param[in] options IPC writing options, including alignment and
/// legacy message support
/// \param[in,out] file the OutputStream to write to
/// \param[out] message_length the total size of the payload written including
/// padding
/// \return Status
Status WriteMessage(const Buffer& message, const IpcWriteOptions& options,
                    io::OutputStream* file, int32_t* message_length);

}  // namespace ipc
}  // namespace arrow
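// Illustrative sketch (not part of this header): using the offset-based
// ReadMessage overload above. The offset/metadata_length pair is assumed to
// come from the caller, e.g. from a file footer.

#include <memory>

#include "arrow/io/interfaces.h"
#include "arrow/ipc/message.h"
#include "arrow/result.h"

arrow::Result<std::unique_ptr<arrow::ipc::Message>> ReadMessageAt(
    arrow::io::RandomAccessFile* file, int64_t offset, int32_t metadata_length) {
  // The first 4 bytes at `offset` hold the message length; the body (if any)
  // follows the metadata.
  return arrow::ipc::ReadMessage(offset, metadata_length, file);
}
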
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/options.h
ADDED
@@ -0,0 +1,178 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <optional>
#include <vector>

#include "arrow/io/caching.h"
#include "arrow/ipc/type_fwd.h"
#include "arrow/status.h"
#include "arrow/type_fwd.h"
#include "arrow/util/compression.h"
#include "arrow/util/visibility.h"

namespace arrow {

class MemoryPool;

namespace ipc {

// ARROW-109: We set this number arbitrarily to help catch user mistakes. For
// deeply nested schemas, it is expected the user will indicate explicitly the
// maximum allowed recursion depth
constexpr int kMaxNestingDepth = 64;

/// \brief Options for writing Arrow IPC messages
struct ARROW_EXPORT IpcWriteOptions {
  /// \brief If true, allow field lengths that don't fit in a signed 32-bit int.
  ///
  /// Some implementations may not be able to parse streams created with this option.
  bool allow_64bit = false;

  /// \brief The maximum permitted schema nesting depth.
  int max_recursion_depth = kMaxNestingDepth;

  /// \brief Write padding after memory buffers up to this multiple of bytes.
  int32_t alignment = 8;

  /// \brief Write the pre-0.15.0 IPC message format
  ///
  /// This legacy format consists of a 4-byte prefix instead of 8-byte.
  bool write_legacy_ipc_format = false;

  /// \brief The memory pool to use for allocations made during IPC writing
  ///
  /// While Arrow IPC is predominantly zero-copy, it may have to allocate
  /// memory in some cases (for example if compression is enabled).
  MemoryPool* memory_pool = default_memory_pool();

  /// \brief Compression codec to use for record batch body buffers
  ///
  /// May only be UNCOMPRESSED, LZ4_FRAME and ZSTD.
  std::shared_ptr<util::Codec> codec;

  /// \brief Minimum space savings percentage required for compression to be applied
  ///
  /// Space savings is calculated as (1.0 - compressed_size / uncompressed_size).
  ///
  /// For example, if min_space_savings = 0.1, a 100-byte body buffer won't undergo
  /// compression if its expected compressed size exceeds 90 bytes. If this option is
  /// unset, compression will be used indiscriminately. If no codec was supplied, this
  /// option is ignored.
  ///
  /// Values outside of the range [0, 1] are handled as errors.
  ///
  /// Note that enabling this option may result in unreadable data for Arrow C++ versions
  /// prior to 12.0.0.
  std::optional<double> min_space_savings;

  /// \brief Use global CPU thread pool to parallelize any computational tasks
  /// like compression
  bool use_threads = true;

  /// \brief Whether to emit dictionary deltas
  ///
  /// If false, a changed dictionary for a given field will emit a full
  /// dictionary replacement.
  /// If true, a changed dictionary will be compared against the previous
  /// version. If possible, a dictionary delta will be emitted, otherwise
  /// a full dictionary replacement.
  ///
  /// Default is false to maximize stream compatibility.
  ///
  /// Also, note that if a changed dictionary is a nested dictionary,
  /// then a delta is never emitted, for compatibility with the read path.
  bool emit_dictionary_deltas = false;

  /// \brief Whether to unify dictionaries for the IPC file format
  ///
  /// The IPC file format doesn't support dictionary replacements.
  /// Therefore, chunks of a column with a dictionary type must have the same
  /// dictionary in each record batch (or an extended dictionary + delta).
  ///
  /// If this option is true, RecordBatchWriter::WriteTable will attempt
  /// to unify dictionaries across each table column. If this option is
  /// false, incompatible dictionaries across a table column will simply
  /// raise an error.
  ///
  /// Note that enabling this option has a runtime cost. Also, not all types
  /// currently support dictionary unification.
  ///
  /// This option is ignored for IPC streams, which support dictionary replacement
  /// and deltas.
  bool unify_dictionaries = false;

  /// \brief Format version to use for IPC messages and their metadata.
  ///
  /// Presently using V5 version (readable by 1.0.0 and later).
  /// V4 is also available (readable by 0.8.0 and later).
  MetadataVersion metadata_version = MetadataVersion::V5;

  static IpcWriteOptions Defaults();
};

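// Illustrative sketch (not part of this header): customizing IpcWriteOptions.
// It assumes ZSTD support was compiled into arrow::util::Codec.

#include "arrow/ipc/options.h"
#include "arrow/result.h"
#include "arrow/util/compression.h"

arrow::Result<arrow::ipc::IpcWriteOptions> MakeCompressedWriteOptions() {
  arrow::ipc::IpcWriteOptions options = arrow::ipc::IpcWriteOptions::Defaults();
  ARROW_ASSIGN_OR_RAISE(options.codec,
                        arrow::util::Codec::Create(arrow::Compression::ZSTD));
  options.min_space_savings = 0.1;        // skip compression that saves < 10%
  options.emit_dictionary_deltas = true;  // readers must support dictionary deltas
  return options;
}
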
/// \brief Options for reading Arrow IPC messages
struct ARROW_EXPORT IpcReadOptions {
  /// \brief The maximum permitted schema nesting depth.
  int max_recursion_depth = kMaxNestingDepth;

  /// \brief The memory pool to use for allocations made during IPC reading
  ///
  /// While Arrow IPC is predominantly zero-copy, it may have to allocate
  /// memory in some cases (for example if compression is enabled).
  MemoryPool* memory_pool = default_memory_pool();

  /// \brief Top-level schema fields to include when deserializing RecordBatch.
  ///
  /// If empty (the default), return all deserialized fields.
  /// If non-empty, the values are the indices of fields in the top-level schema.
  std::vector<int> included_fields;

  /// \brief Use global CPU thread pool to parallelize any computational tasks
  /// like decompression
  bool use_threads = true;

  /// \brief Whether to convert incoming data to platform-native endianness
  ///
  /// If the endianness of the received schema is not equal to platform-native
  /// endianness, then all buffers with endian-sensitive data will be byte-swapped.
  /// This includes the value buffers of numeric types, temporal types, decimal
  /// types, as well as the offset buffers of variable-sized binary and list-like
  /// types.
  ///
  /// Endianness conversion is achieved by the RecordBatchFileReader,
  /// RecordBatchStreamReader and StreamDecoder classes.
  bool ensure_native_endian = true;

  /// \brief Options to control caching behavior when pre-buffering is requested
  ///
  /// The lazy property will always be reset to true to deliver the expected behavior
  io::CacheOptions pre_buffer_cache_options = io::CacheOptions::LazyDefaults();

  static IpcReadOptions Defaults();
};

namespace internal {

Status CheckCompressionSupported(Compression::type codec);

}  // namespace internal
}  // namespace ipc
}  // namespace arrow
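// Illustrative sketch (not part of this header): the read side, projecting a
// subset of top-level columns during IPC deserialization.

#include "arrow/ipc/options.h"

arrow::ipc::IpcReadOptions MakeProjectedReadOptions() {
  arrow::ipc::IpcReadOptions options = arrow::ipc::IpcReadOptions::Defaults();
  options.included_fields = {0, 2};  // deserialize only top-level fields 0 and 2
  options.use_threads = false;       // keep decompression single-threaded
  return options;
}
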
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/writer.h
ADDED
@@ -0,0 +1,475 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Implement Arrow streaming binary format

#pragma once

#include <cstdint>
#include <memory>
#include <vector>

#include "arrow/ipc/dictionary.h"  // IWYU pragma: export
#include "arrow/ipc/message.h"
#include "arrow/ipc/options.h"
#include "arrow/result.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Array;
class Buffer;
class MemoryManager;
class MemoryPool;
class RecordBatch;
class Schema;
class Status;
class Table;
class Tensor;
class SparseTensor;

namespace io {

class OutputStream;

}  // namespace io

namespace ipc {

/// \brief Intermediate data structure with metadata header, and zero
/// or more buffers for the message body.
struct IpcPayload {
  MessageType type = MessageType::NONE;
  std::shared_ptr<Buffer> metadata;
  std::vector<std::shared_ptr<Buffer>> body_buffers;
  std::vector<int64_t> variadic_buffer_counts;
  int64_t body_length = 0;      // serialized body length (padded, maybe compressed)
  int64_t raw_body_length = 0;  // initial uncompressed body length
};

struct WriteStats {
  /// Number of IPC messages written.
  int64_t num_messages = 0;
  /// Number of record batches written.
  int64_t num_record_batches = 0;
  /// Number of dictionary batches written.
  ///
  /// Note: num_dictionary_batches >= num_dictionary_deltas + num_replaced_dictionaries
  int64_t num_dictionary_batches = 0;

  /// Number of dictionary deltas written.
  int64_t num_dictionary_deltas = 0;
  /// Number of replaced dictionaries (i.e. where a dictionary batch replaces
  /// an existing dictionary with an unrelated new dictionary).
  int64_t num_replaced_dictionaries = 0;

  /// Total size in bytes of record batches emitted.
  /// The "raw" size counts the original buffer sizes, while the "serialized" size
  /// includes padding and (optionally) compression.
  int64_t total_raw_body_size = 0;
  int64_t total_serialized_body_size = 0;
};

/// \class RecordBatchWriter
/// \brief Abstract interface for writing a stream of record batches
class ARROW_EXPORT RecordBatchWriter {
 public:
  virtual ~RecordBatchWriter();

  /// \brief Write a record batch to the stream
  ///
  /// \param[in] batch the record batch to write to the stream
  /// \return Status
  virtual Status WriteRecordBatch(const RecordBatch& batch) = 0;

  /// \brief Write a record batch with custom metadata to the stream
  ///
  /// \param[in] batch the record batch to write to the stream
  /// \param[in] custom_metadata the record batch's custom metadata to write to the stream
  /// \return Status
  virtual Status WriteRecordBatch(
      const RecordBatch& batch,
      const std::shared_ptr<const KeyValueMetadata>& custom_metadata);

  /// \brief Write possibly-chunked table by creating sequence of record batches
  /// \param[in] table table to write
  /// \return Status
  Status WriteTable(const Table& table);

  /// \brief Write Table with a particular chunksize
  /// \param[in] table table to write
  /// \param[in] max_chunksize maximum length of table chunks. To indicate
  /// that no maximum should be enforced, pass -1.
  /// \return Status
  virtual Status WriteTable(const Table& table, int64_t max_chunksize);

  /// \brief Perform any logic necessary to finish the stream
  ///
  /// \return Status
  virtual Status Close() = 0;

  /// \brief Return current write statistics
  virtual WriteStats stats() const = 0;
};

/// \defgroup record-batch-writer-factories Functions for creating RecordBatchWriter
/// instances
///
/// @{

/// Create a new IPC stream writer from stream sink and schema. User is
/// responsible for closing the actual OutputStream.
///
/// \param[in] sink output stream to write to
/// \param[in] schema the schema of the record batches to be written
/// \param[in] options options for serialization
/// \return Result<std::shared_ptr<RecordBatchWriter>>
ARROW_EXPORT
Result<std::shared_ptr<RecordBatchWriter>> MakeStreamWriter(
    io::OutputStream* sink, const std::shared_ptr<Schema>& schema,
    const IpcWriteOptions& options = IpcWriteOptions::Defaults());

/// Create a new IPC stream writer from stream sink and schema. User is
/// responsible for closing the actual OutputStream.
///
/// \param[in] sink output stream to write to
/// \param[in] schema the schema of the record batches to be written
/// \param[in] options options for serialization
/// \return Result<std::shared_ptr<RecordBatchWriter>>
ARROW_EXPORT
Result<std::shared_ptr<RecordBatchWriter>> MakeStreamWriter(
    std::shared_ptr<io::OutputStream> sink, const std::shared_ptr<Schema>& schema,
    const IpcWriteOptions& options = IpcWriteOptions::Defaults());

/// Create a new IPC file writer from stream sink and schema
///
/// \param[in] sink output stream to write to
/// \param[in] schema the schema of the record batches to be written
/// \param[in] options options for serialization, optional
/// \param[in] metadata custom metadata for File Footer, optional
/// \return Result<std::shared_ptr<RecordBatchWriter>>
ARROW_EXPORT
Result<std::shared_ptr<RecordBatchWriter>> MakeFileWriter(
    io::OutputStream* sink, const std::shared_ptr<Schema>& schema,
    const IpcWriteOptions& options = IpcWriteOptions::Defaults(),
    const std::shared_ptr<const KeyValueMetadata>& metadata = NULLPTR);

/// Create a new IPC file writer from stream sink and schema
///
/// \param[in] sink output stream to write to
/// \param[in] schema the schema of the record batches to be written
/// \param[in] options options for serialization, optional
/// \param[in] metadata custom metadata for File Footer, optional
/// \return Result<std::shared_ptr<RecordBatchWriter>>
ARROW_EXPORT
Result<std::shared_ptr<RecordBatchWriter>> MakeFileWriter(
    std::shared_ptr<io::OutputStream> sink, const std::shared_ptr<Schema>& schema,
    const IpcWriteOptions& options = IpcWriteOptions::Defaults(),
    const std::shared_ptr<const KeyValueMetadata>& metadata = NULLPTR);

/// @}

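// Illustrative sketch (not part of this header): using the factory functions
// above to open a file-format writer on an owned OutputStream, write one
// batch, and close. The sink/schema/batch arguments are assumed to come from
// the caller.

#include <memory>

#include "arrow/io/interfaces.h"
#include "arrow/ipc/writer.h"
#include "arrow/record_batch.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Status WriteSingleBatchFile(std::shared_ptr<arrow::io::OutputStream> sink,
                                   const std::shared_ptr<arrow::Schema>& schema,
                                   const arrow::RecordBatch& batch) {
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::ipc::RecordBatchWriter> writer,
                        arrow::ipc::MakeFileWriter(std::move(sink), schema));
  ARROW_RETURN_NOT_OK(writer->WriteRecordBatch(batch));
  return writer->Close();
}
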
/// \brief Low-level API for writing a record batch (without schema)
/// to an OutputStream as encapsulated IPC message. See Arrow format
/// documentation for more detail.
///
/// \param[in] batch the record batch to write
/// \param[in] buffer_start_offset the start offset to use in the buffer metadata,
/// generally should be 0
/// \param[in] dst an OutputStream
/// \param[out] metadata_length the size of the length-prefixed flatbuffer
/// including padding to a 64-byte boundary
/// \param[out] body_length the size of the contiguous buffer block plus padding
/// \param[in] options options for serialization
/// \return Status
ARROW_EXPORT
Status WriteRecordBatch(const RecordBatch& batch, int64_t buffer_start_offset,
                        io::OutputStream* dst, int32_t* metadata_length,
                        int64_t* body_length, const IpcWriteOptions& options);

/// \brief Serialize record batch as encapsulated IPC message in a new buffer
///
/// \param[in] batch the record batch
/// \param[in] options the IpcWriteOptions to use for serialization
/// \return the serialized message
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> SerializeRecordBatch(const RecordBatch& batch,
                                                     const IpcWriteOptions& options);

/// \brief Serialize record batch as encapsulated IPC message in a new buffer
///
/// \param[in] batch the record batch
/// \param[in] mm a MemoryManager to allocate memory from
/// \return the serialized message
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> SerializeRecordBatch(const RecordBatch& batch,
                                                     std::shared_ptr<MemoryManager> mm);

/// \brief Write record batch to OutputStream
///
/// \param[in] batch the record batch to write
/// \param[in] options the IpcWriteOptions to use for serialization
/// \param[in] out the OutputStream to write the output to
/// \return Status
///
/// If writing to pre-allocated memory, you can use
/// arrow::ipc::GetRecordBatchSize to compute how much space is required
ARROW_EXPORT
Status SerializeRecordBatch(const RecordBatch& batch, const IpcWriteOptions& options,
                            io::OutputStream* out);

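// Illustrative sketch (not part of this header): serializing a batch to a new
// buffer with the in-memory overload above. The batch argument is assumed.

#include <memory>

#include "arrow/buffer.h"
#include "arrow/ipc/writer.h"
#include "arrow/record_batch.h"
#include "arrow/result.h"

arrow::Result<std::shared_ptr<arrow::Buffer>> BatchToIpcBuffer(
    const arrow::RecordBatch& batch) {
  // Produces an encapsulated IPC message (metadata + body) in a fresh buffer.
  return arrow::ipc::SerializeRecordBatch(batch,
                                          arrow::ipc::IpcWriteOptions::Defaults());
}
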
/// \brief Serialize schema as encapsulated IPC message
///
/// \param[in] schema the schema to write
/// \param[in] pool a MemoryPool to allocate memory from
/// \return the serialized schema
ARROW_EXPORT
Result<std::shared_ptr<Buffer>> SerializeSchema(const Schema& schema,
                                                MemoryPool* pool = default_memory_pool());

/// \brief Write multiple record batches to OutputStream, including schema
/// \param[in] batches a vector of batches. Must all have same schema
/// \param[in] options options for serialization
/// \param[out] dst an OutputStream
/// \return Status
ARROW_EXPORT
Status WriteRecordBatchStream(const std::vector<std::shared_ptr<RecordBatch>>& batches,
                              const IpcWriteOptions& options, io::OutputStream* dst);

/// \brief Compute the number of bytes needed to write an IPC payload
/// including metadata
///
/// \param[in] payload the IPC payload to write
/// \param[in] options write options
/// \return the size of the complete encapsulated message
ARROW_EXPORT
int64_t GetPayloadSize(const IpcPayload& payload,
                       const IpcWriteOptions& options = IpcWriteOptions::Defaults());

/// \brief Compute the number of bytes needed to write a record batch including metadata
///
/// \param[in] batch the record batch to write
/// \param[out] size the size of the complete encapsulated message
/// \return Status
ARROW_EXPORT
Status GetRecordBatchSize(const RecordBatch& batch, int64_t* size);

/// \brief Compute the number of bytes needed to write a record batch including metadata
///
/// \param[in] batch the record batch to write
/// \param[in] options options for serialization
/// \param[out] size the size of the complete encapsulated message
/// \return Status
ARROW_EXPORT
Status GetRecordBatchSize(const RecordBatch& batch, const IpcWriteOptions& options,
                          int64_t* size);

/// \brief Compute the number of bytes needed to write a tensor including metadata
///
/// \param[in] tensor the tensor to write
/// \param[out] size the size of the complete encapsulated message
/// \return Status
ARROW_EXPORT
Status GetTensorSize(const Tensor& tensor, int64_t* size);

/// \brief EXPERIMENTAL: Convert arrow::Tensor to a Message with minimal memory
/// allocation
///
/// \param[in] tensor the Tensor to write
/// \param[in] pool MemoryPool to allocate space for metadata
/// \return the resulting Message
ARROW_EXPORT
Result<std::unique_ptr<Message>> GetTensorMessage(const Tensor& tensor, MemoryPool* pool);

/// \brief Write arrow::Tensor as a contiguous message.
///
/// The metadata and body are written assuming 64-byte alignment. It is the
/// user's responsibility to ensure that the OutputStream has been aligned
/// to a 64-byte multiple before writing the message.
///
/// The message is written out as follows:
/// \code
/// <metadata size> <metadata> <tensor data>
/// \endcode
///
/// \param[in] tensor the Tensor to write
/// \param[in] dst the OutputStream to write to
/// \param[out] metadata_length the actual metadata length, including padding
/// \param[out] body_length the actual message body length
/// \return Status
ARROW_EXPORT
Status WriteTensor(const Tensor& tensor, io::OutputStream* dst, int32_t* metadata_length,
                   int64_t* body_length);

/// \brief EXPERIMENTAL: Convert arrow::SparseTensor to a Message with minimal memory
/// allocation
///
/// The message is written out as follows:
/// \code
/// <metadata size> <metadata> <sparse index> <sparse tensor body>
/// \endcode
///
/// \param[in] sparse_tensor the SparseTensor to write
/// \param[in] pool MemoryPool to allocate space for metadata
/// \return the resulting Message
ARROW_EXPORT
Result<std::unique_ptr<Message>> GetSparseTensorMessage(const SparseTensor& sparse_tensor,
                                                        MemoryPool* pool);

/// \brief EXPERIMENTAL: Write arrow::SparseTensor as a contiguous message. The metadata,
/// sparse index, and body are written assuming 64-byte alignment. It is the
/// user's responsibility to ensure that the OutputStream has been aligned
/// to a 64-byte multiple before writing the message.
///
/// \param[in] sparse_tensor the SparseTensor to write
/// \param[in] dst the OutputStream to write to
/// \param[out] metadata_length the actual metadata length, including padding
/// \param[out] body_length the actual message body length
/// \return Status
ARROW_EXPORT
Status WriteSparseTensor(const SparseTensor& sparse_tensor, io::OutputStream* dst,
                         int32_t* metadata_length, int64_t* body_length);

/// \brief Compute IpcPayload for the given schema
/// \param[in] schema the Schema that is being serialized
/// \param[in] options options for serialization
/// \param[in] mapper object mapping dictionary fields to dictionary ids
/// \param[out] out the returned IpcPayload
/// \return Status
ARROW_EXPORT
Status GetSchemaPayload(const Schema& schema, const IpcWriteOptions& options,
                        const DictionaryFieldMapper& mapper, IpcPayload* out);

/// \brief Compute IpcPayload for a dictionary
/// \param[in] id the dictionary id
/// \param[in] dictionary the dictionary values
/// \param[in] options options for serialization
/// \param[out] payload the output IpcPayload
/// \return Status
ARROW_EXPORT
Status GetDictionaryPayload(int64_t id, const std::shared_ptr<Array>& dictionary,
                            const IpcWriteOptions& options, IpcPayload* payload);

/// \brief Compute IpcPayload for a dictionary
/// \param[in] id the dictionary id
/// \param[in] is_delta whether the dictionary is a delta dictionary
/// \param[in] dictionary the dictionary values
/// \param[in] options options for serialization
/// \param[out] payload the output IpcPayload
/// \return Status
ARROW_EXPORT
Status GetDictionaryPayload(int64_t id, bool is_delta,
                            const std::shared_ptr<Array>& dictionary,
                            const IpcWriteOptions& options, IpcPayload* payload);

/// \brief Compute IpcPayload for the given record batch
/// \param[in] batch the RecordBatch that is being serialized
/// \param[in] options options for serialization
/// \param[out] out the returned IpcPayload
/// \return Status
ARROW_EXPORT
Status GetRecordBatchPayload(const RecordBatch& batch, const IpcWriteOptions& options,
                             IpcPayload* out);

/// \brief Compute IpcPayload for the given record batch and custom metadata
/// \param[in] batch the RecordBatch that is being serialized
/// \param[in] custom_metadata the custom metadata to be serialized with the record batch
/// \param[in] options options for serialization
/// \param[out] out the returned IpcPayload
/// \return Status
ARROW_EXPORT
Status GetRecordBatchPayload(
    const RecordBatch& batch,
    const std::shared_ptr<const KeyValueMetadata>& custom_metadata,
    const IpcWriteOptions& options, IpcPayload* out);

/// \brief Write an IPC payload to the given stream.
/// \param[in] payload the payload to write
/// \param[in] options options for serialization
/// \param[in] dst The stream to write the payload to.
/// \param[out] metadata_length the length of the serialized metadata
/// \return Status
ARROW_EXPORT
Status WriteIpcPayload(const IpcPayload& payload, const IpcWriteOptions& options,
                       io::OutputStream* dst, int32_t* metadata_length);

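// Illustrative sketch (not part of this header): chaining the payload helpers
// above to emit a schema payload, then a record batch payload, onto a raw
// OutputStream. It assumes the DictionaryFieldMapper(const Schema&)
// constructor from arrow/ipc/dictionary.h.

#include "arrow/ipc/dictionary.h"
#include "arrow/ipc/writer.h"
#include "arrow/record_batch.h"
#include "arrow/status.h"

arrow::Status WriteSchemaThenBatch(const arrow::Schema& schema,
                                   const arrow::RecordBatch& batch,
                                   arrow::io::OutputStream* dst) {
  const arrow::ipc::IpcWriteOptions options = arrow::ipc::IpcWriteOptions::Defaults();
  const arrow::ipc::DictionaryFieldMapper mapper(schema);
  int32_t metadata_length = 0;

  arrow::ipc::IpcPayload schema_payload;
  ARROW_RETURN_NOT_OK(
      arrow::ipc::GetSchemaPayload(schema, options, mapper, &schema_payload));
  ARROW_RETURN_NOT_OK(
      arrow::ipc::WriteIpcPayload(schema_payload, options, dst, &metadata_length));

  arrow::ipc::IpcPayload batch_payload;
  ARROW_RETURN_NOT_OK(arrow::ipc::GetRecordBatchPayload(batch, options, &batch_payload));
  return arrow::ipc::WriteIpcPayload(batch_payload, options, dst, &metadata_length);
}
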
/// \brief Compute IpcPayload for the given sparse tensor
/// \param[in] sparse_tensor the SparseTensor that is being serialized
/// \param[in,out] pool for any required temporary memory allocations
/// \param[out] out the returned IpcPayload
/// \return Status
ARROW_EXPORT
Status GetSparseTensorPayload(const SparseTensor& sparse_tensor, MemoryPool* pool,
                              IpcPayload* out);

namespace internal {

// These internal APIs may change without warning or deprecation

class ARROW_EXPORT IpcPayloadWriter {
 public:
  virtual ~IpcPayloadWriter();

  // Default implementation is a no-op
  virtual Status Start();

  virtual Status WritePayload(const IpcPayload& payload) = 0;

  virtual Status Close() = 0;
};

/// Create a new IPC payload stream writer from stream sink. User is
/// responsible for closing the actual OutputStream.
///
/// \param[in] sink output stream to write to
/// \param[in] options options for serialization
/// \return Result<std::unique_ptr<IpcPayloadWriter>>
ARROW_EXPORT
Result<std::unique_ptr<IpcPayloadWriter>> MakePayloadStreamWriter(
    io::OutputStream* sink, const IpcWriteOptions& options = IpcWriteOptions::Defaults());

/// Create a new IPC payload file writer from stream sink.
///
/// \param[in] sink output stream to write to
/// \param[in] schema the schema of the record batches to be written
/// \param[in] options options for serialization, optional
/// \param[in] metadata custom metadata for File Footer, optional
/// \return Result<std::unique_ptr<IpcPayloadWriter>>
ARROW_EXPORT
Result<std::unique_ptr<IpcPayloadWriter>> MakePayloadFileWriter(
    io::OutputStream* sink, const std::shared_ptr<Schema>& schema,
    const IpcWriteOptions& options = IpcWriteOptions::Defaults(),
    const std::shared_ptr<const KeyValueMetadata>& metadata = NULLPTR);

/// Create a new RecordBatchWriter from IpcPayloadWriter and schema.
///
/// The format is implicitly the IPC stream format (allowing dictionary
/// replacement and deltas).
///
/// \param[in] sink the IpcPayloadWriter to write to
/// \param[in] schema the schema of the record batches to be written
/// \param[in] options options for serialization
/// \return Result<std::unique_ptr<RecordBatchWriter>>
ARROW_EXPORT
Result<std::unique_ptr<RecordBatchWriter>> OpenRecordBatchWriter(
    std::unique_ptr<IpcPayloadWriter> sink, const std::shared_ptr<Schema>& schema,
    const IpcWriteOptions& options = IpcWriteOptions::Defaults());

}  // namespace internal
}  // namespace ipc
}  // namespace arrow
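// Illustrative sketch (not part of this header): using WriteRecordBatchStream,
// declared above, to dump several same-schema batches as a complete IPC stream.

#include <memory>
#include <vector>

#include "arrow/ipc/writer.h"
#include "arrow/record_batch.h"
#include "arrow/status.h"

arrow::Status DumpBatches(const std::vector<std::shared_ptr<arrow::RecordBatch>>& batches,
                          arrow::io::OutputStream* dst) {
  // Writes the schema message followed by each record batch message.
  return arrow::ipc::WriteRecordBatchStream(
      batches, arrow::ipc::IpcWriteOptions::Defaults(), dst);
}
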
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/api.h
ADDED
@@ -0,0 +1,21 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include "arrow/json/options.h"
#include "arrow/json/reader.h"
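// Illustrative sketch (not part of this header): api.h only re-exports the
// options and reader headers. The sketch below assumes the TableReader::Make /
// Read interface declared in arrow/json/reader.h, which is not shown in this
// diff excerpt.

#include <memory>

#include "arrow/io/interfaces.h"
#include "arrow/json/api.h"
#include "arrow/memory_pool.h"
#include "arrow/result.h"
#include "arrow/table.h"

arrow::Result<std::shared_ptr<arrow::Table>> ReadJsonTable(
    std::shared_ptr<arrow::io::InputStream> input) {
  ARROW_ASSIGN_OR_RAISE(
      std::shared_ptr<arrow::json::TableReader> reader,
      arrow::json::TableReader::Make(arrow::default_memory_pool(), std::move(input),
                                     arrow::json::ReadOptions::Defaults(),
                                     arrow::json::ParseOptions::Defaults()));
  return reader->Read();
}
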
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunked_builder.h
ADDED
@@ -0,0 +1,68 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <vector>

#include "arrow/status.h"
#include "arrow/type_fwd.h"
#include "arrow/util/type_fwd.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace json {

class PromotionGraph;

class ARROW_EXPORT ChunkedArrayBuilder {
 public:
  virtual ~ChunkedArrayBuilder() = default;

  /// Spawn a task that will try to convert and insert the given JSON block
  virtual void Insert(int64_t block_index,
                      const std::shared_ptr<Field>& unconverted_field,
                      const std::shared_ptr<Array>& unconverted) = 0;

  /// Return the final chunked array.
  /// Every chunk must be inserted before this is called!
  virtual Status Finish(std::shared_ptr<ChunkedArray>* out) = 0;

  /// Finish current task group and substitute a new one
  virtual Status ReplaceTaskGroup(
      const std::shared_ptr<arrow::internal::TaskGroup>& task_group) = 0;

 protected:
  explicit ChunkedArrayBuilder(
      const std::shared_ptr<arrow::internal::TaskGroup>& task_group)
      : task_group_(task_group) {}

  std::shared_ptr<arrow::internal::TaskGroup> task_group_;
};

/// create a chunked builder
///
/// if unexpected fields and promotion need to be handled, promotion_graph must be
/// non-null
ARROW_EXPORT Status MakeChunkedArrayBuilder(
    const std::shared_ptr<arrow::internal::TaskGroup>& task_group, MemoryPool* pool,
    const PromotionGraph* promotion_graph, const std::shared_ptr<DataType>& type,
    std::shared_ptr<ChunkedArrayBuilder>* out);

}  // namespace json
}  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/chunker.h
ADDED
@@ -0,0 +1,35 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>

#include "arrow/util/delimiting.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace json {

struct ParseOptions;

ARROW_EXPORT
std::unique_ptr<Chunker> MakeChunker(const ParseOptions& options);

}  // namespace json
}  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/converter.h
ADDED
@@ -0,0 +1,94 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <string>

#include "arrow/status.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Array;
class DataType;
class Field;
class MemoryPool;

namespace json {

/// \brief interface for conversion of Arrays
///
/// Converters are not required to be correct for arbitrary input - only
/// for unconverted arrays emitted by a corresponding parser.
class ARROW_EXPORT Converter {
 public:
  virtual ~Converter() = default;

  /// convert an array
  /// on failure, this converter may be promoted to another converter which
  /// *can* convert the given input.
  virtual Status Convert(const std::shared_ptr<Array>& in,
                         std::shared_ptr<Array>* out) = 0;

  std::shared_ptr<DataType> out_type() const { return out_type_; }

  MemoryPool* pool() { return pool_; }

 protected:
  ARROW_DISALLOW_COPY_AND_ASSIGN(Converter);

  Converter(MemoryPool* pool, const std::shared_ptr<DataType>& out_type)
      : pool_(pool), out_type_(out_type) {}

  MemoryPool* pool_;
  std::shared_ptr<DataType> out_type_;
};

/// \brief produce a single converter to the specified out_type
ARROW_EXPORT Status MakeConverter(const std::shared_ptr<DataType>& out_type,
                                  MemoryPool* pool, std::shared_ptr<Converter>* out);

class ARROW_EXPORT PromotionGraph {
 public:
  virtual ~PromotionGraph() = default;

  /// \brief produce a valid field which will be inferred as null
  virtual std::shared_ptr<Field> Null(const std::string& name) const = 0;

  /// \brief given an unexpected field encountered during parsing, return a type to which
  /// it may be convertible (may return null if none is available)
  virtual std::shared_ptr<DataType> Infer(
      const std::shared_ptr<Field>& unexpected_field) const = 0;

  /// \brief given a type to which conversion failed, return a promoted type to which
  /// conversion may succeed (may return null if none is available)
  virtual std::shared_ptr<DataType> Promote(
      const std::shared_ptr<DataType>& failed,
      const std::shared_ptr<Field>& unexpected_field) const = 0;

 protected:
  ARROW_DISALLOW_COPY_AND_ASSIGN(PromotionGraph);
  PromotionGraph() = default;
};

ARROW_EXPORT const PromotionGraph* GetPromotionGraph();

}  // namespace json
}  // namespace arrow
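// Illustrative sketch (not part of this header): using MakeConverter declared
// above. Note the header's caveat that converters are only guaranteed to
// handle unconverted arrays produced by the matching parser; the input array
// here is assumed to come from that parser.

#include <memory>

#include "arrow/array.h"
#include "arrow/json/converter.h"
#include "arrow/memory_pool.h"
#include "arrow/status.h"
#include "arrow/type.h"

arrow::Status ConvertToInt64(const std::shared_ptr<arrow::Array>& unconverted,
                             std::shared_ptr<arrow::Array>* out) {
  std::shared_ptr<arrow::json::Converter> converter;
  ARROW_RETURN_NOT_OK(arrow::json::MakeConverter(arrow::int64(),
                                                 arrow::default_memory_pool(),
                                                 &converter));
  return converter->Convert(unconverted, out);
}
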
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/object_writer.h
ADDED
@@ -0,0 +1,48 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <string_view>

#include "arrow/util/visibility.h"

namespace arrow {
namespace json {
namespace internal {

/// This class is a helper to serialize a JSON object to a string.
/// It uses rapidjson in its implementation.
class ARROW_EXPORT ObjectWriter {
 public:
  ObjectWriter();
  ~ObjectWriter();

  void SetString(std::string_view key, std::string_view value);
  void SetBool(std::string_view key, bool value);

  std::string Serialize();

 private:
  class Impl;
  std::unique_ptr<Impl> impl_;
};

}  // namespace internal
}  // namespace json
}  // namespace arrow
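// Illustrative sketch (not part of this header): the helper above in use.
// The key names are made up for illustration.

#include <string>

#include "arrow/json/object_writer.h"

std::string BuildConfigJson() {
  arrow::json::internal::ObjectWriter writer;
  writer.SetString("engine", "rapidjson");
  writer.SetBool("pretty", false);
  return writer.Serialize();  // e.g. {"engine":"rapidjson","pretty":false}
}
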
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/options.h
ADDED
@@ -0,0 +1,74 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+#include "arrow/json/type_fwd.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class DataType;
+class Schema;
+
+namespace json {
+
+enum class UnexpectedFieldBehavior : char {
+  /// Unexpected JSON fields are ignored
+  Ignore,
+  /// Unexpected JSON fields error out
+  Error,
+  /// Unexpected JSON fields are type-inferred and included in the output
+  InferType
+};
+
+struct ARROW_EXPORT ParseOptions {
+  // Parsing options
+
+  /// Optional explicit schema (disables type inference on those fields)
+  std::shared_ptr<Schema> explicit_schema;
+
+  /// Whether objects may be printed across multiple lines (for example pretty-printed)
+  ///
+  /// If true, parsing may be slower.
+  bool newlines_in_values = false;
+
+  /// How JSON fields outside of explicit_schema (if given) are treated
+  UnexpectedFieldBehavior unexpected_field_behavior = UnexpectedFieldBehavior::InferType;
+
+  /// Create parsing options with default values
+  static ParseOptions Defaults();
+};
+
+struct ARROW_EXPORT ReadOptions {
+  // Reader options
+
+  /// Whether to use the global CPU thread pool
+  bool use_threads = true;
+  /// Block size we request from the IO layer; also determines the size of
+  /// chunks when use_threads is true
+  int32_t block_size = 1 << 20;  // 1 MB
+
+  /// Create read options with default values
+  static ReadOptions Defaults();
+};
+
+} // namespace json
+} // namespace arrow
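ParseOptions and ReadOptions are plain structs, so configuring them is just field assignment on top of the Defaults() factories. A brief sketch, assuming only the fields declared above (the helper function names are illustrative, not part of the library):

```cpp
// Hypothetical configuration sketch for arrow::json::ParseOptions / ReadOptions.
#include <memory>
#include <utility>

#include "arrow/json/options.h"

arrow::json::ParseOptions MakeStrictParseOptions(std::shared_ptr<arrow::Schema> schema) {
  auto parse_options = arrow::json::ParseOptions::Defaults();
  parse_options.explicit_schema = std::move(schema);  // no type inference for these fields
  parse_options.unexpected_field_behavior =
      arrow::json::UnexpectedFieldBehavior::Error;    // reject unknown fields
  parse_options.newlines_in_values = true;            // tolerate pretty-printed objects
  return parse_options;
}

arrow::json::ReadOptions MakeSerialReadOptions() {
  auto read_options = arrow::json::ReadOptions::Defaults();
  read_options.use_threads = false;   // stay on the calling thread
  read_options.block_size = 1 << 22;  // 4 MB blocks instead of the 1 MB default
  return read_options;
}
```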
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/json/parser.h
ADDED
@@ -0,0 +1,107 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "arrow/json/options.h"
+#include "arrow/status.h"
+#include "arrow/util/key_value_metadata.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+class Buffer;
+class MemoryPool;
+class KeyValueMetadata;
+class ResizableBuffer;
+
+namespace json {
+
+struct Kind {
+  enum type : uint8_t {
+    kNull,
+    kBoolean,
+    kNumber,
+    kString,
+    kArray,
+    kObject,
+    kNumberOrString
+  };
+
+  static const std::string& Name(Kind::type);
+
+  static const std::shared_ptr<const KeyValueMetadata>& Tag(Kind::type);
+
+  static Kind::type FromTag(const std::shared_ptr<const KeyValueMetadata>& tag);
+
+  static Status ForType(const DataType& type, Kind::type* kind);
+};
+
+/// \class BlockParser
+/// \brief A reusable block-based parser for JSON data
+///
+/// The parser takes a block of newline delimited JSON data and extracts Arrays
+/// of unconverted strings which can be fed to a Converter to obtain a usable Array.
+///
+/// Note that in addition to parse errors (such as malformed JSON) some conversion
+/// errors are caught at parse time:
+/// - A null value in non-nullable column
+/// - Change in the JSON kind of a column. For example, if an explicit schema is provided
+///   which stipulates that field "a" is integral, a row of {"a": "not a number"} will
+///   result in an error. This also applies to fields outside an explicit schema.
+class ARROW_EXPORT BlockParser {
+ public:
+  virtual ~BlockParser() = default;
+
+  /// \brief Reserve storage for scalars parsed from a block of json
+  virtual Status ReserveScalarStorage(int64_t nbytes) = 0;
+
+  /// \brief Parse a block of data
+  virtual Status Parse(const std::shared_ptr<Buffer>& json) = 0;
+
+  /// \brief Extract parsed data
+  virtual Status Finish(std::shared_ptr<Array>* parsed) = 0;
+
+  /// \brief Return the number of parsed rows
+  int32_t num_rows() const { return num_rows_; }
+
+  /// \brief Construct a BlockParser
+  ///
+  /// \param[in] pool MemoryPool to use when constructing parsed array
+  /// \param[in] options ParseOptions to use when parsing JSON
+  /// \param[out] out constructed BlockParser
+  static Status Make(MemoryPool* pool, const ParseOptions& options,
+                     std::unique_ptr<BlockParser>* out);
+
+  static Status Make(const ParseOptions& options, std::unique_ptr<BlockParser>* out);
+
+ protected:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(BlockParser);
+
+  explicit BlockParser(MemoryPool* pool) : pool_(pool) {}
+
+  MemoryPool* pool_;
+  int32_t num_rows_ = 0;
+};
+
+} // namespace json
+} // namespace arrow
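BlockParser is an abstract interface obtained through its static Make factory, and the declared call sequence per block is reserve, parse, finish. A rough driver sketch under those assumptions; the helper function is illustrative, and the extra includes are standard Arrow headers rather than part of this diff:

```cpp
// Hypothetical driver for arrow::json::BlockParser, sketched from the
// declarations above; error handling uses ARROW_RETURN_NOT_OK from arrow/status.h.
#include <memory>

#include "arrow/array.h"
#include "arrow/buffer.h"
#include "arrow/json/parser.h"
#include "arrow/memory_pool.h"
#include "arrow/status.h"

arrow::Status ParseBlock(const std::shared_ptr<arrow::Buffer>& json_block,
                         std::shared_ptr<arrow::Array>* out) {
  auto options = arrow::json::ParseOptions::Defaults();

  std::unique_ptr<arrow::json::BlockParser> parser;
  ARROW_RETURN_NOT_OK(
      arrow::json::BlockParser::Make(arrow::default_memory_pool(), options, &parser));

  // Reserve roughly as much scalar storage as the input block occupies.
  ARROW_RETURN_NOT_OK(parser->ReserveScalarStorage(json_block->size()));
  ARROW_RETURN_NOT_OK(parser->Parse(json_block));

  // The result holds unconverted strings; a Converter turns it into typed arrays.
  return parser->Finish(out);
}
```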