Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +3 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so +3 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so +3 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h +160 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h +57 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/api.h +32 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/asof_join_node.h +41 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/backpressure_handler.h +74 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h +48 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/bloom_filter.h +326 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h +819 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join.h +75 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_dict.h +318 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h +103 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/map_node.h +81 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/options.h +866 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/order_by_impl.h +56 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/partition_util.h +184 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h +23 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/query_context.h +157 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h +226 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/task_util.h +102 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/test_nodes.h +86 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h +31 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/tpch_node.h +65 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/type_fwd.h +36 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/unmaterialized_table.h +271 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/util.h +184 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h +50 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h +233 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/c/bridge.h +348 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h +51 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h +321 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/c/helpers.h +129 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h +53 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h +466 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h +295 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h +752 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/row/grouper.h +184 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h +294 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h +36 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h +82 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h +55 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h +220 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h +112 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h +55 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/writer.h +89 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/api.h +25 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/dictionary.h +177 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h +150 -0
.gitattributes
CHANGED
@@ -69,3 +69,6 @@ llmeval-env/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpytho
 llmeval-env/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/triton/third_party/cuda/bin/ptxas filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/triton/third_party/cuda/bin/nvdisasm filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1600 filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9f843f6319551667349e0285280b415b7e1838274395c5397d12fd5c9f54161
+size 1331992
llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4473d519e5ef3c96f31cd8aa9772525c46a87d70d9a922e6dfdb5726abaa9b77
+size 1083224
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h
ADDED
@@ -0,0 +1,160 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <functional>
#include <optional>
#include <vector>

#include "arrow/compute/exec.h"
#include "arrow/result.h"

namespace arrow {
namespace acero {
namespace util {

using arrow::compute::ExecBatch;

/// \brief A container that accumulates batches until they are ready to
/// be processed.
class AccumulationQueue {
 public:
  AccumulationQueue() : row_count_(0) {}
  ~AccumulationQueue() = default;

  // We should never be copying ExecBatch around
  AccumulationQueue(const AccumulationQueue&) = delete;
  AccumulationQueue& operator=(const AccumulationQueue&) = delete;

  AccumulationQueue(AccumulationQueue&& that);
  AccumulationQueue& operator=(AccumulationQueue&& that);

  void Concatenate(AccumulationQueue&& that);
  void InsertBatch(ExecBatch batch);
  int64_t row_count() { return row_count_; }
  size_t batch_count() { return batches_.size(); }
  bool empty() const { return batches_.empty(); }
  void Clear();
  ExecBatch& operator[](size_t i);

 private:
  int64_t row_count_;
  std::vector<ExecBatch> batches_;
};

/// A queue that sequences incoming batches
///
/// This can be used when a node needs to do some kind of ordered processing on
/// the stream.
///
/// Batches can be inserted in any order. The process_callback will be called on
/// the batches, in order, without reentrant calls. For this reason the callback
/// should be quick.
///
/// For example, in a top-n node, the process callback should determine how many
/// rows need to be delivered for the given batch, and then return a task to actually
/// deliver those rows.
class SequencingQueue {
 public:
  using Task = std::function<Status()>;

  /// Strategy that describes how to handle items
  class Processor {
   public:
    /// Process the batch, potentially generating a task
    ///
    /// This method will be called on each batch in order. Calls to this method
    /// will be serialized and it will not be called reentrantly. This makes it
    /// safe to do things that rely on order but minimal time should be spent here
    /// to avoid becoming a bottleneck.
    ///
    /// \return a follow-up task that will be scheduled. The follow-up task(s) are
    ///         not guaranteed to run in any particular order. If nullopt is
    ///         returned then nothing will be scheduled.
    virtual Result<std::optional<Task>> Process(ExecBatch batch) = 0;
    /// Schedule a task
    virtual void Schedule(Task task) = 0;
  };

  virtual ~SequencingQueue() = default;

  /// Insert a batch into the queue
  ///
  /// This will insert the batch into the queue. If this batch was the next batch
  /// to deliver then this will trigger 1+ calls to the process callback to generate
  /// 1+ tasks.
  ///
  /// The task generated by this call will be executed immediately. The remaining
  /// tasks will be scheduled using the schedule callback.
  ///
  /// From a data pipeline perspective the sequencing queue is a "sometimes" breaker.
  /// If a task arrives in order then this call will usually execute the downstream
  /// pipeline. If this task arrives early then this call will only queue the data.
  virtual Status InsertBatch(ExecBatch batch) = 0;

  /// Create a queue
  /// \param processor describes how to process the batches, must outlive the queue
  static std::unique_ptr<SequencingQueue> Make(Processor* processor);
};

/// A queue that sequences incoming batches
///
/// Unlike SequencingQueue, the Process method is not expected to schedule new tasks.
///
/// If a batch arrives and another thread is currently processing then the batch
/// will be queued and control will return. In other words, delivery of batches will
/// not block on the Process method.
///
/// It can be helpful to think of this as if a dedicated thread is running Process as
/// batches arrive.
class SerialSequencingQueue {
 public:
  /// Strategy that describes how to handle items
  class Processor {
   public:
    /// Process the batch
    ///
    /// This method will be called on each batch in order. Calls to this method
    /// will be serialized and it will not be called reentrantly. This makes it
    /// safe to do things that rely on order.
    ///
    /// If this falls behind then data may accumulate
    ///
    /// TODO: Could add backpressure if needed but right now all uses of this should
    /// be pretty fast and so are unlikely to block.
    virtual Status Process(ExecBatch batch) = 0;
  };

  virtual ~SerialSequencingQueue() = default;

  /// Insert a batch into the queue
  ///
  /// This will insert the batch into the queue. If this batch was the next batch
  /// to deliver then this may trigger calls to the processor which will be run
  /// as part of this call.
  virtual Status InsertBatch(ExecBatch batch) = 0;

  /// Create a queue
  /// \param processor describes how to process the batches, must outlive the queue
  static std::unique_ptr<SerialSequencingQueue> Make(Processor* processor);
};

}  // namespace util
}  // namespace acero
}  // namespace arrow
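Editor's note: as a minimal sketch of driving the SerialSequencingQueue above, the hypothetical PrintRows processor below prints row counts in stream order. It assumes the incoming ExecBatch values carry the index the queue sequences on.

#include <iostream>
#include <vector>

#include "arrow/acero/accumulation_queue.h"
#include "arrow/status.h"

namespace example {

using arrow::acero::util::SerialSequencingQueue;

// Hypothetical processor: called serially, in batch-index order, never reentrantly.
class PrintRows : public SerialSequencingQueue::Processor {
 public:
  arrow::Status Process(arrow::compute::ExecBatch batch) override {
    std::cout << "rows in next in-order batch: " << batch.length << std::endl;
    return arrow::Status::OK();
  }
};

arrow::Status Run(std::vector<arrow::compute::ExecBatch> out_of_order_batches) {
  PrintRows processor;  // must outlive the queue
  auto queue = SerialSequencingQueue::Make(&processor);
  for (auto& batch : out_of_order_batches) {
    // Early batches are buffered; the next in-order batch triggers Process inline.
    ARROW_RETURN_NOT_OK(queue->InsertBatch(std::move(batch)));
  }
  return arrow::Status::OK();
}

}  // namespace example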
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h
ADDED
@@ -0,0 +1,57 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// This API is EXPERIMENTAL.

#pragma once

#include <memory>
#include <vector>

#include "arrow/acero/visibility.h"
#include "arrow/compute/api_aggregate.h"
#include "arrow/compute/type_fwd.h"
#include "arrow/result.h"
#include "arrow/type_fwd.h"

namespace arrow {
namespace acero {
namespace aggregate {

using compute::Aggregate;
using compute::default_exec_context;
using compute::ExecContext;

/// \brief Make the output schema of an aggregate node
///
/// The output schema is determined by the aggregation kernels, which may depend on the
/// ExecContext argument. To guarantee correct results, the same ExecContext argument
/// should be used in execution.
///
/// \param[in] input_schema the schema of the input to the node
/// \param[in] keys the grouping keys for the aggregation
/// \param[in] segment_keys the segmenting keys for the aggregation
/// \param[in] aggregates the aggregates for the aggregation
/// \param[in] exec_ctx the execution context for the aggregation
ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
    const std::shared_ptr<Schema>& input_schema, const std::vector<FieldRef>& keys,
    const std::vector<FieldRef>& segment_keys, const std::vector<Aggregate>& aggregates,
    ExecContext* exec_ctx = default_exec_context());

}  // namespace aggregate
}  // namespace acero
}  // namespace arrow
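Editor's note: a sketch of calling MakeOutputSchema for a grouped sum. The field names are illustrative, and the brace-initialized Aggregate assumes compute::Aggregate's {function, options, target, name} constructor shape.

#include <vector>

#include "arrow/acero/aggregate_node.h"
#include "arrow/api.h"

arrow::Result<std::shared_ptr<arrow::Schema>> SumSchema() {
  auto input_schema = arrow::schema(
      {arrow::field("key", arrow::int32()), arrow::field("x", arrow::float64())});
  std::vector<arrow::FieldRef> keys = {arrow::FieldRef("key")};
  std::vector<arrow::FieldRef> segment_keys;  // no segmenting
  std::vector<arrow::compute::Aggregate> aggregates = {
      {"hash_sum", /*options=*/nullptr, arrow::FieldRef("x"), /*name=*/"x_sum"}};
  // Uses the default ExecContext; pass the same context you will execute with.
  return arrow::acero::aggregate::MakeOutputSchema(input_schema, keys, segment_keys,
                                                   aggregates);
}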
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/api.h
ADDED
@@ -0,0 +1,32 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// NOTE: API is EXPERIMENTAL and will change without going through a
// deprecation cycle

#pragma once

/// \defgroup acero-api Utilities for creating and executing execution plans
/// @{
/// @}

/// \defgroup acero-nodes Options classes for the various exec nodes
/// @{
/// @}

#include "arrow/acero/exec_plan.h"
#include "arrow/acero/options.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/asof_join_node.h
ADDED
@@ -0,0 +1,41 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <vector>

#include "arrow/acero/options.h"
#include "arrow/acero/visibility.h"
#include "arrow/compute/exec.h"
#include "arrow/type.h"

namespace arrow {
namespace acero {
namespace asofjoin {

using AsofJoinKeys = AsofJoinNodeOptions::Keys;

/// \brief Make the output schema of an as-of-join node
///
/// \param[in] input_schema the schema of each input to the node
/// \param[in] input_keys the key of each input to the node
ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
    const std::vector<std::shared_ptr<Schema>>& input_schema,
    const std::vector<AsofJoinKeys>& input_keys);

}  // namespace asofjoin
}  // namespace acero
}  // namespace arrow
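Editor's note: a sketch of deriving the joined schema for two inputs. It assumes AsofJoinNodeOptions::Keys exposes on_key/by_key members; the schemas and field names are made up.

#include <vector>

#include "arrow/acero/asof_join_node.h"
#include "arrow/api.h"

arrow::Result<std::shared_ptr<arrow::Schema>> AsofSchema() {
  auto left = arrow::schema({arrow::field("time", arrow::int64()),
                             arrow::field("id", arrow::int32()),
                             arrow::field("price", arrow::float64())});
  auto right = arrow::schema({arrow::field("time", arrow::int64()),
                              arrow::field("id", arrow::int32()),
                              arrow::field("size", arrow::float64())});
  // One Keys entry per input: the "on" key orders the stream, "by" keys match rows.
  std::vector<arrow::acero::asofjoin::AsofJoinKeys> keys = {
      {/*on_key=*/arrow::FieldRef("time"), /*by_key=*/{arrow::FieldRef("id")}},
      {/*on_key=*/arrow::FieldRef("time"), /*by_key=*/{arrow::FieldRef("id")}}};
  return arrow::acero::asofjoin::MakeOutputSchema({left, right}, keys);
}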
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/backpressure_handler.h
ADDED
@@ -0,0 +1,74 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once
#include "arrow/acero/exec_plan.h"
#include "arrow/acero/options.h"

#include <memory>

namespace arrow::acero {

class BackpressureHandler {
 private:
  BackpressureHandler(ExecNode* input, size_t low_threshold, size_t high_threshold,
                      std::unique_ptr<BackpressureControl> backpressure_control)
      : input_(input),
        low_threshold_(low_threshold),
        high_threshold_(high_threshold),
        backpressure_control_(std::move(backpressure_control)) {}

 public:
  static Result<BackpressureHandler> Make(
      ExecNode* input, size_t low_threshold, size_t high_threshold,
      std::unique_ptr<BackpressureControl> backpressure_control) {
    if (low_threshold >= high_threshold) {
      return Status::Invalid("low threshold (", low_threshold,
                             ") must be less than high threshold (", high_threshold,
                             ")");
    }
    if (backpressure_control == NULLPTR) {
      return Status::Invalid("null backpressure control parameter");
    }
    BackpressureHandler backpressure_handler(input, low_threshold, high_threshold,
                                             std::move(backpressure_control));
    return std::move(backpressure_handler);
  }

  void Handle(size_t start_level, size_t end_level) {
    if (start_level < high_threshold_ && end_level >= high_threshold_) {
      backpressure_control_->Pause();
    } else if (start_level > low_threshold_ && end_level <= low_threshold_) {
      backpressure_control_->Resume();
    }
  }

  Status ForceShutdown() {
    // It may be unintuitive to call Resume() here, but this is to avoid a deadlock.
    // Since acero's executor won't terminate if any one node is paused, we need to
    // force resume the node before stopping production.
    backpressure_control_->Resume();
    return input_->StopProducing();
  }

 private:
  ExecNode* input_;
  size_t low_threshold_;
  size_t high_threshold_;
  std::unique_ptr<BackpressureControl> backpressure_control_;
};

}  // namespace arrow::acero
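Editor's note: a toy sketch of the hysteresis in Handle(): pause when the queued level crosses high_threshold upward, resume when it crosses low_threshold downward. The RecordingControl type is invented for the example; BackpressureControl itself is declared in options.h.

#include <atomic>
#include <memory>

#include "arrow/acero/backpressure_handler.h"
#include "arrow/result.h"

namespace example {

// Invented control that just records the last signal.
class RecordingControl : public arrow::acero::BackpressureControl {
 public:
  void Pause() override { paused_.store(true); }
  void Resume() override { paused_.store(false); }
  bool paused() const { return paused_.load(); }

 private:
  std::atomic<bool> paused_{false};
};

arrow::Status Demo(arrow::acero::ExecNode* source) {
  auto control = std::make_unique<RecordingControl>();
  RecordingControl* view = control.get();
  ARROW_ASSIGN_OR_RAISE(auto handler,
                        arrow::acero::BackpressureHandler::Make(
                            source, /*low_threshold=*/4, /*high_threshold=*/8,
                            std::move(control)));
  handler.Handle(/*start_level=*/7, /*end_level=*/9);  // crosses 8 upward -> Pause
  handler.Handle(/*start_level=*/9, /*end_level=*/3);  // crosses 4 downward -> Resume
  return view->paused() ? arrow::Status::Invalid("still paused") : arrow::Status::OK();
}

}  // namespace example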
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h
ADDED
@@ -0,0 +1,48 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <string>
#include <vector>

#include "benchmark/benchmark.h"

#include "arrow/acero/exec_plan.h"
#include "arrow/acero/test_util_internal.h"
#include "arrow/compute/exec.h"

namespace arrow {

namespace acero {

Status BenchmarkNodeOverhead(benchmark::State& state, int32_t num_batches,
                             int32_t batch_size, arrow::acero::BatchesWithSchema data,
                             std::vector<arrow::acero::Declaration>& node_declarations,
                             arrow::MemoryPool* pool = default_memory_pool());

Status BenchmarkIsolatedNodeOverhead(benchmark::State& state,
                                     arrow::compute::Expression expr,
                                     int32_t num_batches, int32_t batch_size,
                                     arrow::acero::BatchesWithSchema data,
                                     std::string factory_name,
                                     arrow::acero::ExecNodeOptions& options,
                                     arrow::MemoryPool* pool = default_memory_pool());

}  // namespace acero
}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/bloom_filter.h
ADDED
@@ -0,0 +1,326 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#if defined(ARROW_HAVE_RUNTIME_AVX2)
#include <immintrin.h>
#endif

#include <atomic>
#include <cstdint>
#include <memory>

#include "arrow/acero/partition_util.h"
#include "arrow/acero/util.h"
#include "arrow/memory_pool.h"
#include "arrow/result.h"
#include "arrow/status.h"

namespace arrow {
namespace acero {

// A set of pre-generated bit masks from a 64-bit word.
//
// It is used to map selected bits of hash to a bit mask that will be used in
// a Bloom filter.
//
// These bit masks need to look random and need to have a similar fraction of
// bits set in order for a Bloom filter to have a low false positives rate.
//
struct ARROW_ACERO_EXPORT BloomFilterMasks {
  // Generate all masks as a single bit vector. Each bit offset in this bit
  // vector corresponds to a single mask.
  // In each consecutive kBitsPerMask bits, there must be between
  // kMinBitsSet and kMaxBitsSet bits set.
  //
  BloomFilterMasks();

  inline uint64_t mask(int bit_offset) {
#if ARROW_LITTLE_ENDIAN
    return (arrow::util::SafeLoadAs<uint64_t>(masks_ + bit_offset / 8) >>
            (bit_offset % 8)) &
           kFullMask;
#else
    return (BYTESWAP(arrow::util::SafeLoadAs<uint64_t>(masks_ + bit_offset / 8)) >>
            (bit_offset % 8)) &
           kFullMask;
#endif
  }

  // Masks are 57 bits long because then they can be accessed at an
  // arbitrary bit offset using a single unaligned 64-bit load instruction.
  //
  static constexpr int kBitsPerMask = 57;
  static constexpr uint64_t kFullMask = (1ULL << kBitsPerMask) - 1;

  // Minimum and maximum number of bits set in each mask.
  // This constraint is enforced when generating the bit masks.
  // Values should be close to each other and chosen as to minimize a Bloom
  // filter false positives rate.
  //
  static constexpr int kMinBitsSet = 4;
  static constexpr int kMaxBitsSet = 5;

  // Number of generated masks.
  // Having more masks to choose from will improve the false positives rate of
  // the Bloom filter but will also use more memory, which may lead to more CPU
  // cache misses.
  // The chosen value results in using only a few cache-lines for mask lookups,
  // while providing a good variety of available bit masks.
  //
  static constexpr int kLogNumMasks = 10;
  static constexpr int kNumMasks = 1 << kLogNumMasks;

  // Data of masks. Masks are stored in a single bit vector. Nth mask is
  // kBitsPerMask bits starting at bit offset N.
  //
  static constexpr int kTotalBytes = (kNumMasks + 64) / 8;
  uint8_t masks_[kTotalBytes];
};

// A variant of a blocked Bloom filter implementation.
// A Bloom filter is a data structure that provides approximate membership test
// functionality based only on the hash of the key. Membership test may return
// false positives but not false negatives. Approximation of the result allows
// in general case (for arbitrary data types of keys) to save on both memory and
// lookup cost compared to the accurate membership test.
// The accurate test may sometimes still be cheaper for specific data types
// and inputs, e.g. integers from a small range.
//
// This blocked Bloom filter is optimized for use in hash joins, to achieve a
// good balance between the size of the filter, the cost of its building and
// querying and the rate of false positives.
//
class ARROW_ACERO_EXPORT BlockedBloomFilter {
  friend class BloomFilterBuilder_SingleThreaded;
  friend class BloomFilterBuilder_Parallel;

 public:
  BlockedBloomFilter() : log_num_blocks_(0), num_blocks_(0), blocks_(NULLPTR) {}

  inline bool Find(uint64_t hash) const {
    uint64_t m = mask(hash);
    uint64_t b = blocks_[block_id(hash)];
    return (b & m) == m;
  }

  // Uses SIMD if available for smaller Bloom filters.
  // Uses memory prefetching for larger Bloom filters.
  //
  void Find(int64_t hardware_flags, int64_t num_rows, const uint32_t* hashes,
            uint8_t* result_bit_vector, bool enable_prefetch = true) const;
  void Find(int64_t hardware_flags, int64_t num_rows, const uint64_t* hashes,
            uint8_t* result_bit_vector, bool enable_prefetch = true) const;

  int log_num_blocks() const { return log_num_blocks_; }

  int NumHashBitsUsed() const;

  bool IsSameAs(const BlockedBloomFilter* other) const;

  int64_t NumBitsSet() const;

  // Folding of a block Bloom filter after the initial version
  // has been built.
  //
  // One of the parameters for creation of Bloom filter is the number
  // of bits allocated for it. The more bits allocated, the lower the
  // probability of false positives. A good heuristic is to aim for
  // half of the bits set in the constructed Bloom filter. This should
  // result in a good trade off between size (and following cost of
  // memory accesses) and false positives rate.
  //
  // There might have been many duplicate keys in the input provided
  // to Bloom filter builder. In that case the resulting bit vector
  // would be more sparse than originally intended. It is possible to
  // easily correct that and cut in half the size of Bloom filter
  // after it has already been constructed. The process to do that is
  // approximately equal to OR-ing bits from upper and lower half (the
  // way we address these bits when inserting or querying a hash makes
  // such folding in half possible).
  //
  // We will keep folding as long as the fraction of bits set is less
  // than 1/4. The resulting bit vector density should be in the [1/4,
  // 1/2) range.
  //
  void Fold();

 private:
  Status CreateEmpty(int64_t num_rows_to_insert, MemoryPool* pool);

  inline void Insert(uint64_t hash) {
    uint64_t m = mask(hash);
    uint64_t& b = blocks_[block_id(hash)];
    b |= m;
  }

  void Insert(int64_t hardware_flags, int64_t num_rows, const uint32_t* hashes);
  void Insert(int64_t hardware_flags, int64_t num_rows, const uint64_t* hashes);

  inline uint64_t mask(uint64_t hash) const {
    // The lowest bits of hash are used to pick mask index.
    //
    int mask_id = static_cast<int>(hash & (BloomFilterMasks::kNumMasks - 1));
    uint64_t result = masks_.mask(mask_id);

    // The next set of hash bits is used to pick the amount of bit
    // rotation of the mask.
    //
    int rotation = (hash >> BloomFilterMasks::kLogNumMasks) & 63;
    result = ROTL64(result, rotation);

    return result;
  }

  inline int64_t block_id(uint64_t hash) const {
    // The next set of hash bits following the bits used to select a
    // mask is used to pick block id (index of 64-bit word in a bit
    // vector).
    //
    return (hash >> (BloomFilterMasks::kLogNumMasks + 6)) & (num_blocks_ - 1);
  }

  template <typename T>
  inline void InsertImp(int64_t num_rows, const T* hashes);

  template <typename T>
  inline void FindImp(int64_t num_rows, const T* hashes, uint8_t* result_bit_vector,
                      bool enable_prefetch) const;

  void SingleFold(int num_folds);

#if defined(ARROW_HAVE_RUNTIME_AVX2)
  inline __m256i mask_avx2(__m256i hash) const;
  inline __m256i block_id_avx2(__m256i hash) const;
  int64_t Insert_avx2(int64_t num_rows, const uint32_t* hashes);
  int64_t Insert_avx2(int64_t num_rows, const uint64_t* hashes);
  template <typename T>
  int64_t InsertImp_avx2(int64_t num_rows, const T* hashes);
  int64_t Find_avx2(int64_t num_rows, const uint32_t* hashes,
                    uint8_t* result_bit_vector) const;
  int64_t Find_avx2(int64_t num_rows, const uint64_t* hashes,
                    uint8_t* result_bit_vector) const;
  template <typename T>
  int64_t FindImp_avx2(int64_t num_rows, const T* hashes,
                       uint8_t* result_bit_vector) const;
#endif

  bool UsePrefetch() const {
    return num_blocks_ * sizeof(uint64_t) > kPrefetchLimitBytes;
  }

  static constexpr int64_t kPrefetchLimitBytes = 256 * 1024;

  static BloomFilterMasks masks_;

  // Total number of bits used by block Bloom filter must be a power
  // of 2.
  //
  int log_num_blocks_;
  int64_t num_blocks_;

  // Buffer allocated to store an array of power of 2 64-bit blocks.
  //
  std::shared_ptr<Buffer> buf_;
  // Pointer to mutable data owned by Buffer
  //
  uint64_t* blocks_;
};

// We have two separate implementations of building a Bloom filter, multi-threaded and
// single-threaded.
//
// The single-threaded version is useful in two ways:
// a) It allows verifying the parallel implementation in tests (the single-threaded one
// is simpler and can be used as the source of truth).
// b) It is preferred for small and medium size Bloom filters, because it skips extra
// synchronization related steps from the parallel variant (partitioning and taking
// locks).
//
enum class BloomFilterBuildStrategy {
  SINGLE_THREADED = 0,
  PARALLEL = 1,
};

class ARROW_ACERO_EXPORT BloomFilterBuilder {
 public:
  virtual ~BloomFilterBuilder() = default;
  virtual Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
                       int64_t num_rows, int64_t num_batches,
                       BlockedBloomFilter* build_target) = 0;
  virtual int64_t num_tasks() const { return 0; }
  virtual Status PushNextBatch(size_t thread_index, int64_t num_rows,
                               const uint32_t* hashes) = 0;
  virtual Status PushNextBatch(size_t thread_index, int64_t num_rows,
                               const uint64_t* hashes) = 0;
  virtual void CleanUp() {}
  static std::unique_ptr<BloomFilterBuilder> Make(BloomFilterBuildStrategy strategy);
};

class ARROW_ACERO_EXPORT BloomFilterBuilder_SingleThreaded : public BloomFilterBuilder {
 public:
  Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
               int64_t num_rows, int64_t num_batches,
               BlockedBloomFilter* build_target) override;

  Status PushNextBatch(size_t /*thread_index*/, int64_t num_rows,
                       const uint32_t* hashes) override;

  Status PushNextBatch(size_t /*thread_index*/, int64_t num_rows,
                       const uint64_t* hashes) override;

 private:
  template <typename T>
  void PushNextBatchImp(int64_t num_rows, const T* hashes);

  int64_t hardware_flags_;
  BlockedBloomFilter* build_target_;
};

class ARROW_ACERO_EXPORT BloomFilterBuilder_Parallel : public BloomFilterBuilder {
 public:
  Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
               int64_t num_rows, int64_t num_batches,
               BlockedBloomFilter* build_target) override;

  Status PushNextBatch(size_t thread_id, int64_t num_rows,
                       const uint32_t* hashes) override;

  Status PushNextBatch(size_t thread_id, int64_t num_rows,
                       const uint64_t* hashes) override;

  void CleanUp() override;

 private:
  template <typename T>
  void PushNextBatchImp(size_t thread_id, int64_t num_rows, const T* hashes);

  int64_t hardware_flags_;
  BlockedBloomFilter* build_target_;
  int log_num_prtns_;
  struct ThreadLocalState {
    std::vector<uint32_t> partitioned_hashes_32;
    std::vector<uint64_t> partitioned_hashes_64;
    std::vector<uint16_t> partition_ranges;
    std::vector<int> unprocessed_partition_ids;
  };
  std::vector<ThreadLocalState> thread_local_states_;
  PartitionLocks prtn_locks_;
};

}  // namespace acero
}  // namespace arrow
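Editor's note: to make the builder flow above concrete, a single-threaded sketch that builds a filter from pre-computed 64-bit hashes and then probes it. The hash values are placeholders; in Acero they would come from the hash join's key hashing.

#include <cstdint>
#include <vector>

#include "arrow/acero/bloom_filter.h"

arrow::Status BuildAndProbe(const std::vector<uint64_t>& hashes) {
  arrow::acero::BlockedBloomFilter filter;
  auto builder = arrow::acero::BloomFilterBuilder::Make(
      arrow::acero::BloomFilterBuildStrategy::SINGLE_THREADED);
  ARROW_RETURN_NOT_OK(builder->Begin(
      /*num_threads=*/1, /*hardware_flags=*/0, arrow::default_memory_pool(),
      /*num_rows=*/static_cast<int64_t>(hashes.size()), /*num_batches=*/1, &filter));
  ARROW_RETURN_NOT_OK(builder->PushNextBatch(
      /*thread_index=*/0, static_cast<int64_t>(hashes.size()), hashes.data()));
  builder->CleanUp();
  // Find() may return false positives, never false negatives.
  bool maybe_present = !hashes.empty() && filter.Find(hashes[0]);
  (void)maybe_present;
  return arrow::Status::OK();
}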
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h
ADDED
@@ -0,0 +1,819 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
#pragma once
|
19 |
+
|
20 |
+
#include <cstddef>
|
21 |
+
#include <cstdint>
|
22 |
+
#include <functional>
|
23 |
+
#include <memory>
|
24 |
+
#include <optional>
|
25 |
+
#include <string>
|
26 |
+
#include <utility>
|
27 |
+
#include <vector>
|
28 |
+
|
29 |
+
#include "arrow/acero/type_fwd.h"
|
30 |
+
#include "arrow/acero/visibility.h"
|
31 |
+
#include "arrow/compute/api_vector.h"
|
32 |
+
#include "arrow/compute/exec.h"
|
33 |
+
#include "arrow/compute/ordering.h"
|
34 |
+
#include "arrow/type_fwd.h"
|
35 |
+
#include "arrow/util/future.h"
|
36 |
+
#include "arrow/util/macros.h"
|
37 |
+
#include "arrow/util/tracing.h"
|
38 |
+
#include "arrow/util/type_fwd.h"
|
39 |
+
|
40 |
+
namespace arrow {
|
41 |
+
|
42 |
+
using compute::ExecBatch;
|
43 |
+
using compute::ExecContext;
|
44 |
+
using compute::FunctionRegistry;
|
45 |
+
using compute::GetFunctionRegistry;
|
46 |
+
using compute::Ordering;
|
47 |
+
using compute::threaded_exec_context;
|
48 |
+
|
49 |
+
namespace acero {
|
50 |
+
|
51 |
+
/// \addtogroup acero-internals
|
52 |
+
/// @{
|
53 |
+
|
54 |
+
class ARROW_ACERO_EXPORT ExecPlan : public std::enable_shared_from_this<ExecPlan> {
|
55 |
+
public:
|
56 |
+
// This allows operators to rely on signed 16-bit indices
|
57 |
+
static const uint32_t kMaxBatchSize = 1 << 15;
|
58 |
+
using NodeVector = std::vector<ExecNode*>;
|
59 |
+
|
60 |
+
virtual ~ExecPlan() = default;
|
61 |
+
|
62 |
+
QueryContext* query_context();
|
63 |
+
|
64 |
+
/// \brief retrieve the nodes in the plan
|
65 |
+
const NodeVector& nodes() const;
|
66 |
+
|
67 |
+
/// Make an empty exec plan
|
68 |
+
static Result<std::shared_ptr<ExecPlan>> Make(
|
69 |
+
QueryOptions options, ExecContext exec_context = *threaded_exec_context(),
|
70 |
+
std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
|
71 |
+
|
72 |
+
static Result<std::shared_ptr<ExecPlan>> Make(
|
73 |
+
ExecContext exec_context = *threaded_exec_context(),
|
74 |
+
std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
|
75 |
+
|
76 |
+
static Result<std::shared_ptr<ExecPlan>> Make(
|
77 |
+
QueryOptions options, ExecContext* exec_context,
|
78 |
+
std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
|
79 |
+
|
80 |
+
static Result<std::shared_ptr<ExecPlan>> Make(
|
81 |
+
ExecContext* exec_context,
|
82 |
+
std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
|
83 |
+
|
84 |
+
ExecNode* AddNode(std::unique_ptr<ExecNode> node);
|
85 |
+
|
86 |
+
template <typename Node, typename... Args>
|
87 |
+
Node* EmplaceNode(Args&&... args) {
|
88 |
+
std::unique_ptr<Node> node{new Node{std::forward<Args>(args)...}};
|
89 |
+
auto out = node.get();
|
90 |
+
AddNode(std::move(node));
|
91 |
+
return out;
|
92 |
+
}
|
93 |
+
|
94 |
+
Status Validate();
|
95 |
+
|
96 |
+
/// \brief Start producing on all nodes
|
97 |
+
///
|
98 |
+
/// Nodes are started in reverse topological order, such that any node
|
99 |
+
/// is started before all of its inputs.
|
100 |
+
void StartProducing();
|
101 |
+
|
102 |
+
/// \brief Stop producing on all nodes
|
103 |
+
///
|
104 |
+
/// Triggers all sources to stop producing new data. In order to cleanly stop the plan
|
105 |
+
/// will continue to run any tasks that are already in progress. The caller should
|
106 |
+
/// still wait for `finished` to complete before destroying the plan.
|
107 |
+
void StopProducing();
|
108 |
+
|
109 |
+
/// \brief A future which will be marked finished when all tasks have finished.
|
110 |
+
Future<> finished();
|
111 |
+
|
112 |
+
/// \brief Return whether the plan has non-empty metadata
|
113 |
+
bool HasMetadata() const;
|
114 |
+
|
115 |
+
/// \brief Return the plan's attached metadata
|
116 |
+
std::shared_ptr<const KeyValueMetadata> metadata() const;
|
117 |
+
|
118 |
+
std::string ToString() const;
|
119 |
+
};
|
120 |
+
|
121 |
+
// Acero can be extended by providing custom implementations of ExecNode. The methods
|
122 |
+
// below are documented in detail and provide careful instruction on how to fulfill the
|
123 |
+
// ExecNode contract. It's suggested you familiarize yourself with the Acero
|
124 |
+
// documentation in the C++ user guide.
|
125 |
+
class ARROW_ACERO_EXPORT ExecNode {
|
126 |
+
public:
|
127 |
+
using NodeVector = std::vector<ExecNode*>;
|
128 |
+
|
129 |
+
virtual ~ExecNode() = default;
|
130 |
+
|
131 |
+
virtual const char* kind_name() const = 0;
|
132 |
+
|
133 |
+
// The number of inputs expected by this node
|
134 |
+
int num_inputs() const { return static_cast<int>(inputs_.size()); }
|
135 |
+
|
136 |
+
/// This node's predecessors in the exec plan
|
137 |
+
const NodeVector& inputs() const { return inputs_; }
|
138 |
+
|
139 |
+
/// True if the plan has no output schema (is a sink)
|
140 |
+
bool is_sink() const { return !output_schema_; }
|
141 |
+
|
142 |
+
/// \brief Labels identifying the function of each input.
|
143 |
+
const std::vector<std::string>& input_labels() const { return input_labels_; }
|
144 |
+
|
145 |
+
/// This node's successor in the exec plan
|
146 |
+
const ExecNode* output() const { return output_; }
|
147 |
+
|
148 |
+
/// The datatypes for batches produced by this node
|
149 |
+
const std::shared_ptr<Schema>& output_schema() const { return output_schema_; }
|
150 |
+
|
151 |
+
/// This node's exec plan
|
152 |
+
ExecPlan* plan() { return plan_; }
|
153 |
+
|
154 |
+
/// \brief An optional label, for display and debugging
|
155 |
+
///
|
156 |
+
/// There is no guarantee that this value is non-empty or unique.
|
157 |
+
const std::string& label() const { return label_; }
|
158 |
+
void SetLabel(std::string label) { label_ = std::move(label); }
|
159 |
+
|
160 |
+
virtual Status Validate() const;
|
161 |
+
|
162 |
+
/// \brief the ordering of the output batches
|
163 |
+
///
|
164 |
+
/// This does not guarantee the batches will be emitted by this node
|
165 |
+
/// in order. Instead it guarantees that the batches will have their
|
166 |
+
/// ExecBatch::index property set in a way that respects this ordering.
|
167 |
+
///
|
168 |
+
/// In other words, given the ordering {{"x", SortOrder::Ascending}} we
|
169 |
+
/// know that all values of x in a batch with index N will be less than
|
170 |
+
/// or equal to all values of x in a batch with index N+k (assuming k > 0).
|
171 |
  /// Furthermore, we also know that values will be sorted within a batch.
  /// Any row N will have a value of x that is less than the value for
  /// any row N+k.
  ///
  /// Note that an ordering can be both Ordering::Unordered and Ordering::Implicit.
  /// A node's output should be marked Ordering::Unordered if the order is
  /// non-deterministic. For example, a hash-join has no predictable output order.
  ///
  /// If the ordering is Ordering::Implicit then there is a meaningful order but that
  /// ordering is not represented by any column in the data. The most common case for
  /// this is when reading data from an in-memory table. The data has an implicit "row
  /// order" which is not necessarily represented in the data set.
  ///
  /// A filter or project node will not modify the ordering. Nothing needs to be done
  /// other than ensure the index assigned to output batches is the same as the
  /// input batch that was mapped.
  ///
  /// Other nodes may introduce order. For example, an order-by node will emit
  /// a brand new ordering independent of the input ordering.
  ///
  /// Finally, as described above, nodes such as a hash-join or aggregation may
  /// destroy ordering (although these nodes could also choose to establish a
  /// new ordering based on the hash keys).
  ///
  /// Some nodes will require an ordering. For example, a fetch node or an
  /// asof join node will only function if the input data is ordered (for fetch
  /// it is enough to be implicitly ordered; for an asof join the ordering must
  /// be explicit and compatible with the on key).
  ///
  /// Nodes that maintain ordering should be careful to avoid introducing gaps
  /// in the batch index. This may require emitting empty batches in order to
  /// maintain continuity.
  virtual const Ordering& ordering() const;

  /// Upstream API:
  /// These functions are called by input nodes that want to inform this node
  /// about an updated condition (a new input batch or an impending
  /// end of stream).
  ///
  /// Implementation rules:
  /// - these may be called anytime after StartProducing() has succeeded
  ///   (and even during or after StopProducing())
  /// - these may be called concurrently
  /// - these are allowed to call back into PauseProducing(), ResumeProducing()
  ///   and StopProducing()

  /// Transfer input batch to ExecNode
  ///
  /// A node will typically perform some kind of operation on the batch
  /// and then call InputReceived on its outputs with the result.
  ///
  /// Other nodes may need to accumulate some number of inputs before any
  /// output can be produced. These nodes will add the batch to some kind
  /// of in-memory accumulation queue and return.
  virtual Status InputReceived(ExecNode* input, ExecBatch batch) = 0;

  /// Mark the inputs finished after the given number of batches.
  ///
  /// This may be called before all inputs are received. This simply fixes
  /// the total number of incoming batches for an input, so that the ExecNode
  /// knows when it has received all input, regardless of order.
  virtual Status InputFinished(ExecNode* input, int total_batches) = 0;

  /// \brief Perform any needed initialization
  ///
  /// This hook performs any actions in between creation of ExecPlan and the call to
  /// StartProducing. An example could be Bloom filter pushdown. The order of ExecNodes
  /// that executes this method is undefined, but the calls are made synchronously.
  ///
  /// At this point a node can rely on all inputs & outputs (and the input schemas)
  /// being well defined.
  virtual Status Init();

  /// Lifecycle API:
  /// - start / stop to initiate and terminate production
  /// - pause / resume to apply backpressure
  ///
  /// Implementation rules:
  /// - StartProducing() should not recurse into the inputs, as it is
  ///   handled by ExecPlan::StartProducing()
  /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
  ///   concurrently, potentially even before the call to StartProducing
  ///   has finished.
  /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
  ///   by the downstream nodes' InputReceived(), InputFinished() methods
  ///
  /// StopProducing may be called due to an error, by the user (e.g. cancel), or
  /// because a node has all the data it needs (e.g. limit, top-k on sorted data).
  /// This means the method may be called multiple times and we have the following
  /// additional rules:
  /// - StopProducing() must be idempotent
  /// - StopProducing() must be forwarded to inputs (this is needed for the limit/top-k
  ///   case because we may not be stopping the entire plan)

  // Right now, since synchronous calls happen in both directions (input to
  // output and then output to input), a node must be careful to be reentrant
  // against synchronous calls from its output, *and* also concurrent calls from
  // other threads. The most reliable solution is to update the internal state
  // first, and notify outputs only at the end.
  //
  // Concurrent calls to PauseProducing and ResumeProducing can be hard to sequence
  // as they may travel at different speeds through the plan.
  //
  // For example, consider a resume that comes quickly after a pause. If the source
  // receives the resume before the pause the source may think the destination is full
  // and halt production, which would lead to deadlock.
  //
  // To resolve this a counter is sent for all calls to pause/resume. Only the call with
  // the highest counter value is valid. So if a call to PauseProducing(5) comes after
  // a call to ResumeProducing(6) then the source should continue producing.
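  // A minimal sketch of the counter rule above, as a source node might apply it
  // (illustrative only, not the actual implementation; `mutex_`, `last_counter_`
  // and `paused_` are hypothetical members, and <mutex> is assumed included):
  //
  //   void PauseProducing(ExecNode* output, int32_t counter) override {
  //     std::lock_guard<std::mutex> lock(mutex_);
  //     if (counter <= last_counter_) return;  // stale request, ignore it
  //     last_counter_ = counter;
  //     paused_ = true;
  //   }
  //   void ResumeProducing(ExecNode* output, int32_t counter) override {
  //     std::lock_guard<std::mutex> lock(mutex_);
  //     if (counter <= last_counter_) return;  // stale request, ignore it
  //     last_counter_ = counter;
  //     paused_ = false;
  //   }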
  /// \brief Start producing
  ///
  /// This must only be called once.
  ///
  /// This is typically called automatically by ExecPlan::StartProducing().
  virtual Status StartProducing() = 0;

  /// \brief Pause producing temporarily
  ///
  /// \param output Pointer to the output that is full
  /// \param counter Counter used to sequence calls to pause/resume
  ///
  /// This call is a hint that an output node is currently not willing
  /// to receive data.
  ///
  /// This may be called any number of times.
  /// However, the node is still free to produce data (which may be difficult
  /// to prevent anyway if data is produced using multiple threads).
  virtual void PauseProducing(ExecNode* output, int32_t counter) = 0;

  /// \brief Resume producing after a temporary pause
  ///
  /// \param output Pointer to the output that is now free
  /// \param counter Counter used to sequence calls to pause/resume
  ///
  /// This call is a hint that an output node is willing to receive data again.
  ///
  /// This may be called any number of times.
  virtual void ResumeProducing(ExecNode* output, int32_t counter) = 0;

  /// \brief Stop producing new data
  ///
  /// If this node is a source then the source should stop generating data
  /// as quickly as possible. If this node is not a source then there is typically
  /// nothing that needs to be done, although a node may choose to start ignoring
  /// incoming data.
  ///
  /// This method will be called when an error occurs in the plan.
  /// This method may also be called by the user if they wish to end a plan early.
  /// Finally, this method may be called if a node determines it no longer needs any more
  /// input (for example, a limit node).
  ///
  /// This method may be called multiple times.
  ///
  /// This is not a pause. There will be no way to start the source again after this has
  /// been called.
  virtual Status StopProducing();

  std::string ToString(int indent = 0) const;

 protected:
  ExecNode(ExecPlan* plan, NodeVector inputs, std::vector<std::string> input_labels,
           std::shared_ptr<Schema> output_schema);

  virtual Status StopProducingImpl() = 0;

  /// Provide extra info to include in the string representation.
  virtual std::string ToStringExtra(int indent = 0) const;

  std::atomic<bool> stopped_;
  ExecPlan* plan_;
  std::string label_;

  NodeVector inputs_;
  std::vector<std::string> input_labels_;

  std::shared_ptr<Schema> output_schema_;
  ExecNode* output_ = NULLPTR;
};

/// \brief An extensible registry for factories of ExecNodes
class ARROW_ACERO_EXPORT ExecFactoryRegistry {
 public:
  using Factory = std::function<Result<ExecNode*>(ExecPlan*, std::vector<ExecNode*>,
                                                  const ExecNodeOptions&)>;

  virtual ~ExecFactoryRegistry() = default;

  /// \brief Get the named factory from this registry
  ///
  /// will raise if factory_name is not found
  virtual Result<Factory> GetFactory(const std::string& factory_name) = 0;

  /// \brief Add a factory to this registry with the provided name
  ///
  /// will raise if factory_name is already in the registry
  virtual Status AddFactory(std::string factory_name, Factory factory) = 0;
};

/// The default registry, which includes built-in factories.
ARROW_ACERO_EXPORT
ExecFactoryRegistry* default_exec_factory_registry();

/// \brief Construct an ExecNode using the named factory
inline Result<ExecNode*> MakeExecNode(
    const std::string& factory_name, ExecPlan* plan, std::vector<ExecNode*> inputs,
    const ExecNodeOptions& options,
    ExecFactoryRegistry* registry = default_exec_factory_registry()) {
  ARROW_ASSIGN_OR_RAISE(auto factory, registry->GetFactory(factory_name));
  return factory(plan, std::move(inputs), options);
}
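// A hedged usage sketch of MakeExecNode: attach a "filter" node to an existing
// plan (assumes `plan` and `source` were created elsewhere; "filter" and
// FilterNodeOptions are built-ins, the predicate here is arbitrary):
//
//   ARROW_ASSIGN_OR_RAISE(
//       ExecNode* filter_node,
//       MakeExecNode("filter", plan.get(), {source},
//                    FilterNodeOptions{compute::greater(compute::field_ref("x"),
//                                                       compute::literal(3))}));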

/// @}

/// \addtogroup acero-api
/// @{

/// \brief Helper class for declaring execution nodes
///
/// A Declaration represents an unconstructed ExecNode (and potentially an entire graph
/// since its inputs may also be Declarations)
///
/// A Declaration can be converted to a plan and executed using one of the
/// DeclarationToXyz methods.
///
/// For more direct control, a Declaration can be added to an existing execution
/// plan with Declaration::AddToPlan, which will recursively construct any inputs as
/// necessary.
struct ARROW_ACERO_EXPORT Declaration {
  using Input = std::variant<ExecNode*, Declaration>;

  Declaration() {}

  /// \brief construct a declaration
  /// \param factory_name the name of the exec node to construct. The node must have
  ///        been added to the exec node registry with this name.
  /// \param inputs the inputs to the node, these should be other declarations
  /// \param options options that control the behavior of the node. You must use
  ///        the appropriate subclass. For example, if `factory_name` is
  ///        "project" then `options` should be ProjectNodeOptions.
  /// \param label a label to give the node. Can be used to distinguish it from other
  ///        nodes of the same type in the plan.
  Declaration(std::string factory_name, std::vector<Input> inputs,
              std::shared_ptr<ExecNodeOptions> options, std::string label)
      : factory_name{std::move(factory_name)},
        inputs{std::move(inputs)},
        options{std::move(options)},
        label{std::move(label)} {}

  template <typename Options>
  Declaration(std::string factory_name, std::vector<Input> inputs, Options options,
              std::string label)
      : Declaration{std::move(factory_name), std::move(inputs),
                    std::shared_ptr<ExecNodeOptions>(
                        std::make_shared<Options>(std::move(options))),
                    std::move(label)} {}

  template <typename Options>
  Declaration(std::string factory_name, std::vector<Input> inputs, Options options)
      : Declaration{std::move(factory_name), std::move(inputs), std::move(options),
                    /*label=*/""} {}

  template <typename Options>
  Declaration(std::string factory_name, Options options)
      : Declaration{std::move(factory_name), {}, std::move(options), /*label=*/""} {}

  template <typename Options>
  Declaration(std::string factory_name, Options options, std::string label)
      : Declaration{std::move(factory_name), {}, std::move(options), std::move(label)} {}

  /// \brief Convenience factory for the common case of a simple sequence of nodes.
  ///
  /// Each of decls will be appended to the inputs of the subsequent declaration,
  /// and the final modified declaration will be returned.
  ///
  /// Without this convenience factory, constructing a sequence would require explicit,
  /// difficult-to-read nesting:
  ///
  ///     Declaration{"n3",
  ///                 {
  ///                     Declaration{"n2",
  ///                                 {
  ///                                     Declaration{"n1",
  ///                                                 {
  ///                                                     Declaration{"n0", N0Opts{}},
  ///                                                 },
  ///                                                 N1Opts{}},
  ///                                 },
  ///                                 N2Opts{}},
  ///                 },
  ///                 N3Opts{}};
  ///
  /// An equivalent Declaration can be constructed more tersely using Sequence:
  ///
  ///     Declaration::Sequence({
  ///         {"n0", N0Opts{}},
  ///         {"n1", N1Opts{}},
  ///         {"n2", N2Opts{}},
  ///         {"n3", N3Opts{}},
  ///     });
  static Declaration Sequence(std::vector<Declaration> decls);

  /// \brief add the declaration to an already created execution plan
  /// \param plan the plan to add the node to
  /// \param registry the registry to use to lookup the node factory
  ///
  /// This method will recursively call AddToPlan on all of the declaration's inputs.
  /// This method is only for advanced use when the DeclarationToXyz methods are not
  /// sufficient.
  ///
  /// \return the instantiated execution node
  Result<ExecNode*> AddToPlan(ExecPlan* plan, ExecFactoryRegistry* registry =
                                                  default_exec_factory_registry()) const;

  // Validate a declaration
  bool IsValid(ExecFactoryRegistry* registry = default_exec_factory_registry()) const;

  /// \brief the name of the factory to use when creating a node
  std::string factory_name;
  /// \brief the declaration's inputs
  std::vector<Input> inputs;
  /// \brief options to control the behavior of the node
  std::shared_ptr<ExecNodeOptions> options;
  /// \brief a label to give the node in the plan
  std::string label;
};
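// A hedged end-to-end sketch: declare a two-node pipeline and run it (assumes
// a std::shared_ptr<Table> named `table`; TableSourceNodeOptions and
// ProjectNodeOptions are declared in arrow/acero/options.h):
//
//   Declaration plan = Declaration::Sequence({
//       {"table_source", TableSourceNodeOptions{table}},
//       {"project", ProjectNodeOptions{{compute::field_ref("x")}, {"x"}}},
//   });
//   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Table> result, DeclarationToTable(plan));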

/// \brief How to handle unaligned buffers
enum class UnalignedBufferHandling { kWarn, kIgnore, kReallocate, kError };

/// \brief get the default behavior of unaligned buffer handling
///
/// This is configurable via the ACERO_ALIGNMENT_HANDLING environment variable which
/// can be set to "warn", "ignore", "reallocate", or "error". If the environment
/// variable is not set, or is set to an invalid value, this will return kWarn.
UnalignedBufferHandling GetDefaultUnalignedBufferHandling();

/// \brief plan-wide options that can be specified when executing an execution plan
struct ARROW_ACERO_EXPORT QueryOptions {
  /// \brief Should the plan use a legacy batching strategy
  ///
  /// This is currently in place only to support the Scanner::ToTable
  /// method. This method relies on batch indices from the scanner
  /// remaining consistent. This is impractical in the ExecPlan which
  /// might slice batches as needed (e.g. for a join)
  ///
  /// However, it still works for simple plans and this is the only way
  /// we have at the moment for maintaining implicit order.
  bool use_legacy_batching = false;

  /// If the output has a meaningful order then sequence the output of the plan
  ///
  /// The default behavior (std::nullopt) will sequence output batches if there
  /// is a meaningful ordering in the final node and will emit batches immediately
  /// otherwise.
  ///
  /// If explicitly set to true then plan execution will fail if there is no
  /// meaningful ordering. This can be useful to validate a query that should
  /// be emitting ordered results.
  ///
  /// If explicitly set to false then batches will be emitted immediately even if there
  /// is a meaningful ordering. This could cause batches to be emitted out of order but
  /// may offer a small decrease to latency.
  std::optional<bool> sequence_output = std::nullopt;

  /// \brief should the plan use multiple background threads for CPU-intensive work
  ///
  /// If this is false then all CPU work will be done on the calling thread. I/O tasks
  /// will still happen on the I/O executor and may be multi-threaded (but should not use
  /// significant CPU resources).
  ///
  /// Will be ignored if custom_cpu_executor is set
  bool use_threads = true;

  /// \brief custom executor to use for CPU-intensive work
  ///
  /// Must be null or remain valid for the duration of the plan. If this is null then
  /// a default thread pool will be chosen whose behavior will be controlled by
  /// the `use_threads` option.
  ::arrow::internal::Executor* custom_cpu_executor = NULLPTR;

  /// \brief custom executor to use for IO work
  ///
  /// Must be null or remain valid for the duration of the plan. If this is null then
  /// the global io thread pool will be chosen, whose behavior will be controlled by
  /// the "ARROW_IO_THREADS" environment variable.
  ::arrow::internal::Executor* custom_io_executor = NULLPTR;

  /// \brief a memory pool to use for allocations
  ///
  /// Must remain valid for the duration of the plan.
  MemoryPool* memory_pool = default_memory_pool();

  /// \brief a function registry to use for the plan
  ///
  /// Must remain valid for the duration of the plan.
  FunctionRegistry* function_registry = GetFunctionRegistry();
  /// \brief the names of the output columns
  ///
  /// If this is empty then names will be generated based on the input columns
  ///
  /// If set then the number of names must equal the number of output columns
  std::vector<std::string> field_names;

  /// \brief Policy for unaligned buffers in source data
  ///
  /// Various compute functions and acero internals will type pun array
  /// buffers from uint8_t* to some kind of value type (e.g. we might
  /// cast to int32_t* to add two int32 arrays)
  ///
  /// If the buffer is poorly aligned (e.g. an int32 array is not aligned
  /// on a 4-byte boundary) then this is technically undefined behavior in C++.
  /// However, most modern compilers and CPUs are fairly tolerant of this
  /// behavior and nothing bad (beyond a small hit to performance) is likely
  /// to happen.
  ///
  /// Note that this only applies to source buffers. All buffers allocated internally
  /// by Acero will be suitably aligned.
  ///
  /// If this field is set to kWarn then Acero will check if any buffers are unaligned
  /// and, if they are, will emit a warning.
  ///
  /// If this field is set to kReallocate then Acero will allocate a new, suitably aligned
  /// buffer and copy the contents from the old buffer into this new buffer.
  ///
  /// If this field is set to kError then Acero will gracefully abort the plan instead.
  ///
  /// If this field is set to kIgnore then Acero will not even check if the buffers are
  /// unaligned.
  ///
  /// If this field is not set then it will be treated as kWarn unless overridden
  /// by the ACERO_ALIGNMENT_HANDLING environment variable
  std::optional<UnalignedBufferHandling> unaligned_buffer_handling;
};
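// A hedged configuration sketch: run one query single-threaded with strict
// ordering and alignment checks (assumes a Declaration named `plan`; every
// field set here is declared in QueryOptions above):
//
//   QueryOptions opts;
//   opts.use_threads = false;     // all CPU work on the calling thread
//   opts.sequence_output = true;  // fail if the plan has no meaningful order
//   opts.unaligned_buffer_handling = UnalignedBufferHandling::kError;
//   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Table> out, DeclarationToTable(plan, opts));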

/// \brief Calculate the output schema of a declaration
///
/// This does not actually execute the plan. This operation may fail if the
/// declaration represents an invalid plan (e.g. a project node with multiple inputs)
///
/// \param declaration A declaration describing an execution plan
/// \param function_registry The function registry to use for function execution. If null
///        then the default function registry will be used.
///
/// \return the schema that batches would have after going through the execution plan
ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> DeclarationToSchema(
    const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR);

/// \brief Create a string representation of a plan
///
/// This representation is for debug purposes only.
///
/// Conversion to a string may fail if the declaration represents an
/// invalid plan.
///
/// Use Substrait for complete serialization of plans
///
/// \param declaration A declaration describing an execution plan
/// \param function_registry The function registry to use for function execution. If null
///        then the default function registry will be used.
///
/// \return a string representation of the plan suitable for debugging output
ARROW_ACERO_EXPORT Result<std::string> DeclarationToString(
    const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR);

/// \brief Utility method to run a declaration and collect the results into a table
///
/// \param declaration A declaration describing the plan to run
/// \param use_threads If `use_threads` is false then all CPU work will be done on the
///        calling thread. I/O tasks will still happen on the I/O executor
///        and may be multi-threaded (but should not use significant CPU
///        resources).
/// \param memory_pool The memory pool to use for allocations made while running the plan.
/// \param function_registry The function registry to use for function execution. If null
///        then the default function registry will be used.
///
/// This method will add a sink node to the declaration to collect results into a
/// table. It will then create an ExecPlan from the declaration, start the exec plan,
/// block until the plan has finished, and return the created table.
ARROW_ACERO_EXPORT Result<std::shared_ptr<Table>> DeclarationToTable(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

ARROW_ACERO_EXPORT Result<std::shared_ptr<Table>> DeclarationToTable(
    Declaration declaration, QueryOptions query_options);
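// A hedged debugging sketch: inspect a declaration without executing it
// (assumes a valid Declaration named `decl`):
//
//   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Schema> schema, DeclarationToSchema(decl));
//   ARROW_ASSIGN_OR_RAISE(std::string repr, DeclarationToString(decl));
//   // `repr` is a human-readable plan tree; `schema` is the plan's output schema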

/// \brief Asynchronous version of \see DeclarationToTable
///
/// \param declaration A declaration describing the plan to run
/// \param use_threads The behavior of use_threads is slightly different than the
///        synchronous version since we cannot run synchronously on the
///        calling thread. Instead, if use_threads=false then a new thread
///        pool will be created with a single thread and this will be used for
///        all compute work.
/// \param memory_pool The memory pool to use for allocations made while running the plan.
/// \param function_registry The function registry to use for function execution. If null
///        then the default function registry will be used.
ARROW_ACERO_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToTableAsync accepting a custom exec context
///
/// The executor must be specified (cannot be null) and must be kept alive until the
/// returned future finishes.
ARROW_ACERO_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
    Declaration declaration, ExecContext custom_exec_context);

/// \brief a collection of exec batches with a common schema
struct BatchesWithCommonSchema {
  std::vector<ExecBatch> batches;
  std::shared_ptr<Schema> schema;
};

/// \brief Utility method to run a declaration and collect the results into ExecBatch
/// vector
///
/// \see DeclarationToTable for details on threading & execution
ARROW_ACERO_EXPORT Result<BatchesWithCommonSchema> DeclarationToExecBatches(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

ARROW_ACERO_EXPORT Result<BatchesWithCommonSchema> DeclarationToExecBatches(
    Declaration declaration, QueryOptions query_options);

/// \brief Asynchronous version of \see DeclarationToExecBatches
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToExecBatchesAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
    Declaration declaration, ExecContext custom_exec_context);

/// \brief Utility method to run a declaration and collect the results into a vector
///
/// \see DeclarationToTable for details on threading & execution
ARROW_ACERO_EXPORT Result<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatches(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

ARROW_ACERO_EXPORT Result<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatches(
    Declaration declaration, QueryOptions query_options);

/// \brief Asynchronous version of \see DeclarationToBatches
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>>
DeclarationToBatchesAsync(Declaration declaration, bool use_threads = true,
                          MemoryPool* memory_pool = default_memory_pool(),
                          FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToBatchesAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>>
DeclarationToBatchesAsync(Declaration declaration, ExecContext exec_context);

/// \brief Utility method to run a declaration and return results as a RecordBatchReader
///
/// If an exec context is not provided then a default exec context will be used based
/// on the value of `use_threads`. If `use_threads` is false then the CPU executor will
/// be a serial executor and all CPU work will be done on the calling thread. I/O tasks
/// will still happen on the I/O executor and may be multi-threaded.
///
/// If `use_threads` is false then all CPU work will happen during the calls to
/// RecordBatchReader::Next and no CPU work will happen in the background. If
/// `use_threads` is true then CPU work will happen on the CPU thread pool and tasks may
/// run in between calls to RecordBatchReader::Next. If the returned reader is not
/// consumed quickly enough then the plan will eventually pause as the backpressure queue
/// fills up.
///
/// If a custom exec context is provided then the value of `use_threads` will be ignored.
///
/// The returned RecordBatchReader can be closed early to cancel the computation of record
/// batches. In this case, only errors encountered by the computation may be reported. In
/// particular, no cancellation error may be reported.
ARROW_ACERO_EXPORT Result<std::unique_ptr<RecordBatchReader>> DeclarationToReader(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

ARROW_ACERO_EXPORT Result<std::unique_ptr<RecordBatchReader>> DeclarationToReader(
    Declaration declaration, QueryOptions query_options);
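// A hedged consumption sketch: stream results incrementally instead of
// materializing a whole table (assumes a valid Declaration named `decl`):
//
//   ARROW_ASSIGN_OR_RAISE(std::unique_ptr<RecordBatchReader> reader,
//                         DeclarationToReader(decl, /*use_threads=*/false));
//   while (true) {
//     ARROW_ASSIGN_OR_RAISE(std::shared_ptr<RecordBatch> batch, reader->Next());
//     if (!batch) break;  // end of stream
//     // ... process `batch` ...
//   }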

/// \brief Utility method to run a declaration and ignore results
///
/// This can be useful when the data are consumed as part of the plan itself, for
/// example, when the plan ends with a write node.
///
/// \see DeclarationToTable for details on threading & execution
ARROW_ACERO_EXPORT Status
DeclarationToStatus(Declaration declaration, bool use_threads = true,
                    MemoryPool* memory_pool = default_memory_pool(),
                    FunctionRegistry* function_registry = NULLPTR);

ARROW_ACERO_EXPORT Status DeclarationToStatus(Declaration declaration,
                                              QueryOptions query_options);

/// \brief Asynchronous version of \see DeclarationToStatus
///
/// This can be useful when the data are consumed as part of the plan itself, for
/// example, when the plan ends with a write node.
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToStatusAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync(Declaration declaration,
                                                     ExecContext exec_context);

/// @}

/// \brief Wrap an ExecBatch generator in a RecordBatchReader.
///
/// The RecordBatchReader does not impose any ordering on emitted batches.
ARROW_ACERO_EXPORT
std::shared_ptr<RecordBatchReader> MakeGeneratorReader(
    std::shared_ptr<Schema>, std::function<Future<std::optional<ExecBatch>>()>,
    MemoryPool*);

constexpr int kDefaultBackgroundMaxQ = 32;
constexpr int kDefaultBackgroundQRestart = 16;

/// \brief Make a generator of RecordBatchReaders
///
/// Useful as a source node for an Exec plan
ARROW_ACERO_EXPORT
Result<std::function<Future<std::optional<ExecBatch>>()>> MakeReaderGenerator(
    std::shared_ptr<RecordBatchReader> reader, arrow::internal::Executor* io_executor,
    int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart);
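// A hedged source-node sketch: expose an existing RecordBatchReader as a
// generator usable with a "source" declaration (assumes `reader` exists;
// SourceNodeOptions comes from arrow/acero/options.h, and the queue sizes
// fall back to the defaults declared above):
//
//   std::shared_ptr<Schema> schema = reader->schema();
//   ARROW_ASSIGN_OR_RAISE(
//       auto gen, MakeReaderGenerator(std::move(reader),
//                                     arrow::io::default_io_context().executor()));
//   Declaration source{"source", SourceNodeOptions{schema, std::move(gen)}};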

}  // namespace acero
}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join.h
ADDED
@@ -0,0 +1,75 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <functional>
#include <memory>
#include <vector>

#include "arrow/acero/accumulation_queue.h"
#include "arrow/acero/bloom_filter.h"
#include "arrow/acero/options.h"
#include "arrow/acero/query_context.h"
#include "arrow/acero/schema_util.h"
#include "arrow/acero/task_util.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/type.h"
#include "arrow/util/tracing.h"

namespace arrow {
namespace acero {

using util::AccumulationQueue;

class HashJoinImpl {
 public:
  using OutputBatchCallback = std::function<Status(int64_t, ExecBatch)>;
  using BuildFinishedCallback = std::function<Status(size_t)>;
  using FinishedCallback = std::function<Status(int64_t)>;
  using RegisterTaskGroupCallback = std::function<int(
      std::function<Status(size_t, int64_t)>, std::function<Status(size_t)>)>;
  using StartTaskGroupCallback = std::function<Status(int, int64_t)>;
  using AbortContinuationImpl = std::function<void()>;

  virtual ~HashJoinImpl() = default;
  virtual Status Init(QueryContext* ctx, JoinType join_type, size_t num_threads,
                      const HashJoinProjectionMaps* proj_map_left,
                      const HashJoinProjectionMaps* proj_map_right,
                      std::vector<JoinKeyCmp> key_cmp, Expression filter,
                      RegisterTaskGroupCallback register_task_group_callback,
                      StartTaskGroupCallback start_task_group_callback,
                      OutputBatchCallback output_batch_callback,
                      FinishedCallback finished_callback) = 0;

  virtual Status BuildHashTable(size_t thread_index, AccumulationQueue batches,
                                BuildFinishedCallback on_finished) = 0;
  virtual Status ProbeSingleBatch(size_t thread_index, ExecBatch batch) = 0;
  virtual Status ProbingFinished(size_t thread_index) = 0;
  virtual void Abort(TaskScheduler::AbortContinuationImpl pos_abort_callback) = 0;
  virtual std::string ToString() const = 0;

  static Result<std::unique_ptr<HashJoinImpl>> MakeBasic();
  static Result<std::unique_ptr<HashJoinImpl>> MakeSwiss();

 protected:
  arrow::util::tracing::Span span_;
};
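// A hedged lifecycle sketch of how a caller (in practice the hash join exec
// node) drives this interface; every lower-case identifier below is a
// hypothetical variable supplied by that caller:
//
//   ARROW_ASSIGN_OR_RAISE(std::unique_ptr<HashJoinImpl> impl, HashJoinImpl::MakeSwiss());
//   ARROW_RETURN_NOT_OK(impl->Init(ctx, JoinType::INNER, num_threads,
//                                  &proj_map_left, &proj_map_right, key_cmp, filter,
//                                  register_task_group_cb, start_task_group_cb,
//                                  output_batch_cb, finished_cb));
//   // Build phase: hand over the accumulated build-side batches
//   ARROW_RETURN_NOT_OK(impl->BuildHashTable(thread_index, std::move(build_batches),
//                                            build_finished_cb));
//   // Probe phase: once the build finishes, probe batches one by one
//   ARROW_RETURN_NOT_OK(impl->ProbeSingleBatch(thread_index, std::move(probe_batch)));
//   ARROW_RETURN_NOT_OK(impl->ProbingFinished(thread_index));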

}  // namespace acero
}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_dict.h
ADDED
@@ -0,0 +1,318 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <unordered_map>

#include "arrow/acero/schema_util.h"
#include "arrow/compute/exec.h"
#include "arrow/compute/kernels/row_encoder_internal.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/type.h"

// This file contains hash join logic related to handling of dictionary encoded key
// columns.
//
// A key column from probe side of the join can be matched against a key column from build
// side of the join, as long as the underlying value types are equal. That means that:
// - both scalars and arrays can be used and even mixed in the same column
// - dictionary column can be matched against non-dictionary column if underlying value
//   types are equal
// - dictionary column can be matched against dictionary column with a different index
//   type, and potentially using a different dictionary, if underlying value types are
//   equal
//
// We currently require in hash join that for all dictionary encoded columns, the same
// dictionary is used in all input exec batches.
//
// In order to allow matching columns with different dictionaries, different dictionary
// index types, and dictionary key against non-dictionary key, internally comparisons will
// be evaluated after remapping values on both sides of the join to a common
// representation (which will be called "unified representation"). This common
// representation is a column of int32() type (not a dictionary column). It represents an
// index in the unified dictionary computed for the (only) dictionary present on build
// side (an empty dictionary is still created for an empty build side). Null value is
// always represented in this common representation as null int32 value, unified
// dictionary will never contain a null value (so there is no ambiguity of representing
// nulls as either index to a null entry in the dictionary or null index).
//
// Unified dictionary represents values present on build side. There may be values on
// probe side that are not present in it. All such values, that are not null, are mapped
// in the common representation to a special constant kMissingValueId.
//
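// A small worked example of the unified representation (values illustrative):
//
//   build-side dictionary:   ["foo", "bar", "foo", null]
//   unified dictionary:      ["foo", "bar"]        (duplicates and nulls dropped)
//   build index 0 ("foo") -> 0
//   build index 3 (null)  -> null int32 (data slot holds kNullId)
//   probe value "baz"     -> kMissingValueId       (absent from the build side)
//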

namespace arrow {

using compute::ExecBatch;
using compute::ExecContext;
using compute::internal::RowEncoder;

namespace acero {

/// Helper class with operations that are stateless and common to processing of dictionary
/// keys on both build and probe side.
class HashJoinDictUtil {
 public:
  // Null values in unified representation are always represented as null that has
  // corresponding integer set to this constant
  static constexpr int32_t kNullId = 0;
  // Constant representing a value, that is not null, missing on the build side, in
  // unified representation.
  static constexpr int32_t kMissingValueId = -1;

  // Check if data types of corresponding pair of key column on build and probe side are
  // compatible
  static bool KeyDataTypesValid(const std::shared_ptr<DataType>& probe_data_type,
                                const std::shared_ptr<DataType>& build_data_type);

  // Input must be dictionary array or dictionary scalar.
  // A precomputed lookup table, provided here as an int32() array, will be
  // used to remap input indices to unified representation.
  //
  static Result<std::shared_ptr<ArrayData>> IndexRemapUsingLUT(
      ExecContext* ctx, const Datum& indices, int64_t batch_length,
      const std::shared_ptr<ArrayData>& map_array,
      const std::shared_ptr<DataType>& data_type);

  // Return int32() array that contains indices of input dictionary array or scalar after
  // type casting.
  static Result<std::shared_ptr<ArrayData>> ConvertToInt32(
      const std::shared_ptr<DataType>& from_type, const Datum& input,
      int64_t batch_length, ExecContext* ctx);

  // Return an array that contains elements of input int32() array after casting to a
  // given integer type. This is used for mapping unified representation stored in the
  // hash table on build side back to original input data type of hash join, when
  // outputting hash join results to parent exec node.
  //
  static Result<std::shared_ptr<ArrayData>> ConvertFromInt32(
      const std::shared_ptr<DataType>& to_type, const Datum& input, int64_t batch_length,
      ExecContext* ctx);

  // Return dictionary referenced in either dictionary array or dictionary scalar
  static std::shared_ptr<Array> ExtractDictionary(const Datum& data);
};

/// Implements processing of dictionary arrays/scalars in key columns on the build side of
/// a hash join.
/// Each instance of this class corresponds to a single column and stores and
/// processes only the information related to that column.
/// Const methods are thread-safe, non-const methods are not (the caller must make sure
/// that only one thread at any time will access them).
///
class HashJoinDictBuild {
 public:
  // Returns true if the key column (described in input by its data type) requires any
  // pre- or post-processing related to handling dictionaries.
  //
  static bool KeyNeedsProcessing(const std::shared_ptr<DataType>& build_data_type) {
    return (build_data_type->id() == Type::DICTIONARY);
  }

  // Data type of unified representation
  static std::shared_ptr<DataType> DataTypeAfterRemapping() { return int32(); }

  // Should be called only once in hash join, before processing any build or probe
  // batches.
  //
  // Takes a pointer to the dictionary for a corresponding key column on the build side as
  // an input. If the build side is empty, it still needs to be called, but with
  // dictionary pointer set to null.
  //
  // Currently it is required that all input batches on build side share the same
  // dictionary. For each input batch during its pre-processing, the dictionary will be
  // checked and an error will be returned if it is different from the one provided in
  // the call to this method.
  //
  // Unifies the dictionary. The order of the values is still preserved.
  // Null and duplicate entries are removed. If the dictionary is already unified, its
  // copy will be produced and stored within this class.
  //
  // Prepares the mapping from ids within original dictionary to the ids in the resulting
  // dictionary. This is used later on to pre-process (map to unified representation) key
  // column on build side.
  //
  // Prepares the reverse mapping (in the form of hash table) from values to the ids in
  // the resulting dictionary. This will be used later on to pre-process (map to unified
  // representation) key column on probe side. Values on probe side that are not present
  // in the original dictionary will be mapped to a special constant kMissingValueId. The
  // exception is made for nulls, which are always mapped to nulls (both when null is
  // represented as a dictionary id pointing to a null and a null dictionary id).
  //
  Status Init(ExecContext* ctx, std::shared_ptr<Array> dictionary,
              std::shared_ptr<DataType> index_type, std::shared_ptr<DataType> value_type);

  // Remap array or scalar values into unified representation (array of int32()).
  // Outputs kMissingValueId if input value is not found in the unified dictionary.
  // Outputs null for null input value (with corresponding data set to kNullId).
  //
  Result<std::shared_ptr<ArrayData>> RemapInputValues(ExecContext* ctx,
                                                      const Datum& values,
                                                      int64_t batch_length) const;

  // Remap dictionary array or dictionary scalar on build side to unified representation.
  // Dictionary referenced in the input must match the dictionary that was
  // given during initialization.
  // The output is a dictionary array that references unified dictionary.
  //
  Result<std::shared_ptr<ArrayData>> RemapInput(
      ExecContext* ctx, const Datum& indices, int64_t batch_length,
      const std::shared_ptr<DataType>& data_type) const;

  // Outputs dictionary array referencing unified dictionary, given an array with 32-bit
  // ids.
  // Used to post-process values looked up in a hash table on build side of the hash join
  // before outputting to the parent exec node.
  //
  Result<std::shared_ptr<ArrayData>> RemapOutput(const ArrayData& indices32Bit,
                                                 ExecContext* ctx) const;

  // Release shared pointers and memory
  void CleanUp();

 private:
  // Data type of dictionary ids for the input dictionary on build side
  std::shared_ptr<DataType> index_type_;
  // Data type of values for the input dictionary on build side
  std::shared_ptr<DataType> value_type_;
  // Mapping from (encoded as string) values to the ids in unified dictionary
  std::unordered_map<std::string, int32_t> hash_table_;
  // Mapping from input dictionary ids to unified dictionary ids
  std::shared_ptr<ArrayData> remapped_ids_;
  // Input dictionary
  std::shared_ptr<Array> dictionary_;
  // Unified dictionary
  std::shared_ptr<ArrayData> unified_dictionary_;
};

/// Implements processing of dictionary arrays/scalars in key columns on the probe side of
/// a hash join.
/// Each instance of this class corresponds to a single column and stores and
/// processes only the information related to that column.
/// It is not thread-safe; every participating thread should use its own instance of
/// this class.
///
class HashJoinDictProbe {
 public:
  static bool KeyNeedsProcessing(const std::shared_ptr<DataType>& probe_data_type,
                                 const std::shared_ptr<DataType>& build_data_type);

  // Data type of the result of remapping input key column.
  //
  // The result of remapping is what is used in hash join for matching keys on build and
  // probe side. The exact data types may be different, as described below, and therefore
  // a common representation is needed for simplifying comparisons of pairs of keys on
  // both sides.
  //
  // We support matching key that is of non-dictionary type with key that is of dictionary
  // type, as long as the underlying value types are equal. We support matching when both
  // keys are of dictionary type, regardless of whether underlying dictionary index types
  // are the same or not.
  //
  static std::shared_ptr<DataType> DataTypeAfterRemapping(
      const std::shared_ptr<DataType>& build_data_type);

  // Should only be called if KeyNeedsProcessing method returns true for a pair of
  // corresponding key columns from build and probe side.
  // Converts values in order to match the common representation for
  // both build and probe side used in hash table comparison.
  // Supports arrays and scalars as input.
  // Argument opt_build_side should be null if dictionary key on probe side is matched
  // with non-dictionary key on build side.
  //
  Result<std::shared_ptr<ArrayData>> RemapInput(
      const HashJoinDictBuild* opt_build_side, const Datum& data, int64_t batch_length,
      const std::shared_ptr<DataType>& probe_data_type,
      const std::shared_ptr<DataType>& build_data_type, ExecContext* ctx);

  void CleanUp();

 private:
  // May be null if probe side key is non-dictionary. Otherwise it is used to verify that
  // only a single dictionary is referenced in exec batch on probe side of hash join.
  std::shared_ptr<Array> dictionary_;
  // Mapping from dictionary on probe side of hash join (if it is used) to unified
  // representation.
  std::shared_ptr<ArrayData> remapped_ids_;
  // Encoder of key columns that uses unified representation instead of original data type
  // for key columns that need to use it (have dictionaries on either side of the join).
  RowEncoder encoder_;
};

// Encapsulates dictionary handling logic for build side of hash join.
//
class HashJoinDictBuildMulti {
 public:
  Status Init(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
              const ExecBatch* opt_non_empty_batch, ExecContext* ctx);
  static void InitEncoder(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
                          RowEncoder* encoder, ExecContext* ctx);
  Status EncodeBatch(size_t thread_index,
                     const SchemaProjectionMaps<HashJoinProjection>& proj_map,
                     const ExecBatch& batch, RowEncoder* encoder, ExecContext* ctx) const;
  Status PostDecode(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
                    ExecBatch* decoded_key_batch, ExecContext* ctx);
  const HashJoinDictBuild& get_dict_build(int icol) const { return remap_imp_[icol]; }

 private:
  std::vector<bool> needs_remap_;
  std::vector<HashJoinDictBuild> remap_imp_;
};

// Encapsulates dictionary handling logic for probe side of hash join
//
class HashJoinDictProbeMulti {
 public:
  void Init(size_t num_threads);
  bool BatchRemapNeeded(size_t thread_index,
                        const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
                        const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
                        ExecContext* ctx);
  Status EncodeBatch(size_t thread_index,
                     const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
                     const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
                     const HashJoinDictBuildMulti& dict_build, const ExecBatch& batch,
                     RowEncoder** out_encoder, ExecBatch* opt_out_key_batch,
                     ExecContext* ctx);

 private:
  void InitLocalStateIfNeeded(
      size_t thread_index, const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
      const SchemaProjectionMaps<HashJoinProjection>& proj_map_build, ExecContext* ctx);
  static void InitEncoder(const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
                          const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
                          RowEncoder* encoder, ExecContext* ctx);
  struct ThreadLocalState {
    bool is_initialized;
    // Whether any key column needs remapping (because of dictionaries used) before doing
    // join hash table lookups
    bool any_needs_remap;
    // Whether each key column needs remapping before doing join hash table lookups
    std::vector<bool> needs_remap;
    std::vector<HashJoinDictProbe> remap_imp;
    // Encoder of key columns that uses unified representation instead of original data
    // type for key columns that need to use it (have dictionaries on either side of the
    // join).
    RowEncoder post_remap_encoder;
  };
  std::vector<ThreadLocalState> local_states_;
};

}  // namespace acero
}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h
ADDED
@@ -0,0 +1,103 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cassert>
+#include <vector>
+
+#include "arrow/acero/options.h"
+#include "arrow/acero/schema_util.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+
+namespace arrow {
+
+using compute::ExecContext;
+
+namespace acero {
+
+class ARROW_ACERO_EXPORT HashJoinSchema {
+ public:
+  Status Init(JoinType join_type, const Schema& left_schema,
+              const std::vector<FieldRef>& left_keys, const Schema& right_schema,
+              const std::vector<FieldRef>& right_keys, const Expression& filter,
+              const std::string& left_field_name_prefix,
+              const std::string& right_field_name_prefix);
+
+  Status Init(JoinType join_type, const Schema& left_schema,
+              const std::vector<FieldRef>& left_keys,
+              const std::vector<FieldRef>& left_output, const Schema& right_schema,
+              const std::vector<FieldRef>& right_keys,
+              const std::vector<FieldRef>& right_output, const Expression& filter,
+              const std::string& left_field_name_prefix,
+              const std::string& right_field_name_prefix);
+
+  static Status ValidateSchemas(JoinType join_type, const Schema& left_schema,
+                                const std::vector<FieldRef>& left_keys,
+                                const std::vector<FieldRef>& left_output,
+                                const Schema& right_schema,
+                                const std::vector<FieldRef>& right_keys,
+                                const std::vector<FieldRef>& right_output,
+                                const std::string& left_field_name_prefix,
+                                const std::string& right_field_name_prefix);
+
+  bool HasDictionaries() const;
+
+  bool HasLargeBinary() const;
+
+  Result<Expression> BindFilter(Expression filter, const Schema& left_schema,
+                                const Schema& right_schema, ExecContext* exec_context);
+  std::shared_ptr<Schema> MakeOutputSchema(const std::string& left_field_name_suffix,
+                                           const std::string& right_field_name_suffix);
+
+  bool LeftPayloadIsEmpty() { return PayloadIsEmpty(0); }
+
+  bool RightPayloadIsEmpty() { return PayloadIsEmpty(1); }
+
+  static int kMissingField() {
+    return SchemaProjectionMaps<HashJoinProjection>::kMissingField;
+  }
+
+  SchemaProjectionMaps<HashJoinProjection> proj_maps[2];
+
+ private:
+  static bool IsTypeSupported(const DataType& type);
+
+  Status CollectFilterColumns(std::vector<FieldRef>& left_filter,
+                              std::vector<FieldRef>& right_filter,
+                              const Expression& filter, const Schema& left_schema,
+                              const Schema& right_schema);
+
+  Expression RewriteFilterToUseFilterSchema(int right_filter_offset,
+                                            const SchemaProjectionMap& left_to_filter,
+                                            const SchemaProjectionMap& right_to_filter,
+                                            const Expression& filter);
+
+  bool PayloadIsEmpty(int side) {
+    assert(side == 0 || side == 1);
+    return proj_maps[side].num_cols(HashJoinProjection::PAYLOAD) == 0;
+  }
+
+  static Result<std::vector<FieldRef>> ComputePayload(const Schema& schema,
+                                                      const std::vector<FieldRef>& output,
+                                                      const std::vector<FieldRef>& filter,
+                                                      const std::vector<FieldRef>& key);
+};
+
+}  // namespace acero
+}  // namespace arrow
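
A hedged usage sketch (not part of the vendored header above): HashJoinSchema is set up in two phases, Init() to validate and project the input schemas, then MakeOutputSchema() to derive the joined schema. The schemas, key names, and prefixes below are illustrative assumptions.

#include <memory>
#include "arrow/acero/hash_join_node.h"
#include "arrow/type.h"

arrow::Status BuildJoinSchema() {
  using arrow::acero::HashJoinSchema;
  // Hypothetical inputs: both sides carry an "id" key plus one payload column.
  auto left = arrow::schema({arrow::field("id", arrow::int64()),
                             arrow::field("l_val", arrow::utf8())});
  auto right = arrow::schema({arrow::field("id", arrow::int64()),
                              arrow::field("r_val", arrow::utf8())});
  HashJoinSchema join_schema;
  // literal(true) means "no residual filter"; the prefixes are the
  // disambiguation strings the Init() parameters ask for.
  ARROW_RETURN_NOT_OK(join_schema.Init(
      arrow::acero::JoinType::INNER, *left, {{"id"}}, *right, {{"id"}},
      arrow::compute::literal(true), /*left_field_name_prefix=*/"l_",
      /*right_field_name_prefix=*/"r_"));
  std::shared_ptr<arrow::Schema> out_schema =
      join_schema.MakeOutputSchema(/*left_field_name_suffix=*/"",
                                   /*right_field_name_suffix=*/"");
  return arrow::Status::OK();
}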
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/map_node.h
ADDED
@@ -0,0 +1,81 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "arrow/acero/exec_plan.h"
+#include "arrow/acero/util.h"
+#include "arrow/acero/visibility.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/cancel.h"
+#include "arrow/util/type_fwd.h"
+
+namespace arrow {
+namespace acero {
+
+/// A utility base class for simple exec nodes with one input
+///
+/// Pause/Resume Producing are forwarded appropriately
+/// There is nothing to do in StopProducingImpl
+///
+/// An AtomicCounter is used to keep track of when all data has arrived. When it
+/// has, the Finish() method will be invoked
+class ARROW_ACERO_EXPORT MapNode : public ExecNode, public TracedNode {
+ public:
+  MapNode(ExecPlan* plan, std::vector<ExecNode*> inputs,
+          std::shared_ptr<Schema> output_schema);
+
+  Status InputFinished(ExecNode* input, int total_batches) override;
+
+  Status StartProducing() override;
+
+  void PauseProducing(ExecNode* output, int32_t counter) override;
+
+  void ResumeProducing(ExecNode* output, int32_t counter) override;
+
+  Status InputReceived(ExecNode* input, ExecBatch batch) override;
+
+  const Ordering& ordering() const override;
+
+ protected:
+  Status StopProducingImpl() override;
+
+  /// Transform a batch
+  ///
+  /// The output batch will have the same guarantee as the input batch
+  /// If this was the last batch this call may trigger Finish()
+  virtual Result<ExecBatch> ProcessBatch(ExecBatch batch) = 0;
+
+  /// Function called after all data has been received
+  ///
+  /// By default this does nothing. Override this to provide a custom implementation.
+  virtual void Finish();
+
+ protected:
+  // Counter for the number of batches received
+  AtomicCounter input_counter_;
+};
+
+}  // namespace acero
+}  // namespace arrow
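
A minimal subclass sketch, assuming only what the header above declares plus ExecNode's pure-virtual kind_name(): ProcessBatch is the single required hook, and MapNode itself forwards pause/resume and invokes Finish() after the last batch. Illustrative, not an implementation from the library.

#include "arrow/acero/map_node.h"

namespace example {

class PassthroughNode : public arrow::acero::MapNode {
 public:
  // Reuse MapNode's (plan, inputs, output_schema) constructor.
  using arrow::acero::MapNode::MapNode;

  // ExecNode requires a kind name for tracing and plan printing.
  const char* kind_name() const override { return "PassthroughNode"; }

 protected:
  // Called once per input batch; here we pass batches through unchanged.
  arrow::Result<arrow::compute::ExecBatch> ProcessBatch(
      arrow::compute::ExecBatch batch) override {
    return batch;
  }
};

}  // namespace example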
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/options.h
ADDED
@@ -0,0 +1,866 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+#include "arrow/acero/type_fwd.h"
+#include "arrow/acero/visibility.h"
+#include "arrow/compute/api_aggregate.h"
+#include "arrow/compute/api_vector.h"
+#include "arrow/compute/exec.h"
+#include "arrow/compute/expression.h"
+#include "arrow/record_batch.h"
+#include "arrow/result.h"
+#include "arrow/util/async_generator.h"
+#include "arrow/util/async_util.h"
+
+namespace arrow {
+
+using compute::Aggregate;
+using compute::ExecBatch;
+using compute::Expression;
+using compute::literal;
+using compute::Ordering;
+using compute::SelectKOptions;
+using compute::SortOptions;
+
+namespace internal {
+
+class Executor;
+
+}  // namespace internal
+
+namespace acero {
+
+/// \brief This must not be used in release-mode
+struct DebugOptions;
+
+using AsyncExecBatchGenerator = AsyncGenerator<std::optional<ExecBatch>>;
+
+/// \addtogroup acero-nodes
+/// @{
+
+/// \brief A base class for all options objects
+///
+/// The only time this is used directly is when a node has no configuration
+class ARROW_ACERO_EXPORT ExecNodeOptions {
+ public:
+  virtual ~ExecNodeOptions() = default;
+
+  /// \brief This must not be used in release-mode
+  std::shared_ptr<DebugOptions> debug_opts;
+};
+
+/// \brief A node representing a generic source of data for Acero
+///
+/// The source node will start calling `generator` during StartProducing. An initial
+/// task will be created that will call `generator`. It will not call `generator`
+/// reentrantly. If the source can be read in parallel then those details should be
+/// encapsulated within `generator`.
+///
+/// For each batch received a new task will be created to push that batch downstream.
+/// This task will slice smaller units of size `ExecPlan::kMaxBatchSize` from the
+/// parent batch and call InputReceived. Thus, if the `generator` yields a large
+/// batch it may result in several calls to InputReceived.
+///
+/// The SourceNode will, by default, assign an implicit ordering to outgoing batches.
+/// This is valid as long as the generator generates batches in a deterministic fashion.
+/// Currently, the only way to override this is to subclass the SourceNode.
+///
+/// This node is not generally used directly but can serve as the basis for various
+/// specialized nodes.
+class ARROW_ACERO_EXPORT SourceNodeOptions : public ExecNodeOptions {
+ public:
+  /// Create an instance from values
+  SourceNodeOptions(std::shared_ptr<Schema> output_schema,
+                    std::function<Future<std::optional<ExecBatch>>()> generator)
+      : output_schema(std::move(output_schema)), generator(std::move(generator)) {}
+
+  /// \brief the schema for batches that will be generated by this source
+  std::shared_ptr<Schema> output_schema;
+  /// \brief an asynchronous stream of batches ending with std::nullopt
+  std::function<Future<std::optional<ExecBatch>>()> generator;
+};
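
A hedged sketch of building SourceNodeOptions from a fixed set of batches; per the comment above, the generator is polled non-reentrantly and signals end-of-stream with std::nullopt. The helper name is hypothetical.

#include <memory>
#include <optional>
#include <vector>
#include "arrow/acero/options.h"

using BatchOpt = std::optional<arrow::compute::ExecBatch>;

arrow::acero::SourceNodeOptions MakeVectorSource(
    std::shared_ptr<arrow::Schema> schema,
    std::vector<arrow::compute::ExecBatch> batches) {
  // Shared state keeps the lambda copyable while tracking progress.
  auto state = std::make_shared<std::vector<arrow::compute::ExecBatch>>(
      std::move(batches));
  auto next = std::make_shared<size_t>(0);
  // Each poll yields one already-finished future; an empty optional ends the stream.
  auto gen = [state, next]() -> arrow::Future<BatchOpt> {
    if (*next >= state->size()) {
      return arrow::Future<BatchOpt>::MakeFinished(BatchOpt{});
    }
    return arrow::Future<BatchOpt>::MakeFinished((*state)[(*next)++]);
  };
  return arrow::acero::SourceNodeOptions(std::move(schema), std::move(gen));
}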
+
+/// \brief a node that generates data from a table already loaded in memory
+///
+/// The table source node will slice off chunks, defined by `max_batch_size`
+/// for parallel processing. The table source node extends source node and so these
+/// chunks will be iteratively processed in small batches. \see SourceNodeOptions
+/// for details.
+class ARROW_ACERO_EXPORT TableSourceNodeOptions : public ExecNodeOptions {
+ public:
+  static constexpr int64_t kDefaultMaxBatchSize = 1 << 20;
+
+  /// Create an instance from values
+  TableSourceNodeOptions(std::shared_ptr<Table> table,
+                         int64_t max_batch_size = kDefaultMaxBatchSize)
+      : table(std::move(table)), max_batch_size(max_batch_size) {}
+
+  /// \brief a table which acts as the data source
+  std::shared_ptr<Table> table;
+  /// \brief size of batches to emit from this node
+  /// If the table is larger the node will emit multiple batches from
+  /// the table to be processed in parallel.
+  int64_t max_batch_size;
+};
+
+/// \brief define a lazily resolved Arrow table.
+///
+/// The table uniquely identified by the names can typically be resolved at the time when
+/// the plan is to be consumed.
+///
+/// This node is for serialization purposes only and can never be executed.
+class ARROW_ACERO_EXPORT NamedTableNodeOptions : public ExecNodeOptions {
+ public:
+  /// Create an instance from values
+  NamedTableNodeOptions(std::vector<std::string> names, std::shared_ptr<Schema> schema)
+      : names(std::move(names)), schema(std::move(schema)) {}
+
+  /// \brief the names to put in the serialized plan
+  std::vector<std::string> names;
+  /// \brief the output schema of the table
+  std::shared_ptr<Schema> schema;
+};
+
+/// \brief a source node which feeds data from a synchronous iterator of batches
+///
+/// ItMaker is a maker of an iterator of tabular data.
+///
+/// The node can be configured to use an I/O executor. If set then each time the
+/// iterator is polled a new I/O thread task will be created to do the polling. This
+/// allows a blocking iterator to stay off the CPU thread pool.
+template <typename ItMaker>
+class ARROW_ACERO_EXPORT SchemaSourceNodeOptions : public ExecNodeOptions {
+ public:
+  /// Create an instance that will create a new task on io_executor for each iteration
+  SchemaSourceNodeOptions(std::shared_ptr<Schema> schema, ItMaker it_maker,
+                          arrow::internal::Executor* io_executor)
+      : schema(std::move(schema)),
+        it_maker(std::move(it_maker)),
+        io_executor(io_executor),
+        requires_io(true) {}
+
+  /// Create an instance that will either iterate synchronously or use the default I/O
+  /// executor
+  SchemaSourceNodeOptions(std::shared_ptr<Schema> schema, ItMaker it_maker,
+                          bool requires_io = false)
+      : schema(std::move(schema)),
+        it_maker(std::move(it_maker)),
+        io_executor(NULLPTR),
+        requires_io(requires_io) {}
+
+  /// \brief The schema of the record batches from the iterator
+  std::shared_ptr<Schema> schema;
+
+  /// \brief A maker of an iterator which acts as the data source
+  ItMaker it_maker;
+
+  /// \brief The executor to use for scanning the iterator
+  ///
+  /// Defaults to the default I/O executor. Only used if requires_io is true.
+  /// If requires_io is false then this MUST be nullptr.
+  arrow::internal::Executor* io_executor;
+
+  /// \brief If true then items will be fetched from the iterator on a dedicated I/O
+  /// thread to keep I/O off the CPU thread
+  bool requires_io;
+};
+
+/// a source node that reads from a RecordBatchReader
+///
+/// Each iteration of the RecordBatchReader will be run on a new thread task created
+/// on the I/O thread pool.
+class ARROW_ACERO_EXPORT RecordBatchReaderSourceNodeOptions : public ExecNodeOptions {
+ public:
+  /// Create an instance from values
+  RecordBatchReaderSourceNodeOptions(std::shared_ptr<RecordBatchReader> reader,
+                                     arrow::internal::Executor* io_executor = NULLPTR)
+      : reader(std::move(reader)), io_executor(io_executor) {}
+
+  /// \brief The RecordBatchReader which acts as the data source
+  std::shared_ptr<RecordBatchReader> reader;
+
+  /// \brief The executor to use for the reader
+  ///
+  /// Defaults to the default I/O executor.
+  arrow::internal::Executor* io_executor;
+};
+
+/// a source node that reads from an iterator of array vectors
+using ArrayVectorIteratorMaker = std::function<Iterator<std::shared_ptr<ArrayVector>>()>;
+/// \brief An extended Source node which accepts a schema and array-vectors
+class ARROW_ACERO_EXPORT ArrayVectorSourceNodeOptions
+    : public SchemaSourceNodeOptions<ArrayVectorIteratorMaker> {
+  using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
+};
+
+/// a source node that reads from an iterator of ExecBatch
+using ExecBatchIteratorMaker = std::function<Iterator<std::shared_ptr<ExecBatch>>()>;
+/// \brief An extended Source node which accepts a schema and exec-batches
+class ARROW_ACERO_EXPORT ExecBatchSourceNodeOptions
+    : public SchemaSourceNodeOptions<ExecBatchIteratorMaker> {
+ public:
+  using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
+  ExecBatchSourceNodeOptions(std::shared_ptr<Schema> schema,
+                             std::vector<ExecBatch> batches,
+                             ::arrow::internal::Executor* io_executor);
+  ExecBatchSourceNodeOptions(std::shared_ptr<Schema> schema,
+                             std::vector<ExecBatch> batches, bool requires_io = false);
+};
+
+using RecordBatchIteratorMaker = std::function<Iterator<std::shared_ptr<RecordBatch>>()>;
+/// a source node that reads from an iterator of RecordBatch
+class ARROW_ACERO_EXPORT RecordBatchSourceNodeOptions
+    : public SchemaSourceNodeOptions<RecordBatchIteratorMaker> {
+  using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
+};
+
+/// \brief a node which excludes some rows from batches passed through it
+///
+/// filter_expression will be evaluated against each batch which is pushed to
+/// this node. Any rows for which filter_expression does not evaluate to `true` will be
+/// excluded in the batch emitted by this node.
+///
+/// This node will emit empty batches if all rows are excluded. This is done
+/// to avoid gaps in the ordering.
+class ARROW_ACERO_EXPORT FilterNodeOptions : public ExecNodeOptions {
+ public:
+  /// \brief create an instance from values
+  explicit FilterNodeOptions(Expression filter_expression)
+      : filter_expression(std::move(filter_expression)) {}
+
+  /// \brief the expression to filter batches
+  ///
+  /// The return type of this expression must be boolean
+  Expression filter_expression;
+};
+
+/// \brief a node which selects a specified subset from the input
+class ARROW_ACERO_EXPORT FetchNodeOptions : public ExecNodeOptions {
+ public:
+  static constexpr std::string_view kName = "fetch";
+  /// \brief create an instance from values
+  FetchNodeOptions(int64_t offset, int64_t count) : offset(offset), count(count) {}
+  /// \brief the number of rows to skip
+  int64_t offset;
+  /// \brief the number of rows to keep (not counting skipped rows)
+  int64_t count;
+};
+
+/// \brief a node which executes expressions on input batches, producing batches
+/// of the same length with new columns.
+///
+/// Each expression will be evaluated against each batch which is pushed to
+/// this node to produce a corresponding output column.
+///
+/// If names are not provided, the string representations of exprs will be used.
+class ARROW_ACERO_EXPORT ProjectNodeOptions : public ExecNodeOptions {
+ public:
+  /// \brief create an instance from values
+  explicit ProjectNodeOptions(std::vector<Expression> expressions,
+                              std::vector<std::string> names = {})
+      : expressions(std::move(expressions)), names(std::move(names)) {}
+
+  /// \brief the expressions to run on the batches
+  ///
+  /// The output will have one column for each expression. If you wish to keep any of
+  /// the columns from the input then you should create a simple field_ref expression
+  /// for that column.
+  std::vector<Expression> expressions;
+  /// \brief the names of the output columns
+  ///
+  /// If this is not specified then the result of calling ToString on the expression will
+  /// be used instead
+  ///
+  /// This list should either be empty or have the same length as `expressions`
+  std::vector<std::string> names;
+};
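
A small sketch combining FilterNodeOptions and ProjectNodeOptions with compute expressions; the field names and the "multiply" call are illustrative, not taken from the header.

#include "arrow/acero/options.h"
#include "arrow/compute/expression.h"

namespace cp = arrow::compute;

// Keep only rows where x > 0; the expression must return boolean.
arrow::acero::FilterNodeOptions MakePositiveFilter() {
  return arrow::acero::FilterNodeOptions(
      cp::greater(cp::field_ref("x"), cp::literal(0)));
}

// Emit x unchanged (via a plain field_ref) plus x * 2 under explicit names.
arrow::acero::ProjectNodeOptions MakeDoubledProjection() {
  return arrow::acero::ProjectNodeOptions(
      {cp::field_ref("x"),
       cp::call("multiply", {cp::field_ref("x"), cp::literal(2)})},
      {"x", "x_doubled"});
}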
+
+/// \brief a node which aggregates input batches and calculates summary statistics
+///
+/// The node can summarize the entire input or it can group the input with grouping keys
+/// and segment keys.
+///
+/// By default, the aggregate node is a pipeline breaker. It must accumulate all input
+/// before any output is produced. Segment keys are a performance optimization. If
+/// you know your input is already partitioned by one or more columns then you can
+/// specify these as segment keys. At each change in the segment keys the node will
+/// emit values for all data seen so far.
+///
+/// Segment keys are currently limited to single-threaded mode.
+///
+/// Both keys and segment-keys determine the group. However segment-keys are also used
+/// for determining grouping segments, which should be large, and allow streaming a
+/// partial aggregation result after processing each segment. One common use-case for
+/// segment-keys is ordered aggregation, in which the segment-key attribute specifies a
+/// column with non-decreasing values or a lexicographically-ordered set of such columns.
+///
+/// If the keys attribute is a non-empty vector, then each aggregate in `aggregates` is
+/// expected to be a HashAggregate function. If the keys attribute is an empty vector,
+/// then each aggregate is assumed to be a ScalarAggregate function.
+///
+/// If the segment_keys attribute is a non-empty vector, then segmented aggregation, as
+/// described above, applies.
+///
+/// The keys and segment_keys vectors must be disjoint.
+///
+/// If no measures are provided then you will simply get the list of unique keys.
+///
+/// This node outputs segment keys first, followed by regular keys, followed by one
+/// column for each aggregate.
+class ARROW_ACERO_EXPORT AggregateNodeOptions : public ExecNodeOptions {
+ public:
+  /// \brief create an instance from values
+  explicit AggregateNodeOptions(std::vector<Aggregate> aggregates,
+                                std::vector<FieldRef> keys = {},
+                                std::vector<FieldRef> segment_keys = {})
+      : aggregates(std::move(aggregates)),
+        keys(std::move(keys)),
+        segment_keys(std::move(segment_keys)) {}
+
+  // aggregations which will be applied to the targeted fields
+  std::vector<Aggregate> aggregates;
+  // keys by which aggregations will be grouped (optional)
+  std::vector<FieldRef> keys;
+  // keys by which aggregations will be segmented (optional)
+  std::vector<FieldRef> segment_keys;
+};
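
A hedged sketch of a grouped aggregation; with a non-empty keys vector the doc above requires HashAggregate functions, hence "hash_sum" rather than "sum". The column names are illustrative, and the convenience constructor taking a single FieldRef target is assumed from compute::Aggregate.

#include <vector>
#include "arrow/acero/options.h"

arrow::acero::AggregateNodeOptions MakeGroupedSum() {
  std::vector<arrow::compute::Aggregate> aggs;
  // sum(value) per distinct "key"; no FunctionOptions are needed for sum.
  aggs.push_back(arrow::compute::Aggregate(
      "hash_sum", /*options=*/nullptr, arrow::FieldRef("value"),
      /*name=*/"value_sum"));
  return arrow::acero::AggregateNodeOptions(std::move(aggs),
                                            /*keys=*/{arrow::FieldRef("key")});
}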
+
+/// \brief a default value at which backpressure will be applied
+constexpr int32_t kDefaultBackpressureHighBytes = 1 << 30;  // 1GiB
+/// \brief a default value at which backpressure will be removed
+constexpr int32_t kDefaultBackpressureLowBytes = 1 << 28;  // 256MiB
+
+/// \brief an interface that can be queried for backpressure statistics
+class ARROW_ACERO_EXPORT BackpressureMonitor {
+ public:
+  virtual ~BackpressureMonitor() = default;
+  /// \brief fetches the number of bytes currently queued up
+  virtual uint64_t bytes_in_use() = 0;
+  /// \brief checks to see if backpressure is currently applied
+  virtual bool is_paused() = 0;
+};
+
+/// \brief Options to control backpressure behavior
+struct ARROW_ACERO_EXPORT BackpressureOptions {
+  /// \brief Create default options that perform no backpressure
+  BackpressureOptions() : resume_if_below(0), pause_if_above(0) {}
+  /// \brief Create options that will perform backpressure
+  ///
+  /// \param resume_if_below The producer should resume producing if the backpressure
+  ///                        queue has fewer than resume_if_below bytes queued.
+  /// \param pause_if_above The producer should pause producing if the backpressure
+  ///                       queue has more than pause_if_above bytes queued
+  BackpressureOptions(uint64_t resume_if_below, uint64_t pause_if_above)
+      : resume_if_below(resume_if_below), pause_if_above(pause_if_above) {}
+
+  /// \brief create an instance using default values for backpressure limits
+  static BackpressureOptions DefaultBackpressure() {
+    return BackpressureOptions(kDefaultBackpressureLowBytes,
+                               kDefaultBackpressureHighBytes);
+  }
+
+  /// \brief helper method to determine if backpressure is disabled
+  /// \return true if pause_if_above is greater than zero, false otherwise
+  bool should_apply_backpressure() const { return pause_if_above > 0; }
+
+  /// \brief the number of bytes at which the producer should resume producing
+  uint64_t resume_if_below;
+  /// \brief the number of bytes at which the producer should pause producing
+  ///
+  /// If this is <= 0 then backpressure will be disabled
+  uint64_t pause_if_above;
+};
+
+/// \brief a sink node which collects results in a queue
+///
+/// Emitted batches will only be ordered if there is a meaningful ordering
+/// and sequence_output is not set to false.
+class ARROW_ACERO_EXPORT SinkNodeOptions : public ExecNodeOptions {
+ public:
+  explicit SinkNodeOptions(std::function<Future<std::optional<ExecBatch>>()>* generator,
+                           std::shared_ptr<Schema>* schema,
+                           BackpressureOptions backpressure = {},
+                           BackpressureMonitor** backpressure_monitor = NULLPTR,
+                           std::optional<bool> sequence_output = std::nullopt)
+      : generator(generator),
+        schema(schema),
+        backpressure(backpressure),
+        backpressure_monitor(backpressure_monitor),
+        sequence_output(sequence_output) {}
+
+  explicit SinkNodeOptions(std::function<Future<std::optional<ExecBatch>>()>* generator,
+                           BackpressureOptions backpressure = {},
+                           BackpressureMonitor** backpressure_monitor = NULLPTR,
+                           std::optional<bool> sequence_output = std::nullopt)
+      : generator(generator),
+        schema(NULLPTR),
+        backpressure(std::move(backpressure)),
+        backpressure_monitor(backpressure_monitor),
+        sequence_output(sequence_output) {}
+
+  /// \brief A pointer to a generator of batches.
+  ///
+  /// This will be set when the node is added to the plan and should be used to consume
+  /// data from the plan. If this function is not called frequently enough then the sink
+  /// node will start to accumulate data and may apply backpressure.
+  std::function<Future<std::optional<ExecBatch>>()>* generator;
+  /// \brief A pointer which will be set to the schema of the generated batches
+  ///
+  /// This is optional, if nullptr is passed in then it will be ignored.
+  /// This will be set when the node is added to the plan, before StartProducing is called
+  std::shared_ptr<Schema>* schema;
+  /// \brief Options to control when to apply backpressure
+  ///
+  /// This is optional, the default is to never apply backpressure. If the plan is not
+  /// consumed quickly enough the system may eventually run out of memory.
+  BackpressureOptions backpressure;
+  /// \brief A pointer to a backpressure monitor
+  ///
+  /// This will be set when the node is added to the plan. This can be used to inspect
+  /// the amount of data currently queued in the sink node. This is an optional utility
+  /// and backpressure can be applied even if this is not used.
+  BackpressureMonitor** backpressure_monitor;
+  /// \brief Controls whether batches should be emitted immediately or sequenced in order
+  ///
+  /// \see QueryOptions for more details
+  std::optional<bool> sequence_output;
+};
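
A sketch of wiring SinkNodeOptions with default backpressure; the out-parameters (generator, schema, monitor) are populated when the node is added to a plan, so the holder must outlive the plan. The SinkHandles struct is a hypothetical convenience, not part of the API.

#include <functional>
#include <memory>
#include <optional>
#include "arrow/acero/options.h"

struct SinkHandles {
  std::function<arrow::Future<std::optional<arrow::compute::ExecBatch>>()> gen;
  std::shared_ptr<arrow::Schema> schema;
  arrow::acero::BackpressureMonitor* monitor = nullptr;
};

arrow::acero::SinkNodeOptions MakeSink(SinkHandles* handles) {
  // After the node joins the plan, handles->gen() is polled to drain results;
  // polling too slowly triggers backpressure at the default byte thresholds.
  return arrow::acero::SinkNodeOptions(
      &handles->gen, &handles->schema,
      arrow::acero::BackpressureOptions::DefaultBackpressure(),
      &handles->monitor);
}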
+
+/// \brief Control used by a SinkNodeConsumer to pause & resume
+///
+/// Callers should ensure that they do not call Pause and Resume simultaneously and they
+/// should sequence things so that a call to Pause() is always followed by an eventual
+/// call to Resume()
+class ARROW_ACERO_EXPORT BackpressureControl {
+ public:
+  virtual ~BackpressureControl() = default;
+  /// \brief Ask the input to pause
+  ///
+  /// This is best effort, batches may continue to arrive
+  /// Must eventually be followed by a call to Resume() or deadlock will occur
+  virtual void Pause() = 0;
+  /// \brief Ask the input to resume
+  virtual void Resume() = 0;
+};
+
+/// \brief a sink node that consumes the data as part of the plan using callbacks
+class ARROW_ACERO_EXPORT SinkNodeConsumer {
+ public:
+  virtual ~SinkNodeConsumer() = default;
+  /// \brief Prepare any consumer state
+  ///
+  /// This will be run once the schema is finalized as the plan is starting and
+  /// before any calls to Consume. A common use is to save off the schema so that
+  /// batches can be interpreted.
+  virtual Status Init(const std::shared_ptr<Schema>& schema,
+                      BackpressureControl* backpressure_control, ExecPlan* plan) = 0;
+  /// \brief Consume a batch of data
+  virtual Status Consume(ExecBatch batch) = 0;
+  /// \brief Signal to the consumer that the last batch has been delivered
+  ///
+  /// The returned future should only finish when all outstanding tasks have completed
+  ///
+  /// If the plan is ended early or aborts due to an error then this will not be
+  /// called.
+  virtual Future<> Finish() = 0;
+};
+
+/// \brief Add a sink node which consumes data within the exec plan run
+class ARROW_ACERO_EXPORT ConsumingSinkNodeOptions : public ExecNodeOptions {
+ public:
+  explicit ConsumingSinkNodeOptions(std::shared_ptr<SinkNodeConsumer> consumer,
+                                    std::vector<std::string> names = {},
+                                    std::optional<bool> sequence_output = std::nullopt)
+      : consumer(std::move(consumer)),
+        names(std::move(names)),
+        sequence_output(sequence_output) {}
+
+  std::shared_ptr<SinkNodeConsumer> consumer;
+  /// \brief Names to rename the sink's schema fields to
+  ///
+  /// If specified then names must be provided for all fields. Currently, only a flat
+  /// schema is supported (see GH-31875).
+  ///
+  /// If not specified then names will be generated based on the source data.
+  std::vector<std::string> names;
+  /// \brief Controls whether batches should be emitted immediately or sequenced in order
+  ///
+  /// \see QueryOptions for more details
+  std::optional<bool> sequence_output;
+};
+
+/// \brief Make a node which sorts rows passed through it
+///
+/// All batches pushed to this node will be accumulated, then sorted, by the given
+/// fields. Then sorted batches will be forwarded to the generator in sorted order.
+class ARROW_ACERO_EXPORT OrderBySinkNodeOptions : public SinkNodeOptions {
+ public:
+  /// \brief create an instance from values
+  explicit OrderBySinkNodeOptions(
+      SortOptions sort_options,
+      std::function<Future<std::optional<ExecBatch>>()>* generator)
+      : SinkNodeOptions(generator), sort_options(std::move(sort_options)) {}
+
+  /// \brief options describing which columns and direction to sort
+  SortOptions sort_options;
+};
+
+/// \brief Apply a new ordering to data
+///
+/// Currently this node works by accumulating all data, sorting, and then emitting
+/// the new data with an updated batch index.
+///
+/// Larger-than-memory sort is not currently supported.
+class ARROW_ACERO_EXPORT OrderByNodeOptions : public ExecNodeOptions {
+ public:
+  static constexpr std::string_view kName = "order_by";
+  explicit OrderByNodeOptions(Ordering ordering) : ordering(std::move(ordering)) {}
+
+  /// \brief The new ordering to apply to outgoing data
+  Ordering ordering;
+};
+
+enum class JoinType {
+  LEFT_SEMI,
+  RIGHT_SEMI,
+  LEFT_ANTI,
+  RIGHT_ANTI,
+  INNER,
+  LEFT_OUTER,
+  RIGHT_OUTER,
+  FULL_OUTER
+};
+
+std::string ToString(JoinType t);
+
+enum class JoinKeyCmp { EQ, IS };
+
+/// \brief a node which implements a join operation using a hash table
+class ARROW_ACERO_EXPORT HashJoinNodeOptions : public ExecNodeOptions {
+ public:
+  static constexpr const char* default_output_suffix_for_left = "";
+  static constexpr const char* default_output_suffix_for_right = "";
+  /// \brief create an instance from values that outputs all columns
+  HashJoinNodeOptions(
+      JoinType in_join_type, std::vector<FieldRef> in_left_keys,
+      std::vector<FieldRef> in_right_keys, Expression filter = literal(true),
+      std::string output_suffix_for_left = default_output_suffix_for_left,
+      std::string output_suffix_for_right = default_output_suffix_for_right,
+      bool disable_bloom_filter = false)
+      : join_type(in_join_type),
+        left_keys(std::move(in_left_keys)),
+        right_keys(std::move(in_right_keys)),
+        output_all(true),
+        output_suffix_for_left(std::move(output_suffix_for_left)),
+        output_suffix_for_right(std::move(output_suffix_for_right)),
+        filter(std::move(filter)),
+        disable_bloom_filter(disable_bloom_filter) {
+    this->key_cmp.resize(this->left_keys.size());
+    for (size_t i = 0; i < this->left_keys.size(); ++i) {
+      this->key_cmp[i] = JoinKeyCmp::EQ;
+    }
+  }
+  /// \brief create an instance from keys
+  ///
+  /// This will create an inner join that outputs all columns and has no post join filter
+  ///
+  /// `in_left_keys` should have the same length and types as `in_right_keys`
+  /// @param in_left_keys the keys in the left input
+  /// @param in_right_keys the keys in the right input
+  HashJoinNodeOptions(std::vector<FieldRef> in_left_keys,
+                      std::vector<FieldRef> in_right_keys)
+      : left_keys(std::move(in_left_keys)), right_keys(std::move(in_right_keys)) {
+    this->join_type = JoinType::INNER;
+    this->output_all = true;
+    this->output_suffix_for_left = default_output_suffix_for_left;
+    this->output_suffix_for_right = default_output_suffix_for_right;
+    this->key_cmp.resize(this->left_keys.size());
+    for (size_t i = 0; i < this->left_keys.size(); ++i) {
+      this->key_cmp[i] = JoinKeyCmp::EQ;
+    }
+    this->filter = literal(true);
+  }
+  /// \brief create an instance from values using JoinKeyCmp::EQ for all comparisons
+  HashJoinNodeOptions(
+      JoinType join_type, std::vector<FieldRef> left_keys,
+      std::vector<FieldRef> right_keys, std::vector<FieldRef> left_output,
+      std::vector<FieldRef> right_output, Expression filter = literal(true),
+      std::string output_suffix_for_left = default_output_suffix_for_left,
+      std::string output_suffix_for_right = default_output_suffix_for_right,
+      bool disable_bloom_filter = false)
+      : join_type(join_type),
+        left_keys(std::move(left_keys)),
+        right_keys(std::move(right_keys)),
+        output_all(false),
+        left_output(std::move(left_output)),
+        right_output(std::move(right_output)),
+        output_suffix_for_left(std::move(output_suffix_for_left)),
+        output_suffix_for_right(std::move(output_suffix_for_right)),
+        filter(std::move(filter)),
+        disable_bloom_filter(disable_bloom_filter) {
+    this->key_cmp.resize(this->left_keys.size());
+    for (size_t i = 0; i < this->left_keys.size(); ++i) {
+      this->key_cmp[i] = JoinKeyCmp::EQ;
+    }
+  }
+  /// \brief create an instance from values
+  HashJoinNodeOptions(
+      JoinType join_type, std::vector<FieldRef> left_keys,
+      std::vector<FieldRef> right_keys, std::vector<FieldRef> left_output,
+      std::vector<FieldRef> right_output, std::vector<JoinKeyCmp> key_cmp,
+      Expression filter = literal(true),
+      std::string output_suffix_for_left = default_output_suffix_for_left,
+      std::string output_suffix_for_right = default_output_suffix_for_right,
+      bool disable_bloom_filter = false)
+      : join_type(join_type),
+        left_keys(std::move(left_keys)),
+        right_keys(std::move(right_keys)),
+        output_all(false),
+        left_output(std::move(left_output)),
+        right_output(std::move(right_output)),
+        key_cmp(std::move(key_cmp)),
+        output_suffix_for_left(std::move(output_suffix_for_left)),
+        output_suffix_for_right(std::move(output_suffix_for_right)),
+        filter(std::move(filter)),
+        disable_bloom_filter(disable_bloom_filter) {}
+
+  HashJoinNodeOptions() = default;
+
+  // type of join (inner, left, semi...)
+  JoinType join_type = JoinType::INNER;
+  // key fields from left input
+  std::vector<FieldRef> left_keys;
+  // key fields from right input
+  std::vector<FieldRef> right_keys;
+  // if set all valid fields from both left and right input will be output
+  // (and field ref vectors for output fields will be ignored)
+  bool output_all = false;
+  // output fields passed from left input
+  std::vector<FieldRef> left_output;
+  // output fields passed from right input
+  std::vector<FieldRef> right_output;
+  // key comparison function (determines whether a null key is equal to another null
+  // key or not)
+  std::vector<JoinKeyCmp> key_cmp;
+  // suffix added to names of output fields coming from left input (used to distinguish,
+  // if necessary, between fields of the same name in left and right input and can be left
+  // empty if there are no name collisions)
+  std::string output_suffix_for_left;
+  // suffix added to names of output fields coming from right input
+  std::string output_suffix_for_right;
+  // residual filter which is applied to matching rows. Rows that do not match
+  // the filter are not included. The filter is applied against the
+  // concatenated input schema (left fields then right fields) and can reference
+  // fields that are not included in the output.
+  Expression filter = literal(true);
+  // whether or not to disable Bloom filters in this join
+  bool disable_bloom_filter = false;
+};
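
A sketch of an inner equi-join configuration using the constructor that lists output columns; the key and column names are illustrative. Per the member comment above, the residual filter is evaluated against the concatenated left-then-right schema and may reference columns absent from the output.

#include "arrow/acero/options.h"

namespace cp = arrow::compute;

arrow::acero::HashJoinNodeOptions MakeJoin() {
  return arrow::acero::HashJoinNodeOptions(
      arrow::acero::JoinType::INNER,
      /*left_keys=*/{arrow::FieldRef("id")},
      /*right_keys=*/{arrow::FieldRef("id")},
      /*left_output=*/{arrow::FieldRef("id"), arrow::FieldRef("l_val")},
      /*right_output=*/{arrow::FieldRef("r_val")},
      // Keep only matches where the left value exceeds the right one.
      /*filter=*/cp::greater(cp::field_ref("l_val"), cp::field_ref("r_val")));
}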
+
+/// \brief a node which implements the asof join operation
+///
+/// Note, this API is experimental and will change in the future
+///
+/// This node takes one left table and any number of right tables, and asof joins them
+/// together. Batches produced by each input must be ordered by the "on" key.
+/// This node will output one row for each row in the left table.
+class ARROW_ACERO_EXPORT AsofJoinNodeOptions : public ExecNodeOptions {
+ public:
+  /// \brief Keys for one input table of the AsofJoin operation
+  ///
+  /// The keys must be consistent across the input tables:
+  /// Each "on" key must refer to a field of the same type and units across the tables.
+  /// Each "by" key must refer to a list of fields of the same types across the tables.
+  struct Keys {
+    /// \brief "on" key for the join.
+    ///
+    /// The input table must be sorted by the "on" key. Must be a single field of a common
+    /// type. Inexact match is used on the "on" key. i.e., a row is considered a match iff
+    /// left_on - tolerance <= right_on <= left_on.
+    /// Currently, the "on" key must be of an integer, date, or timestamp type.
+    FieldRef on_key;
+    /// \brief "by" key for the join.
+    ///
+    /// Each input table must have each field of the "by" key. Exact equality is used for
+    /// each field of the "by" key.
+    /// Currently, each field of the "by" key must be of an integer, date, timestamp, or
+    /// base-binary type.
+    std::vector<FieldRef> by_key;
+  };
+
+  AsofJoinNodeOptions(std::vector<Keys> input_keys, int64_t tolerance)
+      : input_keys(std::move(input_keys)), tolerance(tolerance) {}
+
+  /// \brief AsofJoin keys per input table. At least two keys must be given. The first key
+  /// corresponds to a left table and all other keys correspond to right tables for the
+  /// as-of-join.
+  ///
+  /// \see `Keys` for details.
+  std::vector<Keys> input_keys;
+  /// \brief Tolerance for inexact "on" key matching. A right row is considered a match
+  /// with the left row if `right.on - left.on <= tolerance`. The `tolerance` may be:
+  /// - negative, in which case a past-as-of-join occurs;
+  /// - or positive, in which case a future-as-of-join occurs;
+  /// - or zero, in which case an exact-as-of-join occurs.
+  ///
+  /// The tolerance is interpreted in the same units as the "on" key.
+  int64_t tolerance;
+};
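
A hedged sketch of AsofJoinNodeOptions for one left and one right input, both keyed on an illustrative "ts"/"id" pair (these field names are assumptions, not from the header).

#include <vector>
#include "arrow/acero/options.h"

arrow::acero::AsofJoinNodeOptions MakeAsofJoin() {
  using Keys = arrow::acero::AsofJoinNodeOptions::Keys;
  std::vector<Keys> keys;
  keys.push_back(Keys{arrow::FieldRef("ts"), {arrow::FieldRef("id")}});  // left
  keys.push_back(Keys{arrow::FieldRef("ts"), {arrow::FieldRef("id")}});  // right
  // Tolerance shares the units of "ts"; its sign selects past, future, or
  // exact matching per the member comment above.
  return arrow::acero::AsofJoinNodeOptions(std::move(keys), /*tolerance=*/1000);
}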
+
+/// \brief a node which selects top_k/bottom_k rows passed through it
+///
+/// All batches pushed to this node will be accumulated, then selected, by the given
+/// fields. Then sorted batches will be forwarded to the generator in sorted order.
+class ARROW_ACERO_EXPORT SelectKSinkNodeOptions : public SinkNodeOptions {
+ public:
+  explicit SelectKSinkNodeOptions(
+      SelectKOptions select_k_options,
+      std::function<Future<std::optional<ExecBatch>>()>* generator)
+      : SinkNodeOptions(generator), select_k_options(std::move(select_k_options)) {}
+
+  /// SelectK options
+  SelectKOptions select_k_options;
+};
+
+/// \brief a sink node which accumulates all output into a table
+class ARROW_ACERO_EXPORT TableSinkNodeOptions : public ExecNodeOptions {
+ public:
+  /// \brief create an instance from values
+  explicit TableSinkNodeOptions(std::shared_ptr<Table>* output_table,
+                                std::optional<bool> sequence_output = std::nullopt)
+      : output_table(output_table), sequence_output(sequence_output) {}
+
+  /// \brief an "out parameter" specifying the table that will be created
+  ///
+  /// Must not be null and remain valid for the entirety of the plan execution. After the
+  /// plan has completed this will be set to point to the result table
+  std::shared_ptr<Table>* output_table;
+  /// \brief Controls whether batches should be emitted immediately or sequenced in order
+  ///
+  /// \see QueryOptions for more details
+  std::optional<bool> sequence_output;
+  /// \brief Custom names to use for the columns.
+  ///
+  /// If specified then names must be provided for all fields. Currently, only a flat
+  /// schema is supported (see GH-31875).
+  ///
+  /// If not specified then names will be generated based on the source data.
+  std::vector<std::string> names;
+};
+
+/// \brief a row template that describes one row that will be generated for each input row
+struct ARROW_ACERO_EXPORT PivotLongerRowTemplate {
+  PivotLongerRowTemplate(std::vector<std::string> feature_values,
+                         std::vector<std::optional<FieldRef>> measurement_values)
+      : feature_values(std::move(feature_values)),
+        measurement_values(std::move(measurement_values)) {}
+  /// A (typically unique) set of feature values for the template, usually derived from a
+  /// column name
+  ///
+  /// These will be used to populate the feature columns
+  std::vector<std::string> feature_values;
+  /// The fields containing the measurements to use for this row
+  ///
+  /// These will be used to populate the measurement columns. If nullopt then nulls
+  /// will be inserted for the given value.
+  std::vector<std::optional<FieldRef>> measurement_values;
+};
+
+/// \brief Reshape a table by turning some columns into additional rows
+///
+/// This operation is sometimes also referred to as UNPIVOT
+///
+/// This is typically done when there are multiple observations in each row in order to
+/// transform to a table containing a single observation per row.
+///
+/// For example:
+///
+/// | time | left_temp | right_temp |
+/// | ---- | --------- | ---------- |
+/// | 1    | 10        | 20         |
+/// | 2    | 15        | 18         |
+///
+/// The above table contains two observations per row. There is an implicit feature
+/// "location" (left vs right) and a measurement "temp". What we really want is:
+///
+/// | time | location | temp |
+/// | ---- | -------- | ---- |
+/// | 1    | left     | 10   |
+/// | 1    | right    | 20   |
+/// | 2    | left     | 15   |
+/// | 2    | right    | 18   |
+///
+/// For a more complex example consider:
+///
+/// | time | ax1 | ay1 | bx1 | ay2 |
+/// | ---- | --- | --- | --- | --- |
+/// | 0    | 1   | 2   | 3   | 4   |
+///
+/// We can pretend a vs b and x vs y are features while 1 and 2 are two different
+/// kinds of measurements. We thus want to pivot to
+///
+/// | time | a/b | x/y | f1   | f2   |
+/// | ---- | --- | --- | ---- | ---- |
+/// | 0    | a   | x   | 1    | null |
+/// | 0    | a   | y   | 2    | 4    |
+/// | 0    | b   | x   | 3    | null |
+///
+/// To do this we create a row template for each combination of features. One should
+/// be able to do this purely by looking at the column names. For example, given the
+/// above columns "ax1", "ay1", "bx1", and "ay2" we know we have three feature
+/// combinations (a, x), (a, y), and (b, x). Similarly, we know we have two possible
+/// measurements, "1" and "2".
+///
+/// For each combination of features we create a row template. In each row template we
+/// describe the combination and then list which columns to use for the measurements.
+/// If a measurement doesn't exist for a given combination then we use nullopt.
+///
+/// So, for our above example, we have:
+///
+/// (a, x): names={"a", "x"}, values={"ax1", nullopt}
+/// (a, y): names={"a", "y"}, values={"ay1", "ay2"}
+/// (b, x): names={"b", "x"}, values={"bx1", nullopt}
+///
+/// Finishing it off we name our new columns:
+/// feature_field_names={"a/b","x/y"}
+/// measurement_field_names={"f1", "f2"}
+class ARROW_ACERO_EXPORT PivotLongerNodeOptions : public ExecNodeOptions {
+ public:
+  static constexpr std::string_view kName = "pivot_longer";
+  /// One or more row templates to create new output rows
+  ///
+  /// Normally there are at least two row templates. The output # of rows
+  /// will be the input # of rows * the number of row templates
+  std::vector<PivotLongerRowTemplate> row_templates;
+  /// The names of the columns which describe the new features
+  std::vector<std::string> feature_field_names;
+  /// The names of the columns which represent the measurements
+  std::vector<std::string> measurement_field_names;
+};
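
The temperature example from the comment above, expressed as options; a sketch assuming only the members declared in this header.

#include <optional>
#include <string>
#include <vector>
#include "arrow/acero/options.h"

arrow::acero::PivotLongerNodeOptions MakeUnpivot() {
  arrow::acero::PivotLongerNodeOptions opts;
  // One template per feature value; each input row yields two output rows.
  opts.row_templates.emplace_back(
      std::vector<std::string>{"left"},
      std::vector<std::optional<arrow::FieldRef>>{arrow::FieldRef("left_temp")});
  opts.row_templates.emplace_back(
      std::vector<std::string>{"right"},
      std::vector<std::optional<arrow::FieldRef>>{arrow::FieldRef("right_temp")});
  opts.feature_field_names = {"location"};
  opts.measurement_field_names = {"temp"};
  return opts;
}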
+
+/// @}
+
+}  // namespace acero
+}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/order_by_impl.h
ADDED
@@ -0,0 +1,56 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "arrow/acero/options.h"
+#include "arrow/record_batch.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type.h"
+
+namespace arrow {
+
+using compute::ExecContext;
+
+namespace acero {
+
+class OrderByImpl {
+ public:
+  virtual ~OrderByImpl() = default;
+
+  virtual void InputReceived(const std::shared_ptr<RecordBatch>& batch) = 0;
+
+  virtual Result<Datum> DoFinish() = 0;
+
+  virtual std::string ToString() const = 0;
+
+  static Result<std::unique_ptr<OrderByImpl>> MakeSort(
+      ExecContext* ctx, const std::shared_ptr<Schema>& output_schema,
+      const SortOptions& options);
+
+  static Result<std::unique_ptr<OrderByImpl>> MakeSelectK(
+      ExecContext* ctx, const std::shared_ptr<Schema>& output_schema,
+      const SelectKOptions& options);
+};
+
+}  // namespace acero
+}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/partition_util.h
ADDED
@@ -0,0 +1,184 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <random>
+#include "arrow/acero/util.h"
+#include "arrow/buffer.h"
+#include "arrow/util/pcg_random.h"
+
+namespace arrow {
+namespace acero {
+
+class PartitionSort {
+ public:
+  /// \brief Bucket sort rows on partition ids in O(num_rows) time.
+  ///
+  /// Include in the output exclusive cumulative sum of bucket sizes.
+  /// This corresponds to ranges in the sorted array containing all row ids for
+  /// each of the partitions.
+  ///
+  /// prtn_ranges must be initialized and have at least num_prtns + 1 elements
+  /// when this method returns prtn_ranges[i] will contain the total number of
+  /// elements in partitions 0 through i. prtn_ranges[0] will be 0.
+  ///
+  /// prtn_id_impl must be a function that takes in a row id (int) and returns
+  /// a partition id (int). The returned partition id must be between 0 and
+  /// num_prtns (exclusive).
+  ///
+  /// output_pos_impl is a function that takes in a row id (int) and a position (int)
+  /// in the bucket sorted output. The function should insert the row in the
+  /// output.
+  ///
+  /// For example:
+  ///
+  /// in_arr: [5, 7, 2, 3, 5, 4]
+  /// num_prtns: 3
+  /// prtn_id_impl: [&in_arr] (int row_id) { return in_arr[row_id] / 3; }
+  /// output_pos_impl: [&out_arr, &in_arr] (int row_id, int pos) { out_arr[pos] = in_arr[row_id]; }
+  ///
+  /// After Execution
+  /// out_arr: [2, 5, 3, 5, 4, 7]
+  /// prtn_ranges: [0, 1, 5, 6]
+  template <class INPUT_PRTN_ID_FN, class OUTPUT_POS_FN>
+  static void Eval(int64_t num_rows, int num_prtns, uint16_t* prtn_ranges,
+                   INPUT_PRTN_ID_FN prtn_id_impl, OUTPUT_POS_FN output_pos_impl) {
+    ARROW_DCHECK(num_rows > 0 && num_rows <= (1 << 15));
+    ARROW_DCHECK(num_prtns >= 1 && num_prtns <= (1 << 15));
+
+    memset(prtn_ranges, 0, (num_prtns + 1) * sizeof(uint16_t));
+
+    for (int64_t i = 0; i < num_rows; ++i) {
+      int prtn_id = static_cast<int>(prtn_id_impl(i));
+      ++prtn_ranges[prtn_id + 1];
+    }
+
+    uint16_t sum = 0;
+    for (int i = 0; i < num_prtns; ++i) {
+      uint16_t sum_next = sum + prtn_ranges[i + 1];
+      prtn_ranges[i + 1] = sum;
+      sum = sum_next;
+    }
+
+    for (int64_t i = 0; i < num_rows; ++i) {
+      int prtn_id = static_cast<int>(prtn_id_impl(i));
+      int pos = prtn_ranges[prtn_id + 1]++;
+      output_pos_impl(i, pos);
+    }
+  }
+};
89 |
+
|
90 |
+
/// \brief A control for synchronizing threads on a partitionable workload
|
91 |
+
class PartitionLocks {
|
92 |
+
public:
|
93 |
+
PartitionLocks();
|
94 |
+
~PartitionLocks();
|
95 |
+
/// \brief Initializes the control, must be called before use
|
96 |
+
///
|
97 |
+
/// \param num_threads Maximum number of threads that will access the partitions
|
98 |
+
/// \param num_prtns Number of partitions to synchronize
|
99 |
+
void Init(size_t num_threads, int num_prtns);
|
100 |
+
/// \brief Cleans up the control, it should not be used after this call
|
101 |
+
void CleanUp();
|
102 |
+
/// \brief Acquire a partition to work on one
|
103 |
+
///
|
104 |
+
/// \param thread_id The index of the thread trying to acquire the partition lock
|
105 |
+
/// \param num_prtns Length of prtns_to_try, must be <= num_prtns used in Init
|
106 |
+
/// \param prtns_to_try An array of partitions that still have remaining work
|
107 |
+
/// \param limit_retries If false, this method will spinwait forever until success
|
108 |
+
/// \param max_retries Max times to attempt checking out work before returning false
|
109 |
+
/// \param[out] locked_prtn_id The id of the partition locked
|
110 |
+
/// \param[out] locked_prtn_id_pos The index of the partition locked in prtns_to_try
|
111 |
+
/// \return True if a partition was locked, false if max_retries was attempted
|
112 |
+
/// without successfully acquiring a lock
|
113 |
+
///
|
114 |
+
/// This method is thread safe
|
115 |
+
bool AcquirePartitionLock(size_t thread_id, int num_prtns, const int* prtns_to_try,
|
116 |
+
bool limit_retries, int max_retries, int* locked_prtn_id,
|
117 |
+
int* locked_prtn_id_pos);
|
118 |
+
/// \brief Release a partition so that other threads can work on it
|
119 |
+
void ReleasePartitionLock(int prtn_id);
|
120 |
+
|
121 |
+
// Executes (synchronously and using current thread) the same operation on a set of
|
122 |
+
// multiple partitions. Tries to minimize partition locking overhead by randomizing and
|
123 |
+
// adjusting order in which partitions are processed.
|
124 |
+
//
|
125 |
+
// PROCESS_PRTN_FN is a callback which will be executed for each partition after
|
126 |
+
// acquiring the lock for that partition. It gets partition id as an argument.
|
127 |
+
// IS_PRTN_EMPTY_FN is a callback which filters out (when returning true) partitions
|
128 |
+
// with specific ids from processing.
|
129 |
+
//
|
130 |
+
template <typename IS_PRTN_EMPTY_FN, typename PROCESS_PRTN_FN>
|
131 |
+
Status ForEachPartition(size_t thread_id,
|
132 |
+
/*scratch space buffer with space for one element per partition;
|
133 |
+
dirty in and dirty out*/
|
134 |
+
int* temp_unprocessed_prtns, IS_PRTN_EMPTY_FN is_prtn_empty_fn,
|
135 |
+
PROCESS_PRTN_FN process_prtn_fn) {
|
136 |
+
int num_unprocessed_partitions = 0;
|
137 |
+
for (int i = 0; i < num_prtns_; ++i) {
|
138 |
+
bool is_prtn_empty = is_prtn_empty_fn(i);
|
139 |
+
if (!is_prtn_empty) {
|
140 |
+
temp_unprocessed_prtns[num_unprocessed_partitions++] = i;
|
141 |
+
}
|
142 |
+
}
|
143 |
+
while (num_unprocessed_partitions > 0) {
|
144 |
+
int locked_prtn_id;
|
145 |
+
int locked_prtn_id_pos;
|
146 |
+
AcquirePartitionLock(thread_id, num_unprocessed_partitions, temp_unprocessed_prtns,
|
147 |
+
/*limit_retries=*/false, /*max_retries=*/-1, &locked_prtn_id,
|
148 |
+
&locked_prtn_id_pos);
|
149 |
+
{
|
150 |
+
class AutoReleaseLock {
|
151 |
+
public:
|
152 |
+
AutoReleaseLock(PartitionLocks* locks, int prtn_id)
|
153 |
+
: locks(locks), prtn_id(prtn_id) {}
|
154 |
+
~AutoReleaseLock() { locks->ReleasePartitionLock(prtn_id); }
|
155 |
+
PartitionLocks* locks;
|
156 |
+
int prtn_id;
|
157 |
+
} auto_release_lock(this, locked_prtn_id);
|
158 |
+
ARROW_RETURN_NOT_OK(process_prtn_fn(locked_prtn_id));
|
159 |
+
}
|
160 |
+
if (locked_prtn_id_pos < num_unprocessed_partitions - 1) {
|
161 |
+
temp_unprocessed_prtns[locked_prtn_id_pos] =
|
162 |
+
temp_unprocessed_prtns[num_unprocessed_partitions - 1];
|
163 |
+
}
|
164 |
+
--num_unprocessed_partitions;
|
165 |
+
}
|
166 |
+
return Status::OK();
|
167 |
+
}
|
168 |
+
|
169 |
+
private:
|
170 |
+
std::atomic<bool>* lock_ptr(int prtn_id);
|
171 |
+
int random_int(size_t thread_id, int num_values);
|
172 |
+
|
173 |
+
struct PartitionLock {
|
174 |
+
static constexpr int kCacheLineBytes = 64;
|
175 |
+
std::atomic<bool> lock;
|
176 |
+
uint8_t padding[kCacheLineBytes];
|
177 |
+
};
|
178 |
+
int num_prtns_;
|
179 |
+
std::unique_ptr<PartitionLock[]> locks_;
|
180 |
+
std::unique_ptr<arrow::random::pcg32_fast[]> rngs_;
|
181 |
+
};
|
182 |
+
|
183 |
+
} // namespace acero
|
184 |
+
} // namespace arrow
|
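The bucket sort above can be exercised directly; this sketch reproduces the worked example from the doc comment (the arrays and lambdas are illustrative, not part of the header):

#include <cstdint>
#include <vector>
#include "arrow/acero/partition_util.h"

void BucketSortExample() {
  std::vector<int> in_arr = {5, 7, 2, 3, 5, 4};
  std::vector<int> out_arr(in_arr.size());
  uint16_t prtn_ranges[4];  // num_prtns + 1 entries, filled by Eval
  arrow::acero::PartitionSort::Eval(
      /*num_rows=*/6, /*num_prtns=*/3, prtn_ranges,
      [&](int64_t row_id) { return in_arr[row_id] / 3; },  // partition id per row
      [&](int64_t row_id, int pos) { out_arr[pos] = in_arr[row_id]; });
  // out_arr is now [2, 5, 3, 5, 4, 7] and prtn_ranges is [0, 1, 5, 6]:
  // partition i owns output positions [prtn_ranges[i], prtn_ranges[i + 1]).
}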
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h
ADDED
@@ -0,0 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Often-used headers, for precompiling.
+// If updating this header, please make sure you check compilation speed
+// before checking in. Adding headers which are not used extremely often
+// may incur a slowdown, since it makes the precompiled header heavier to load.
+
+#include "arrow/pch.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/query_context.h
ADDED
@@ -0,0 +1,157 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+#pragma once
+
+#include <string_view>
+
+#include "arrow/acero/exec_plan.h"
+#include "arrow/acero/task_util.h"
+#include "arrow/acero/util.h"
+#include "arrow/compute/exec.h"
+#include "arrow/io/interfaces.h"
+#include "arrow/util/async_util.h"
+#include "arrow/util/type_fwd.h"
+
+namespace arrow {
+
+using compute::default_exec_context;
+using io::IOContext;
+
+namespace acero {
+
+class ARROW_ACERO_EXPORT QueryContext {
+ public:
+  QueryContext(QueryOptions opts = {},
+               ExecContext exec_context = *default_exec_context());
+
+  Status Init(size_t max_num_threads, arrow::util::AsyncTaskScheduler* scheduler);
+
+  const ::arrow::internal::CpuInfo* cpu_info() const;
+  int64_t hardware_flags() const;
+  const QueryOptions& options() const { return options_; }
+  MemoryPool* memory_pool() const { return exec_context_.memory_pool(); }
+  ::arrow::internal::Executor* executor() const { return exec_context_.executor(); }
+  ExecContext* exec_context() { return &exec_context_; }
+  IOContext* io_context() { return &io_context_; }
+  TaskScheduler* scheduler() { return task_scheduler_.get(); }
+  arrow::util::AsyncTaskScheduler* async_scheduler() { return async_scheduler_; }
+
+  size_t GetThreadIndex();
+  size_t max_concurrency() const;
+  Result<arrow::util::TempVectorStack*> GetTempStack(size_t thread_index);
+
+  /// \brief Start an external task
+  ///
+  /// This should be avoided if possible. It is kept in for now for legacy
+  /// purposes. This should be called before the external task is started. If
+  /// a valid future is returned then it should be marked complete when the
+  /// external task has finished.
+  ///
+  /// \param name A name to give the task for traceability and debugging
+  ///
+  /// \return an invalid future if the plan has already ended, otherwise this
+  ///         returns a future that must be completed when the external task
+  ///         finishes.
+  Result<Future<>> BeginExternalTask(std::string_view name);
+
+  /// \brief Add a single function as a task to the query's task group
+  /// on the compute threadpool.
+  ///
+  /// \param fn The task to run. Takes no arguments and returns a Status.
+  /// \param name A name to give the task for traceability and debugging
+  void ScheduleTask(std::function<Status()> fn, std::string_view name);
+  /// \brief Add a single function as a task to the query's task group
+  /// on the compute threadpool.
+  ///
+  /// \param fn The task to run. Takes the thread index and returns a Status.
+  /// \param name A name to give the task for traceability and debugging
+  void ScheduleTask(std::function<Status(size_t)> fn, std::string_view name);
+  /// \brief Add a single function as a task to the query's task group on
+  /// the IO thread pool
+  ///
+  /// \param fn The task to run. Returns a status.
+  /// \param name A name to give the task for traceability and debugging
+  void ScheduleIOTask(std::function<Status()> fn, std::string_view name);
+
+  // Register/Start TaskGroup is a way of performing a "Parallel For" pattern:
+  // - The task function takes the thread index and the index of the task
+  // - The on_finished function takes the thread index
+  // Returns an integer ID that will be used to reference the task group in
+  // StartTaskGroup. At runtime, call StartTaskGroup with the ID and the number of times
+  // you'd like the task to be executed. The need to register a task group before use will
+  // be removed after we rewrite the scheduler.
+  /// \brief Register a "parallel for" task group with the scheduler
+  ///
+  /// \param task The function implementing the task. Takes the thread_index and
+  ///             the task index.
+  /// \param on_finished The function that gets run once all tasks have been completed.
+  ///                    Takes the thread_index.
+  ///
+  /// Must be called inside of ExecNode::Init.
+  int RegisterTaskGroup(std::function<Status(size_t, int64_t)> task,
+                        std::function<Status(size_t)> on_finished);
+
+  /// \brief Start the task group with the specified ID. This can only
+  /// be called once per task_group_id.
+  ///
+  /// \param task_group_id The ID of the task group to run
+  /// \param num_tasks The number of times to run the task
+  Status StartTaskGroup(int task_group_id, int64_t num_tasks);
+
+  // This is an RAII class for keeping track of in-flight file IO. Useful for getting
+  // an estimate of memory use, and how much memory we expect to be freed soon.
+  // Returned by ReportTempFileIO.
+  struct [[nodiscard]] TempFileIOMark {
+    QueryContext* ctx_;
+    size_t bytes_;
+
+    TempFileIOMark(QueryContext* ctx, size_t bytes) : ctx_(ctx), bytes_(bytes) {
+      ctx_->in_flight_bytes_to_disk_.fetch_add(bytes_, std::memory_order_acquire);
+    }
+
+    ARROW_DISALLOW_COPY_AND_ASSIGN(TempFileIOMark);
+
+    ~TempFileIOMark() {
+      ctx_->in_flight_bytes_to_disk_.fetch_sub(bytes_, std::memory_order_release);
+    }
+  };
+
+  TempFileIOMark ReportTempFileIO(size_t bytes) { return {this, bytes}; }
+
+  size_t GetCurrentTempFileIO() { return in_flight_bytes_to_disk_.load(); }
+
+ private:
+  QueryOptions options_;
+  // To be replaced with Acero-specific context once scheduler is done and
+  // we don't need ExecContext for kernels
+  ExecContext exec_context_;
+  IOContext io_context_;
+
+  arrow::util::AsyncTaskScheduler* async_scheduler_ = NULLPTR;
+  std::unique_ptr<TaskScheduler> task_scheduler_ = TaskScheduler::Make();
+
+  ThreadIndexer thread_indexer_;
+  struct ThreadLocalData {
+    bool is_init = false;
+    arrow::util::TempVectorStack stack;
+  };
+  std::vector<ThreadLocalData> tld_;
+
+  std::atomic<size_t> in_flight_bytes_to_disk_{0};
+};
+}  // namespace acero
+}  // namespace arrow
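A sketch of the register-then-start pattern described in the comments above, as it would look inside a hypothetical ExecNode (ProcessChunk and OnAllChunksDone are placeholders, not Acero APIs):

#include "arrow/acero/query_context.h"

// Hypothetical node fragment: the task group is registered once during
// initialization; StartTaskGroup later fans the task out num_chunks times.
class MyNodeImpl {
 public:
  explicit MyNodeImpl(arrow::acero::QueryContext* ctx) : ctx_(ctx) {}

  void Init() {
    task_group_id_ = ctx_->RegisterTaskGroup(
        [this](size_t thread_index, int64_t task_id) {
          return ProcessChunk(thread_index, task_id);  // one symmetric unit of work
        },
        [this](size_t thread_index) {
          return OnAllChunksDone(thread_index);  // runs after the last task
        });
  }

  arrow::Status StartWork(int64_t num_chunks) {
    return ctx_->StartTaskGroup(task_group_id_, num_chunks);
  }

 private:
  arrow::Status ProcessChunk(size_t, int64_t) { return arrow::Status::OK(); }
  arrow::Status OnAllChunksDone(size_t) { return arrow::Status::OK(); }

  arrow::acero::QueryContext* ctx_;
  int task_group_id_ = -1;
};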
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h
ADDED
@@ -0,0 +1,226 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "arrow/type.h"  // for DataType, FieldRef, Field and Schema
+
+namespace arrow {
+
+using internal::checked_cast;
+
+namespace acero {
+
+// Identifiers for all different row schemas that are used in a join
+//
+enum class HashJoinProjection : int {
+  INPUT = 0,
+  KEY = 1,
+  PAYLOAD = 2,
+  FILTER = 3,
+  OUTPUT = 4
+};
+
+struct SchemaProjectionMap {
+  static constexpr int kMissingField = -1;
+  int num_cols;
+  const int* source_to_base;
+  const int* base_to_target;
+  inline int get(int i) const {
+    assert(i >= 0 && i < num_cols);
+    assert(source_to_base[i] != kMissingField);
+    return base_to_target[source_to_base[i]];
+  }
+};
+
+/// Helper class for managing different projections of the same row schema.
+/// Used to efficiently map any field in one projection to a corresponding field in
+/// another projection.
+/// Materialized mappings are generated lazily at the time of the first access.
+/// Thread-safe apart from initialization.
+template <typename ProjectionIdEnum>
+class SchemaProjectionMaps {
+ public:
+  static constexpr int kMissingField = -1;
+
+  Status Init(ProjectionIdEnum full_schema_handle, const Schema& schema,
+              const std::vector<ProjectionIdEnum>& projection_handles,
+              const std::vector<const std::vector<FieldRef>*>& projections) {
+    assert(projection_handles.size() == projections.size());
+    ARROW_RETURN_NOT_OK(RegisterSchema(full_schema_handle, schema));
+    for (size_t i = 0; i < projections.size(); ++i) {
+      ARROW_RETURN_NOT_OK(
+          RegisterProjectedSchema(projection_handles[i], *(projections[i]), schema));
+    }
+    RegisterEnd();
+    return Status::OK();
+  }
+
+  int num_cols(ProjectionIdEnum schema_handle) const {
+    int id = schema_id(schema_handle);
+    return static_cast<int>(schemas_[id].second.data_types.size());
+  }
+
+  bool is_empty(ProjectionIdEnum schema_handle) const {
+    return num_cols(schema_handle) == 0;
+  }
+
+  const std::string& field_name(ProjectionIdEnum schema_handle, int field_id) const {
+    int id = schema_id(schema_handle);
+    return schemas_[id].second.field_names[field_id];
+  }
+
+  const std::shared_ptr<DataType>& data_type(ProjectionIdEnum schema_handle,
+                                             int field_id) const {
+    int id = schema_id(schema_handle);
+    return schemas_[id].second.data_types[field_id];
+  }
+
+  const std::vector<std::shared_ptr<DataType>>& data_types(
+      ProjectionIdEnum schema_handle) const {
+    int id = schema_id(schema_handle);
+    return schemas_[id].second.data_types;
+  }
+
+  SchemaProjectionMap map(ProjectionIdEnum from, ProjectionIdEnum to) const {
+    int id_from = schema_id(from);
+    int id_to = schema_id(to);
+    SchemaProjectionMap result;
+    result.num_cols = num_cols(from);
+    result.source_to_base = mappings_[id_from].data();
+    result.base_to_target = inverse_mappings_[id_to].data();
+    return result;
+  }
+
+ protected:
+  struct FieldInfos {
+    std::vector<int> field_paths;
+    std::vector<std::string> field_names;
+    std::vector<std::shared_ptr<DataType>> data_types;
+  };
+
+  Status RegisterSchema(ProjectionIdEnum handle, const Schema& schema) {
+    FieldInfos out_fields;
+    const FieldVector& in_fields = schema.fields();
+    out_fields.field_paths.resize(in_fields.size());
+    out_fields.field_names.resize(in_fields.size());
+    out_fields.data_types.resize(in_fields.size());
+    for (size_t i = 0; i < in_fields.size(); ++i) {
+      const std::string& name = in_fields[i]->name();
+      const std::shared_ptr<DataType>& type = in_fields[i]->type();
+      out_fields.field_paths[i] = static_cast<int>(i);
+      out_fields.field_names[i] = name;
+      out_fields.data_types[i] = type;
+    }
+    schemas_.push_back(std::make_pair(handle, out_fields));
+    return Status::OK();
+  }
+
+  Status RegisterProjectedSchema(ProjectionIdEnum handle,
+                                 const std::vector<FieldRef>& selected_fields,
+                                 const Schema& full_schema) {
+    FieldInfos out_fields;
+    const FieldVector& in_fields = full_schema.fields();
+    out_fields.field_paths.resize(selected_fields.size());
+    out_fields.field_names.resize(selected_fields.size());
+    out_fields.data_types.resize(selected_fields.size());
+    for (size_t i = 0; i < selected_fields.size(); ++i) {
+      // All fields must be found in schema without ambiguity
+      ARROW_ASSIGN_OR_RAISE(auto match, selected_fields[i].FindOne(full_schema));
+      const std::string& name = in_fields[match[0]]->name();
+      const std::shared_ptr<DataType>& type = in_fields[match[0]]->type();
+      out_fields.field_paths[i] = match[0];
+      out_fields.field_names[i] = name;
+      out_fields.data_types[i] = type;
+    }
+    schemas_.push_back(std::make_pair(handle, out_fields));
+    return Status::OK();
+  }
+
+  void RegisterEnd() {
+    size_t size = schemas_.size();
+    mappings_.resize(size);
+    inverse_mappings_.resize(size);
+    int id_base = 0;
+    for (size_t i = 0; i < size; ++i) {
+      GenerateMapForProjection(static_cast<int>(i), id_base);
+    }
+  }
+
+  int schema_id(ProjectionIdEnum schema_handle) const {
+    for (size_t i = 0; i < schemas_.size(); ++i) {
+      if (schemas_[i].first == schema_handle) {
+        return static_cast<int>(i);
+      }
+    }
+    // We should never get here
+    assert(false);
+    return -1;
+  }
+
+  void GenerateMapForProjection(int id_proj, int id_base) {
+    int num_cols_proj = static_cast<int>(schemas_[id_proj].second.data_types.size());
+    int num_cols_base = static_cast<int>(schemas_[id_base].second.data_types.size());
+
+    std::vector<int>& mapping = mappings_[id_proj];
+    std::vector<int>& inverse_mapping = inverse_mappings_[id_proj];
+    mapping.resize(num_cols_proj);
+    inverse_mapping.resize(num_cols_base);
+
+    if (id_proj == id_base) {
+      for (int i = 0; i < num_cols_base; ++i) {
+        mapping[i] = inverse_mapping[i] = i;
+      }
+    } else {
+      const FieldInfos& fields_proj = schemas_[id_proj].second;
+      const FieldInfos& fields_base = schemas_[id_base].second;
+      for (int i = 0; i < num_cols_base; ++i) {
+        inverse_mapping[i] = SchemaProjectionMap::kMissingField;
+      }
+      for (int i = 0; i < num_cols_proj; ++i) {
+        int field_id = SchemaProjectionMap::kMissingField;
+        for (int j = 0; j < num_cols_base; ++j) {
+          if (fields_proj.field_paths[i] == fields_base.field_paths[j]) {
+            field_id = j;
+            // If there are multiple matches for the same input field,
+            // it will be mapped to the first match.
+            break;
+          }
+        }
+        assert(field_id != SchemaProjectionMap::kMissingField);
+        mapping[i] = field_id;
+        inverse_mapping[field_id] = i;
+      }
+    }
+  }
+
+  // vector used as a mapping from ProjectionIdEnum to fields
+  std::vector<std::pair<ProjectionIdEnum, FieldInfos>> schemas_;
+  std::vector<std::vector<int>> mappings_;
+  std::vector<std::vector<int>> inverse_mappings_;
+};
+
+using HashJoinProjectionMaps = SchemaProjectionMaps<HashJoinProjection>;
+
+}  // namespace acero
+}  // namespace arrow
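The map() accessor composes the two lazily generated mappings; a hypothetical hash-join fragment might use it like this (schema, key_refs, and output_refs are assumed caller inputs):

#include "arrow/acero/schema_util.h"

// Hypothetical fragment: map fields of the KEY projection to their positions
// in the OUTPUT projection of the same row schema.
arrow::Status MapKeysToOutput(const arrow::Schema& schema,
                              const std::vector<arrow::FieldRef>& key_refs,
                              const std::vector<arrow::FieldRef>& output_refs) {
  using arrow::acero::HashJoinProjection;
  arrow::acero::HashJoinProjectionMaps schema_mgr;
  ARROW_RETURN_NOT_OK(schema_mgr.Init(
      HashJoinProjection::INPUT, schema,
      {HashJoinProjection::KEY, HashJoinProjection::OUTPUT},
      {&key_refs, &output_refs}));
  arrow::acero::SchemaProjectionMap key_to_output =
      schema_mgr.map(HashJoinProjection::KEY, HashJoinProjection::OUTPUT);
  for (int i = 0; i < key_to_output.num_cols; ++i) {
    int out_col = key_to_output.get(i);  // position of key field i in OUTPUT
    (void)out_col;
  }
  return arrow::Status::OK();
}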
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/task_util.h
ADDED
@@ -0,0 +1,102 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <atomic>
+#include <cstdint>
+#include <functional>
+#include <vector>
+
+#include "arrow/acero/visibility.h"
+#include "arrow/status.h"
+#include "arrow/util/config.h"
+#include "arrow/util/logging.h"
+
+namespace arrow {
+namespace acero {
+
+// Atomic value surrounded by padding bytes to avoid cache line invalidation
+// whenever it is modified by a concurrent thread on a different CPU core.
+//
+template <typename T>
+class AtomicWithPadding {
+ private:
+  static constexpr int kCacheLineSize = 64;
+  uint8_t padding_before[kCacheLineSize];
+
+ public:
+  std::atomic<T> value;
+
+ private:
+  uint8_t padding_after[kCacheLineSize];
+};
+
+// Used for asynchronous execution of operations that can be broken into
+// a fixed number of symmetric tasks that can be executed concurrently.
+//
+// Implements priorities between multiple such operations, called task groups.
+//
+// Allows specifying the maximum number of in-flight tasks at any moment.
+//
+// Also allows executing the next pending tasks immediately using the caller thread.
+//
+class ARROW_ACERO_EXPORT TaskScheduler {
+ public:
+  using TaskImpl = std::function<Status(size_t, int64_t)>;
+  using TaskGroupContinuationImpl = std::function<Status(size_t)>;
+  using ScheduleImpl = std::function<Status(TaskGroupContinuationImpl)>;
+  using AbortContinuationImpl = std::function<void()>;
+
+  virtual ~TaskScheduler() = default;
+
+  // The order in which task groups are registered represents the priorities of their
+  // tasks (the first group has the highest priority).
+  //
+  // Returns a task group identifier that is used to request operations on the task group.
+  virtual int RegisterTaskGroup(TaskImpl task_impl,
+                                TaskGroupContinuationImpl cont_impl) = 0;
+
+  virtual void RegisterEnd() = 0;
+
+  // total_num_tasks may be zero, in which case the task group continuation will be
+  // executed immediately
+  virtual Status StartTaskGroup(size_t thread_id, int group_id,
+                                int64_t total_num_tasks) = 0;
+
+  // Execute a given number of tasks immediately using the caller thread
+  virtual Status ExecuteMore(size_t thread_id, int num_tasks_to_execute,
+                             bool execute_all) = 0;
+
+  // Begin scheduling tasks using the provided callback and
+  // the limit on the number of in-flight tasks at any moment.
+  //
+  // Scheduling will continue as long as there are waiting tasks.
+  //
+  // It will automatically resume whenever a new task group gets started.
+  virtual Status StartScheduling(size_t thread_id, ScheduleImpl schedule_impl,
+                                 int num_concurrent_tasks, bool use_sync_execution) = 0;
+
+  // Abort scheduling and execution.
+  // Used in case of being notified about an unrecoverable error for the entire query.
+  virtual void Abort(AbortContinuationImpl impl) = 0;
+
+  static std::unique_ptr<TaskScheduler> Make();
+};
+
+}  // namespace acero
+}  // namespace arrow
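The intended call sequence, as a hypothetical fully synchronous sketch (in real use the schedule_impl callback would hand continuations to a thread pool rather than run them inline):

#include "arrow/acero/task_util.h"

arrow::Status RunEightTasks() {
  auto scheduler = arrow::acero::TaskScheduler::Make();
  int group_id = scheduler->RegisterTaskGroup(
      [](size_t /*thread_id*/, int64_t /*task_id*/) { return arrow::Status::OK(); },
      [](size_t /*thread_id*/) { return arrow::Status::OK(); });  // continuation
  scheduler->RegisterEnd();
  ARROW_RETURN_NOT_OK(scheduler->StartScheduling(
      /*thread_id=*/0,
      // Degenerate "executor": run each continuation on the calling thread.
      [](arrow::acero::TaskScheduler::TaskGroupContinuationImpl cont) {
        return cont(/*thread_id=*/0);
      },
      /*num_concurrent_tasks=*/1, /*use_sync_execution=*/true));
  return scheduler->StartTaskGroup(/*thread_id=*/0, group_id, /*total_num_tasks=*/8);
}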
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/test_nodes.h
ADDED
@@ -0,0 +1,86 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <string>
+
+#include "arrow/acero/options.h"
+#include "arrow/acero/test_util_internal.h"
+#include "arrow/testing/random.h"
+
+namespace arrow {
+namespace acero {
+
+// \brief Make a delaying source that is optionally noisy (prints when it emits)
+AsyncGenerator<std::optional<ExecBatch>> MakeDelayedGen(
+    Iterator<std::optional<ExecBatch>> src, std::string label, double delay_sec,
+    bool noisy = false);
+
+// \brief Make a delaying source that is optionally noisy (prints when it emits)
+AsyncGenerator<std::optional<ExecBatch>> MakeDelayedGen(
+    AsyncGenerator<std::optional<ExecBatch>> src, std::string label, double delay_sec,
+    bool noisy = false);
+
+// \brief Make a delaying source that is optionally noisy (prints when it emits)
+AsyncGenerator<std::optional<ExecBatch>> MakeDelayedGen(BatchesWithSchema src,
+                                                        std::string label,
+                                                        double delay_sec,
+                                                        bool noisy = false);
+
+/// A node that slightly resequences the input at random
+struct JitterNodeOptions : public ExecNodeOptions {
+  random::SeedType seed;
+  /// The max amount to add to a node's "cost".
+  int max_jitter_modifier;
+
+  explicit JitterNodeOptions(random::SeedType seed, int max_jitter_modifier = 5)
+      : seed(seed), max_jitter_modifier(max_jitter_modifier) {}
+  static constexpr std::string_view kName = "jitter";
+};
+
+class GateImpl;
+
+class Gate {
+ public:
+  static std::shared_ptr<Gate> Make();
+
+  Gate();
+  virtual ~Gate();
+
+  void ReleaseAllBatches();
+  void ReleaseOneBatch();
+  Future<> WaitForNextReleasedBatch();
+
+ private:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(Gate);
+
+  GateImpl* impl_;
+};
+
+// A node that holds all input batches until a given gate is released
+struct GatedNodeOptions : public ExecNodeOptions {
+  explicit GatedNodeOptions(Gate* gate) : gate(gate) {}
+  Gate* gate;
+
+  static constexpr std::string_view kName = "gated";
+};
+
+void RegisterTestNodes();
+
+}  // namespace acero
+}  // namespace arrow
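A sketch of how the gate is meant to be used in tests (plan construction is elided; the node options carry a raw pointer to the gate):

#include "arrow/acero/test_nodes.h"

// Hypothetical test fragment: batches pile up behind the "gated" node until
// the test releases them, which makes ordering-sensitive behavior repeatable.
void GateSketch() {
  std::shared_ptr<arrow::acero::Gate> gate = arrow::acero::Gate::Make();
  arrow::acero::GatedNodeOptions gated_opts(gate.get());
  // ... build and start a plan containing a "gated" node using gated_opts ...
  gate->ReleaseOneBatch();    // let exactly one held batch through
  gate->ReleaseAllBatches();  // then flush the rest
}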
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h
ADDED
@@ -0,0 +1,31 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/record_batch.h"
+#include "arrow/type_traits.h"
+
+namespace arrow::acero {
+
+// normalize the value to unsigned 64-bits while preserving ordering of values
+template <typename T, enable_if_t<std::is_integral<T>::value, bool> = true>
+uint64_t NormalizeTime(T t);
+
+uint64_t GetTime(const RecordBatch* batch, Type::type time_type, int col, uint64_t row);
+
+}  // namespace arrow::acero
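The header only declares NormalizeTime; one standard order-preserving signed-to-unsigned mapping (an assumption about the general technique, not necessarily this library's implementation) flips the sign bit after widening:

#include <cstdint>

// Illustration of the contract: x < y (as int64_t) iff f(x) < f(y) (as uint64_t).
uint64_t OrderPreservingNormalize(int64_t t) {
  return static_cast<uint64_t>(t) ^ (uint64_t{1} << 63);  // INT64_MIN maps to 0
}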
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/tpch_node.h
ADDED
@@ -0,0 +1,65 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+#include "arrow/acero/type_fwd.h"
+#include "arrow/acero/visibility.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+
+namespace arrow {
+namespace acero {
+namespace internal {
+
+class ARROW_ACERO_EXPORT TpchGen {
+ public:
+  virtual ~TpchGen() = default;
+
+  /*
+   * \brief Create a factory for nodes that generate TPC-H data
+   *
+   * Note: Individual tables will reference each other. It is important that you only
+   * create a single TpchGen instance for each plan and then you can create nodes for
+   * each table from that single TpchGen instance. Note: Every batch will be scheduled
+   * as a new task using the ExecPlan's scheduler.
+   */
+  static Result<std::unique_ptr<TpchGen>> Make(
+      ExecPlan* plan, double scale_factor = 1.0, int64_t batch_size = 4096,
+      std::optional<int64_t> seed = std::nullopt);
+
+  // The below methods will create and add an ExecNode to the plan that generates
+  // data for the desired table. If columns is empty, all columns will be generated.
+  // The methods return the added ExecNode, which should be used for inputs.
+  virtual Result<ExecNode*> Supplier(std::vector<std::string> columns = {}) = 0;
+  virtual Result<ExecNode*> Part(std::vector<std::string> columns = {}) = 0;
+  virtual Result<ExecNode*> PartSupp(std::vector<std::string> columns = {}) = 0;
+  virtual Result<ExecNode*> Customer(std::vector<std::string> columns = {}) = 0;
+  virtual Result<ExecNode*> Orders(std::vector<std::string> columns = {}) = 0;
+  virtual Result<ExecNode*> Lineitem(std::vector<std::string> columns = {}) = 0;
+  virtual Result<ExecNode*> Nation(std::vector<std::string> columns = {}) = 0;
+  virtual Result<ExecNode*> Region(std::vector<std::string> columns = {}) = 0;
+};
+
+}  // namespace internal
+}  // namespace acero
+}  // namespace arrow
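Following the note above about using a single TpchGen per plan, a hypothetical setup might look like this (node wiring downstream is elided):

#include "arrow/acero/tpch_node.h"

arrow::Status AddTpchInputs(arrow::acero::ExecPlan* plan) {
  ARROW_ASSIGN_OR_RAISE(
      auto gen, arrow::acero::internal::TpchGen::Make(plan, /*scale_factor=*/0.1));
  // Both nodes come from the same TpchGen instance, as the note requires.
  ARROW_ASSIGN_OR_RAISE(arrow::acero::ExecNode * orders, gen->Orders());
  ARROW_ASSIGN_OR_RAISE(arrow::acero::ExecNode * lineitem, gen->Lineitem());
  (void)orders;
  (void)lineitem;  // use these as inputs to downstream nodes
  return arrow::Status::OK();
}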
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/type_fwd.h
ADDED
@@ -0,0 +1,36 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/compute/type_fwd.h"
+
+namespace arrow {
+
+namespace acero {
+
+class ExecNode;
+class ExecPlan;
+class ExecNodeOptions;
+class ExecFactoryRegistry;
+class QueryContext;
+struct QueryOptions;
+struct Declaration;
+class SinkNodeConsumer;
+
+}  // namespace acero
+}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/unmaterialized_table.h
ADDED
@@ -0,0 +1,271 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <optional>
+#include <vector>
+#include "arrow/array/builder_base.h"
+#include "arrow/array/builder_binary.h"
+#include "arrow/array/builder_primitive.h"
+#include "arrow/memory_pool.h"
+#include "arrow/record_batch.h"
+#include "arrow/type_traits.h"
+#include "arrow/util/logging.h"
+
+namespace arrow::acero {
+
+/// Lightweight representation of a cell of an unmaterialized table.
+///
+struct CompositeEntry {
+  RecordBatch* batch;
+  uint64_t start;
+  uint64_t end;
+};
+
+// Forward declare the builder
+template <size_t MAX_COMPOSITE_TABLES>
+class UnmaterializedSliceBuilder;
+
+/// A table of composite reference rows. Rows maintain pointers to the
+/// constituent record batches, but the overall table retains shared_ptr
+/// references to ensure memory remains resident while the table is live.
+///
+/// The main reason for this is that, especially for wide tables, some operations
+/// such as sorted_merge or asof_join are effectively row-oriented, rather than
+/// column-oriented. Separating the join part from the columnar materialization
+/// part simplifies the logic around data types and increases efficiency.
+///
+/// We don't put the shared_ptr's into the rows for efficiency reasons. Use
+/// UnmaterializedSliceBuilder to add ranges of record batches to this table.
+template <size_t MAX_COMPOSITE_TABLES>
+class UnmaterializedCompositeTable {
+ public:
+  UnmaterializedCompositeTable(
+      const std::shared_ptr<arrow::Schema>& output_schema, size_t num_composite_tables,
+      std::unordered_map<int, std::pair<int, int>> output_col_to_src_,
+      arrow::MemoryPool* pool_ = arrow::default_memory_pool())
+      : schema(output_schema),
+        num_composite_tables(num_composite_tables),
+        output_col_to_src(std::move(output_col_to_src_)),
+        pool{pool_} {}
+
+  // Shallow wrappers around std::vector for performance
+  inline size_t capacity() { return slices.capacity(); }
+  inline void reserve(size_t num_slices) { slices.reserve(num_slices); }
+
+  inline size_t Size() const { return num_rows; }
+  inline size_t Empty() const { return num_rows == 0; }
+
+  Result<std::optional<std::shared_ptr<RecordBatch>>> Materialize() {
+    // Don't build empty batches
+    if (Empty()) {
+      return std::nullopt;
+    }
+    DCHECK_LE(Size(), (uint64_t)std::numeric_limits<int64_t>::max());
+    std::vector<std::shared_ptr<arrow::Array>> arrays(schema->num_fields());
+
+#define MATERIALIZE_CASE(id)                                                          \
+  case arrow::Type::id: {                                                             \
+    using T = typename arrow::TypeIdTraits<arrow::Type::id>::Type;                    \
+    ARROW_ASSIGN_OR_RAISE(arrays.at(i_col), materializeColumn<T>(field_type, i_col)); \
+    break;                                                                            \
+  }
+
+    // Build the arrays column-by-column from the rows
+    for (int i_col = 0; i_col < schema->num_fields(); ++i_col) {
+      const std::shared_ptr<arrow::Field>& field = schema->field(i_col);
+      const auto& field_type = field->type();
+
+      switch (field_type->id()) {
+        MATERIALIZE_CASE(BOOL)
+        MATERIALIZE_CASE(INT8)
+        MATERIALIZE_CASE(INT16)
+        MATERIALIZE_CASE(INT32)
+        MATERIALIZE_CASE(INT64)
+        MATERIALIZE_CASE(UINT8)
+        MATERIALIZE_CASE(UINT16)
+        MATERIALIZE_CASE(UINT32)
+        MATERIALIZE_CASE(UINT64)
+        MATERIALIZE_CASE(FLOAT)
+        MATERIALIZE_CASE(DOUBLE)
+        MATERIALIZE_CASE(DATE32)
+        MATERIALIZE_CASE(DATE64)
+        MATERIALIZE_CASE(TIME32)
+        MATERIALIZE_CASE(TIME64)
+        MATERIALIZE_CASE(TIMESTAMP)
+        MATERIALIZE_CASE(STRING)
+        MATERIALIZE_CASE(LARGE_STRING)
+        MATERIALIZE_CASE(BINARY)
+        MATERIALIZE_CASE(LARGE_BINARY)
+        default:
+          return arrow::Status::Invalid("Unsupported data type ",
+                                        field->type()->ToString(), " for field ",
+                                        field->name());
+      }
+    }
+
+#undef MATERIALIZE_CASE
+
+    std::shared_ptr<arrow::RecordBatch> r =
+        arrow::RecordBatch::Make(schema, (int64_t)num_rows, arrays);
+    return r;
+  }
+
+ private:
+  struct UnmaterializedSlice {
+    CompositeEntry components[MAX_COMPOSITE_TABLES];
+    size_t num_components;
+
+    inline int64_t Size() const {
+      if (num_components == 0) {
+        return 0;
+      }
+      return components[0].end - components[0].start;
+    }
+  };
+
+  // Mapping from an output column ID to a source table ID and column ID
+  std::shared_ptr<arrow::Schema> schema;
+  size_t num_composite_tables;
+  std::unordered_map<int, std::pair<int, int>> output_col_to_src;
+
+  arrow::MemoryPool* pool;
+
+  /// A map from address of a record batch to the record batch. Used to
+  /// maintain the lifetime of the record batch in case it goes out of scope
+  /// by the main exec node thread
+  std::unordered_map<uintptr_t, std::shared_ptr<arrow::RecordBatch>> ptr2Ref = {};
+  std::vector<UnmaterializedSlice> slices;
+
+  size_t num_rows = 0;
+
+  // for AddRecordBatchRef/AddSlice and access to UnmaterializedSlice
+  friend class UnmaterializedSliceBuilder<MAX_COMPOSITE_TABLES>;
+
+  void AddRecordBatchRef(const std::shared_ptr<arrow::RecordBatch>& ref) {
+    ptr2Ref[(uintptr_t)ref.get()] = ref;
+  }
+  void AddSlice(const UnmaterializedSlice& slice) {
+    slices.push_back(slice);
+    num_rows += slice.Size();
+  }
+
+  template <class Type, class Builder = typename TypeTraits<Type>::BuilderType>
+  enable_if_boolean<Type, Status> static BuilderAppend(
+      Builder& builder, const std::shared_ptr<ArrayData>& source, uint64_t row) {
+    if (source->IsNull(row)) {
+      builder.UnsafeAppendNull();
+      return Status::OK();
+    }
+    builder.UnsafeAppend(bit_util::GetBit(source->template GetValues<uint8_t>(1), row));
+    return Status::OK();
+  }
+
+  template <class Type, class Builder = typename TypeTraits<Type>::BuilderType>
+  enable_if_t<is_fixed_width_type<Type>::value && !is_boolean_type<Type>::value,
+              Status> static BuilderAppend(Builder& builder,
+                                           const std::shared_ptr<ArrayData>& source,
+                                           uint64_t row) {
+    if (source->IsNull(row)) {
+      builder.UnsafeAppendNull();
+      return Status::OK();
+    }
+    using CType = typename TypeTraits<Type>::CType;
+    builder.UnsafeAppend(source->template GetValues<CType>(1)[row]);
+    return Status::OK();
+  }
+
+  template <class Type, class Builder = typename TypeTraits<Type>::BuilderType>
+  enable_if_base_binary<Type, Status> static BuilderAppend(
+      Builder& builder, const std::shared_ptr<ArrayData>& source, uint64_t row) {
+    if (source->IsNull(row)) {
+      return builder.AppendNull();
+    }
+    using offset_type = typename Type::offset_type;
+    const uint8_t* data = source->buffers[2]->data();
+    const offset_type* offsets = source->GetValues<offset_type>(1);
+    const offset_type offset0 = offsets[row];
+    const offset_type offset1 = offsets[row + 1];
+    return builder.Append(data + offset0, offset1 - offset0);
+  }
+
+  template <class Type, class Builder = typename arrow::TypeTraits<Type>::BuilderType>
+  arrow::Result<std::shared_ptr<arrow::Array>> materializeColumn(
+      const std::shared_ptr<arrow::DataType>& type, int i_col) {
+    ARROW_ASSIGN_OR_RAISE(auto builderPtr, arrow::MakeBuilder(type, pool));
+    Builder& builder = *arrow::internal::checked_cast<Builder*>(builderPtr.get());
+    ARROW_RETURN_NOT_OK(builder.Reserve(num_rows));
+
+    const auto& [table_index, column_index] = output_col_to_src[i_col];
+
+    for (const auto& unmaterialized_slice : slices) {
+      const auto& [batch, start, end] = unmaterialized_slice.components[table_index];
+      if (batch) {
+        for (uint64_t rowNum = start; rowNum < end; ++rowNum) {
+          arrow::Status st = BuilderAppend<Type, Builder>(
+              builder, batch->column_data(column_index), rowNum);
+          ARROW_RETURN_NOT_OK(st);
+        }
+      } else {
+        for (uint64_t rowNum = start; rowNum < end; ++rowNum) {
+          ARROW_RETURN_NOT_OK(builder.AppendNull());
+        }
+      }
+    }
+    std::shared_ptr<arrow::Array> result;
+    ARROW_RETURN_NOT_OK(builder.Finish(&result));
+    return Result{std::move(result)};
+  }
+};
+
+/// A builder class that can append blocks of data to a row. A "slice"
+/// is built by horizontally concatenating record batches.
+template <size_t MAX_COMPOSITE_TABLES>
+class UnmaterializedSliceBuilder {
+ public:
+  explicit UnmaterializedSliceBuilder(
+      UnmaterializedCompositeTable<MAX_COMPOSITE_TABLES>* table_)
+      : table(table_) {}
+
+  void AddEntry(std::shared_ptr<RecordBatch> rb, uint64_t start, uint64_t end) {
+    if (rb) {
+      table->AddRecordBatchRef(rb);
+    }
+    if (slice.num_components) {
+      size_t last_index = slice.num_components - 1;
+      DCHECK_EQ(slice.components[last_index].end - slice.components[last_index].start,
+                end - start)
+          << "Slices should be the same length. ";
+    }
+    slice.components[slice.num_components++] = CompositeEntry{rb.get(), start, end};
+  }
+
+  void Finalize() { table->AddSlice(slice); }
+  int64_t Size() { return slice.Size(); }
+
+ private:
+  using TUnmaterializedCompositeTable =
+      UnmaterializedCompositeTable<MAX_COMPOSITE_TABLES>;
+  using TUnmaterializedSlice =
+      typename TUnmaterializedCompositeTable::UnmaterializedSlice;
+
+  TUnmaterializedCompositeTable* table;
+  TUnmaterializedSlice slice{};
+};
+
+}  // namespace arrow::acero
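A sketch of the builder/table interplay, assuming two equally sized input batches and a caller-supplied output schema and column mapping (all hypothetical names):

#include <unordered_map>
#include <utility>
#include "arrow/acero/unmaterialized_table.h"

using Table2 = arrow::acero::UnmaterializedCompositeTable<2>;
using Builder2 = arrow::acero::UnmaterializedSliceBuilder<2>;

arrow::Result<std::optional<std::shared_ptr<arrow::RecordBatch>>> Stitch(
    const std::shared_ptr<arrow::Schema>& out_schema,
    std::unordered_map<int, std::pair<int, int>> out_col_to_src,
    const std::shared_ptr<arrow::RecordBatch>& left,
    const std::shared_ptr<arrow::RecordBatch>& right) {
  Table2 table(out_schema, /*num_composite_tables=*/2, std::move(out_col_to_src));
  Builder2 builder(&table);
  // Both entries must cover ranges of equal length (checked by AddEntry).
  builder.AddEntry(left, 0, static_cast<uint64_t>(left->num_rows()));
  builder.AddEntry(right, 0, static_cast<uint64_t>(right->num_rows()));
  builder.Finalize();          // hands the slice (and its row count) to the table
  return table.Materialize();  // builds one output RecordBatch column by column
}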
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/util.h
ADDED
@@ -0,0 +1,184 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <atomic>
+#include <cstdint>
+#include <optional>
+#include <thread>
+#include <unordered_map>
+#include <vector>
+
+#include "arrow/acero/options.h"
+#include "arrow/acero/type_fwd.h"
+#include "arrow/buffer.h"
+#include "arrow/compute/expression.h"
+#include "arrow/compute/util.h"
+#include "arrow/memory_pool.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/util/bit_util.h"
+#include "arrow/util/cpu_info.h"
+#include "arrow/util/logging.h"
+#include "arrow/util/mutex.h"
+#include "arrow/util/thread_pool.h"
+#include "arrow/util/type_fwd.h"
+
+namespace arrow {
+
+namespace acero {
+
+ARROW_ACERO_EXPORT
+Status ValidateExecNodeInputs(ExecPlan* plan, const std::vector<ExecNode*>& inputs,
+                              int expected_num_inputs, const char* kind_name);
+
+ARROW_ACERO_EXPORT
+Result<std::shared_ptr<Table>> TableFromExecBatches(
+    const std::shared_ptr<Schema>& schema, const std::vector<ExecBatch>& exec_batches);
+
+class ARROW_ACERO_EXPORT AtomicCounter {
+ public:
+  AtomicCounter() = default;
+
+  int count() const { return count_.load(); }
+
+  std::optional<int> total() const {
+    int total = total_.load();
+    if (total == -1) return {};
+    return total;
+  }
+
+  // return true if the counter is complete
+  bool Increment() {
+    DCHECK_NE(count_.load(), total_.load());
+    int count = count_.fetch_add(1) + 1;
+    if (count != total_.load()) return false;
+    return DoneOnce();
+  }
+
+  // return true if the counter is complete
+  bool SetTotal(int total) {
+    total_.store(total);
+    if (count_.load() != total) return false;
+    return DoneOnce();
+  }
+
+  // return true if the counter has not already been completed
+  bool Cancel() { return DoneOnce(); }
+
+  // return true if the counter has finished or been cancelled
+  bool Completed() { return complete_.load(); }
+
+ private:
+  // ensure there is only one true return from Increment(), SetTotal(), or Cancel()
+  bool DoneOnce() {
+    bool expected = false;
+    return complete_.compare_exchange_strong(expected, true);
+  }
+
+  std::atomic<int> count_{0}, total_{-1};
+  std::atomic<bool> complete_{false};
+};
+
98 |
+
class ARROW_ACERO_EXPORT ThreadIndexer {
|
99 |
+
public:
|
100 |
+
size_t operator()();
|
101 |
+
|
102 |
+
static size_t Capacity();
|
103 |
+
|
104 |
+
private:
|
105 |
+
static size_t Check(size_t thread_index);
|
106 |
+
|
107 |
+
arrow::util::Mutex mutex_;
|
108 |
+
std::unordered_map<std::thread::id, size_t> id_to_index_;
|
109 |
+
};
|
110 |
+
|
111 |
+
/// \brief A consumer that collects results into an in-memory table
|
112 |
+
struct ARROW_ACERO_EXPORT TableSinkNodeConsumer : public SinkNodeConsumer {
|
113 |
+
public:
|
114 |
+
TableSinkNodeConsumer(std::shared_ptr<Table>* out, MemoryPool* pool)
|
115 |
+
: out_(out), pool_(pool) {}
|
116 |
+
Status Init(const std::shared_ptr<Schema>& schema,
|
117 |
+
BackpressureControl* backpressure_control, ExecPlan* plan) override;
|
118 |
+
Status Consume(ExecBatch batch) override;
|
119 |
+
Future<> Finish() override;
|
120 |
+
|
121 |
+
private:
|
122 |
+
std::shared_ptr<Table>* out_;
|
123 |
+
MemoryPool* pool_;
|
124 |
+
std::shared_ptr<Schema> schema_;
|
125 |
+
std::vector<std::shared_ptr<RecordBatch>> batches_;
|
126 |
+
arrow::util::Mutex consume_mutex_;
|
127 |
+
};
|
128 |
+
|
129 |
+
class ARROW_ACERO_EXPORT NullSinkNodeConsumer : public SinkNodeConsumer {
|
130 |
+
public:
|
131 |
+
Status Init(const std::shared_ptr<Schema>&, BackpressureControl*,
|
132 |
+
ExecPlan* plan) override {
|
133 |
+
return Status::OK();
|
134 |
+
}
|
135 |
+
Status Consume(ExecBatch exec_batch) override { return Status::OK(); }
|
136 |
+
Future<> Finish() override { return Status::OK(); }
|
137 |
+
|
138 |
+
public:
|
139 |
+
static std::shared_ptr<NullSinkNodeConsumer> Make() {
|
140 |
+
return std::make_shared<NullSinkNodeConsumer>();
|
141 |
+
}
|
142 |
+
};
|
143 |
+
|
144 |
+
/// CRTP helper for tracing helper functions
|
145 |
+
|
146 |
+
class ARROW_ACERO_EXPORT TracedNode {
|
147 |
+
public:
|
148 |
+
// All nodes should call TraceStartProducing or NoteStartProducing exactly once
|
149 |
+
// Most nodes will be fine with a call to NoteStartProducing since the StartProducing
|
150 |
+
// call is usually fairly cheap and simply schedules tasks to fetch the actual data.
|
151 |
+
|
152 |
+
explicit TracedNode(ExecNode* node) : node_(node) {}
|
153 |
+
|
154 |
+
// Create a span to record the StartProducing work
|
155 |
+
[[nodiscard]] ::arrow::internal::tracing::Scope TraceStartProducing(
|
156 |
+
std::string extra_details) const;
|
157 |
+
|
158 |
+
// Record a call to StartProducing without creating with a span
|
159 |
+
void NoteStartProducing(std::string extra_details) const;
|
160 |
+
|
161 |
+
// All nodes should call TraceInputReceived for each batch they receive. This call
|
162 |
+
// should track the time spent processing the batch. NoteInputReceived is available
|
163 |
+
// but usually won't be used unless a node is simply adding batches to a trivial queue.
|
164 |
+
|
165 |
+
// Create a span to record the InputReceived work
|
166 |
+
[[nodiscard]] ::arrow::internal::tracing::Scope TraceInputReceived(
|
167 |
+
const ExecBatch& batch) const;
|
168 |
+
|
169 |
+
// Record a call to InputReceived without creating with a span
|
170 |
+
void NoteInputReceived(const ExecBatch& batch) const;
|
171 |
+
|
172 |
+
// Create a span to record any "finish" work. This should NOT be called as part of
|
173 |
+
// InputFinished and many nodes may not need to call this at all. This should be used
|
174 |
+
// when a node has some extra work that has to be done once it has received all of its
|
175 |
+
// data. For example, an aggregation node calculating aggregations. This will
|
176 |
+
// typically be called as a result of InputFinished OR InputReceived.
|
177 |
+
[[nodiscard]] ::arrow::internal::tracing::Scope TraceFinish() const;
|
178 |
+
|
179 |
+
private:
|
180 |
+
ExecNode* node_;
|
181 |
+
};
|
182 |
+
|
183 |
+
} // namespace acero
|
184 |
+
} // namespace arrow
|
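Note (not part of the diff): AtomicCounter above is built so that exactly one of Increment(), SetTotal(), or Cancel() returns true, which lets a node run its completion step exactly once even when the batch total only becomes known mid-stream. A self-contained sketch, assuming the arrow_acero library is available to link against:

#include <iostream>
#include <thread>
#include <vector>

#include "arrow/acero/util.h"

int main() {
  arrow::acero::AtomicCounter counter;

  auto finish = [] { std::cout << "all batches seen\n"; };

  // Workers count batches before the total is known (total_ starts at -1).
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    workers.emplace_back([&] {
      if (counter.Increment()) finish();  // true only for the completing call
    });
  }

  // The producer announces the total after the workers are already running.
  // This returns true iff all four increments already happened; otherwise the
  // last Increment() above returned true instead. Never both.
  if (counter.SetTotal(4)) finish();

  for (auto& t : workers) t.join();
  return 0;
}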
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h
ADDED
@@ -0,0 +1,50 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This API is EXPERIMENTAL.
+
+#pragma once
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4251)
+#else
+#pragma GCC diagnostic ignored "-Wattributes"
+#endif
+
+#ifdef ARROW_ACERO_STATIC
+#define ARROW_ACERO_EXPORT
+#elif defined(ARROW_ACERO_EXPORTING)
+#define ARROW_ACERO_EXPORT __declspec(dllexport)
+#else
+#define ARROW_ACERO_EXPORT __declspec(dllimport)
+#endif
+
+#define ARROW_ACERO_NO_EXPORT
+#else  // Not Windows
+#ifndef ARROW_ACERO_EXPORT
+#define ARROW_ACERO_EXPORT __attribute__((visibility("default")))
+#endif
+#ifndef ARROW_ACERO_NO_EXPORT
+#define ARROW_ACERO_NO_EXPORT __attribute__((visibility("hidden")))
+#endif
+#endif  // Not-Windows
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
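Note (not part of the diff): a short sketch of how these visibility macros are consumed; the class names are hypothetical. Symbols marked ARROW_ACERO_EXPORT cross the shared-library boundary (dllexport/dllimport on Windows, default visibility elsewhere), while ARROW_ACERO_NO_EXPORT keeps internal helpers hidden.

#include "arrow/acero/visibility.h"

namespace arrow::acero {

// Exported: visible to users of the shared library (hypothetical example class).
class ARROW_ACERO_EXPORT PublicThing {
 public:
  int Value() const { return 42; }
};

// Hidden: an internal helper that never crosses the library boundary.
class ARROW_ACERO_NO_EXPORT InternalThing {};

}  // namespace arrow::acero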
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h
ADDED
@@ -0,0 +1,233 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+/// \file abi.h Arrow C Data Interface
+///
+/// The Arrow C Data interface defines a very small, stable set
+/// of C definitions which can be easily copied into any project's
+/// source code and vendored to be used for columnar data interchange
+/// in the Arrow format. For non-C/C++ languages and runtimes,
+/// it should be almost as easy to translate the C definitions into
+/// the corresponding C FFI declarations.
+///
+/// Applications and libraries can therefore work with Arrow memory
+/// without necessarily using the Arrow libraries or reinventing
+/// the wheel. Developers can choose between tight integration
+/// with the Arrow software project or minimal integration with
+/// the Arrow format only.
+
+#pragma once
+
+#include <stdint.h>
+
+// Spec and documentation: https://arrow.apache.org/docs/format/CDataInterface.html
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef ARROW_C_DATA_INTERFACE
+#define ARROW_C_DATA_INTERFACE
+
+#define ARROW_FLAG_DICTIONARY_ORDERED 1
+#define ARROW_FLAG_NULLABLE 2
+#define ARROW_FLAG_MAP_KEYS_SORTED 4
+
+struct ArrowSchema {
+  // Array type description
+  const char* format;
+  const char* name;
+  const char* metadata;
+  int64_t flags;
+  int64_t n_children;
+  struct ArrowSchema** children;
+  struct ArrowSchema* dictionary;
+
+  // Release callback
+  void (*release)(struct ArrowSchema*);
+  // Opaque producer-specific data
+  void* private_data;
+};
+
+struct ArrowArray {
+  // Array data description
+  int64_t length;
+  int64_t null_count;
+  int64_t offset;
+  int64_t n_buffers;
+  int64_t n_children;
+  const void** buffers;
+  struct ArrowArray** children;
+  struct ArrowArray* dictionary;
+
+  // Release callback
+  void (*release)(struct ArrowArray*);
+  // Opaque producer-specific data
+  void* private_data;
+};
+
+#endif  // ARROW_C_DATA_INTERFACE
+
+#ifndef ARROW_C_DEVICE_DATA_INTERFACE
+#define ARROW_C_DEVICE_DATA_INTERFACE
+
+// Spec and Documentation: https://arrow.apache.org/docs/format/CDeviceDataInterface.html
+
+// DeviceType for the allocated memory
+typedef int32_t ArrowDeviceType;
+
+// CPU device, same as using ArrowArray directly
+#define ARROW_DEVICE_CPU 1
+// CUDA GPU Device
+#define ARROW_DEVICE_CUDA 2
+// Pinned CUDA CPU memory by cudaMallocHost
+#define ARROW_DEVICE_CUDA_HOST 3
+// OpenCL Device
+#define ARROW_DEVICE_OPENCL 4
+// Vulkan buffer for next-gen graphics
+#define ARROW_DEVICE_VULKAN 7
+// Metal for Apple GPU
+#define ARROW_DEVICE_METAL 8
+// Verilog simulator buffer
+#define ARROW_DEVICE_VPI 9
+// ROCm GPUs for AMD GPUs
+#define ARROW_DEVICE_ROCM 10
+// Pinned ROCm CPU memory allocated by hipMallocHost
+#define ARROW_DEVICE_ROCM_HOST 11
+// Reserved for extension
+#define ARROW_DEVICE_EXT_DEV 12
+// CUDA managed/unified memory allocated by cudaMallocManaged
+#define ARROW_DEVICE_CUDA_MANAGED 13
+// unified shared memory allocated on a oneAPI non-partitioned device.
+#define ARROW_DEVICE_ONEAPI 14
+// GPU support for next-gen WebGPU standard
+#define ARROW_DEVICE_WEBGPU 15
+// Qualcomm Hexagon DSP
+#define ARROW_DEVICE_HEXAGON 16
+
+struct ArrowDeviceArray {
+  // the Allocated Array
+  //
+  // the buffers in the array (along with the buffers of any
+  // children) are what is allocated on the device.
+  struct ArrowArray array;
+  // The device id to identify a specific device
+  int64_t device_id;
+  // The type of device which can access this memory.
+  ArrowDeviceType device_type;
+  // An event-like object to synchronize on if needed.
+  void* sync_event;
+  // Reserved bytes for future expansion.
+  int64_t reserved[3];
+};
+
+#endif  // ARROW_C_DEVICE_DATA_INTERFACE
+
+#ifndef ARROW_C_STREAM_INTERFACE
+#define ARROW_C_STREAM_INTERFACE
+
+struct ArrowArrayStream {
+  // Callback to get the stream type
+  // (will be the same for all arrays in the stream).
+  //
+  // Return value: 0 if successful, an `errno`-compatible error code otherwise.
+  //
+  // If successful, the ArrowSchema must be released independently from the stream.
+  int (*get_schema)(struct ArrowArrayStream*, struct ArrowSchema* out);
+
+  // Callback to get the next array
+  // (if no error and the array is released, the stream has ended)
+  //
+  // Return value: 0 if successful, an `errno`-compatible error code otherwise.
+  //
+  // If successful, the ArrowArray must be released independently from the stream.
+  int (*get_next)(struct ArrowArrayStream*, struct ArrowArray* out);
+
+  // Callback to get optional detailed error information.
+  // This must only be called if the last stream operation failed
+  // with a non-0 return code.
+  //
+  // Return value: pointer to a null-terminated character array describing
+  // the last error, or NULL if no description is available.
+  //
+  // The returned pointer is only valid until the next operation on this stream
+  // (including release).
+  const char* (*get_last_error)(struct ArrowArrayStream*);
+
+  // Release callback: release the stream's own resources.
+  // Note that arrays returned by `get_next` must be individually released.
+  void (*release)(struct ArrowArrayStream*);
+
+  // Opaque producer-specific data
+  void* private_data;
+};
+
+#endif  // ARROW_C_STREAM_INTERFACE
+
+#ifndef ARROW_C_DEVICE_STREAM_INTERFACE
+#define ARROW_C_DEVICE_STREAM_INTERFACE
+
+// Equivalent to ArrowArrayStream, but for ArrowDeviceArrays.
+//
+// This stream is intended to provide a stream of data on a single
+// device, if a producer wants data to be produced on multiple devices
+// then multiple streams should be provided. One per device.
+struct ArrowDeviceArrayStream {
+  // The device that this stream produces data on.
+  ArrowDeviceType device_type;
+
+  // Callback to get the stream schema
+  // (will be the same for all arrays in the stream).
+  //
+  // Return value 0 if successful, an `errno`-compatible error code otherwise.
+  //
+  // If successful, the ArrowSchema must be released independently from the stream.
+  // The schema should be accessible via CPU memory.
+  int (*get_schema)(struct ArrowDeviceArrayStream* self, struct ArrowSchema* out);
+
+  // Callback to get the next array
+  // (if no error and the array is released, the stream has ended)
+  //
+  // Return value: 0 if successful, an `errno`-compatible error code otherwise.
+  //
+  // If successful, the ArrowDeviceArray must be released independently from the stream.
+  int (*get_next)(struct ArrowDeviceArrayStream* self, struct ArrowDeviceArray* out);
+
+  // Callback to get optional detailed error information.
+  // This must only be called if the last stream operation failed
+  // with a non-0 return code.
+  //
+  // Return value: pointer to a null-terminated character array describing
+  // the last error, or NULL if no description is available.
+  //
+  // The returned pointer is only valid until the next operation on this stream
+  // (including release).
+  const char* (*get_last_error)(struct ArrowDeviceArrayStream* self);
+
+  // Release callback: release the stream's own resources.
+  // Note that arrays returned by `get_next` must be individually released.
+  void (*release)(struct ArrowDeviceArrayStream* self);
+
+  // Opaque producer-specific data
+  void* private_data;
+};
+
+#endif  // ARROW_C_DEVICE_STREAM_INTERFACE
+
+#ifdef __cplusplus
+}
+#endif
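Note (not part of the diff): a consumer-side sketch of the ArrowArrayStream contract declared above. End-of-stream is signaled by get_next succeeding but leaving the output array released; get_last_error may only be consulted after a non-zero return. The release helpers come from arrow/c/helpers.h, added later in this diff.

#include <cstdio>

#include "arrow/c/abi.h"
#include "arrow/c/helpers.h"

void DrainStream(struct ArrowArrayStream* stream) {
  struct ArrowSchema schema;
  if (stream->get_schema(stream, &schema) != 0) {
    std::fprintf(stderr, "get_schema failed: %s\n", stream->get_last_error(stream));
    stream->release(stream);
    return;
  }
  // The schema is owned independently of the stream; a real consumer would
  // parse schema.format / schema.children before releasing it.
  ArrowSchemaRelease(&schema);

  for (;;) {
    struct ArrowArray array;
    if (stream->get_next(stream, &array) != 0) {
      std::fprintf(stderr, "get_next failed: %s\n", stream->get_last_error(stream));
      break;
    }
    if (ArrowArrayIsReleased(&array)) break;  // end of stream
    // ... consume array.length rows via array.buffers here ...
    ArrowArrayRelease(&array);  // each array is released individually
  }
  stream->release(stream);  // finally release the stream's own resources
}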
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/c/bridge.h
ADDED
@@ -0,0 +1,348 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <string>
+
+#include "arrow/c/abi.h"
+#include "arrow/device.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+/// \defgroup c-data-interface Functions for working with the C data interface.
+///
+/// @{
+
+/// \brief Export C++ DataType using the C data interface format.
+///
+/// The root type is considered to have empty name and metadata.
+/// If you want the root type to have a name and/or metadata, pass
+/// a Field instead.
+///
+/// \param[in] type DataType object to export
+/// \param[out] out C struct where to export the datatype
+ARROW_EXPORT
+Status ExportType(const DataType& type, struct ArrowSchema* out);
+
+/// \brief Export C++ Field using the C data interface format.
+///
+/// \param[in] field Field object to export
+/// \param[out] out C struct where to export the field
+ARROW_EXPORT
+Status ExportField(const Field& field, struct ArrowSchema* out);
+
+/// \brief Export C++ Schema using the C data interface format.
+///
+/// \param[in] schema Schema object to export
+/// \param[out] out C struct where to export the field
+ARROW_EXPORT
+Status ExportSchema(const Schema& schema, struct ArrowSchema* out);
+
+/// \brief Export C++ Array using the C data interface format.
+///
+/// The resulting ArrowArray struct keeps the array data and buffers alive
+/// until its release callback is called by the consumer.
+///
+/// \param[in] array Array object to export
+/// \param[out] out C struct where to export the array
+/// \param[out] out_schema optional C struct where to export the array type
+ARROW_EXPORT
+Status ExportArray(const Array& array, struct ArrowArray* out,
+                   struct ArrowSchema* out_schema = NULLPTR);
+
+/// \brief Export C++ RecordBatch using the C data interface format.
+///
+/// The record batch is exported as if it were a struct array.
+/// The resulting ArrowArray struct keeps the record batch data and buffers alive
+/// until its release callback is called by the consumer.
+///
+/// \param[in] batch Record batch to export
+/// \param[out] out C struct where to export the record batch
+/// \param[out] out_schema optional C struct where to export the record batch schema
+ARROW_EXPORT
+Status ExportRecordBatch(const RecordBatch& batch, struct ArrowArray* out,
+                         struct ArrowSchema* out_schema = NULLPTR);
+
+/// \brief Import C++ DataType from the C data interface.
+///
+/// The given ArrowSchema struct is released (as per the C data interface
+/// specification), even if this function fails.
+///
+/// \param[in,out] schema C data interface struct representing the data type
+/// \return Imported type object
+ARROW_EXPORT
+Result<std::shared_ptr<DataType>> ImportType(struct ArrowSchema* schema);
+
+/// \brief Import C++ Field from the C data interface.
+///
+/// The given ArrowSchema struct is released (as per the C data interface
+/// specification), even if this function fails.
+///
+/// \param[in,out] schema C data interface struct representing the field
+/// \return Imported field object
+ARROW_EXPORT
+Result<std::shared_ptr<Field>> ImportField(struct ArrowSchema* schema);
+
+/// \brief Import C++ Schema from the C data interface.
+///
+/// The given ArrowSchema struct is released (as per the C data interface
+/// specification), even if this function fails.
+///
+/// \param[in,out] schema C data interface struct representing the field
+/// \return Imported field object
+ARROW_EXPORT
+Result<std::shared_ptr<Schema>> ImportSchema(struct ArrowSchema* schema);
+
+/// \brief Import C++ array from the C data interface.
+///
+/// The ArrowArray struct has its contents moved (as per the C data interface
+/// specification) to a private object held alive by the resulting array.
+///
+/// \param[in,out] array C data interface struct holding the array data
+/// \param[in] type type of the imported array
+/// \return Imported array object
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> ImportArray(struct ArrowArray* array,
+                                           std::shared_ptr<DataType> type);
+
+/// \brief Import C++ array and its type from the C data interface.
+///
+/// The ArrowArray struct has its contents moved (as per the C data interface
+/// specification) to a private object held alive by the resulting array.
+/// The ArrowSchema struct is released, even if this function fails.
+///
+/// \param[in,out] array C data interface struct holding the array data
+/// \param[in,out] type C data interface struct holding the array type
+/// \return Imported array object
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> ImportArray(struct ArrowArray* array,
+                                           struct ArrowSchema* type);
+
+/// \brief Import C++ record batch from the C data interface.
+///
+/// The ArrowArray struct has its contents moved (as per the C data interface
+/// specification) to a private object held alive by the resulting record batch.
+///
+/// \param[in,out] array C data interface struct holding the record batch data
+/// \param[in] schema schema of the imported record batch
+/// \return Imported record batch object
+ARROW_EXPORT
+Result<std::shared_ptr<RecordBatch>> ImportRecordBatch(struct ArrowArray* array,
+                                                       std::shared_ptr<Schema> schema);
+
+/// \brief Import C++ record batch and its schema from the C data interface.
+///
+/// The type represented by the ArrowSchema struct must be a struct type array.
+/// The ArrowArray struct has its contents moved (as per the C data interface
+/// specification) to a private object held alive by the resulting record batch.
+/// The ArrowSchema struct is released, even if this function fails.
+///
+/// \param[in,out] array C data interface struct holding the record batch data
+/// \param[in,out] schema C data interface struct holding the record batch schema
+/// \return Imported record batch object
+ARROW_EXPORT
+Result<std::shared_ptr<RecordBatch>> ImportRecordBatch(struct ArrowArray* array,
+                                                       struct ArrowSchema* schema);
+
+/// @}
+
+/// \defgroup c-data-device-interface Functions for working with the C data device
+/// interface.
+///
+/// @{
+
+/// \brief EXPERIMENTAL: Export C++ Array as an ArrowDeviceArray.
+///
+/// The resulting ArrowDeviceArray struct keeps the array data and buffers alive
+/// until its release callback is called by the consumer. All buffers in
+/// the provided array MUST have the same device_type, otherwise an error
+/// will be returned.
+///
+/// If sync is non-null, get_event will be called on it in order to
+/// potentially provide an event for consumers to synchronize on.
+///
+/// \param[in] array Array object to export
+/// \param[in] sync shared_ptr to object derived from Device::SyncEvent or null
+/// \param[out] out C struct to export the array to
+/// \param[out] out_schema optional C struct to export the array type to
+ARROW_EXPORT
+Status ExportDeviceArray(const Array& array, std::shared_ptr<Device::SyncEvent> sync,
+                         struct ArrowDeviceArray* out,
+                         struct ArrowSchema* out_schema = NULLPTR);
+
+/// \brief EXPERIMENTAL: Export C++ RecordBatch as an ArrowDeviceArray.
+///
+/// The record batch is exported as if it were a struct array.
+/// The resulting ArrowDeviceArray struct keeps the record batch data and buffers alive
+/// until its release callback is called by the consumer.
+///
+/// All buffers of all columns in the record batch must have the same device_type
+/// otherwise an error will be returned. If columns are on different devices,
+/// they should be exported using different ArrowDeviceArray instances.
+///
+/// If sync is non-null, get_event will be called on it in order to
+/// potentially provide an event for consumers to synchronize on.
+///
+/// \param[in] batch Record batch to export
+/// \param[in] sync shared_ptr to object derived from Device::SyncEvent or null
+/// \param[out] out C struct where to export the record batch
+/// \param[out] out_schema optional C struct where to export the record batch schema
+ARROW_EXPORT
+Status ExportDeviceRecordBatch(const RecordBatch& batch,
+                               std::shared_ptr<Device::SyncEvent> sync,
+                               struct ArrowDeviceArray* out,
+                               struct ArrowSchema* out_schema = NULLPTR);
+
+using DeviceMemoryMapper =
+    std::function<Result<std::shared_ptr<MemoryManager>>(ArrowDeviceType, int64_t)>;
+
+ARROW_EXPORT
+Result<std::shared_ptr<MemoryManager>> DefaultDeviceMemoryMapper(
+    ArrowDeviceType device_type, int64_t device_id);
+
+/// \brief EXPERIMENTAL: Import C++ device array from the C data interface.
+///
+/// The ArrowArray struct has its contents moved (as per the C data interface
+/// specification) to a private object held alive by the resulting array. The
+/// buffers of the Array are located on the device indicated by the device_type.
+///
+/// \param[in,out] array C data interface struct holding the array data
+/// \param[in] type type of the imported array
+/// \param[in] mapper A function to map device + id to memory manager. If not
+/// specified, defaults to map "cpu" to the built-in default memory manager.
+/// \return Imported array object
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> ImportDeviceArray(
+    struct ArrowDeviceArray* array, std::shared_ptr<DataType> type,
+    const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper);
+
+/// \brief EXPERIMENTAL: Import C++ device array and its type from the C data interface.
+///
+/// The ArrowArray struct has its contents moved (as per the C data interface
+/// specification) to a private object held alive by the resulting array.
+/// The ArrowSchema struct is released, even if this function fails. The
+/// buffers of the Array are located on the device indicated by the device_type.
+///
+/// \param[in,out] array C data interface struct holding the array data
+/// \param[in,out] type C data interface struct holding the array type
+/// \param[in] mapper A function to map device + id to memory manager. If not
+/// specified, defaults to map "cpu" to the built-in default memory manager.
+/// \return Imported array object
+ARROW_EXPORT
+Result<std::shared_ptr<Array>> ImportDeviceArray(
+    struct ArrowDeviceArray* array, struct ArrowSchema* type,
+    const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper);
+
+/// \brief EXPERIMENTAL: Import C++ record batch with buffers on a device from the C data
+/// interface.
+///
+/// The ArrowArray struct has its contents moved (as per the C data interface
+/// specification) to a private object held alive by the resulting record batch.
+/// The buffers of all columns of the record batch are located on the device
+/// indicated by the device type.
+///
+/// \param[in,out] array C data interface struct holding the record batch data
+/// \param[in] schema schema of the imported record batch
+/// \param[in] mapper A function to map device + id to memory manager. If not
+/// specified, defaults to map "cpu" to the built-in default memory manager.
+/// \return Imported record batch object
+ARROW_EXPORT
+Result<std::shared_ptr<RecordBatch>> ImportDeviceRecordBatch(
+    struct ArrowDeviceArray* array, std::shared_ptr<Schema> schema,
+    const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper);
+
+/// \brief EXPERIMENTAL: Import C++ record batch with buffers on a device and its schema
+/// from the C data interface.
+///
+/// The type represented by the ArrowSchema struct must be a struct type array.
+/// The ArrowArray struct has its contents moved (as per the C data interface
+/// specification) to a private object held alive by the resulting record batch.
+/// The ArrowSchema struct is released, even if this function fails. The buffers
+/// of all columns of the record batch are located on the device indicated by the
+/// device type.
+///
+/// \param[in,out] array C data interface struct holding the record batch data
+/// \param[in,out] schema C data interface struct holding the record batch schema
+/// \param[in] mapper A function to map device + id to memory manager. If not
+/// specified, defaults to map "cpu" to the built-in default memory manager.
+/// \return Imported record batch object
+ARROW_EXPORT
+Result<std::shared_ptr<RecordBatch>> ImportDeviceRecordBatch(
+    struct ArrowDeviceArray* array, struct ArrowSchema* schema,
+    const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper);
+
+/// @}
+
+/// \defgroup c-stream-interface Functions for working with the C data interface.
+///
+/// @{
+
+/// \brief Export C++ RecordBatchReader using the C stream interface.
+///
+/// The resulting ArrowArrayStream struct keeps the record batch reader alive
+/// until its release callback is called by the consumer.
+///
+/// \param[in] reader RecordBatchReader object to export
+/// \param[out] out C struct where to export the stream
+ARROW_EXPORT
+Status ExportRecordBatchReader(std::shared_ptr<RecordBatchReader> reader,
+                               struct ArrowArrayStream* out);
+
+/// \brief Export C++ ChunkedArray using the C data interface format.
+///
+/// The resulting ArrowArrayStream struct keeps the chunked array data and buffers alive
+/// until its release callback is called by the consumer.
+///
+/// \param[in] chunked_array ChunkedArray object to export
+/// \param[out] out C struct where to export the stream
+ARROW_EXPORT
+Status ExportChunkedArray(std::shared_ptr<ChunkedArray> chunked_array,
+                          struct ArrowArrayStream* out);
+
+/// \brief Import C++ RecordBatchReader from the C stream interface.
+///
+/// The ArrowArrayStream struct has its contents moved to a private object
+/// held alive by the resulting record batch reader.
+///
+/// \param[in,out] stream C stream interface struct
+/// \return Imported RecordBatchReader object
+ARROW_EXPORT
+Result<std::shared_ptr<RecordBatchReader>> ImportRecordBatchReader(
+    struct ArrowArrayStream* stream);
+
+/// \brief Import C++ ChunkedArray from the C stream interface
+///
+/// The ArrowArrayStream struct has its contents moved to a private object,
+/// is consumed in its entirity, and released before returning all chunks
+/// as a ChunkedArray.
+///
+/// \param[in,out] stream C stream interface struct
+/// \return Imported ChunkedArray object
+ARROW_EXPORT
+Result<std::shared_ptr<ChunkedArray>> ImportChunkedArray(struct ArrowArrayStream* stream);
+
+/// @}
+
+}  // namespace arrow
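Note (not part of the diff): a minimal round trip through the bridge functions declared above, exporting an arrow::Array into the C structs and importing it back. In practice the producer and consumer would be different libraries or language runtimes; here both halves live in one function for illustration.

#include <memory>

#include "arrow/api.h"
#include "arrow/c/abi.h"
#include "arrow/c/bridge.h"

arrow::Status RoundTrip() {
  // Build a small int64 array to act as the producer's data.
  arrow::Int64Builder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1, 2, 3}));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> array, builder.Finish());

  // Producer side: fill the two C structs; they keep the buffers alive.
  struct ArrowArray c_array;
  struct ArrowSchema c_schema;
  ARROW_RETURN_NOT_OK(arrow::ExportArray(*array, &c_array, &c_schema));

  // Consumer side: importing moves the structs' contents and takes ownership,
  // so no explicit release calls are needed after this point.
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> imported,
                        arrow::ImportArray(&c_array, &c_schema));
  return imported->Equals(*array) ? arrow::Status::OK()
                                  : arrow::Status::Invalid("round trip mismatch");
}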
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h
ADDED
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/array/array_base.h"
+#include "arrow/c/dlpack_abi.h"
+
+namespace arrow::dlpack {
+
+/// \brief Export Arrow array as DLPack tensor.
+///
+/// DLMangedTensor is produced as defined by the DLPack protocol,
+/// see https://dmlc.github.io/dlpack/latest/.
+///
+/// Data types for which the protocol is supported are
+/// integer and floating-point data types.
+///
+/// DLPack protocol only supports arrays with one contiguous
+/// memory region which means Arrow Arrays with validity buffers
+/// are not supported.
+///
+/// \param[in] arr Arrow array
+/// \return DLManagedTensor struct
+ARROW_EXPORT
+Result<DLManagedTensor*> ExportArray(const std::shared_ptr<Array>& arr);
+
+/// \brief Get DLDevice with enumerator specifying the
+/// type of the device data is stored on and index of the
+/// device which is 0 by default for CPU.
+///
+/// \param[in] arr Arrow array
+/// \return DLDevice struct
+ARROW_EXPORT
+Result<DLDevice> ExportDevice(const std::shared_ptr<Array>& arr);
+
+}  // namespace arrow::dlpack
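Note (not part of the diff): a usage sketch for the DLPack bridge above. Export only succeeds for integer/floating-point arrays without a validity bitmap; ownership of the returned tensor passes to whoever eventually calls its deleter. The function name ShareAsTensor is illustrative.

#include <memory>

#include "arrow/api.h"
#include "arrow/c/dlpack.h"
#include "arrow/c/dlpack_abi.h"

arrow::Status ShareAsTensor(const std::shared_ptr<arrow::Array>& values) {
  // Fails for types outside int/float or for arrays carrying a validity bitmap.
  ARROW_ASSIGN_OR_RAISE(DLManagedTensor* tensor, arrow::dlpack::ExportArray(values));

  // A DLPack-aware consumer would read tensor->dl_tensor.{data, shape, ndim,
  // dtype} here. Whoever receives the tensor must call the deleter exactly once;
  // since this sketch consumes it locally, we do that ourselves.
  tensor->deleter(tensor);
  return arrow::Status::OK();
}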
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h
ADDED
@@ -0,0 +1,321 @@
+// Taken from:
+// https://github.com/dmlc/dlpack/blob/ca4d00ad3e2e0f410eeab3264d21b8a39397f362/include/dlpack/dlpack.h
+/*!
+ *  Copyright (c) 2017 by Contributors
+ * \file dlpack.h
+ * \brief The common header of DLPack.
+ */
+#ifndef DLPACK_DLPACK_H_
+#define DLPACK_DLPACK_H_
+
+/**
+ * \brief Compatibility with C++
+ */
+#ifdef __cplusplus
+#define DLPACK_EXTERN_C extern "C"
+#else
+#define DLPACK_EXTERN_C
+#endif
+
+/*! \brief The current major version of dlpack */
+#define DLPACK_MAJOR_VERSION 1
+
+/*! \brief The current minor version of dlpack */
+#define DLPACK_MINOR_VERSION 0
+
+/*! \brief DLPACK_DLL prefix for windows */
+#ifdef _WIN32
+#ifdef DLPACK_EXPORTS
+#define DLPACK_DLL __declspec(dllexport)
+#else
+#define DLPACK_DLL __declspec(dllimport)
+#endif
+#else
+#define DLPACK_DLL
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * \brief The DLPack version.
+ *
+ * A change in major version indicates that we have changed the
+ * data layout of the ABI - DLManagedTensorVersioned.
+ *
+ * A change in minor version indicates that we have added new
+ * code, such as a new device type, but the ABI is kept the same.
+ *
+ * If an obtained DLPack tensor has a major version that disagrees
+ * with the version number specified in this header file
+ * (i.e. major != DLPACK_MAJOR_VERSION), the consumer must call the deleter
+ * (and it is safe to do so). It is not safe to access any other fields
+ * as the memory layout will have changed.
+ *
+ * In the case of a minor version mismatch, the tensor can be safely used as
+ * long as the consumer knows how to interpret all fields. Minor version
+ * updates indicate the addition of enumeration values.
+ */
+typedef struct {
+  /*! \brief DLPack major version. */
+  uint32_t major;
+  /*! \brief DLPack minor version. */
+  uint32_t minor;
+} DLPackVersion;
+
+/*!
+ * \brief The device type in DLDevice.
+ */
+#ifdef __cplusplus
+typedef enum : int32_t {
+#else
+typedef enum {
+#endif
+  /*! \brief CPU device */
+  kDLCPU = 1,
+  /*! \brief CUDA GPU device */
+  kDLCUDA = 2,
+  /*!
+   * \brief Pinned CUDA CPU memory by cudaMallocHost
+   */
+  kDLCUDAHost = 3,
+  /*! \brief OpenCL devices. */
+  kDLOpenCL = 4,
+  /*! \brief Vulkan buffer for next generation graphics. */
+  kDLVulkan = 7,
+  /*! \brief Metal for Apple GPU. */
+  kDLMetal = 8,
+  /*! \brief Verilog simulator buffer */
+  kDLVPI = 9,
+  /*! \brief ROCm GPUs for AMD GPUs */
+  kDLROCM = 10,
+  /*!
+   * \brief Pinned ROCm CPU memory allocated by hipMallocHost
+   */
+  kDLROCMHost = 11,
+  /*!
+   * \brief Reserved extension device type,
+   * used for quickly test extension device
+   * The semantics can differ depending on the implementation.
+   */
+  kDLExtDev = 12,
+  /*!
+   * \brief CUDA managed/unified memory allocated by cudaMallocManaged
+   */
+  kDLCUDAManaged = 13,
+  /*!
+   * \brief Unified shared memory allocated on a oneAPI non-partititioned
+   * device. Call to oneAPI runtime is required to determine the device
+   * type, the USM allocation type and the sycl context it is bound to.
+   *
+   */
+  kDLOneAPI = 14,
+  /*! \brief GPU support for next generation WebGPU standard. */
+  kDLWebGPU = 15,
+  /*! \brief Qualcomm Hexagon DSP */
+  kDLHexagon = 16,
+} DLDeviceType;
+
+/*!
+ * \brief A Device for Tensor and operator.
+ */
+typedef struct {
+  /*! \brief The device type used in the device. */
+  DLDeviceType device_type;
+  /*!
+   * \brief The device index.
+   * For vanilla CPU memory, pinned memory, or managed memory, this is set to 0.
+   */
+  int32_t device_id;
+} DLDevice;
+
+/*!
+ * \brief The type code options DLDataType.
+ */
+typedef enum {
+  /*! \brief signed integer */
+  kDLInt = 0U,
+  /*! \brief unsigned integer */
+  kDLUInt = 1U,
+  /*! \brief IEEE floating point */
+  kDLFloat = 2U,
+  /*!
+   * \brief Opaque handle type, reserved for testing purposes.
+   * Frameworks need to agree on the handle data type for the exchange to be well-defined.
+   */
+  kDLOpaqueHandle = 3U,
+  /*! \brief bfloat16 */
+  kDLBfloat = 4U,
+  /*!
+   * \brief complex number
+   * (C/C++/Python layout: compact struct per complex number)
+   */
+  kDLComplex = 5U,
+  /*! \brief boolean */
+  kDLBool = 6U,
+} DLDataTypeCode;
+
+/*!
+ * \brief The data type the tensor can hold. The data type is assumed to follow the
+ * native endian-ness. An explicit error message should be raised when attempting to
+ * export an array with non-native endianness
+ *
+ *  Examples
+ *   - float: type_code = 2, bits = 32, lanes = 1
+ *   - float4(vectorized 4 float): type_code = 2, bits = 32, lanes = 4
+ *   - int8: type_code = 0, bits = 8, lanes = 1
+ *   - std::complex<float>: type_code = 5, bits = 64, lanes = 1
+ *   - bool: type_code = 6, bits = 8, lanes = 1 (as per common array library convention,
+ *     the underlying storage size of bool is 8 bits)
+ */
+typedef struct {
+  /*!
+   * \brief Type code of base types.
+   * We keep it uint8_t instead of DLDataTypeCode for minimal memory
+   * footprint, but the value should be one of DLDataTypeCode enum values.
+   * */
+  uint8_t code;
+  /*!
+   * \brief Number of bits, common choices are 8, 16, 32.
+   */
+  uint8_t bits;
+  /*! \brief Number of lanes in the type, used for vector types. */
+  uint16_t lanes;
+} DLDataType;
+
+/*!
+ * \brief Plain C Tensor object, does not manage memory.
+ */
+typedef struct {
+  /*!
+   * \brief The data pointer points to the allocated data. This will be CUDA
+   * device pointer or cl_mem handle in OpenCL. It may be opaque on some device
+   * types. This pointer is always aligned to 256 bytes as in CUDA. The
+   * `byte_offset` field should be used to point to the beginning of the data.
+   *
+   * Note that as of Nov 2021, multiply libraries (CuPy, PyTorch, TensorFlow,
+   * TVM, perhaps others) do not adhere to this 256 byte aligment requirement
+   * on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed
+   * (after which this note will be updated); at the moment it is recommended
+   * to not rely on the data pointer being correctly aligned.
+   *
+   * For given DLTensor, the size of memory required to store the contents of
+   * data is calculated as follows:
+   *
+   * \code{.c}
+   * static inline size_t GetDataSize(const DLTensor* t) {
+   *   size_t size = 1;
+   *   for (tvm_index_t i = 0; i < t->ndim; ++i) {
+   *     size *= t->shape[i];
+   *   }
+   *   size *= (t->dtype.bits * t->dtype.lanes + 7) / 8;
+   *   return size;
+   * }
+   * \endcode
+   */
+  void* data;
+  /*! \brief The device of the tensor */
+  DLDevice device;
+  /*! \brief Number of dimensions */
+  int32_t ndim;
+  /*! \brief The data type of the pointer*/
+  DLDataType dtype;
+  /*! \brief The shape of the tensor */
+  int64_t* shape;
+  /*!
+   * \brief strides of the tensor (in number of elements, not bytes)
+   *  can be NULL, indicating tensor is compact and row-majored.
+   */
+  int64_t* strides;
+  /*! \brief The offset in bytes to the beginning pointer to data */
+  uint64_t byte_offset;
+} DLTensor;
+
+/*!
+ * \brief C Tensor object, manage memory of DLTensor. This data structure is
+ *  intended to facilitate the borrowing of DLTensor by another framework. It is
+ *  not meant to transfer the tensor. When the borrowing framework doesn't need
+ *  the tensor, it should call the deleter to notify the host that the resource
+ *  is no longer needed.
+ *
+ * \note This data structure is used as Legacy DLManagedTensor
+ *       in DLPack exchange and is deprecated after DLPack v0.8
+ *       Use DLManagedTensorVersioned instead.
+ *       This data structure may get renamed or deleted in future versions.
+ *
+ * \sa DLManagedTensorVersioned
+ */
+typedef struct DLManagedTensor {
+  /*! \brief DLTensor which is being memory managed */
+  DLTensor dl_tensor;
+  /*! \brief the context of the original host framework of DLManagedTensor in
+   *   which DLManagedTensor is used in the framework. It can also be NULL.
+   */
+  void* manager_ctx;
+  /*!
+   * \brief Destructor - this should be called
+   * to destruct the manager_ctx which backs the DLManagedTensor. It can be
+   * NULL if there is no way for the caller to provide a reasonable destructor.
+   * The destructors deletes the argument self as well.
+   */
+  void (*deleter)(struct DLManagedTensor* self);
+} DLManagedTensor;
+
+// bit masks used in in the DLManagedTensorVersioned
+
+/*! \brief bit mask to indicate that the tensor is read only. */
+#define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL)
+
+/*!
+ * \brief A versioned and managed C Tensor object, manage memory of DLTensor.
+ *
+ * This data structure is intended to facilitate the borrowing of DLTensor by
+ * another framework. It is not meant to transfer the tensor. When the borrowing
+ * framework doesn't need the tensor, it should call the deleter to notify the
+ * host that the resource is no longer needed.
+ *
+ * \note This is the current standard DLPack exchange data structure.
+ */
+struct DLManagedTensorVersioned {
+  /*!
+   * \brief The API and ABI version of the current managed Tensor
+   */
+  DLPackVersion version;
+  /*!
+   * \brief the context of the original host framework.
+   *
+   * Stores DLManagedTensorVersioned is used in the
+   * framework. It can also be NULL.
+   */
+  void* manager_ctx;
+  /*!
+   * \brief Destructor.
+   *
+   * This should be called to destruct manager_ctx which holds the
+   * DLManagedTensorVersioned. It can be NULL if there is no way for the caller to provide
+   * a reasonable destructor. The destructors deletes the argument self as well.
+   */
+  void (*deleter)(struct DLManagedTensorVersioned* self);
+  /*!
+   * \brief Additional bitmask flags information about the tensor.
+   *
+   * By default the flags should be set to 0.
+   *
+   * \note Future ABI changes should keep everything until this field
+   *       stable, to ensure that deleter can be correctly called.
+   *
+   * \sa DLPACK_FLAG_BITMASK_READ_ONLY
+   */
+  uint64_t flags;
+  /*! \brief DLTensor which is being memory managed */
+  DLTensor dl_tensor;
+};
+
+#ifdef __cplusplus
+}  // DLPACK_EXTERN_C
+#endif
+#endif  // DLPACK_DLPACK_H_
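Note (not part of the diff): the DLTensor documentation above gives the byte-size formula in a comment; here it is as a self-contained program. For shape {4, 5} with 32-bit, 3-lane elements the size is 4 * 5 * ((32 * 3 + 7) / 8) = 240 bytes.

#include <cstdint>
#include <cstdio>

#include "arrow/c/dlpack_abi.h"

// The computation from the DLTensor doc comment, with the TVM-specific
// tvm_index_t replaced by int32_t to match DLTensor::ndim.
static size_t GetDataSize(const DLTensor* t) {
  size_t size = 1;
  for (int32_t i = 0; i < t->ndim; ++i) {
    size *= static_cast<size_t>(t->shape[i]);
  }
  // Round the per-element bit width up to whole bytes.
  size *= (t->dtype.bits * t->dtype.lanes + 7) / 8;
  return size;
}

int main() {
  int64_t shape[2] = {4, 5};
  DLTensor t = {};
  t.ndim = 2;
  t.shape = shape;
  t.dtype.code = kDLFloat;  // IEEE float
  t.dtype.bits = 32;
  t.dtype.lanes = 3;        // vectorized: three floats per element
  std::printf("%zu\n", GetDataSize(&t));  // prints 240
  return 0;
}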
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/c/helpers.h
ADDED
@@ -0,0 +1,129 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "arrow/c/abi.h"
+
+#define ARROW_C_ASSERT(condition, msg)                             \
+  do {                                                             \
+    if (!(condition)) {                                            \
+      fprintf(stderr, "%s:%d:: %s", __FILE__, __LINE__, (msg));    \
+      abort();                                                     \
+    }                                                              \
+  } while (0)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// Query whether the C schema is released
+inline int ArrowSchemaIsReleased(const struct ArrowSchema* schema) {
+  return schema->release == NULL;
+}
+
+/// Mark the C schema released (for use in release callbacks)
+inline void ArrowSchemaMarkReleased(struct ArrowSchema* schema) {
+  schema->release = NULL;
+}
+
+/// Move the C schema from `src` to `dest`
+///
+/// Note `dest` must *not* point to a valid schema already, otherwise there
+/// will be a memory leak.
+inline void ArrowSchemaMove(struct ArrowSchema* src, struct ArrowSchema* dest) {
+  assert(dest != src);
+  assert(!ArrowSchemaIsReleased(src));
+  memcpy(dest, src, sizeof(struct ArrowSchema));
+  ArrowSchemaMarkReleased(src);
+}
+
+/// Release the C schema, if necessary, by calling its release callback
+inline void ArrowSchemaRelease(struct ArrowSchema* schema) {
+  if (!ArrowSchemaIsReleased(schema)) {
+    schema->release(schema);
+    ARROW_C_ASSERT(ArrowSchemaIsReleased(schema),
+                   "ArrowSchemaRelease did not cleanup release callback");
+  }
+}
+
+/// Query whether the C array is released
+inline int ArrowArrayIsReleased(const struct ArrowArray* array) {
+  return array->release == NULL;
+}
+
+/// Mark the C array released (for use in release callbacks)
+inline void ArrowArrayMarkReleased(struct ArrowArray* array) { array->release = NULL; }
+
+/// Move the C array from `src` to `dest`
+///
+/// Note `dest` must *not* point to a valid array already, otherwise there
+/// will be a memory leak.
+inline void ArrowArrayMove(struct ArrowArray* src, struct ArrowArray* dest) {
+  assert(dest != src);
+  assert(!ArrowArrayIsReleased(src));
+  memcpy(dest, src, sizeof(struct ArrowArray));
+  ArrowArrayMarkReleased(src);
+}
+
+/// Release the C array, if necessary, by calling its release callback
+inline void ArrowArrayRelease(struct ArrowArray* array) {
+  if (!ArrowArrayIsReleased(array)) {
+    array->release(array);
+    ARROW_C_ASSERT(ArrowArrayIsReleased(array),
+                   "ArrowArrayRelease did not cleanup release callback");
+
}
|
95 |
+
|
96 |
+
/// Query whether the C array stream is released
|
97 |
+
inline int ArrowArrayStreamIsReleased(const struct ArrowArrayStream* stream) {
|
98 |
+
return stream->release == NULL;
|
99 |
+
}
|
100 |
+
|
101 |
+
/// Mark the C array stream released (for use in release callbacks)
|
102 |
+
inline void ArrowArrayStreamMarkReleased(struct ArrowArrayStream* stream) {
|
103 |
+
stream->release = NULL;
|
104 |
+
}
|
105 |
+
|
106 |
+
/// Move the C array stream from `src` to `dest`
|
107 |
+
///
|
108 |
+
/// Note `dest` must *not* point to a valid stream already, otherwise there
|
109 |
+
/// will be a memory leak.
|
110 |
+
inline void ArrowArrayStreamMove(struct ArrowArrayStream* src,
|
111 |
+
struct ArrowArrayStream* dest) {
|
112 |
+
assert(dest != src);
|
113 |
+
assert(!ArrowArrayStreamIsReleased(src));
|
114 |
+
memcpy(dest, src, sizeof(struct ArrowArrayStream));
|
115 |
+
ArrowArrayStreamMarkReleased(src);
|
116 |
+
}
|
117 |
+
|
118 |
+
/// Release the C array stream, if necessary, by calling its release callback
|
119 |
+
inline void ArrowArrayStreamRelease(struct ArrowArrayStream* stream) {
|
120 |
+
if (!ArrowArrayStreamIsReleased(stream)) {
|
121 |
+
stream->release(stream);
|
122 |
+
ARROW_C_ASSERT(ArrowArrayStreamIsReleased(stream),
|
123 |
+
"ArrowArrayStreamRelease did not cleanup release callback");
|
124 |
+
}
|
125 |
+
}
|
126 |
+
|
127 |
+
#ifdef __cplusplus
|
128 |
+
}
|
129 |
+
#endif
|
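Taken together, these helpers encode the C data interface's move/release discipline: a structure is live while its release callback is non-NULL, a move transfers the callback to the destination and marks the source released, and releasing is idempotent. A minimal consumer-side sketch; the producer function my_producer_export_schema is hypothetical:

#include "arrow/c/abi.h"
#include "arrow/c/helpers.h"

// Hypothetical producer that fills in an ArrowSchema (not part of helpers.h).
extern "C" void my_producer_export_schema(struct ArrowSchema* out);

void ConsumeSchema() {
  struct ArrowSchema schema;
  my_producer_export_schema(&schema);

  struct ArrowSchema owned;
  ArrowSchemaMove(&schema, &owned);  // `schema` is now marked released
  ARROW_C_ASSERT(ArrowSchemaIsReleased(&schema), "move must release source");

  // ... inspect owned.format, owned.children, ...

  ArrowSchemaRelease(&owned);  // calls the producer's release callback
  ArrowSchemaRelease(&owned);  // no-op: already released, so this is safe
}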
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api.h
ADDED
@@ -0,0 +1,53 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// NOTE: API is EXPERIMENTAL and will change without going through a
// deprecation cycle

#pragma once

/// \defgroup compute-functions Abstract compute function API
/// @{
/// @}

/// \defgroup compute-concrete-options Concrete option classes for compute functions
/// @{
/// @}

#include "arrow/compute/api_aggregate.h"     // IWYU pragma: export
#include "arrow/compute/api_scalar.h"        // IWYU pragma: export
#include "arrow/compute/api_vector.h"        // IWYU pragma: export
#include "arrow/compute/cast.h"              // IWYU pragma: export
#include "arrow/compute/function.h"          // IWYU pragma: export
#include "arrow/compute/function_options.h"  // IWYU pragma: export
#include "arrow/compute/kernel.h"            // IWYU pragma: export
#include "arrow/compute/registry.h"          // IWYU pragma: export
#include "arrow/datum.h"                     // IWYU pragma: export

#include "arrow/compute/expression.h"  // IWYU pragma: export

/// \defgroup execnode-row Utilities for working with data in a row-major format
/// @{
/// @}

#include "arrow/compute/row/grouper.h"  // IWYU pragma: export

/// \defgroup acero-internals Acero internals, useful for those extending Acero
/// @{
/// @}

#include "arrow/compute/exec.h"  // IWYU pragma: export
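Because this umbrella header re-exports the registry and eager execution APIs, a single include is enough to invoke any registered compute function by name. A minimal sketch, not part of this diff, using arrow::compute::CallFunction (declared in the exported exec.h):

#include <iostream>

#include "arrow/api.h"
#include "arrow/compute/api.h"

arrow::Status UmbrellaExample() {
  // Build a small int64 array.
  arrow::Int64Builder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1, 2, 3, 4}));
  ARROW_ASSIGN_OR_RAISE(auto array, builder.Finish());

  // Invoke a registered compute function by name via the default registry.
  ARROW_ASSIGN_OR_RAISE(arrow::Datum sum,
                        arrow::compute::CallFunction("sum", {array}));
  std::cout << sum.scalar()->ToString() << std::endl;  // prints 10
  return arrow::Status::OK();
}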
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/api_aggregate.h
ADDED
@@ -0,0 +1,466 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Eager evaluation convenience APIs for invoking common functions, including
// necessary memory allocations

#pragma once

#include <vector>

#include "arrow/compute/function_options.h"
#include "arrow/datum.h"
#include "arrow/result.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Array;

namespace compute {

class ExecContext;

// ----------------------------------------------------------------------
// Aggregate functions

/// \addtogroup compute-concrete-options
/// @{

/// \brief Control general scalar aggregate kernel behavior
///
/// By default, null values are ignored (skip_nulls = true).
class ARROW_EXPORT ScalarAggregateOptions : public FunctionOptions {
 public:
  explicit ScalarAggregateOptions(bool skip_nulls = true, uint32_t min_count = 1);
  static constexpr char const kTypeName[] = "ScalarAggregateOptions";
  static ScalarAggregateOptions Defaults() { return ScalarAggregateOptions{}; }

  /// If true (the default), null values are ignored. Otherwise, if any value is null,
  /// emit null.
  bool skip_nulls;
  /// If less than this many non-null values are observed, emit null.
  uint32_t min_count;
};

/// \brief Control count aggregate kernel behavior.
///
/// By default, only non-null values are counted.
class ARROW_EXPORT CountOptions : public FunctionOptions {
 public:
  enum CountMode {
    /// Count only non-null values.
    ONLY_VALID = 0,
    /// Count only null values.
    ONLY_NULL,
    /// Count both non-null and null values.
    ALL,
  };
  explicit CountOptions(CountMode mode = CountMode::ONLY_VALID);
  static constexpr char const kTypeName[] = "CountOptions";
  static CountOptions Defaults() { return CountOptions{}; }

  CountMode mode;
};

/// \brief Control Mode kernel behavior
///
/// Returns the top-n most common values and their counts.
/// By default, returns the most common value and its count.
class ARROW_EXPORT ModeOptions : public FunctionOptions {
 public:
  explicit ModeOptions(int64_t n = 1, bool skip_nulls = true, uint32_t min_count = 0);
  static constexpr char const kTypeName[] = "ModeOptions";
  static ModeOptions Defaults() { return ModeOptions{}; }

  int64_t n = 1;
  /// If true (the default), null values are ignored. Otherwise, if any value is null,
  /// emit null.
  bool skip_nulls;
  /// If less than this many non-null values are observed, emit null.
  uint32_t min_count;
};

/// \brief Control Delta Degrees of Freedom (ddof) of Variance and Stddev kernel
///
/// The divisor used in calculations is N - ddof, where N is the number of elements.
/// By default, ddof is zero, and population variance or stddev is returned.
class ARROW_EXPORT VarianceOptions : public FunctionOptions {
 public:
  explicit VarianceOptions(int ddof = 0, bool skip_nulls = true, uint32_t min_count = 0);
  static constexpr char const kTypeName[] = "VarianceOptions";
  static VarianceOptions Defaults() { return VarianceOptions{}; }

  int ddof = 0;
  /// If true (the default), null values are ignored. Otherwise, if any value is null,
  /// emit null.
  bool skip_nulls;
  /// If less than this many non-null values are observed, emit null.
  uint32_t min_count;
};

/// \brief Control Quantile kernel behavior
///
/// By default, returns the median value.
class ARROW_EXPORT QuantileOptions : public FunctionOptions {
 public:
  /// Interpolation method to use when quantile lies between two data points
  enum Interpolation {
    LINEAR = 0,
    LOWER,
    HIGHER,
    NEAREST,
    MIDPOINT,
  };

  explicit QuantileOptions(double q = 0.5, enum Interpolation interpolation = LINEAR,
                           bool skip_nulls = true, uint32_t min_count = 0);

  explicit QuantileOptions(std::vector<double> q,
                           enum Interpolation interpolation = LINEAR,
                           bool skip_nulls = true, uint32_t min_count = 0);

  static constexpr char const kTypeName[] = "QuantileOptions";
  static QuantileOptions Defaults() { return QuantileOptions{}; }

  /// probability levels of the quantiles; each must be between 0 and 1 inclusive
  std::vector<double> q;
  enum Interpolation interpolation;
  /// If true (the default), null values are ignored. Otherwise, if any value is null,
  /// emit null.
  bool skip_nulls;
  /// If less than this many non-null values are observed, emit null.
  uint32_t min_count;
};

/// \brief Control TDigest approximate quantile kernel behavior
///
/// By default, returns the median value.
class ARROW_EXPORT TDigestOptions : public FunctionOptions {
 public:
  explicit TDigestOptions(double q = 0.5, uint32_t delta = 100,
                          uint32_t buffer_size = 500, bool skip_nulls = true,
                          uint32_t min_count = 0);
  explicit TDigestOptions(std::vector<double> q, uint32_t delta = 100,
                          uint32_t buffer_size = 500, bool skip_nulls = true,
                          uint32_t min_count = 0);
  static constexpr char const kTypeName[] = "TDigestOptions";
  static TDigestOptions Defaults() { return TDigestOptions{}; }

  /// probability levels of the quantiles; each must be between 0 and 1 inclusive
  std::vector<double> q;
  /// compression parameter, default 100
  uint32_t delta;
  /// input buffer size, default 500
  uint32_t buffer_size;
  /// If true (the default), null values are ignored. Otherwise, if any value is null,
  /// emit null.
  bool skip_nulls;
  /// If less than this many non-null values are observed, emit null.
  uint32_t min_count;
};

/// \brief Control Index kernel behavior
class ARROW_EXPORT IndexOptions : public FunctionOptions {
 public:
  explicit IndexOptions(std::shared_ptr<Scalar> value);
  // Default constructor for serialization
  IndexOptions();
  static constexpr char const kTypeName[] = "IndexOptions";

  std::shared_ptr<Scalar> value;
};

/// \brief Configure a grouped aggregation
struct ARROW_EXPORT Aggregate {
  Aggregate() = default;

  Aggregate(std::string function, std::shared_ptr<FunctionOptions> options,
            std::vector<FieldRef> target, std::string name = "")
      : function(std::move(function)),
        options(std::move(options)),
        target(std::move(target)),
        name(std::move(name)) {}

  Aggregate(std::string function, std::shared_ptr<FunctionOptions> options,
            FieldRef target, std::string name = "")
      : Aggregate(std::move(function), std::move(options),
                  std::vector<FieldRef>{std::move(target)}, std::move(name)) {}

  Aggregate(std::string function, FieldRef target, std::string name)
      : Aggregate(std::move(function), /*options=*/NULLPTR,
                  std::vector<FieldRef>{std::move(target)}, std::move(name)) {}

  Aggregate(std::string function, std::string name)
      : Aggregate(std::move(function), /*options=*/NULLPTR,
                  /*target=*/std::vector<FieldRef>{}, std::move(name)) {}

  /// the name of the aggregation function
  std::string function;

  /// options for the aggregation function
  std::shared_ptr<FunctionOptions> options;

  /// zero or more fields to which aggregations will be applied
  std::vector<FieldRef> target;

  /// optional output field name for aggregations
  std::string name;
};

/// @}

/// \brief Count values in an array.
///
/// \param[in] options counting options, see CountOptions for more information
/// \param[in] datum datum to count
/// \param[in] ctx the function execution context, optional
/// \return resulting datum
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Count(const Datum& datum,
                    const CountOptions& options = CountOptions::Defaults(),
                    ExecContext* ctx = NULLPTR);

/// \brief Compute the mean of a numeric array.
///
/// \param[in] value datum to compute the mean of, expecting Array
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed mean as a DoubleScalar
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Mean(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Compute the product of values of a numeric array.
///
/// \param[in] value datum to compute product of, expecting Array or ChunkedArray
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed product as a Scalar
///
/// \since 6.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Product(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Sum values of a numeric array.
///
/// \param[in] value datum to sum, expecting Array or ChunkedArray
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed sum as a Scalar
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Sum(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Calculate the first value of an array
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed first value as a Scalar
///
/// \since 13.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> First(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Calculate the last value of an array
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed last value as a Scalar
///
/// \since 13.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Last(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Calculate the min / max of a numeric array
///
/// This function returns both the min and max as a struct scalar, with type
/// struct<min: T, max: T>, where T is the input type
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as a struct<min: T, max: T> scalar
///
/// \since 1.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> MinMax(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Test whether any element in a boolean array evaluates to true.
///
/// This function returns true if any of the elements in the array evaluates
/// to true and false otherwise. Null values are ignored by default.
/// If null values are taken into account by setting ScalarAggregateOptions
/// parameter skip_nulls = false then Kleene logic is used.
/// See KleeneOr for more details on Kleene logic.
///
/// \param[in] value input datum, expecting a boolean array
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as a BooleanScalar
///
/// \since 3.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Any(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Test whether all elements in a boolean array evaluate to true.
///
/// This function returns true if all of the elements in the array evaluate
/// to true and false otherwise. Null values are ignored by default.
/// If null values are taken into account by setting ScalarAggregateOptions
/// parameter skip_nulls = false then Kleene logic is used.
/// See KleeneAnd for more details on Kleene logic.
///
/// \param[in] value input datum, expecting a boolean array
/// \param[in] options see ScalarAggregateOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as a BooleanScalar
///
/// \since 3.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> All(
    const Datum& value,
    const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
    ExecContext* ctx = NULLPTR);

/// \brief Calculate the modal (most common) value of a numeric array
///
/// This function returns the top-n most common values and the number of times they
/// occur as an array of `struct<mode: T, count: int64>`, where T is the input type.
/// Values with larger counts are returned before smaller ones.
/// If more than one value has the same count, the smaller value is returned first.
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see ModeOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as an array of struct<mode: T, count: int64>
///
/// \since 2.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Mode(const Datum& value,
                   const ModeOptions& options = ModeOptions::Defaults(),
                   ExecContext* ctx = NULLPTR);

/// \brief Calculate the standard deviation of a numeric array
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see VarianceOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed standard deviation as a DoubleScalar
///
/// \since 2.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Stddev(const Datum& value,
                     const VarianceOptions& options = VarianceOptions::Defaults(),
                     ExecContext* ctx = NULLPTR);

/// \brief Calculate the variance of a numeric array
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see VarianceOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return datum of the computed variance as a DoubleScalar
///
/// \since 2.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Variance(const Datum& value,
                       const VarianceOptions& options = VarianceOptions::Defaults(),
                       ExecContext* ctx = NULLPTR);

/// \brief Calculate the quantiles of a numeric array
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see QuantileOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as an array
///
/// \since 4.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Quantile(const Datum& value,
                       const QuantileOptions& options = QuantileOptions::Defaults(),
                       ExecContext* ctx = NULLPTR);

/// \brief Calculate the approximate quantiles of a numeric array with T-Digest algorithm
///
/// \param[in] value input datum, expecting Array or ChunkedArray
/// \param[in] options see TDigestOptions for more information
/// \param[in] ctx the function execution context, optional
/// \return resulting datum as an array
///
/// \since 4.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> TDigest(const Datum& value,
                      const TDigestOptions& options = TDigestOptions::Defaults(),
                      ExecContext* ctx = NULLPTR);

/// \brief Find the first index of a value in an array.
///
/// \param[in] value The array to search.
/// \param[in] options The value to search for. See IndexOptions.
/// \param[in] ctx the function execution context, optional
/// \return a Scalar containing the index (or -1 if not found).
///
/// \since 5.0.0
/// \note API not yet finalized
ARROW_EXPORT
Result<Datum> Index(const Datum& value, const IndexOptions& options,
                    ExecContext* ctx = NULLPTR);

}  // namespace compute
}  // namespace arrow
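As the doc comments above describe, each convenience wrapper eagerly evaluates one aggregate kernel over a Datum and honors the shared skip_nulls/min_count options. A minimal sketch, not part of this diff, combining Sum and MinMax over an array containing a null:

#include <iostream>

#include "arrow/api.h"
#include "arrow/compute/api_aggregate.h"

arrow::Status AggregateExample() {
  arrow::DoubleBuilder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1.0, 2.5, 4.0}));
  ARROW_RETURN_NOT_OK(builder.AppendNull());
  ARROW_ASSIGN_OR_RAISE(auto array, builder.Finish());

  // Nulls are skipped by default; require at least one non-null value.
  arrow::compute::ScalarAggregateOptions opts(/*skip_nulls=*/true, /*min_count=*/1);

  ARROW_ASSIGN_OR_RAISE(arrow::Datum sum, arrow::compute::Sum(array, opts));
  ARROW_ASSIGN_OR_RAISE(arrow::Datum minmax, arrow::compute::MinMax(array, opts));

  std::cout << "sum: " << sum.scalar()->ToString() << std::endl;
  // MinMax yields a struct<min: double, max: double> scalar, per its docs.
  std::cout << "min/max: " << minmax.scalar()->ToString() << std::endl;
  return arrow::Status::OK();
}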
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/expression.h
ADDED
@@ -0,0 +1,295 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// This API is EXPERIMENTAL.

#pragma once

#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>

#include "arrow/compute/type_fwd.h"
#include "arrow/datum.h"
#include "arrow/type_fwd.h"
#include "arrow/util/small_vector.h"

namespace arrow {
namespace compute {

/// \defgroup expression-core Expressions to describe data transformations
///
/// @{

/// An unbound expression which maps a single Datum to another Datum.
/// An expression is one of
/// - A literal Datum.
/// - A reference to a single (potentially nested) field of the input Datum.
/// - A call to a compute function, with arguments specified by other Expressions.
class ARROW_EXPORT Expression {
 public:
  struct Call {
    std::string function_name;
    std::vector<Expression> arguments;
    std::shared_ptr<FunctionOptions> options;
    // Cached hash value
    size_t hash;

    // post-Bind properties:
    std::shared_ptr<Function> function;
    const Kernel* kernel = NULLPTR;
    std::shared_ptr<KernelState> kernel_state;
    TypeHolder type;

    void ComputeHash();
  };

  std::string ToString() const;
  bool Equals(const Expression& other) const;
  size_t hash() const;
  struct Hash {
    size_t operator()(const Expression& expr) const { return expr.hash(); }
  };

  /// Bind this expression to the given input type, looking up Kernels and field types.
  /// Some expression simplification may be performed and implicit casts will be inserted.
  /// Any state necessary for execution will be initialized and returned.
  Result<Expression> Bind(const TypeHolder& in, ExecContext* = NULLPTR) const;
  Result<Expression> Bind(const Schema& in_schema, ExecContext* = NULLPTR) const;

  // XXX someday
  // Clone all KernelState in this bound expression. If any function referenced by this
  // expression has mutable KernelState, it is not safe to execute or apply simplification
  // passes to it (or copies of it!) from multiple threads. Cloning state produces new
  // KernelStates where necessary to ensure that Expressions may be manipulated safely
  // on multiple threads.
  // Result<ExpressionState> CloneState() const;
  // Status SetState(ExpressionState);

  /// Return true if all of an expression's field references have explicit types
  /// and all of its functions' kernels are looked up.
  bool IsBound() const;

  /// Return true if this expression is composed only of Scalar literals, field
  /// references, and calls to ScalarFunctions.
  bool IsScalarExpression() const;

  /// Return true if this expression is literal and entirely null.
  bool IsNullLiteral() const;

  /// Return true if this expression could evaluate to true. Will return true for any
  /// unbound or non-boolean Expressions. IsSatisfiable does not (currently) do any
  /// canonicalization or simplification of the expression, so even Expressions
  /// which are unsatisfiable may spuriously return `true` here. This function is
  /// intended for use in predicate pushdown where a filter expression is simplified
  /// by a guarantee, so it assumes that trying to simplify again would be redundant.
  bool IsSatisfiable() const;

  // XXX someday
  // Result<PipelineGraph> GetPipelines();

  bool is_valid() const { return impl_ != NULLPTR; }

  /// Access a Call or return nullptr if this expression is not a call
  const Call* call() const;
  /// Access a Datum or return nullptr if this expression is not a literal
  const Datum* literal() const;
  /// Access a FieldRef or return nullptr if this expression is not a field_ref
  const FieldRef* field_ref() const;

  /// The type to which this expression will evaluate
  const DataType* type() const;
  // XXX someday
  // NullGeneralization::type nullable() const;

  struct Parameter {
    FieldRef ref;

    // post-bind properties
    TypeHolder type;
    ::arrow::internal::SmallVector<int, 2> indices;
  };
  const Parameter* parameter() const;

  Expression() = default;
  explicit Expression(Call call);
  explicit Expression(Datum literal);
  explicit Expression(Parameter parameter);

 private:
  using Impl = std::variant<Datum, Parameter, Call>;
  std::shared_ptr<Impl> impl_;

  ARROW_FRIEND_EXPORT friend bool Identical(const Expression& l, const Expression& r);
};

inline bool operator==(const Expression& l, const Expression& r) { return l.Equals(r); }
inline bool operator!=(const Expression& l, const Expression& r) { return !l.Equals(r); }

ARROW_EXPORT void PrintTo(const Expression&, std::ostream*);

// Factories

ARROW_EXPORT
Expression literal(Datum lit);

template <typename Arg>
Expression literal(Arg&& arg) {
  return literal(Datum(std::forward<Arg>(arg)));
}

ARROW_EXPORT
Expression field_ref(FieldRef ref);

ARROW_EXPORT
Expression call(std::string function, std::vector<Expression> arguments,
                std::shared_ptr<FunctionOptions> options = NULLPTR);

template <typename Options, typename = typename std::enable_if<
                                std::is_base_of<FunctionOptions, Options>::value>::type>
Expression call(std::string function, std::vector<Expression> arguments,
                Options options) {
  return call(std::move(function), std::move(arguments),
              std::make_shared<Options>(std::move(options)));
}

/// Assemble a list of all fields referenced by an Expression at any depth.
ARROW_EXPORT
std::vector<FieldRef> FieldsInExpression(const Expression&);

/// Check if the expression references any fields.
ARROW_EXPORT
bool ExpressionHasFieldRefs(const Expression&);

struct ARROW_EXPORT KnownFieldValues;

/// Assemble a mapping from field references to known values. This derives known values
/// from "equal" and "is_null" Expressions referencing a field and a literal.
ARROW_EXPORT
Result<KnownFieldValues> ExtractKnownFieldValues(
    const Expression& guaranteed_true_predicate);

/// @}

/// \defgroup expression-passes Functions for modification of Expressions
///
/// @{
///
/// These transform bound expressions. Some transforms utilize a guarantee, which is
/// provided as an Expression which is guaranteed to evaluate to true. The
/// guaranteed_true_predicate need not be bound, but canonicalization is currently
/// deferred to producers of guarantees. For example in order to be recognized as a
/// guarantee on a field value, an Expression must be a call to "equal" with field_ref LHS
/// and literal RHS. Flipping the arguments, "is_in" with a one-long value_set, or
/// other semantically identical Expressions will not be recognized.

/// Weak canonicalization which establishes guarantees for subsequent passes. Even
/// equivalent Expressions may result in different canonicalized expressions.
/// TODO this could be a strong canonicalization
ARROW_EXPORT
Result<Expression> Canonicalize(Expression, ExecContext* = NULLPTR);

/// Simplify Expressions based on literal arguments (for example, add(null, x) will always
/// be null so replace the call with a null literal). Includes early evaluation of all
/// calls whose arguments are entirely literal.
ARROW_EXPORT
Result<Expression> FoldConstants(Expression);

/// Simplify Expressions by replacing with known values the fields which they reference.
ARROW_EXPORT
Result<Expression> ReplaceFieldsWithKnownValues(const KnownFieldValues& known_values,
                                                Expression);

/// Simplify an expression by replacing subexpressions based on a guarantee:
/// a boolean expression which is guaranteed to evaluate to `true`. For example, this is
/// used to remove redundant function calls from a filter expression or to replace a
/// reference to a constant-value field with a literal.
ARROW_EXPORT
Result<Expression> SimplifyWithGuarantee(Expression,
                                         const Expression& guaranteed_true_predicate);

/// Replace all named field refs (e.g. "x" or "x.y") with field paths (e.g. [0] or [1,3])
///
/// This isn't usually needed and does not offer any simplification by itself. However,
/// it can be useful to normalize an expression to paths to make it simpler to work with.
ARROW_EXPORT Result<Expression> RemoveNamedRefs(Expression expression);

/// @}

// Execution

/// Create an ExecBatch suitable for passing to ExecuteScalarExpression() from a
/// RecordBatch which may have missing or incorrectly ordered columns.
/// Missing fields will be replaced with null scalars.
ARROW_EXPORT Result<ExecBatch> MakeExecBatch(const Schema& full_schema,
                                             const Datum& partial,
                                             Expression guarantee = literal(true));

/// Execute a scalar expression against the provided state and input ExecBatch. This
/// expression must be bound.
ARROW_EXPORT
Result<Datum> ExecuteScalarExpression(const Expression&, const ExecBatch& input,
                                      ExecContext* = NULLPTR);

/// Convenience function for invoking against a RecordBatch
ARROW_EXPORT
Result<Datum> ExecuteScalarExpression(const Expression&, const Schema& full_schema,
                                      const Datum& partial_input, ExecContext* = NULLPTR);

// Serialization

ARROW_EXPORT
Result<std::shared_ptr<Buffer>> Serialize(const Expression&);

ARROW_EXPORT
Result<Expression> Deserialize(std::shared_ptr<Buffer>);

/// \defgroup expression-convenience Helpers for convenient expression creation
///
/// @{

ARROW_EXPORT Expression project(std::vector<Expression> values,
                                std::vector<std::string> names);

ARROW_EXPORT Expression equal(Expression lhs, Expression rhs);

ARROW_EXPORT Expression not_equal(Expression lhs, Expression rhs);

ARROW_EXPORT Expression less(Expression lhs, Expression rhs);

ARROW_EXPORT Expression less_equal(Expression lhs, Expression rhs);

ARROW_EXPORT Expression greater(Expression lhs, Expression rhs);

ARROW_EXPORT Expression greater_equal(Expression lhs, Expression rhs);

ARROW_EXPORT Expression is_null(Expression lhs, bool nan_is_null = false);

ARROW_EXPORT Expression is_valid(Expression lhs);

ARROW_EXPORT Expression and_(Expression lhs, Expression rhs);
ARROW_EXPORT Expression and_(const std::vector<Expression>&);
ARROW_EXPORT Expression or_(Expression lhs, Expression rhs);
ARROW_EXPORT Expression or_(const std::vector<Expression>&);
ARROW_EXPORT Expression not_(Expression operand);

/// @}

}  // namespace compute
}  // namespace arrow
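The factories above make it cheap to assemble expression trees without touching the Call/Parameter internals. A minimal sketch, not part of this diff, that builds a filter, binds it to a schema, and simplifies it under a guarantee in the canonical `field == literal` form the doc comments call for:

#include <iostream>

#include "arrow/api.h"
#include "arrow/compute/expression.h"

namespace cp = arrow::compute;

arrow::Status ExpressionExample() {
  auto schema = arrow::schema({arrow::field("x", arrow::int64()),
                               arrow::field("y", arrow::int64())});

  // (x > 0) and (y == 42)
  cp::Expression filter =
      cp::and_(cp::greater(cp::field_ref("x"), cp::literal(int64_t{0})),
               cp::equal(cp::field_ref("y"), cp::literal(int64_t{42})));
  ARROW_ASSIGN_OR_RAISE(filter, filter.Bind(*schema));

  // To be recognized, the guarantee must be "equal" with field_ref LHS
  // and literal RHS, exactly as documented above.
  cp::Expression guarantee = cp::equal(cp::field_ref("y"), cp::literal(int64_t{42}));
  ARROW_ASSIGN_OR_RAISE(cp::Expression simplified,
                        cp::SimplifyWithGuarantee(filter, guarantee));

  std::cout << simplified.ToString() << std::endl;  // should reduce to (x > 0)
  return arrow::Status::OK();
}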
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/kernel.h
ADDED
@@ -0,0 +1,752 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
// or more contributor license agreements. See the NOTICE file
|
3 |
+
// distributed with this work for additional information
|
4 |
+
// regarding copyright ownership. The ASF licenses this file
|
5 |
+
// to you under the Apache License, Version 2.0 (the
|
6 |
+
// "License"); you may not use this file except in compliance
|
7 |
+
// with the License. You may obtain a copy of the License at
|
8 |
+
//
|
9 |
+
// http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
//
|
11 |
+
// Unless required by applicable law or agreed to in writing,
|
12 |
+
// software distributed under the License is distributed on an
|
13 |
+
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
// KIND, either express or implied. See the License for the
|
15 |
+
// specific language governing permissions and limitations
|
16 |
+
// under the License.
|
17 |
+
|
18 |
+
// NOTE: API is EXPERIMENTAL and will change without going through a
|
19 |
+
// deprecation cycle
|
20 |
+
|
21 |
+
#pragma once
|
22 |
+
|
23 |
+
#include <cstddef>
|
24 |
+
#include <cstdint>
|
25 |
+
#include <functional>
|
26 |
+
#include <memory>
|
27 |
+
#include <string>
|
28 |
+
#include <utility>
|
29 |
+
#include <vector>
|
30 |
+
|
31 |
+
#include "arrow/buffer.h"
|
32 |
+
#include "arrow/compute/exec.h"
|
33 |
+
#include "arrow/datum.h"
|
34 |
+
#include "arrow/memory_pool.h"
|
35 |
+
#include "arrow/result.h"
|
36 |
+
#include "arrow/status.h"
|
37 |
+
#include "arrow/type.h"
|
38 |
+
#include "arrow/util/macros.h"
|
39 |
+
#include "arrow/util/visibility.h"
|
40 |
+
|
41 |
+
// macOS defines PREALLOCATE as a preprocessor macro in the header sys/vnode.h.
|
42 |
+
// No other BSD seems to do so. The name is used as an identifier in MemAllocation enum.
|
43 |
+
#if defined(__APPLE__) && defined(PREALLOCATE)
|
44 |
+
#undef PREALLOCATE
|
45 |
+
#endif
|
46 |
+
|
47 |
+
namespace arrow {
|
48 |
+
namespace compute {
|
49 |
+
|
50 |
+
class FunctionOptions;
|
51 |
+
|
52 |
+
/// \brief Base class for opaque kernel-specific state. For example, if there
|
53 |
+
/// is some kind of initialization required.
|
54 |
+
struct ARROW_EXPORT KernelState {
|
55 |
+
virtual ~KernelState() = default;
|
56 |
+
};
|
57 |
+
|
58 |
+
/// \brief Context/state for the execution of a particular kernel.
|
59 |
+
class ARROW_EXPORT KernelContext {
|
60 |
+
public:
|
61 |
+
// Can pass optional backreference; not used consistently for the
|
62 |
+
// moment but will be made so in the future
|
63 |
+
explicit KernelContext(ExecContext* exec_ctx, const Kernel* kernel = NULLPTR)
|
64 |
+
: exec_ctx_(exec_ctx), kernel_(kernel) {}
|
65 |
+
|
66 |
+
/// \brief Allocate buffer from the context's memory pool. The contents are
|
67 |
+
/// not initialized.
|
68 |
+
Result<std::shared_ptr<ResizableBuffer>> Allocate(int64_t nbytes);
|
69 |
+
|
70 |
+
/// \brief Allocate buffer for bitmap from the context's memory pool. Like
|
71 |
+
/// Allocate, the contents of the buffer are not initialized but the last
|
72 |
+
/// byte is preemptively zeroed to help avoid ASAN or valgrind issues.
|
73 |
+
Result<std::shared_ptr<ResizableBuffer>> AllocateBitmap(int64_t num_bits);
|
74 |
+
|
75 |
+
/// \brief Assign the active KernelState to be utilized for each stage of
|
76 |
+
/// kernel execution. Ownership and memory lifetime of the KernelState must
|
77 |
+
/// be minded separately.
|
78 |
+
void SetState(KernelState* state) { state_ = state; }
|
79 |
+
|
80 |
+
// Set kernel that is being invoked since some kernel
|
81 |
+
// implementations will examine the kernel state.
|
82 |
+
void SetKernel(const Kernel* kernel) { kernel_ = kernel; }
|
83 |
+
|
84 |
+
KernelState* state() { return state_; }
|
85 |
+
|
86 |
+
/// \brief Configuration related to function execution that is to be shared
|
87 |
+
/// across multiple kernels.
|
88 |
+
ExecContext* exec_context() { return exec_ctx_; }
|
89 |
+
|
90 |
+
/// \brief The memory pool to use for allocations. For now, it uses the
|
91 |
+
/// MemoryPool contained in the ExecContext used to create the KernelContext.
|
92 |
+
MemoryPool* memory_pool() { return exec_ctx_->memory_pool(); }
|
93 |
+
|
94 |
+
const Kernel* kernel() const { return kernel_; }
|
95 |
+
|
96 |
+
private:
|
97 |
+
ExecContext* exec_ctx_;
|
98 |
+
KernelState* state_ = NULLPTR;
|
99 |
+
const Kernel* kernel_ = NULLPTR;
|
100 |
+
};
|
101 |
+
|
102 |
+
/// \brief An type-checking interface to permit customizable validation rules
|
103 |
+
/// for use with InputType and KernelSignature. This is for scenarios where the
|
104 |
+
/// acceptance is not an exact type instance, such as a TIMESTAMP type for a
|
105 |
+
/// specific TimeUnit, but permitting any time zone.
|
106 |
+
struct ARROW_EXPORT TypeMatcher {
|
107 |
+
virtual ~TypeMatcher() = default;
|
108 |
+
|
109 |
+
/// \brief Return true if this matcher accepts the data type.
|
110 |
+
virtual bool Matches(const DataType& type) const = 0;
|
111 |
+
|
112 |
+
/// \brief A human-interpretable string representation of what the type
|
113 |
+
/// matcher checks for, usable when printing KernelSignature or formatting
|
114 |
+
/// error messages.
|
115 |
+
virtual std::string ToString() const = 0;
|
116 |
+
|
117 |
+
/// \brief Return true if this TypeMatcher contains the same matching rule as
|
118 |
+
/// the other. Currently depends on RTTI.
|
119 |
+
virtual bool Equals(const TypeMatcher& other) const = 0;
|
120 |
+
};
|
121 |
+
|
122 |
+
namespace match {
|
123 |
+
|
124 |
+
/// \brief Match any DataType instance having the same DataType::id.
|
125 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> SameTypeId(Type::type type_id);
|
126 |
+
|
127 |
+
/// \brief Match any TimestampType instance having the same unit, but the time
|
128 |
+
/// zones can be different.
|
129 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> TimestampTypeUnit(TimeUnit::type unit);
|
130 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> Time32TypeUnit(TimeUnit::type unit);
|
131 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> Time64TypeUnit(TimeUnit::type unit);
|
132 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> DurationTypeUnit(TimeUnit::type unit);
|
133 |
+
|
134 |
+
// \brief Match any integer type
|
135 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> Integer();
|
136 |
+
|
137 |
+
// Match types using 32-bit varbinary representation
|
138 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> BinaryLike();
|
139 |
+
|
140 |
+
// Match types using 64-bit varbinary representation
|
141 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> LargeBinaryLike();
|
142 |
+
|
143 |
+
// Match any fixed binary type
|
144 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> FixedSizeBinaryLike();
|
145 |
+
|
146 |
+
// \brief Match any primitive type (boolean or any type representable as a C
|
147 |
+
// Type)
|
148 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> Primitive();
|
149 |
+
|
150 |
+
// \brief Match any integer type that can be used as run-end in run-end encoded
|
151 |
+
// arrays
|
152 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndInteger();
|
153 |
+
|
154 |
+
/// \brief Match run-end encoded types that use any valid run-end type and
|
155 |
+
/// encode specific value types
|
156 |
+
///
|
157 |
+
/// @param[in] value_type_matcher a matcher that is applied to the values field
|
158 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(
|
159 |
+
std::shared_ptr<TypeMatcher> value_type_matcher);
|
160 |
+
|
161 |
+
/// \brief Match run-end encoded types that use any valid run-end type and
|
162 |
+
/// encode specific value types
|
163 |
+
///
|
164 |
+
/// @param[in] value_type_id a type id that the type of the values field should match
|
165 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(Type::type value_type_id);
|
166 |
+
|
167 |
+
/// \brief Match run-end encoded types that encode specific run-end and value types
|
168 |
+
///
|
169 |
+
/// @param[in] run_end_type_matcher a matcher that is applied to the run_ends field
|
170 |
+
/// @param[in] value_type_matcher a matcher that is applied to the values field
|
171 |
+
ARROW_EXPORT std::shared_ptr<TypeMatcher> RunEndEncoded(
|
172 |
+
std::shared_ptr<TypeMatcher> run_end_type_matcher,
|
173 |
+
std::shared_ptr<TypeMatcher> value_type_matcher);
|
174 |
+
|
175 |
+
} // namespace match
|
176 |
+
|
177 |
+
/// \brief An object used for type-checking arguments to be passed to a kernel
/// and stored in a KernelSignature. The type-checking rule can be supplied
/// either with an exact DataType instance or a custom TypeMatcher.
class ARROW_EXPORT InputType {
 public:
  /// \brief The kind of type-checking rule that the InputType contains.
  enum Kind {
    /// \brief Accept any value type.
    ANY_TYPE,

    /// \brief A fixed arrow::DataType; only values having this exact type
    /// (e.g. same TimestampType unit, same decimal scale and precision, or
    /// same nested child types) will match.
    EXACT_TYPE,

    /// \brief Uses a TypeMatcher implementation to check the type.
    USE_TYPE_MATCHER
  };

  /// \brief Accept any value type
  InputType() : kind_(ANY_TYPE) {}

  /// \brief Accept an exact value type.
  InputType(std::shared_ptr<DataType> type)  // NOLINT implicit construction
      : kind_(EXACT_TYPE), type_(std::move(type)) {}

  /// \brief Use the passed TypeMatcher to type check.
  InputType(std::shared_ptr<TypeMatcher> type_matcher)  // NOLINT implicit construction
      : kind_(USE_TYPE_MATCHER), type_matcher_(std::move(type_matcher)) {}

  /// \brief Match any type with the given Type::type. Uses a TypeMatcher for
  /// its implementation.
  InputType(Type::type type_id)  // NOLINT implicit construction
      : InputType(match::SameTypeId(type_id)) {}

  InputType(const InputType& other) { CopyInto(other); }

  void operator=(const InputType& other) { CopyInto(other); }

  InputType(InputType&& other) { MoveInto(std::forward<InputType>(other)); }

  void operator=(InputType&& other) { MoveInto(std::forward<InputType>(other)); }

  // \brief Match any input (array, scalar of any type)
  static InputType Any() { return InputType(); }

  /// \brief Return true if this input type matches the same type cases as the
  /// other.
  bool Equals(const InputType& other) const;

  bool operator==(const InputType& other) const { return this->Equals(other); }

  bool operator!=(const InputType& other) const { return !(*this == other); }

  /// \brief Return hash code.
  size_t Hash() const;

  /// \brief Render a human-readable string representation.
  std::string ToString() const;

  /// \brief Return true if the Datum matches this argument kind in
  /// type (and only allows scalar or array-like Datums).
  bool Matches(const Datum& value) const;

  /// \brief Return true if the type matches this InputType
  bool Matches(const DataType& type) const;

  /// \brief The type matching rule that this InputType uses.
  Kind kind() const { return kind_; }

  /// \brief For InputType::EXACT_TYPE kind, the exact type that this InputType
  /// must match. Otherwise this function should not be used and will assert in
  /// debug builds.
  const std::shared_ptr<DataType>& type() const;

  /// \brief For InputType::USE_TYPE_MATCHER, the TypeMatcher to be used for
  /// checking the type of a value. Otherwise this function should not be used
  /// and will assert in debug builds.
  const TypeMatcher& type_matcher() const;

 private:
  void CopyInto(const InputType& other) {
    this->kind_ = other.kind_;
    this->type_ = other.type_;
    this->type_matcher_ = other.type_matcher_;
  }

  void MoveInto(InputType&& other) {
    this->kind_ = other.kind_;
    this->type_ = std::move(other.type_);
    this->type_matcher_ = std::move(other.type_matcher_);
  }

  Kind kind_;

  // For EXACT_TYPE Kind
  std::shared_ptr<DataType> type_;

  // For USE_TYPE_MATCHER Kind
  std::shared_ptr<TypeMatcher> type_matcher_;
};
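// --- Illustrative sketch (not part of the original header) ---
// Three ways of declaring what a kernel argument accepts: an exact type,
// any type with a given Type::type id, and a custom matcher.
inline void ExampleInputTypes() {
  InputType exact(int32());                    // only int32 matches
  InputType by_id(Type::DECIMAL128);           // any decimal128(p, s)
  InputType by_matcher(match::BinaryLike());   // 32-bit varbinary family
  bool ok = by_id.Matches(*decimal128(10, 2));  // true
  (void)exact;
  (void)by_matcher;
  (void)ok;
}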
/// \brief Container to capture both exact and input-dependent output types.
class ARROW_EXPORT OutputType {
 public:
  /// \brief An enum indicating whether the value type is an invariant fixed
  /// value or one that's computed by a kernel-defined resolver function.
  enum ResolveKind { FIXED, COMPUTED };

  /// Type resolution function. Given input types, return output type. This
  /// function MAY use the kernel state to decide the output type based on
  /// the FunctionOptions.
  ///
  /// This function SHOULD _not_ be used to check for arity, that is to be
  /// performed one or more layers above.
  using Resolver =
      std::function<Result<TypeHolder>(KernelContext*, const std::vector<TypeHolder>&)>;

  /// \brief Output an exact type
  OutputType(std::shared_ptr<DataType> type)  // NOLINT implicit construction
      : kind_(FIXED), type_(std::move(type)) {}

  /// \brief Output a computed type depending on actual input types
  template <typename Fn>
  OutputType(Fn resolver)  // NOLINT implicit construction
      : kind_(COMPUTED), resolver_(std::move(resolver)) {}

  OutputType(const OutputType& other) {
    this->kind_ = other.kind_;
    this->type_ = other.type_;
    this->resolver_ = other.resolver_;
  }

  OutputType(OutputType&& other) {
    this->kind_ = other.kind_;
    this->type_ = std::move(other.type_);
    this->resolver_ = other.resolver_;
  }

  OutputType& operator=(const OutputType&) = default;
  OutputType& operator=(OutputType&&) = default;

  /// \brief Return the type of the expected output value of the kernel given
  /// the input argument types. The resolver may make use of state information
  /// kept in the KernelContext.
  Result<TypeHolder> Resolve(KernelContext* ctx,
                             const std::vector<TypeHolder>& args) const;

  /// \brief The exact output value type for the FIXED kind.
  const std::shared_ptr<DataType>& type() const;

  /// \brief For the COMPUTED resolution strategy, the resolver function. It is
  /// usually more convenient to invoke OutputType::Resolve than to call the
  /// resolver directly.
  const Resolver& resolver() const;

  /// \brief Render a human-readable string representation.
  std::string ToString() const;

  /// \brief Return the kind of type resolution of this output type, whether
  /// fixed/invariant or computed by a resolver.
  ResolveKind kind() const { return kind_; }

 private:
  ResolveKind kind_;

  // For FIXED resolution
  std::shared_ptr<DataType> type_;

  // For COMPUTED resolution
  Resolver resolver_ = NULLPTR;
};
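// --- Illustrative sketch (not part of the original header) ---
// A computed OutputType whose resolver simply returns the first input type,
// so the output always matches the first argument (a common pattern for
// elementwise "identity-typed" kernels).
inline OutputType ExampleFirstArgumentOutputType() {
  return OutputType(
      [](KernelContext*, const std::vector<TypeHolder>& args) -> Result<TypeHolder> {
        return args[0];
      });
}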
/// \brief Holds the input types and output type of the kernel.
///
/// VarArgs functions with minimum N arguments should pass up to N input types to be
/// used to validate the input types of a function invocation. The first N-1 types
/// will be matched against the first N-1 arguments, and the last type will be
/// matched against the remaining arguments.
class ARROW_EXPORT KernelSignature {
 public:
  KernelSignature(std::vector<InputType> in_types, OutputType out_type,
                  bool is_varargs = false);

  /// \brief Convenience ctor since make_shared can be awkward
  static std::shared_ptr<KernelSignature> Make(std::vector<InputType> in_types,
                                               OutputType out_type,
                                               bool is_varargs = false);

  /// \brief Return true if the signature is compatible with the list of input
  /// value descriptors.
  bool MatchesInputs(const std::vector<TypeHolder>& types) const;

  /// \brief Returns true if the input types of each signature are
  /// equal. Well-formed functions should have a deterministic output type
  /// given input types, but currently it is the responsibility of the
  /// developer to ensure this.
  bool Equals(const KernelSignature& other) const;

  bool operator==(const KernelSignature& other) const { return this->Equals(other); }

  bool operator!=(const KernelSignature& other) const { return !(*this == other); }

  /// \brief Compute a hash code for the signature
  size_t Hash() const;

  /// \brief The input types for the kernel. For VarArgs functions, this should
  /// generally contain a single validator to use for validating all of the
  /// function arguments.
  const std::vector<InputType>& in_types() const { return in_types_; }

  /// \brief The output type for the kernel. Use Resolve to return the
  /// exact output given input argument types, since many kernels'
  /// output types depend on their input types (or their type
  /// metadata).
  const OutputType& out_type() const { return out_type_; }

  /// \brief Render a human-readable string representation
  std::string ToString() const;

  bool is_varargs() const { return is_varargs_; }

 private:
  std::vector<InputType> in_types_;
  OutputType out_type_;
  bool is_varargs_;

  // For caching the hash code after it's computed the first time
  mutable uint64_t hash_code_;
};
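// --- Illustrative sketch (not part of the original header) ---
// Builds a signature for a binary kernel taking (int32, any timestamp) and
// returning int64, then checks a candidate argument list against it. Relies
// on TypeHolder being implicitly constructible from shared_ptr<DataType>.
inline void ExampleSignature() {
  auto sig = KernelSignature::Make(
      {InputType(int32()), InputType(Type::TIMESTAMP)}, OutputType(int64()));
  std::vector<TypeHolder> args = {int32(), timestamp(TimeUnit::MILLI)};
  bool ok = sig->MatchesInputs(args);  // true
  (void)ok;
}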
/// \brief A function may contain multiple variants of a kernel for a given
/// type combination for different SIMD levels. Based on the active system's
/// CPU info or the user's preferences, we can elect to use one over the other.
struct SimdLevel {
  enum type { NONE = 0, SSE4_2, AVX, AVX2, AVX512, NEON, MAX };
};

/// \brief The strategy to use for propagating or otherwise populating the
/// validity bitmap of a kernel output.
struct NullHandling {
  enum type {
    /// Compute the output validity bitmap by intersecting the validity bitmaps
    /// of the arguments using bitwise-and operations. This means that values
    /// in the output are valid/non-null only if the corresponding values in
    /// all input arguments were valid/non-null. Kernels generally need not
    /// touch the bitmap thereafter, but a kernel's exec function is permitted
    /// to alter the bitmap after the null intersection is computed if it needs
    /// to.
    INTERSECTION,

    /// Kernel expects a pre-allocated buffer to write the result bitmap
    /// into. The preallocated memory is not zeroed (except for the last byte),
    /// so the kernel should ensure to completely populate the bitmap.
    COMPUTED_PREALLOCATE,

    /// Kernel allocates and sets the validity bitmap of the output.
    COMPUTED_NO_PREALLOCATE,

    /// Kernel output is never null and a validity bitmap does not need to be
    /// allocated.
    OUTPUT_NOT_NULL
  };
};

/// \brief The preference for memory preallocation of fixed-width type outputs
/// in kernel execution.
struct MemAllocation {
  enum type {
    // For data types that support pre-allocation (i.e. fixed-width), the
    // kernel expects to be provided a pre-allocated data buffer to write
    // into. Non-fixed-width types must always allocate their own data
    // buffers. The allocation is made for the same length as the execution
    // batch, so vector kernels yielding differently sized output should not
    // use this.
    //
    // It is valid for the data to not be preallocated but the validity bitmap
    // is (or is computed using the intersection/bitwise-and method).
    //
    // For variable-size output types like BinaryType or StringType, or for
    // nested types, this option has no effect.
    PREALLOCATE,

    // The kernel is responsible for allocating its own data buffer for
    // fixed-width type outputs.
    NO_PREALLOCATE
  };
};

struct Kernel;

/// \brief Arguments to pass to a KernelInit function. A struct is used to help
/// avoid API breakage should the arguments passed need to be expanded.
struct KernelInitArgs {
  /// \brief A pointer to the kernel being initialized. The init function may
  /// depend on the kernel's KernelSignature or other data contained there.
  const Kernel* kernel;

  /// \brief The types of the input arguments that the kernel is
  /// about to be executed against.
  const std::vector<TypeHolder>& inputs;

  /// \brief Opaque options specific to this kernel. May be nullptr for functions
  /// that do not require options.
  const FunctionOptions* options;
};

/// \brief Common initializer function for all kernel types.
using KernelInit = std::function<Result<std::unique_ptr<KernelState>>(
    KernelContext*, const KernelInitArgs&)>;

/// \brief Base type for kernels. Contains the function signature and
/// optionally the state initialization function, along with some common
/// attributes
struct ARROW_EXPORT Kernel {
  Kernel() = default;

  Kernel(std::shared_ptr<KernelSignature> sig, KernelInit init)
      : signature(std::move(sig)), init(std::move(init)) {}

  Kernel(std::vector<InputType> in_types, OutputType out_type, KernelInit init)
      : Kernel(KernelSignature::Make(std::move(in_types), std::move(out_type)),
               std::move(init)) {}

  /// \brief The "signature" of the kernel containing the InputType input
  /// argument validators and OutputType output type resolver.
  std::shared_ptr<KernelSignature> signature;

  /// \brief Create a new KernelState for invocations of this kernel, e.g. to
  /// set up any options or state relevant for execution.
  KernelInit init;

  /// \brief Create a vector of new KernelState for invocations of this kernel.
  static Status InitAll(KernelContext*, const KernelInitArgs&,
                        std::vector<std::unique_ptr<KernelState>>*);

  /// \brief Indicates whether execution can benefit from parallelization
  /// (splitting large chunks into smaller chunks and using multiple
  /// threads). Some kernels may not support parallel execution at
  /// all. Synchronization and concurrency-related issues are currently the
  /// responsibility of the Kernel's implementation.
  bool parallelizable = true;

  /// \brief Indicates the level of SIMD instruction support in the host CPU
  /// required to use the function. The intention is for functions to be able to
  /// contain multiple kernels with the same signature but different levels of SIMD,
  /// so that the most optimized kernel supported on a host's processor can be chosen.
  SimdLevel::type simd_level = SimdLevel::NONE;

  // Additional kernel-specific data
  std::shared_ptr<KernelState> data;
};

/// \brief The scalar kernel execution API that must be implemented for SCALAR
/// kernel types. This includes both stateless and stateful kernels. Kernels
/// depending on some execution state access that state via subclasses of
/// KernelState set on the KernelContext object. Implementations should
/// endeavor to write into pre-allocated memory if they are able, though for
/// some kernels (e.g. in cases when a builder like StringBuilder must be
/// employed) this may not be possible.
using ArrayKernelExec = Status (*)(KernelContext*, const ExecSpan&, ExecResult*);

/// \brief Kernel data structure for implementations of ScalarFunction. In
/// addition to the members found in Kernel, contains the null handling
/// and memory pre-allocation preferences.
struct ARROW_EXPORT ScalarKernel : public Kernel {
  ScalarKernel() = default;

  ScalarKernel(std::shared_ptr<KernelSignature> sig, ArrayKernelExec exec,
               KernelInit init = NULLPTR)
      : Kernel(std::move(sig), init), exec(exec) {}

  ScalarKernel(std::vector<InputType> in_types, OutputType out_type, ArrayKernelExec exec,
               KernelInit init = NULLPTR)
      : Kernel(std::move(in_types), std::move(out_type), std::move(init)), exec(exec) {}

  /// \brief Perform a single invocation of this kernel. Depending on the
  /// implementation, it may only write into preallocated memory, while in some
  /// cases it will allocate its own memory. Any required state is managed
  /// through the KernelContext.
  ArrayKernelExec exec;

  /// \brief Writing execution results into larger contiguous allocations
  /// requires that the kernel be able to write into sliced output ArrayData*,
  /// including sliced output validity bitmaps. Some kernel implementations may
  /// not be able to do this, so setting this to false disables this
  /// functionality.
  bool can_write_into_slices = true;

  // For scalar functions preallocated data and intersecting arg validity
  // bitmaps is a reasonable default
  NullHandling::type null_handling = NullHandling::INTERSECTION;
  MemAllocation::type mem_allocation = MemAllocation::PREALLOCATE;
};
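// --- Illustrative sketch (not part of the original header) ---
// A stateless ArrayKernelExec that negates int32 values, writing into the
// preallocated output (with MemAllocation::PREALLOCATE and
// NullHandling::INTERSECTION the validity bitmap is already handled). The
// ExecResult accessor name (array_span_mutable) is an assumption and may
// differ across Arrow versions.
inline Status ExampleNegateExec(KernelContext*, const ExecSpan& batch, ExecResult* out) {
  const ArraySpan& arg = batch[0].array;
  ArraySpan* out_span = out->array_span_mutable();
  const int32_t* in_values = arg.GetValues<int32_t>(1);
  int32_t* out_values = out_span->GetValues<int32_t>(1);
  for (int64_t i = 0; i < batch.length; ++i) {
    out_values[i] = -in_values[i];
  }
  return Status::OK();
}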
// ----------------------------------------------------------------------
// VectorKernel (for VectorFunction)

/// \brief Kernel data structure for implementations of VectorFunction. It
/// contains an optional finalizer function, the null handling and memory
/// pre-allocation preferences (which have different defaults from
/// ScalarKernel), and some other execution-related options.
struct ARROW_EXPORT VectorKernel : public Kernel {
  /// \brief See VectorKernel::finalize member for usage
  using FinalizeFunc = std::function<Status(KernelContext*, std::vector<Datum>*)>;

  /// \brief Function for executing a stateful VectorKernel against a
  /// ChunkedArray input. Does not need to be defined for all VectorKernels
  using ChunkedExec = Status (*)(KernelContext*, const ExecBatch&, Datum* out);

  VectorKernel() = default;

  VectorKernel(std::vector<InputType> in_types, OutputType out_type, ArrayKernelExec exec,
               KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR)
      : Kernel(std::move(in_types), std::move(out_type), std::move(init)),
        exec(exec),
        finalize(std::move(finalize)) {}

  VectorKernel(std::shared_ptr<KernelSignature> sig, ArrayKernelExec exec,
               KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR)
      : Kernel(std::move(sig), std::move(init)),
        exec(exec),
        finalize(std::move(finalize)) {}

  /// \brief Perform a single invocation of this kernel. Any required state is
  /// managed through the KernelContext.
  ArrayKernelExec exec;

  /// \brief Execute the kernel on a ChunkedArray. Does not need to be defined
  ChunkedExec exec_chunked = NULLPTR;

  /// \brief For VectorKernel, convert intermediate results into finalized
  /// results. Mutates input argument. Some kernels may accumulate state
  /// (example: hashing-related functions) through processing chunked inputs, and
  /// then need to attach some accumulated state to each of the outputs of
  /// processing each chunk of data.
  FinalizeFunc finalize;

  /// Since vector kernels are generally implemented rather differently from
  /// scalar/elementwise kernels (and they may not even yield arrays of the
  /// same size), we make the developer opt in to any memory preallocation
  /// rather than having to turn it off.
  NullHandling::type null_handling = NullHandling::COMPUTED_NO_PREALLOCATE;
  MemAllocation::type mem_allocation = MemAllocation::NO_PREALLOCATE;

  /// \brief Writing execution results into larger contiguous allocations
  /// requires that the kernel be able to write into sliced output ArrayData*,
  /// including sliced output validity bitmaps. Some kernel implementations may
  /// not be able to do this, so setting this to false disables this
  /// functionality.
  bool can_write_into_slices = true;

  /// Some vector kernels can do chunkwise execution using ExecSpanIterator,
  /// in some cases accumulating some state. Other kernels (like Take) need to
  /// be passed whole arrays and don't work on ChunkedArray inputs
  bool can_execute_chunkwise = true;

  /// Some kernels (like unique and value_counts) yield non-chunked output from
  /// chunked-array inputs. This option controls how the results are boxed when
  /// returned from ExecVectorFunction
  ///
  /// true -> ChunkedArray
  /// false -> Array
  bool output_chunked = true;
};
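// --- Illustrative sketch (not part of the original header) ---
// Configures a hypothetical "drop_nulls"-style vector kernel whose output
// length differs from its input, so preallocation and chunkwise execution
// are both disabled. The exec function passed in is assumed to exist.
inline VectorKernel MakeExampleVectorKernel(ArrayKernelExec exec) {
  VectorKernel kernel({InputType(Type::INT32)}, OutputType(int32()), exec);
  kernel.mem_allocation = MemAllocation::NO_PREALLOCATE;  // output size unknown
  kernel.can_execute_chunkwise = false;  // needs the whole array at once
  kernel.output_chunked = false;         // yields a single Array
  return kernel;
}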
// ----------------------------------------------------------------------
// ScalarAggregateKernel (for ScalarAggregateFunction)

using ScalarAggregateConsume = Status (*)(KernelContext*, const ExecSpan&);
using ScalarAggregateMerge = Status (*)(KernelContext*, KernelState&&, KernelState*);
// Finalize returns Datum to permit multiple return values
using ScalarAggregateFinalize = Status (*)(KernelContext*, Datum*);

/// \brief Kernel data structure for implementations of
/// ScalarAggregateFunction. The four necessary components of an aggregation
/// kernel are the init, consume, merge, and finalize functions.
///
/// * init: creates a new KernelState for a kernel.
/// * consume: processes an ExecSpan and updates the KernelState found in the
///   KernelContext.
/// * merge: combines one KernelState with another.
/// * finalize: produces the end result of the aggregation using the
///   KernelState in the KernelContext.
struct ARROW_EXPORT ScalarAggregateKernel : public Kernel {
  ScalarAggregateKernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
                        ScalarAggregateConsume consume, ScalarAggregateMerge merge,
                        ScalarAggregateFinalize finalize, const bool ordered)
      : Kernel(std::move(sig), std::move(init)),
        consume(consume),
        merge(merge),
        finalize(finalize),
        ordered(ordered) {}

  ScalarAggregateKernel(std::vector<InputType> in_types, OutputType out_type,
                        KernelInit init, ScalarAggregateConsume consume,
                        ScalarAggregateMerge merge, ScalarAggregateFinalize finalize,
                        const bool ordered)
      : ScalarAggregateKernel(
            KernelSignature::Make(std::move(in_types), std::move(out_type)),
            std::move(init), consume, merge, finalize, ordered) {}

  /// \brief Merge a vector of KernelStates into a single KernelState.
  /// The merged state will be returned and will be set on the KernelContext.
  static Result<std::unique_ptr<KernelState>> MergeAll(
      const ScalarAggregateKernel* kernel, KernelContext* ctx,
      std::vector<std::unique_ptr<KernelState>> states);

  ScalarAggregateConsume consume;
  ScalarAggregateMerge merge;
  ScalarAggregateFinalize finalize;
  /// \brief Whether this kernel requires ordering
  /// Some aggregations, such as "first", require some kind of input order. The
  /// order can be implicit, e.g. the order of the input data, or explicit, e.g.
  /// the ordering specified with a window aggregation.
  /// The caller of the aggregate kernel is responsible for passing data in some
  /// defined order to the kernel. The flag here is a way for the kernel to tell
  /// the caller that data passed to the kernel must arrive in a defined order.
  bool ordered = false;
};
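// --- Illustrative sketch (not part of the original header) ---
// Outlines the consume/merge/finalize lifecycle for a hypothetical int64
// "sum" aggregator; null handling is ignored for brevity. ExampleSumState
// and these function names are illustration-only assumptions; only
// KernelContext::state() and the typedefs above are taken from the header.
struct ExampleSumState : public KernelState {
  int64_t sum = 0;
};

inline Status ExampleSumConsume(KernelContext* ctx, const ExecSpan& batch) {
  auto* state = static_cast<ExampleSumState*>(ctx->state());
  const ArraySpan& arg = batch[0].array;
  const int64_t* values = arg.GetValues<int64_t>(1);
  for (int64_t i = 0; i < arg.length; ++i) state->sum += values[i];
  return Status::OK();
}

inline Status ExampleSumMerge(KernelContext*, KernelState&& src, KernelState* dst) {
  static_cast<ExampleSumState*>(dst)->sum += static_cast<ExampleSumState&>(src).sum;
  return Status::OK();
}

inline Status ExampleSumFinalize(KernelContext* ctx, Datum* out) {
  *out = Datum(static_cast<ExampleSumState*>(ctx->state())->sum);
  return Status::OK();
}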
// ----------------------------------------------------------------------
// HashAggregateKernel (for HashAggregateFunction)

using HashAggregateResize = Status (*)(KernelContext*, int64_t);
using HashAggregateConsume = Status (*)(KernelContext*, const ExecSpan&);
using HashAggregateMerge = Status (*)(KernelContext*, KernelState&&, const ArrayData&);

// Finalize returns Datum to permit multiple return values
using HashAggregateFinalize = Status (*)(KernelContext*, Datum*);

/// \brief Kernel data structure for implementations of
/// HashAggregateFunction. The five necessary components of an aggregation
/// kernel are the init, resize, consume, merge, and finalize functions.
///
/// * init: creates a new KernelState for a kernel.
/// * resize: ensure that the KernelState can accommodate the specified number of groups.
/// * consume: processes an ExecSpan (which includes the argument as well
///   as an array of group identifiers) and updates the KernelState found in the
///   KernelContext.
/// * merge: combines one KernelState with another.
/// * finalize: produces the end result of the aggregation using the
///   KernelState in the KernelContext.
struct ARROW_EXPORT HashAggregateKernel : public Kernel {
  HashAggregateKernel() = default;

  HashAggregateKernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
                      HashAggregateResize resize, HashAggregateConsume consume,
                      HashAggregateMerge merge, HashAggregateFinalize finalize,
                      const bool ordered)
      : Kernel(std::move(sig), std::move(init)),
        resize(resize),
        consume(consume),
        merge(merge),
        finalize(finalize),
        ordered(ordered) {}

  HashAggregateKernel(std::vector<InputType> in_types, OutputType out_type,
                      KernelInit init, HashAggregateConsume consume,
                      HashAggregateResize resize, HashAggregateMerge merge,
                      HashAggregateFinalize finalize, const bool ordered)
      : HashAggregateKernel(
            KernelSignature::Make(std::move(in_types), std::move(out_type)),
            std::move(init), resize, consume, merge, finalize, ordered) {}

  HashAggregateResize resize;
  HashAggregateConsume consume;
  HashAggregateMerge merge;
  HashAggregateFinalize finalize;
  /// @brief Whether this kernel requires ordering
  /// This is similar to ScalarAggregateKernel. See ScalarAggregateKernel
  /// for detailed documentation of this variable.
  bool ordered = false;
};

}  // namespace compute
}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/row/grouper.h
ADDED
@@ -0,0 +1,184 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>
#include <vector>

#include "arrow/compute/kernel.h"
#include "arrow/datum.h"
#include "arrow/result.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace compute {

/// \brief A segment
/// A segment group is a chunk of continuous rows that have the same segment key. (For
/// example, in ordered time series processing, the segment key can be "date", and a
/// segment group can be all the rows that belong to the same date.) A segment group can
/// span across multiple exec batches. A segment is a chunk of continuous rows that has
/// the same segment key within a given batch. When a segment group spans across batches,
/// it will have multiple segments. A segment never spans across batches. The segment
/// data structure only makes sense when used along with an exec batch.
struct ARROW_EXPORT Segment {
  /// \brief the offset into the batch where the segment starts
  int64_t offset;
  /// \brief the length of the segment
  int64_t length;
  /// \brief whether the segment may be extended by a next one
  bool is_open;
  /// \brief whether the segment extends a preceding one
  bool extends;
};

inline bool operator==(const Segment& segment1, const Segment& segment2) {
  return segment1.offset == segment2.offset && segment1.length == segment2.length &&
         segment1.is_open == segment2.is_open && segment1.extends == segment2.extends;
}
inline bool operator!=(const Segment& segment1, const Segment& segment2) {
  return !(segment1 == segment2);
}
/// \brief a helper class to divide a batch into segments of equal values
///
/// For example, given a batch with two columns:
///
/// A A
/// A A
/// A B
/// A B
/// A A
///
/// Then the batch could be divided into 3 segments. The first would be rows 0 & 1,
/// the second would be rows 2 & 3, and the third would be row 4.
///
/// Further, a segmenter keeps track of the last value seen. This allows it to calculate
/// segments which span batches. In our above example the last segment we emit would set
/// the "open" flag, which indicates whether the segment may extend into the next batch.
///
/// If the next call to the segmenter starts with `A A` then that segment would set the
/// "extends" flag, which indicates whether the segment continues the last open batch.
class ARROW_EXPORT RowSegmenter {
 public:
  virtual ~RowSegmenter() = default;

  /// \brief Construct a Segmenter which segments on the specified key types
  ///
  /// \param[in] key_types the specified key types
  /// \param[in] nullable_keys whether values of the specified keys may be null
  /// \param[in] ctx the execution context to use
  static Result<std::unique_ptr<RowSegmenter>> Make(
      const std::vector<TypeHolder>& key_types, bool nullable_keys, ExecContext* ctx);

  /// \brief Return the key types of this segmenter
  virtual const std::vector<TypeHolder>& key_types() const = 0;

  /// \brief Reset this segmenter
  ///
  /// A segmenter normally extends (see `Segment`) a segment from one batch to the next.
  /// If segment-extension is undesirable, for example when each batch is processed
  /// independently, then `Reset` should be invoked before processing the next batch.
  virtual Status Reset() = 0;

  /// \brief Get the next segment for the given batch starting from the given offset
  virtual Result<Segment> GetNextSegment(const ExecSpan& batch, int64_t offset) = 0;
};
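// --- Illustrative sketch (not part of the original header) ---
// Walks all segments of one batch; `segmenter` and `batch` are assumed to
// already exist. Each iteration yields one contiguous run of equal keys.
inline Status ExampleWalkSegments(RowSegmenter* segmenter, const ExecSpan& batch) {
  int64_t offset = 0;
  while (offset < batch.length) {
    ARROW_ASSIGN_OR_RAISE(Segment segment, segmenter->GetNextSegment(batch, offset));
    // ... process rows [segment.offset, segment.offset + segment.length) ...
    offset = segment.offset + segment.length;
  }
  return Status::OK();
}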
/// Consumes batches of keys and yields batches of the group ids.
class ARROW_EXPORT Grouper {
 public:
  virtual ~Grouper() = default;

  /// Construct a Grouper which receives the specified key types
  static Result<std::unique_ptr<Grouper>> Make(const std::vector<TypeHolder>& key_types,
                                               ExecContext* ctx = default_exec_context());

  /// Consume a batch of keys, producing the corresponding group ids as an integer array,
  /// over a slice defined by an offset and length, which defaults to the batch length.
  /// Currently only uint32 indices will be produced; eventually the bit width will only
  /// be as wide as necessary.
  virtual Result<Datum> Consume(const ExecSpan& batch, int64_t offset = 0,
                                int64_t length = -1) = 0;

  /// Get current unique keys. May be called multiple times.
  virtual Result<ExecBatch> GetUniques() = 0;

  /// Get the current number of groups.
  virtual uint32_t num_groups() const = 0;

  /// \brief Assemble lists of indices of identical elements.
  ///
  /// \param[in] ids An unsigned, all-valid integral array which will be
  ///            used as grouping criteria.
  /// \param[in] num_groups An upper bound for the elements of ids
  /// \param[in] ctx Execution context to use during the operation
  /// \return A num_groups-long ListArray where the slot at i contains a
  ///         list of indices where i appears in ids.
  ///
  ///   MakeGroupings([
  ///       2,
  ///       2,
  ///       5,
  ///       5,
  ///       2,
  ///       3
  ///   ], 8) == [
  ///       [],
  ///       [],
  ///       [0, 1, 4],
  ///       [5],
  ///       [],
  ///       [2, 3],
  ///       [],
  ///       []
  ///   ]
  static Result<std::shared_ptr<ListArray>> MakeGroupings(
      const UInt32Array& ids, uint32_t num_groups,
      ExecContext* ctx = default_exec_context());

  /// \brief Produce a ListArray whose slots are selections of `array` which correspond to
  /// the provided groupings.
  ///
  /// For example,
  ///   ApplyGroupings([
  ///       [],
  ///       [],
  ///       [0, 1, 4],
  ///       [5],
  ///       [],
  ///       [2, 3],
  ///       [],
  ///       []
  ///   ], [2, 2, 5, 5, 2, 3]) == [
  ///       [],
  ///       [],
  ///       [2, 2, 2],
  ///       [3],
  ///       [],
  ///       [5, 5],
  ///       [],
  ///       []
  ///   ]
  static Result<std::shared_ptr<ListArray>> ApplyGroupings(
      const ListArray& groupings, const Array& array,
      ExecContext* ctx = default_exec_context());
};
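// --- Illustrative sketch (not part of the original header) ---
// Maps a batch of keys to group ids, then gathers input rows per group.
// `key_batch` is an assumed ExecSpan over the key columns; ExecSpan::GetTypes()
// and Datum::array_as<T>() are assumptions about the surrounding API, and a
// real translation unit would likely also need arrow/array.h.
inline Status ExampleGroupRows(const ExecSpan& key_batch, const Array& values) {
  ARROW_ASSIGN_OR_RAISE(auto grouper, Grouper::Make(key_batch.GetTypes()));
  ARROW_ASSIGN_OR_RAISE(Datum ids, grouper->Consume(key_batch));
  ARROW_ASSIGN_OR_RAISE(
      auto groupings,
      Grouper::MakeGroupings(*ids.array_as<UInt32Array>(), grouper->num_groups()));
  ARROW_ASSIGN_OR_RAISE(auto grouped_values,
                        Grouper::ApplyGroupings(*groupings, values));
  // grouped_values is a ListArray with one list of `values` entries per group.
  return Status::OK();
}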
}  // namespace compute
}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/compute/util.h
ADDED
@@ -0,0 +1,294 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <atomic>
#include <cstdint>
#include <cstring>  // std::memset, used by TempVectorStack::Init below
#include <optional>
#include <thread>
#include <unordered_map>
#include <vector>

#include "arrow/buffer.h"
#include "arrow/compute/expression.h"
#include "arrow/compute/type_fwd.h"
#include "arrow/memory_pool.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/util/bit_util.h"
#include "arrow/util/cpu_info.h"
#include "arrow/util/mutex.h"
#include "arrow/util/thread_pool.h"
#include "arrow/util/type_fwd.h"

#if defined(__clang__) || defined(__GNUC__)
#define BYTESWAP(x) __builtin_bswap64(x)
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((-n) & 31)))
#define ROTL64(x, n) (((x) << (n)) | ((x) >> ((-n) & 63)))
#define PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#elif defined(_MSC_VER)
#include <intrin.h>
#define BYTESWAP(x) _byteswap_uint64(x)
#define ROTL(x, n) _rotl((x), (n))
#define ROTL64(x, n) _rotl64((x), (n))
#if defined(_M_X64) || defined(_M_I86)
#include <mmintrin.h>  // https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx
#define PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
#else
#define PREFETCH(ptr) (void)(ptr) /* disabled */
#endif
#endif

namespace arrow {
namespace util {

// Some platforms typedef int64_t as long int instead of long long int,
// which breaks the _mm256_i64gather_epi64 and _mm256_i32gather_epi64 intrinsics
// which need long long.
// We use the cast to the type below in these intrinsics to make the code
// compile in all cases.
//
using int64_for_gather_t = const long long int;  // NOLINT runtime-int

// All MiniBatch... classes use TempVectorStack for vector allocations and can
// only work with vectors up to 1024 elements.
//
// They should only be allocated on the stack to guarantee the right sequence
// of allocation and deallocation of vectors from TempVectorStack.
//
class MiniBatch {
 public:
  static constexpr int kLogMiniBatchLength = 10;
  static constexpr int kMiniBatchLength = 1 << kLogMiniBatchLength;
};

/// Storage used to allocate temporary vectors of a batch size.
/// Temporary vectors should resemble allocating temporary variables on the stack
/// but in the context of vectorized processing where we need to store a vector of
/// temporaries instead of a single value.
class ARROW_EXPORT TempVectorStack {
  template <typename>
  friend class TempVectorHolder;

 public:
  Status Init(MemoryPool* pool, int64_t size) {
    num_vectors_ = 0;
    top_ = 0;
    buffer_size_ = EstimatedAllocationSize(size);
    ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateResizableBuffer(size, pool));
    // Ensure later operations don't accidentally read uninitialized memory.
    std::memset(buffer->mutable_data(), 0xFF, size);
    buffer_ = std::move(buffer);
    return Status::OK();
  }

 private:
  static int64_t EstimatedAllocationSize(int64_t size) {
    return PaddedAllocationSize(size) + 2 * sizeof(uint64_t);
  }

  static int64_t PaddedAllocationSize(int64_t num_bytes) {
    // Round up allocation size to multiple of 8 bytes
    // to avoid returning temp vectors with unaligned address.
    //
    // Also add padding at the end to facilitate loads and stores
    // using SIMD when number of vector elements is not divisible
    // by the number of SIMD lanes.
    //
    return ::arrow::bit_util::RoundUp(num_bytes, sizeof(int64_t)) + kPadding;
  }
  void alloc(uint32_t num_bytes, uint8_t** data, int* id);
  void release(int id, uint32_t num_bytes);
  static constexpr uint64_t kGuard1 = 0x3141592653589793ULL;
  static constexpr uint64_t kGuard2 = 0x0577215664901532ULL;
  static constexpr int64_t kPadding = 64;
  int num_vectors_;
  int64_t top_;
  std::unique_ptr<Buffer> buffer_;
  int64_t buffer_size_;
};

template <typename T>
class TempVectorHolder {
  friend class TempVectorStack;

 public:
  ~TempVectorHolder() { stack_->release(id_, num_elements_ * sizeof(T)); }
  T* mutable_data() { return reinterpret_cast<T*>(data_); }
  TempVectorHolder(TempVectorStack* stack, uint32_t num_elements) {
    stack_ = stack;
    num_elements_ = num_elements;
    stack_->alloc(num_elements * sizeof(T), &data_, &id_);
  }

 private:
  TempVectorStack* stack_;
  uint8_t* data_;
  int id_;
  uint32_t num_elements_;
};
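// --- Illustrative sketch (not part of the original header) ---
// Allocates a scratch vector of uint16_t from a TempVectorStack. Holders
// must be released in reverse order of allocation (stack discipline),
// which scoping enforces naturally.
inline Status ExampleTempVector(MemoryPool* pool) {
  TempVectorStack stack;
  ARROW_RETURN_NOT_OK(stack.Init(pool, 16 * 1024));
  {
    TempVectorHolder<uint16_t> ids(&stack, MiniBatch::kMiniBatchLength);
    uint16_t* data = ids.mutable_data();
    data[0] = 42;  // ... use the scratch vector ...
  }  // released here, before the stack goes away
  return Status::OK();
}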
namespace bit_util {

ARROW_EXPORT void bits_to_indexes(int bit_to_search, int64_t hardware_flags,
                                  const int num_bits, const uint8_t* bits,
                                  int* num_indexes, uint16_t* indexes,
                                  int bit_offset = 0);

ARROW_EXPORT void bits_filter_indexes(int bit_to_search, int64_t hardware_flags,
                                      const int num_bits, const uint8_t* bits,
                                      const uint16_t* input_indexes, int* num_indexes,
                                      uint16_t* indexes, int bit_offset = 0);

// Input and output indexes may be pointing to the same data (in-place filtering).
ARROW_EXPORT void bits_split_indexes(int64_t hardware_flags, const int num_bits,
                                     const uint8_t* bits, int* num_indexes_bit0,
                                     uint16_t* indexes_bit0, uint16_t* indexes_bit1,
                                     int bit_offset = 0);

// Bit 1 is replaced with byte 0xFF.
ARROW_EXPORT void bits_to_bytes(int64_t hardware_flags, const int num_bits,
                                const uint8_t* bits, uint8_t* bytes, int bit_offset = 0);

// Return highest bit of each byte.
ARROW_EXPORT void bytes_to_bits(int64_t hardware_flags, const int num_bits,
                                const uint8_t* bytes, uint8_t* bits, int bit_offset = 0);

ARROW_EXPORT bool are_all_bytes_zero(int64_t hardware_flags, const uint8_t* bytes,
                                     uint32_t num_bytes);

#if defined(ARROW_HAVE_RUNTIME_AVX2) && defined(ARROW_HAVE_RUNTIME_BMI2)
// The functions below use BMI2 instructions, be careful before calling!

namespace avx2 {
ARROW_EXPORT void bits_filter_indexes_avx2(int bit_to_search, const int num_bits,
                                           const uint8_t* bits,
                                           const uint16_t* input_indexes,
                                           int* num_indexes, uint16_t* indexes);
ARROW_EXPORT void bits_to_indexes_avx2(int bit_to_search, const int num_bits,
                                       const uint8_t* bits, int* num_indexes,
                                       uint16_t* indexes, uint16_t base_index = 0);
ARROW_EXPORT void bits_to_bytes_avx2(const int num_bits, const uint8_t* bits,
                                     uint8_t* bytes);
ARROW_EXPORT void bytes_to_bits_avx2(const int num_bits, const uint8_t* bytes,
                                     uint8_t* bits);
ARROW_EXPORT bool are_all_bytes_zero_avx2(const uint8_t* bytes, uint32_t num_bytes);
}  // namespace avx2

#endif

}  // namespace bit_util
}  // namespace util

namespace compute {

/// Modify an Expression with pre-order and post-order visitation.
/// `pre` will be invoked on each Expression. `pre` will visit Calls before their
/// arguments, `post_call` will visit Calls (and no other Expressions) after their
/// arguments. Visitors should return the Identical expression to indicate no change;
/// this will prevent unnecessary construction in the common case where a modification
/// is not possible/necessary/...
///
/// If an argument was modified, `post_call` visits a reconstructed Call with the modified
/// arguments but also receives a pointer to the unmodified Expression as a second
/// argument. If no arguments were modified the unmodified Expression* will be nullptr.
template <typename PreVisit, typename PostVisitCall>
Result<Expression> ModifyExpression(Expression expr, const PreVisit& pre,
                                    const PostVisitCall& post_call) {
  ARROW_ASSIGN_OR_RAISE(expr, Result<Expression>(pre(std::move(expr))));

  auto call = expr.call();
  if (!call) return expr;

  bool at_least_one_modified = false;
  std::vector<Expression> modified_arguments;

  for (size_t i = 0; i < call->arguments.size(); ++i) {
    ARROW_ASSIGN_OR_RAISE(auto modified_argument,
                          ModifyExpression(call->arguments[i], pre, post_call));

    if (Identical(modified_argument, call->arguments[i])) {
      continue;
    }

    if (!at_least_one_modified) {
      modified_arguments = call->arguments;
      at_least_one_modified = true;
    }

    modified_arguments[i] = std::move(modified_argument);
  }

  if (at_least_one_modified) {
    // reconstruct the call expression with the modified arguments
    auto modified_call = *call;
    modified_call.arguments = std::move(modified_arguments);
    return post_call(Expression(std::move(modified_call)), &expr);
  }

  return post_call(std::move(expr), NULLPTR);
}
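// --- Illustrative sketch (not part of the original header) ---
// A no-op pre visitor plus a post visitor that merely observes each call;
// returning the expression unchanged lets ModifyExpression skip rebuilding
// Call nodes whose arguments were not modified.
inline Result<Expression> ExampleVisitExpression(Expression expr) {
  return ModifyExpression(
      std::move(expr),
      /*pre=*/[](Expression e) { return e; },
      /*post_call=*/[](Expression e, const Expression* old_expr) {
        // old_expr is nullptr when no argument of the call was modified.
        (void)old_expr;
        return e;
      });
}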
// Helper class to calculate the modified number of rows to process using SIMD.
//
// Some array elements at the end will be skipped in order to avoid buffer
// overrun, when doing memory loads and stores using larger word size than a
// single array element.
//
class TailSkipForSIMD {
 public:
  static int64_t FixBitAccess(int num_bytes_accessed_together, int64_t num_rows,
                              int bit_offset) {
    int64_t num_bytes = bit_util::BytesForBits(num_rows + bit_offset);
    int64_t num_bytes_safe =
        std::max(static_cast<int64_t>(0LL), num_bytes - num_bytes_accessed_together + 1);
    int64_t num_rows_safe =
        std::max(static_cast<int64_t>(0LL), 8 * num_bytes_safe - bit_offset);
    return std::min(num_rows_safe, num_rows);
  }
  static int64_t FixBinaryAccess(int num_bytes_accessed_together, int64_t num_rows,
                                 int64_t length) {
    int64_t num_rows_to_skip = bit_util::CeilDiv(length, num_bytes_accessed_together);
    int64_t num_rows_safe =
        std::max(static_cast<int64_t>(0LL), num_rows - num_rows_to_skip);
    return num_rows_safe;
  }
  static int64_t FixVarBinaryAccess(int num_bytes_accessed_together, int64_t num_rows,
                                    const uint32_t* offsets) {
    // Do not process rows that could read past the end of the buffer using N
    // byte loads/stores.
    //
    int64_t num_rows_safe = num_rows;
    while (num_rows_safe > 0 &&
           offsets[num_rows_safe] + num_bytes_accessed_together > offsets[num_rows]) {
      --num_rows_safe;
    }
    return num_rows_safe;
  }
  static int FixSelection(int64_t num_rows_safe, int num_selected,
                          const uint16_t* selection) {
    int num_selected_safe = num_selected;
    while (num_selected_safe > 0 && selection[num_selected_safe - 1] >= num_rows_safe) {
      --num_selected_safe;
    }
    return num_selected_safe;
  }
};
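// --- Illustrative worked example (not part of the original header) ---
// With 100 rows, bit_offset 0, and 8-byte accesses: BytesForBits(100) = 13
// bytes, of which only the first 13 - 8 + 1 = 6 can start a full 8-byte load,
// i.e. 48 bits, so FixBitAccess(8, 100, 0) == 48. The remaining 52 rows must
// be handled by a scalar tail loop.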
}  // namespace compute
}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h
ADDED
@@ -0,0 +1,36 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <memory>

#include "arrow/csv/options.h"
#include "arrow/status.h"
#include "arrow/util/delimiting.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace csv {

ARROW_EXPORT
std::unique_ptr<Chunker> MakeChunker(const ParseOptions& options);
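// --- Illustrative sketch (not part of the original header) ---
// Builds a chunker from default parse options; the Chunker type itself is
// declared in arrow/util/delimiting.h (included above).
inline std::unique_ptr<Chunker> ExampleMakeChunker() {
  ParseOptions options = ParseOptions::Defaults();
  options.delimiter = ';';  // customize before constructing the chunker
  return MakeChunker(options);
}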
}  // namespace csv
}  // namespace arrow
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h
ADDED
@@ -0,0 +1,82 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <memory>

#include "arrow/csv/options.h"
#include "arrow/result.h"
#include "arrow/type_fwd.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace csv {

class BlockParser;

class ARROW_EXPORT Converter {
 public:
  Converter(const std::shared_ptr<DataType>& type, const ConvertOptions& options,
            MemoryPool* pool);
  virtual ~Converter() = default;

  virtual Result<std::shared_ptr<Array>> Convert(const BlockParser& parser,
                                                 int32_t col_index) = 0;

  std::shared_ptr<DataType> type() const { return type_; }

  // Create a Converter for the given data type
  static Result<std::shared_ptr<Converter>> Make(
      const std::shared_ptr<DataType>& type, const ConvertOptions& options,
      MemoryPool* pool = default_memory_pool());

 protected:
  ARROW_DISALLOW_COPY_AND_ASSIGN(Converter);

  virtual Status Initialize() = 0;

  // CAUTION: ConvertOptions can grow large (if it customizes hundreds or
  // thousands of columns), so avoid copying it in each Converter.
  const ConvertOptions& options_;
  MemoryPool* pool_;
  std::shared_ptr<DataType> type_;
};
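// --- Illustrative sketch (not part of the original header) ---
// Creates a converter for string columns and applies it to one column of a
// parsed block; `parser` is an assumed, already-populated BlockParser.
inline Result<std::shared_ptr<Array>> ExampleConvertColumn(const BlockParser& parser,
                                                           int32_t col_index) {
  ARROW_ASSIGN_OR_RAISE(auto converter,
                        Converter::Make(utf8(), ConvertOptions::Defaults()));
  return converter->Convert(parser, col_index);
}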
class ARROW_EXPORT DictionaryConverter : public Converter {
|
63 |
+
public:
|
64 |
+
DictionaryConverter(const std::shared_ptr<DataType>& value_type,
|
65 |
+
const ConvertOptions& options, MemoryPool* pool);
|
66 |
+
|
67 |
+
// If the dictionary length goes above this value, conversion will fail
|
68 |
+
// with Status::IndexError.
|
69 |
+
virtual void SetMaxCardinality(int32_t max_length) = 0;
|
70 |
+
|
71 |
+
// Create a Converter for the given dictionary value type.
|
72 |
+
// The dictionary index type will always be Int32.
|
73 |
+
static Result<std::shared_ptr<DictionaryConverter>> Make(
|
74 |
+
const std::shared_ptr<DataType>& value_type, const ConvertOptions& options,
|
75 |
+
MemoryPool* pool = default_memory_pool());
|
76 |
+
|
77 |
+
protected:
|
78 |
+
std::shared_ptr<DataType> value_type_;
|
79 |
+
};
|
80 |
+
|
81 |
+
} // namespace csv
|
82 |
+
} // namespace arrow
|
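Note that each `Converter` stores its `ConvertOptions` by reference (per the CAUTION comment above), so the options object must outlive the converter. A minimal sketch of instantiating a converter through the factory; the `main` scaffolding is illustrative, not part of the header:

```cpp
#include <iostream>

#include "arrow/api.h"
#include "arrow/csv/converter.h"
#include "arrow/csv/options.h"

int main() {
  // The options object is held by reference inside the Converter, so it must
  // stay alive at least as long as the converter itself.
  arrow::csv::ConvertOptions options = arrow::csv::ConvertOptions::Defaults();

  // Ask the factory for a converter that parses CSV cells into int64 values.
  arrow::Result<std::shared_ptr<arrow::csv::Converter>> maybe_converter =
      arrow::csv::Converter::Make(arrow::int64(), options);
  if (!maybe_converter.ok()) {
    std::cerr << maybe_converter.status().ToString() << std::endl;
    return 1;
  }
  std::cout << "converter type: " << (*maybe_converter)->type()->ToString()
            << std::endl;
  return 0;
}
```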
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h
ADDED
@@ -0,0 +1,55 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <functional>
#include <string_view>

namespace arrow {
namespace csv {

/// \brief Description of an invalid row
struct InvalidRow {
  /// \brief Number of columns expected in the row
  int32_t expected_columns;
  /// \brief Actual number of columns found in the row
  int32_t actual_columns;
  /// \brief The physical row number if known, or -1
  ///
  /// This number is one-based and also accounts for non-data rows (such as
  /// CSV header rows).
  int64_t number;
  /// \brief View of the entire row. Memory will be freed after callback returns
  const std::string_view text;
};

/// \brief Result returned by an InvalidRowHandler
enum class InvalidRowResult {
  // Generate an error describing this row
  Error,
  // Skip over this row
  Skip
};

/// \brief Callback for handling a row with an invalid number of columns while parsing
/// \return result indicating whether an error should be returned from the parser
/// or the row should be skipped
using InvalidRowHandler = std::function<InvalidRowResult(const InvalidRow&)>;

}  // namespace csv
}  // namespace arrow
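An `InvalidRowHandler` is typically installed on `ParseOptions::invalid_row_handler` (declared in `options.h` below). A minimal sketch of a handler that logs mismatched rows and skips them; `MakeLenientParseOptions` is a hypothetical helper name and the log format is illustrative:

```cpp
#include <iostream>

#include "arrow/csv/invalid_row.h"
#include "arrow/csv/options.h"

arrow::csv::ParseOptions MakeLenientParseOptions() {
  arrow::csv::ParseOptions parse_options = arrow::csv::ParseOptions::Defaults();
  parse_options.invalid_row_handler =
      [](const arrow::csv::InvalidRow& row) -> arrow::csv::InvalidRowResult {
    // Report the column-count mismatch, then tell the parser to drop the row.
    std::cerr << "row " << row.number << ": expected " << row.expected_columns
              << " columns, got " << row.actual_columns << std::endl;
    return arrow::csv::InvalidRowResult::Skip;
  };
  return parse_options;
}
```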
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h
ADDED
@@ -0,0 +1,220 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "arrow/csv/invalid_row.h"
#include "arrow/csv/type_fwd.h"
#include "arrow/io/interfaces.h"
#include "arrow/status.h"
#include "arrow/util/visibility.h"

namespace arrow {

class DataType;
class TimestampParser;

namespace csv {

// Silly workaround for https://github.com/michaeljones/breathe/issues/453
constexpr char kDefaultEscapeChar = '\\';

struct ARROW_EXPORT ParseOptions {
  // Parsing options

  /// Field delimiter
  char delimiter = ',';
  /// Whether quoting is used
  bool quoting = true;
  /// Quoting character (if `quoting` is true)
  char quote_char = '"';
  /// Whether a quote inside a value is double-quoted
  bool double_quote = true;
  /// Whether escaping is used
  bool escaping = false;
  /// Escaping character (if `escaping` is true)
  char escape_char = kDefaultEscapeChar;
  /// Whether values are allowed to contain CR (0x0d) and LF (0x0a) characters
  bool newlines_in_values = false;
  /// Whether empty lines are ignored. If false, an empty line represents
  /// a single empty value (assuming a one-column CSV file).
  bool ignore_empty_lines = true;
  /// A handler function for rows which do not have the correct number of columns
  InvalidRowHandler invalid_row_handler;

  /// Create parsing options with default values
  static ParseOptions Defaults();

  /// \brief Test that all set options are valid
  Status Validate() const;
};

struct ARROW_EXPORT ConvertOptions {
  // Conversion options

  /// Whether to check UTF8 validity of string columns
  bool check_utf8 = true;
  /// Optional per-column types (disabling type inference on those columns)
  std::unordered_map<std::string, std::shared_ptr<DataType>> column_types;
  /// Recognized spellings for null values
  std::vector<std::string> null_values;
  /// Recognized spellings for boolean true values
  std::vector<std::string> true_values;
  /// Recognized spellings for boolean false values
  std::vector<std::string> false_values;

  /// Whether string / binary columns can have null values.
  ///
  /// If true, then strings in "null_values" are considered null for string columns.
  /// If false, then all strings are valid string values.
  bool strings_can_be_null = false;

  /// Whether quoted values can be null.
  ///
  /// If true, then strings in "null_values" are also considered null when they
  /// appear quoted in the CSV file. Otherwise, quoted values are never considered null.
  bool quoted_strings_can_be_null = true;

  /// Whether to try to automatically dict-encode string / binary data.
  /// If true, then when type inference detects a string or binary column,
  /// it is dict-encoded up to `auto_dict_max_cardinality` distinct values
  /// (per chunk), after which it switches to regular encoding.
  ///
  /// This setting is ignored for non-inferred columns (those in `column_types`).
  bool auto_dict_encode = false;
  int32_t auto_dict_max_cardinality = 50;

  /// Decimal point character for floating-point and decimal data
  char decimal_point = '.';

  // XXX Should we have a separate FilterOptions?

  /// If non-empty, indicates the names of columns from the CSV file that should
  /// be actually read and converted (in the vector's order).
  /// Columns not in this vector will be ignored.
  std::vector<std::string> include_columns;
  /// If false, columns in `include_columns` but not in the CSV file will error out.
  /// If true, columns in `include_columns` but not in the CSV file will produce
  /// a column of nulls (whose type is selected using `column_types`,
  /// or null by default).
  /// This option is ignored if `include_columns` is empty.
  bool include_missing_columns = false;

  /// User-defined timestamp parsers, using the virtual parser interface in
  /// arrow/util/value_parsing.h. More than one parser can be specified, and
  /// the CSV conversion logic will try parsing values starting from the
  /// beginning of this vector. If no parsers are specified, we use the default
  /// built-in ISO-8601 parser.
  std::vector<std::shared_ptr<TimestampParser>> timestamp_parsers;

  /// Create conversion options with default values, including conventional
  /// values for `null_values`, `true_values` and `false_values`
  static ConvertOptions Defaults();

  /// \brief Test that all set options are valid
  Status Validate() const;
};

struct ARROW_EXPORT ReadOptions {
  // Reader options

  /// Whether to use the global CPU thread pool
  bool use_threads = true;

  /// \brief Block size we request from the IO layer.
  ///
  /// This will determine multi-threading granularity as well as
  /// the size of individual record batches.
  /// Minimum valid value for block size is 1
  int32_t block_size = 1 << 20;  // 1 MB

  /// Number of header rows to skip (not including the row of column names, if any)
  int32_t skip_rows = 0;

  /// Number of rows to skip after the column names are read, if any
  int32_t skip_rows_after_names = 0;

  /// Column names for the target table.
  /// If empty, fall back on autogenerate_column_names.
  std::vector<std::string> column_names;

  /// Whether to autogenerate column names if `column_names` is empty.
  /// If true, column names will be of the form "f0", "f1"...
  /// If false, column names will be read from the first CSV row after `skip_rows`.
  bool autogenerate_column_names = false;

  /// Create read options with default values
  static ReadOptions Defaults();

  /// \brief Test that all set options are valid
  Status Validate() const;
};

/// \brief Quoting style for CSV writing
enum class ARROW_EXPORT QuotingStyle {
  /// Only enclose values in quotes which need them, because their CSV rendering can
  /// contain quotes itself (e.g. strings or binary values)
  Needed,
  /// Enclose all valid values in quotes. Nulls are not quoted. May cause readers to
  /// interpret all values as strings if schema is inferred.
  AllValid,
  /// Do not enclose any values in quotes. Prevents values from containing quotes ("),
  /// cell delimiters (,) or line endings (\\r, \\n), (following RFC4180). If values
  /// contain these characters, an error is caused when attempting to write.
  None
};

struct ARROW_EXPORT WriteOptions {
  /// Whether to write an initial header line with column names
  bool include_header = true;

  /// \brief Maximum number of rows processed at a time
  ///
  /// The CSV writer converts and writes data in batches of N rows.
  /// This number can impact performance.
  int32_t batch_size = 1024;

  /// Field delimiter
  char delimiter = ',';

  /// \brief The string to write for null values. Quotes are not allowed in this string.
  std::string null_string;

  /// \brief IO context for writing.
  io::IOContext io_context;

  /// \brief The end of line character to use for ending rows
  std::string eol = "\n";

  /// \brief Quoting style
  QuotingStyle quoting_style = QuotingStyle::Needed;

  /// Create write options with default values
  static WriteOptions Defaults();

  /// \brief Test that all set options are valid
  Status Validate() const;
};

}  // namespace csv
}  // namespace arrow
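The three reader-side structs split responsibilities: `ReadOptions` controls IO and batching, `ParseOptions` the CSV dialect, and `ConvertOptions` the typing of parsed cells. A minimal sketch of configuring them together for a semicolon-delimited file; `CsvReaderConfig`, `MakeSemicolonConfig`, and the "price" column are hypothetical names used only for illustration:

```cpp
#include "arrow/api.h"
#include "arrow/csv/options.h"

// Hypothetical bundle of the three option structs the CSV reader consumes.
struct CsvReaderConfig {
  arrow::csv::ReadOptions read_options;
  arrow::csv::ParseOptions parse_options;
  arrow::csv::ConvertOptions convert_options;
};

CsvReaderConfig MakeSemicolonConfig() {
  CsvReaderConfig config;
  config.read_options = arrow::csv::ReadOptions::Defaults();
  config.read_options.block_size = 4 << 20;  // 4 MB blocks -> larger record batches

  config.parse_options = arrow::csv::ParseOptions::Defaults();
  config.parse_options.delimiter = ';';

  config.convert_options = arrow::csv::ConvertOptions::Defaults();
  // Pin one column's type instead of relying on inference.
  config.convert_options.column_types["price"] = arrow::float64();
  return config;
}
```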
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h
ADDED
@@ -0,0 +1,112 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>

#include "arrow/csv/options.h"  // IWYU pragma: keep
#include "arrow/io/interfaces.h"
#include "arrow/record_batch.h"
#include "arrow/result.h"
#include "arrow/type.h"
#include "arrow/type_fwd.h"
#include "arrow/util/future.h"
#include "arrow/util/thread_pool.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace io {
class InputStream;
}  // namespace io

namespace csv {

/// A class that reads an entire CSV file into an Arrow Table
class ARROW_EXPORT TableReader {
 public:
  virtual ~TableReader() = default;

  /// Read the entire CSV file and convert it to an Arrow Table
  virtual Result<std::shared_ptr<Table>> Read() = 0;
  /// Read the entire CSV file and convert it to an Arrow Table
  virtual Future<std::shared_ptr<Table>> ReadAsync() = 0;

  /// Create a TableReader instance
  static Result<std::shared_ptr<TableReader>> Make(io::IOContext io_context,
                                                   std::shared_ptr<io::InputStream> input,
                                                   const ReadOptions&,
                                                   const ParseOptions&,
                                                   const ConvertOptions&);
};

/// \brief A class that reads a CSV file incrementally
///
/// Caveats:
/// - For now, this is always single-threaded (regardless of `ReadOptions::use_threads`).
/// - Type inference is done on the first block and types are frozen afterwards;
///   to make sure the right data types are inferred, either set
///   `ReadOptions::block_size` to a large enough value, or use
///   `ConvertOptions::column_types` to set the desired data types explicitly.
class ARROW_EXPORT StreamingReader : public RecordBatchReader {
 public:
  virtual ~StreamingReader() = default;

  virtual Future<std::shared_ptr<RecordBatch>> ReadNextAsync() = 0;

  /// \brief Return the number of bytes which have been read and processed
  ///
  /// The returned number includes CSV bytes which the StreamingReader has
  /// finished processing, but not bytes for which some processing (e.g.
  /// CSV parsing or conversion to Arrow layout) is still ongoing.
  ///
  /// Furthermore, the following rules apply:
  /// - bytes skipped by `ReadOptions.skip_rows` are counted as being read before
  ///   any records are returned.
  /// - bytes read while parsing the header are counted as being read before any
  ///   records are returned.
  /// - bytes skipped by `ReadOptions.skip_rows_after_names` are counted after the
  ///   first batch is returned.
  virtual int64_t bytes_read() const = 0;

  /// Create a StreamingReader instance
  ///
  /// This involves some I/O, as the first batch must be loaded during the creation
  /// process, so it is returned as a future.
  ///
  /// Currently, the StreamingReader is not async-reentrant and does not do any fan-out
  /// parsing (see ARROW-11889)
  static Future<std::shared_ptr<StreamingReader>> MakeAsync(
      io::IOContext io_context, std::shared_ptr<io::InputStream> input,
      arrow::internal::Executor* cpu_executor, const ReadOptions&, const ParseOptions&,
      const ConvertOptions&);

  static Result<std::shared_ptr<StreamingReader>> Make(
      io::IOContext io_context, std::shared_ptr<io::InputStream> input,
      const ReadOptions&, const ParseOptions&, const ConvertOptions&);
};

/// \brief Count the logical rows of data in a CSV file (i.e. the
/// number of rows you would get if you read the file into a table).
ARROW_EXPORT
Future<int64_t> CountRowsAsync(io::IOContext io_context,
                               std::shared_ptr<io::InputStream> input,
                               arrow::internal::Executor* cpu_executor,
                               const ReadOptions&, const ParseOptions&);

}  // namespace csv
}  // namespace arrow
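A minimal sketch of the eager path, reading a whole file into a `Table` via `TableReader::Make`; `ReadCsvFile` is a hypothetical helper and `data.csv` a placeholder path:

```cpp
#include <iostream>
#include <memory>
#include <string>

#include "arrow/api.h"
#include "arrow/csv/api.h"
#include "arrow/io/api.h"

arrow::Result<std::shared_ptr<arrow::Table>> ReadCsvFile(const std::string& path) {
  // Open the file and hand it to the reader as a generic InputStream.
  ARROW_ASSIGN_OR_RAISE(auto input, arrow::io::ReadableFile::Open(path));

  ARROW_ASSIGN_OR_RAISE(
      auto reader,
      arrow::csv::TableReader::Make(arrow::io::default_io_context(), input,
                                    arrow::csv::ReadOptions::Defaults(),
                                    arrow::csv::ParseOptions::Defaults(),
                                    arrow::csv::ConvertOptions::Defaults()));
  // Reads and parses the whole file (in parallel when use_threads is true).
  return reader->Read();
}

int main() {
  auto maybe_table = ReadCsvFile("data.csv");  // placeholder path
  if (!maybe_table.ok()) {
    std::cerr << maybe_table.status().ToString() << std::endl;
    return 1;
  }
  std::cout << "rows: " << (*maybe_table)->num_rows() << std::endl;
  return 0;
}
```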
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h
ADDED
@@ -0,0 +1,55 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <functional>
#include <memory>
#include <string>
#include <vector>

#include "arrow/csv/parser.h"
#include "arrow/testing/visibility.h"

namespace arrow {
namespace csv {

ARROW_TESTING_EXPORT
std::string MakeCSVData(std::vector<std::string> lines);

// Make a BlockParser from a vector of lines representing a CSV file
ARROW_TESTING_EXPORT
void MakeCSVParser(std::vector<std::string> lines, ParseOptions options, int32_t num_cols,
                   MemoryPool* pool, std::shared_ptr<BlockParser>* out);

ARROW_TESTING_EXPORT
void MakeCSVParser(std::vector<std::string> lines, ParseOptions options,
                   std::shared_ptr<BlockParser>* out);

ARROW_TESTING_EXPORT
void MakeCSVParser(std::vector<std::string> lines, std::shared_ptr<BlockParser>* out);

// Make a BlockParser from a vector of strings representing a single CSV column
ARROW_TESTING_EXPORT
void MakeColumnParser(std::vector<std::string> items, std::shared_ptr<BlockParser>* out);

ARROW_TESTING_EXPORT
Result<std::shared_ptr<Buffer>> MakeSampleCsvBuffer(
    size_t num_rows, std::function<bool(size_t row_num)> is_valid = {});

}  // namespace csv
}  // namespace arrow
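These helpers pair naturally with the `Converter` interface from `converter.h` above: a `BlockParser` built from raw cell values can be fed straight into `Converter::Convert`. A minimal sketch under the assumption that the Arrow testing library (which exports these `ARROW_TESTING_EXPORT` symbols) is built and linked, which not every Arrow distribution ships:

```cpp
#include <iostream>
#include <memory>

#include "arrow/api.h"
#include "arrow/csv/converter.h"
#include "arrow/csv/options.h"
#include "arrow/csv/test_common.h"

int main() {
  // Build a one-column BlockParser from raw cell values.
  std::shared_ptr<arrow::csv::BlockParser> parser;
  arrow::csv::MakeColumnParser({"1", "2", "3"}, &parser);

  // Options must outlive the converter, which stores them by reference.
  auto options = arrow::csv::ConvertOptions::Defaults();
  auto converter =
      arrow::csv::Converter::Make(arrow::int64(), options).ValueOrDie();

  // Convert column 0 of the parsed block into an int64 array.
  auto array = converter->Convert(*parser, /*col_index=*/0).ValueOrDie();
  std::cout << array->ToString() << std::endl;
  return 0;
}
```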
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/writer.h
ADDED
@@ -0,0 +1,89 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <memory>

#include "arrow/csv/options.h"
#include "arrow/io/interfaces.h"
#include "arrow/ipc/type_fwd.h"
#include "arrow/record_batch.h"
#include "arrow/table.h"

namespace arrow {
namespace csv {

// Functionality for converting Arrow data to comma separated value text.
// This library supports all primitive types that can be cast to StringArrays.
// It applies the following formatting rules:
// - For non-binary types no quotes surround values. Nulls are represented as the empty
//   string.
// - For binary types all non-null data is quoted (and quotes within data are escaped
//   with an additional quote).
//   Null values are empty and unquoted.

/// \defgroup csv-write-functions High-level functions for writing CSV files
/// @{

/// \brief Convert table to CSV and write the result to output.
/// Experimental
ARROW_EXPORT Status WriteCSV(const Table& table, const WriteOptions& options,
                             arrow::io::OutputStream* output);
/// \brief Convert batch to CSV and write the result to output.
/// Experimental
ARROW_EXPORT Status WriteCSV(const RecordBatch& batch, const WriteOptions& options,
                             arrow::io::OutputStream* output);
/// \brief Convert batches read through a RecordBatchReader
/// to CSV and write the results to output.
/// Experimental
ARROW_EXPORT Status WriteCSV(const std::shared_ptr<RecordBatchReader>& reader,
                             const WriteOptions& options,
                             arrow::io::OutputStream* output);

/// @}

/// \defgroup csv-writer-factories Functions for creating an incremental CSV writer
/// @{

/// \brief Create a new CSV writer. User is responsible for closing the
/// actual OutputStream.
///
/// \param[in] sink output stream to write to
/// \param[in] schema the schema of the record batches to be written
/// \param[in] options options for serialization
/// \return Result<std::shared_ptr<RecordBatchWriter>>
ARROW_EXPORT
Result<std::shared_ptr<ipc::RecordBatchWriter>> MakeCSVWriter(
    std::shared_ptr<io::OutputStream> sink, const std::shared_ptr<Schema>& schema,
    const WriteOptions& options = WriteOptions::Defaults());

/// \brief Create a new CSV writer.
///
/// \param[in] sink output stream to write to (does not take ownership)
/// \param[in] schema the schema of the record batches to be written
/// \param[in] options options for serialization
/// \return Result<std::shared_ptr<RecordBatchWriter>>
ARROW_EXPORT
Result<std::shared_ptr<ipc::RecordBatchWriter>> MakeCSVWriter(
    io::OutputStream* sink, const std::shared_ptr<Schema>& schema,
    const WriteOptions& options = WriteOptions::Defaults());

/// @}

}  // namespace csv
}  // namespace arrow
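A minimal sketch of the high-level path, serializing a whole table to an in-memory CSV string via `WriteCSV` and a `BufferOutputStream`; `TableToCsvString` is a hypothetical helper name:

```cpp
#include <memory>
#include <string>

#include "arrow/api.h"
#include "arrow/csv/writer.h"
#include "arrow/io/api.h"

arrow::Result<std::string> TableToCsvString(const arrow::Table& table) {
  // Collect the CSV bytes into an in-memory buffer instead of a file.
  ARROW_ASSIGN_OR_RAISE(auto sink, arrow::io::BufferOutputStream::Create());

  arrow::csv::WriteOptions options = arrow::csv::WriteOptions::Defaults();
  options.include_header = true;

  // Serialize the whole table through the high-level helper.
  ARROW_RETURN_NOT_OK(arrow::csv::WriteCSV(table, options, sink.get()));
  ARROW_ASSIGN_OR_RAISE(auto buffer, sink->Finish());
  return buffer->ToString();
}
```

For incremental output (e.g. appending record batches as they are produced), `MakeCSVWriter` returns an `ipc::RecordBatchWriter` whose `WriteRecordBatch`/`Close` calls drive the same formatting rules.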
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/api.h
ADDED
@@ -0,0 +1,25 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include "arrow/ipc/dictionary.h"
#include "arrow/ipc/feather.h"
#include "arrow/ipc/json_simple.h"
#include "arrow/ipc/message.h"
#include "arrow/ipc/reader.h"
#include "arrow/ipc/writer.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/dictionary.h
ADDED
@@ -0,0 +1,177 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Tools for dictionaries in IPC context

#pragma once

#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/type_fwd.h"
#include "arrow/util/macros.h"
#include "arrow/util/visibility.h"

namespace arrow {
namespace ipc {

namespace internal {

class FieldPosition {
 public:
  FieldPosition() : parent_(NULLPTR), index_(-1), depth_(0) {}

  FieldPosition child(int index) const { return {this, index}; }

  std::vector<int> path() const {
    std::vector<int> path(depth_);
    const FieldPosition* cur = this;
    for (int i = depth_ - 1; i >= 0; --i) {
      path[i] = cur->index_;
      cur = cur->parent_;
    }
    return path;
  }

 protected:
  FieldPosition(const FieldPosition* parent, int index)
      : parent_(parent), index_(index), depth_(parent->depth_ + 1) {}

  const FieldPosition* parent_;
  int index_;
  int depth_;
};

}  // namespace internal

/// \brief Map fields in a schema to dictionary ids
///
/// The mapping is structural, i.e. the field path (as a vector of indices)
/// is associated to the dictionary id. A dictionary id may be associated
/// to multiple fields.
class ARROW_EXPORT DictionaryFieldMapper {
 public:
  DictionaryFieldMapper();
  explicit DictionaryFieldMapper(const Schema& schema);
  ~DictionaryFieldMapper();

  Status AddSchemaFields(const Schema& schema);
  Status AddField(int64_t id, std::vector<int> field_path);

  Result<int64_t> GetFieldId(std::vector<int> field_path) const;

  int num_fields() const;

  /// \brief Returns the number of unique dictionaries, taking into
  /// account that different fields can share the same dictionary.
  int num_dicts() const;

 private:
  struct Impl;
  std::unique_ptr<Impl> impl_;
};

using DictionaryVector = std::vector<std::pair<int64_t, std::shared_ptr<Array>>>;

/// \brief Memoization data structure for reading dictionaries from IPC streams
///
/// This structure tracks the following associations:
/// - field position (structural) -> dictionary id
/// - dictionary id -> value type
/// - dictionary id -> dictionary (value) data
///
/// Together, they allow resolving dictionary data when reading an IPC stream,
/// using metadata recorded in the schema message and data recorded in the
/// dictionary batch messages (see ResolveDictionaries).
///
/// This structure isn't useful for writing an IPC stream, where only
/// DictionaryFieldMapper is necessary.
class ARROW_EXPORT DictionaryMemo {
 public:
  DictionaryMemo();
  ~DictionaryMemo();

  DictionaryFieldMapper& fields();
  const DictionaryFieldMapper& fields() const;

  /// \brief Return the current dictionary corresponding to a particular
  /// id. Returns KeyError if the id is not found.
  Result<std::shared_ptr<ArrayData>> GetDictionary(int64_t id, MemoryPool* pool) const;

  /// \brief Return the dictionary value type corresponding to a
  /// particular dictionary id.
  Result<std::shared_ptr<DataType>> GetDictionaryType(int64_t id) const;

  /// \brief Return true if we have a dictionary for the input id
  bool HasDictionary(int64_t id) const;

  /// \brief Add a dictionary value type to the memo with a particular id.
  /// Returns KeyError if a different type is already registered with the same id.
  Status AddDictionaryType(int64_t id, const std::shared_ptr<DataType>& type);

  /// \brief Add a dictionary to the memo with a particular id. Returns
  /// KeyError if that dictionary already exists.
  Status AddDictionary(int64_t id, const std::shared_ptr<ArrayData>& dictionary);

  /// \brief Append a dictionary delta to the memo with a particular id. Returns
  /// KeyError if that dictionary does not exist.
  Status AddDictionaryDelta(int64_t id, const std::shared_ptr<ArrayData>& dictionary);

  /// \brief Add a dictionary to the memo if it does not have one with the id;
  /// otherwise, replace the existing dictionary with the new one.
  ///
  /// Returns true if the dictionary was added, false if replaced.
  Result<bool> AddOrReplaceDictionary(int64_t id,
                                      const std::shared_ptr<ArrayData>& dictionary);

 private:
  struct Impl;
  std::unique_ptr<Impl> impl_;
};

// For writing: collect dictionary entries to write to the IPC stream, in order
// (i.e. inner dictionaries before dependent outer dictionaries).
ARROW_EXPORT
Result<DictionaryVector> CollectDictionaries(const RecordBatch& batch,
                                             const DictionaryFieldMapper& mapper);

// For reading: resolve all dictionaries in columns, according to the field
// mapping and dictionary arrays stored in memo.
// Columns may be sparse, i.e. some entries may be left null
// (e.g. if an inclusion mask was used).
ARROW_EXPORT
Status ResolveDictionaries(const ArrayDataVector& columns, const DictionaryMemo& memo,
                           MemoryPool* pool);

namespace internal {

// Like CollectDictionaries above, but uses the memo's DictionaryFieldMapper
// and all collected dictionaries are added to the memo using AddDictionary.
//
// This is used as a shortcut in some roundtripping tests (to avoid emitting
// any actual dictionary batches).
ARROW_EXPORT
Status CollectDictionaries(const RecordBatch& batch, DictionaryMemo* memo);

}  // namespace internal

}  // namespace ipc
}  // namespace arrow
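A minimal sketch of the writing-side half: assign dictionary ids structurally from a schema with `DictionaryFieldMapper`, then gather the dictionaries of a record batch with `CollectDictionaries`; `DumpDictionaries` is a hypothetical helper and the printed format is illustrative:

```cpp
#include <iostream>

#include "arrow/api.h"
#include "arrow/ipc/dictionary.h"

arrow::Status DumpDictionaries(const arrow::RecordBatch& batch) {
  // Assign dictionary ids to fields based on their structural position.
  arrow::ipc::DictionaryFieldMapper mapper(*batch.schema());

  // Collect inner dictionaries before dependent outer ones, the order the
  // IPC stream format requires them to be emitted in.
  ARROW_ASSIGN_OR_RAISE(arrow::ipc::DictionaryVector dicts,
                        arrow::ipc::CollectDictionaries(batch, mapper));
  for (const auto& [id, dictionary] : dicts) {
    std::cout << "dictionary id " << id << ": " << dictionary->length()
              << " values" << std::endl;
  }
  return arrow::Status::OK();
}
```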
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h
ADDED
@@ -0,0 +1,150 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Public API for the "Feather" file format, originally created at
// http://github.com/wesm/feather

#pragma once

#include <cstdint>
#include <memory>
#include <string>
#include <vector>

#include "arrow/ipc/options.h"
#include "arrow/type_fwd.h"
#include "arrow/util/compression.h"
#include "arrow/util/visibility.h"

namespace arrow {

class Schema;
class Status;
class Table;

namespace io {

class OutputStream;
class RandomAccessFile;

}  // namespace io

namespace ipc {
namespace feather {

static constexpr const int kFeatherV1Version = 2;
static constexpr const int kFeatherV2Version = 3;

// ----------------------------------------------------------------------
// Metadata accessor classes

/// \class Reader
/// \brief An interface for reading columns from Feather files
class ARROW_EXPORT Reader {
 public:
  virtual ~Reader() = default;

  /// \brief Open a Feather file from a RandomAccessFile interface
  ///
  /// \param[in] source a RandomAccessFile instance
  /// \return the table reader
  static Result<std::shared_ptr<Reader>> Open(
      const std::shared_ptr<io::RandomAccessFile>& source);

  /// \brief Open a Feather file from a RandomAccessFile interface
  /// with IPC read options
  ///
  /// \param[in] source a RandomAccessFile instance
  /// \param[in] options IPC read options
  /// \return the table reader
  static Result<std::shared_ptr<Reader>> Open(
      const std::shared_ptr<io::RandomAccessFile>& source, const IpcReadOptions& options);

  /// \brief Return the version number of the Feather file
  virtual int version() const = 0;

  virtual std::shared_ptr<Schema> schema() const = 0;

  /// \brief Read all columns from the file as an arrow::Table.
  ///
  /// \param[out] out the returned table
  /// \return Status
  ///
  /// This function is zero-copy if the file source supports zero-copy reads
  virtual Status Read(std::shared_ptr<Table>* out) = 0;

  /// \brief Read only the specified columns from the file as an arrow::Table.
  ///
  /// \param[in] indices the column indices to read
  /// \param[out] out the returned table
  /// \return Status
  ///
  /// This function is zero-copy if the file source supports zero-copy reads
  virtual Status Read(const std::vector<int>& indices, std::shared_ptr<Table>* out) = 0;

  /// \brief Read only the specified columns from the file as an arrow::Table.
  ///
  /// \param[in] names the column names to read
  /// \param[out] out the returned table
  /// \return Status
  ///
  /// This function is zero-copy if the file source supports zero-copy reads
  virtual Status Read(const std::vector<std::string>& names,
                      std::shared_ptr<Table>* out) = 0;
};

struct ARROW_EXPORT WriteProperties {
  static WriteProperties Defaults();

  static WriteProperties DefaultsV1() {
    WriteProperties props = Defaults();
    props.version = kFeatherV1Version;
    return props;
  }

  /// Feather file version number
  ///
  /// version 2: "Feather V1" Apache Arrow <= 0.16.0
  /// version 3: "Feather V2" Apache Arrow > 0.16.0
  int version = kFeatherV2Version;

  // Parameters for Feather V2 only

  /// Number of rows per intra-file chunk. Use a smaller chunksize when you need
  /// faster random row access
  int64_t chunksize = 1LL << 16;

  /// Compression type to use. Only UNCOMPRESSED, LZ4_FRAME, and ZSTD are
  /// supported. The default compression returned by Defaults() is LZ4 if the
  /// project is built with support for it, otherwise
  /// UNCOMPRESSED. UNCOMPRESSED is set as the object default here so that if
  /// WriteProperties::Defaults() is not used, the default constructor for
  /// WriteProperties will work regardless of the options used to build the C++
  /// project.
  Compression::type compression = Compression::UNCOMPRESSED;

  /// Compressor-specific compression level
  int compression_level = ::arrow::util::kUseDefaultCompressionLevel;
};

ARROW_EXPORT
Status WriteTable(const Table& table, io::OutputStream* dst,
                  const WriteProperties& properties = WriteProperties::Defaults());

}  // namespace feather
}  // namespace ipc
}  // namespace arrow
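A minimal sketch of a Feather round trip through these APIs, writing with the default (V2) properties and reading back through the `Reader` interface; `RoundTripFeather` is a hypothetical helper and the path is a placeholder:

```cpp
#include <memory>
#include <string>

#include "arrow/api.h"
#include "arrow/io/api.h"
#include "arrow/ipc/feather.h"

arrow::Status RoundTripFeather(const std::shared_ptr<arrow::Table>& table,
                               const std::string& path) {
  // Write the table with default (Feather V2) properties.
  ARROW_ASSIGN_OR_RAISE(auto sink, arrow::io::FileOutputStream::Open(path));
  ARROW_RETURN_NOT_OK(arrow::ipc::feather::WriteTable(*table, sink.get()));
  ARROW_RETURN_NOT_OK(sink->Close());

  // Read it back; the file is opened as a RandomAccessFile.
  ARROW_ASSIGN_OR_RAISE(auto source, arrow::io::ReadableFile::Open(path));
  ARROW_ASSIGN_OR_RAISE(auto reader, arrow::ipc::feather::Reader::Open(source));
  std::shared_ptr<arrow::Table> roundtripped;
  ARROW_RETURN_NOT_OK(reader->Read(&roundtripped));

  return roundtripped->Equals(*table) ? arrow::Status::OK()
                                      : arrow::Status::Invalid("tables differ");
}
```

Note that compressed writes (LZ4/ZSTD) depend on the flags the C++ project was built with, which is exactly why `WriteProperties` defaults the member to `UNCOMPRESSED` and lets `Defaults()` upgrade it when support is available.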