applied-ai-018 commited on
Commit
8179f69
·
verified ·
1 Parent(s): b2cbd6f

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h +160 -0
  2. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h +57 -0
  3. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/api.h +32 -0
  4. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/asof_join_node.h +41 -0
  5. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/backpressure_handler.h +74 -0
  6. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h +48 -0
  7. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/bloom_filter.h +326 -0
  8. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h +819 -0
  9. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join.h +75 -0
  10. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_dict.h +318 -0
  11. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h +102 -0
  12. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/map_node.h +81 -0
  13. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/options.h +866 -0
  14. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/order_by_impl.h +56 -0
  15. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/partition_util.h +184 -0
  16. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h +23 -0
  17. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/query_context.h +157 -0
  18. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h +226 -0
  19. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/task_util.h +102 -0
  20. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/test_nodes.h +86 -0
  21. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h +31 -0
  22. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/tpch_node.h +65 -0
  23. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/type_fwd.h +36 -0
  24. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/unmaterialized_table.h +271 -0
  25. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/util.h +184 -0
  26. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h +50 -0
  27. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/bridge.h +318 -0
  28. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/helpers.h +129 -0
  29. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/api.h +30 -0
  30. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client.h +415 -0
  31. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_auth.h +62 -0
  32. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_cookie_middleware.h +33 -0
  33. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_middleware.h +78 -0
  34. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_tracing_middleware.h +34 -0
  35. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/middleware.h +75 -0
  36. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/pch.h +26 -0
  37. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/platform.h +31 -0
  38. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_auth.h +125 -0
  39. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_middleware.h +105 -0
  40. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_tracing_middleware.h +68 -0
  41. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_definitions.h +317 -0
  42. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport_server.h +133 -0
  43. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/type_fwd.h +65 -0
  44. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types.h +942 -0
  45. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types_async.h +80 -0
  46. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/visibility.h +48 -0
  47. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/io/api.h +25 -0
  48. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/io/buffered.h +167 -0
  49. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/io/caching.h +157 -0
  50. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/io/compressed.h +118 -0
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <functional>
22
+ #include <optional>
23
+ #include <vector>
24
+
25
+ #include "arrow/compute/exec.h"
26
+ #include "arrow/result.h"
27
+
28
+ namespace arrow {
29
+ namespace acero {
30
+ namespace util {
31
+
32
+ using arrow::compute::ExecBatch;
33
+
34
+ /// \brief A container that accumulates batches until they are ready to
35
+ /// be processed.
36
+ class AccumulationQueue {
37
+ public:
38
+ AccumulationQueue() : row_count_(0) {}
39
+ ~AccumulationQueue() = default;
40
+
41
+ // We should never be copying ExecBatch around
42
+ AccumulationQueue(const AccumulationQueue&) = delete;
43
+ AccumulationQueue& operator=(const AccumulationQueue&) = delete;
44
+
45
+ AccumulationQueue(AccumulationQueue&& that);
46
+ AccumulationQueue& operator=(AccumulationQueue&& that);
47
+
48
+ void Concatenate(AccumulationQueue&& that);
49
+ void InsertBatch(ExecBatch batch);
50
+ int64_t row_count() { return row_count_; }
51
+ size_t batch_count() { return batches_.size(); }
52
+ bool empty() const { return batches_.empty(); }
53
+ void Clear();
54
+ ExecBatch& operator[](size_t i);
55
+
56
+ private:
57
+ int64_t row_count_;
58
+ std::vector<ExecBatch> batches_;
59
+ };
60
+
61
+ /// A queue that sequences incoming batches
62
+ ///
63
+ /// This can be used when a node needs to do some kind of ordered processing on
64
+ /// the stream.
65
+ ///
66
+ /// Batches can be inserted in any order. The process_callback will be called on
67
+ /// the batches, in order, without reentrant calls. For this reason the callback
68
+ /// should be quick.
69
+ ///
70
+ /// For example, in a top-n node, the process callback should determine how many
71
+ /// rows need to be delivered for the given batch, and then return a task to actually
72
+ /// deliver those rows.
73
+ class SequencingQueue {
74
+ public:
75
+ using Task = std::function<Status()>;
76
+
77
+ /// Strategy that describes how to handle items
78
+ class Processor {
79
+ public:
80
+ /// Process the batch, potentially generating a task
81
+ ///
82
+ /// This method will be called on each batch in order. Calls to this method
83
+ /// will be serialized and it will not be called reentrantly. This makes it
84
+ /// safe to do things that rely on order but minimal time should be spent here
85
+ /// to avoid becoming a bottleneck.
86
+ ///
87
+ /// \return a follow-up task that will be scheduled. The follow-up task(s) are
88
+ /// is not guaranteed to run in any particular order. If nullopt is
89
+ /// returned then nothing will be scheduled.
90
+ virtual Result<std::optional<Task>> Process(ExecBatch batch) = 0;
91
+ /// Schedule a task
92
+ virtual void Schedule(Task task) = 0;
93
+ };
94
+
95
+ virtual ~SequencingQueue() = default;
96
+
97
+ /// Insert a batch into the queue
98
+ ///
99
+ /// This will insert the batch into the queue. If this batch was the next batch
100
+ /// to deliver then this will trigger 1+ calls to the process callback to generate
101
+ /// 1+ tasks.
102
+ ///
103
+ /// The task generated by this call will be executed immediately. The remaining
104
+ /// tasks will be scheduled using the schedule callback.
105
+ ///
106
+ /// From a data pipeline perspective the sequencing queue is a "sometimes" breaker. If
107
+ /// a task arrives in order then this call will usually execute the downstream pipeline.
108
+ /// If this task arrives early then this call will only queue the data.
109
+ virtual Status InsertBatch(ExecBatch batch) = 0;
110
+
111
+ /// Create a queue
112
+ /// \param processor describes how to process the batches, must outlive the queue
113
+ static std::unique_ptr<SequencingQueue> Make(Processor* processor);
114
+ };
115
+
116
+ /// A queue that sequences incoming batches
117
+ ///
118
+ /// Unlike SequencingQueue the Process method is not expected to schedule new tasks.
119
+ ///
120
+ /// If a batch arrives and another thread is currently processing then the batch
121
+ /// will be queued and control will return. In other words, delivery of batches will
122
+ /// not block on the Process method.
123
+ ///
124
+ /// It can be helpful to think of this as if a dedicated thread is running Process as
125
+ /// batches arrive
126
+ class SerialSequencingQueue {
127
+ public:
128
+ /// Strategy that describes how to handle items
129
+ class Processor {
130
+ public:
131
+ /// Process the batch
132
+ ///
133
+ /// This method will be called on each batch in order. Calls to this method
134
+ /// will be serialized and it will not be called reentrantly. This makes it
135
+ /// safe to do things that rely on order.
136
+ ///
137
+ /// If this falls behind then data may accumulate
138
+ ///
139
+ /// TODO: Could add backpressure if needed but right now all uses of this should
140
+ /// be pretty fast and so are unlikely to block.
141
+ virtual Status Process(ExecBatch batch) = 0;
142
+ };
143
+
144
+ virtual ~SerialSequencingQueue() = default;
145
+
146
+ /// Insert a batch into the queue
147
+ ///
148
+ /// This will insert the batch into the queue. If this batch was the next batch
149
+ /// to deliver then this may trigger calls to the processor which will be run
150
+ /// as part of this call.
151
+ virtual Status InsertBatch(ExecBatch batch) = 0;
152
+
153
+ /// Create a queue
154
+ /// \param processor describes how to process the batches, must outlive the queue
155
+ static std::unique_ptr<SerialSequencingQueue> Make(Processor* processor);
156
+ };
157
+
158
+ } // namespace util
159
+ } // namespace acero
160
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <vector>
24
+
25
+ #include "arrow/acero/visibility.h"
26
+ #include "arrow/compute/api_aggregate.h"
27
+ #include "arrow/compute/type_fwd.h"
28
+ #include "arrow/result.h"
29
+ #include "arrow/type_fwd.h"
30
+
31
+ namespace arrow {
32
+ namespace acero {
33
+ namespace aggregate {
34
+
35
+ using compute::Aggregate;
36
+ using compute::default_exec_context;
37
+ using compute::ExecContext;
38
+
39
+ /// \brief Make the output schema of an aggregate node
40
+ ///
41
+ /// The output schema is determined by the aggregation kernels, which may depend on the
42
+ /// ExecContext argument. To guarantee correct results, the same ExecContext argument
43
+ /// should be used in execution.
44
+ ///
45
+ /// \param[in] input_schema the schema of the input to the node
46
+ /// \param[in] keys the grouping keys for the aggregation
47
+ /// \param[in] segment_keys the segmenting keys for the aggregation
48
+ /// \param[in] aggregates the aggregates for the aggregation
49
+ /// \param[in] exec_ctx the execution context for the aggregation
50
+ ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
51
+ const std::shared_ptr<Schema>& input_schema, const std::vector<FieldRef>& keys,
52
+ const std::vector<FieldRef>& segment_keys, const std::vector<Aggregate>& aggregates,
53
+ ExecContext* exec_ctx = default_exec_context());
54
+
55
+ } // namespace aggregate
56
+ } // namespace acero
57
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/api.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // NOTE: API is EXPERIMENTAL and will change without going through a
19
+ // deprecation cycle
20
+
21
+ #pragma once
22
+
23
+ /// \defgroup acero-api Utilities for creating and executing execution plans
24
+ /// @{
25
+ /// @}
26
+
27
+ /// \defgroup acero-nodes Options classes for the various exec nodes
28
+ /// @{
29
+ /// @}
30
+
31
+ #include "arrow/acero/exec_plan.h"
32
+ #include "arrow/acero/options.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/asof_join_node.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include <vector>
19
+
20
+ #include "arrow/acero/options.h"
21
+ #include "arrow/acero/visibility.h"
22
+ #include "arrow/compute/exec.h"
23
+ #include "arrow/type.h"
24
+
25
+ namespace arrow {
26
+ namespace acero {
27
+ namespace asofjoin {
28
+
29
+ using AsofJoinKeys = AsofJoinNodeOptions::Keys;
30
+
31
+ /// \brief Make the output schema of an as-of-join node
32
+ ///
33
+ /// \param[in] input_schema the schema of each input to the node
34
+ /// \param[in] input_keys the key of each input to the node
35
+ ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
36
+ const std::vector<std::shared_ptr<Schema>>& input_schema,
37
+ const std::vector<AsofJoinKeys>& input_keys);
38
+
39
+ } // namespace asofjoin
40
+ } // namespace acero
41
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/backpressure_handler.h ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+ #include "arrow/acero/exec_plan.h"
20
+ #include "arrow/acero/options.h"
21
+
22
+ #include <memory>
23
+
24
+ namespace arrow::acero {
25
+
26
+ class BackpressureHandler {
27
+ private:
28
+ BackpressureHandler(ExecNode* input, size_t low_threshold, size_t high_threshold,
29
+ std::unique_ptr<BackpressureControl> backpressure_control)
30
+ : input_(input),
31
+ low_threshold_(low_threshold),
32
+ high_threshold_(high_threshold),
33
+ backpressure_control_(std::move(backpressure_control)) {}
34
+
35
+ public:
36
+ static Result<BackpressureHandler> Make(
37
+ ExecNode* input, size_t low_threshold, size_t high_threshold,
38
+ std::unique_ptr<BackpressureControl> backpressure_control) {
39
+ if (low_threshold >= high_threshold) {
40
+ return Status::Invalid("low threshold (", low_threshold,
41
+ ") must be less than high threshold (", high_threshold, ")");
42
+ }
43
+ if (backpressure_control == NULLPTR) {
44
+ return Status::Invalid("null backpressure control parameter");
45
+ }
46
+ BackpressureHandler backpressure_handler(input, low_threshold, high_threshold,
47
+ std::move(backpressure_control));
48
+ return std::move(backpressure_handler);
49
+ }
50
+
51
+ void Handle(size_t start_level, size_t end_level) {
52
+ if (start_level < high_threshold_ && end_level >= high_threshold_) {
53
+ backpressure_control_->Pause();
54
+ } else if (start_level > low_threshold_ && end_level <= low_threshold_) {
55
+ backpressure_control_->Resume();
56
+ }
57
+ }
58
+
59
+ Status ForceShutdown() {
60
+ // It may be unintuitive to call Resume() here, but this is to avoid a deadlock.
61
+ // Since acero's executor won't terminate if any one node is paused, we need to
62
+ // force resume the node before stopping production.
63
+ backpressure_control_->Resume();
64
+ return input_->StopProducing();
65
+ }
66
+
67
+ private:
68
+ ExecNode* input_;
69
+ size_t low_threshold_;
70
+ size_t high_threshold_;
71
+ std::unique_ptr<BackpressureControl> backpressure_control_;
72
+ };
73
+
74
+ } // namespace arrow::acero
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <string>
22
+ #include <vector>
23
+
24
+ #include "benchmark/benchmark.h"
25
+
26
+ #include "arrow/acero/exec_plan.h"
27
+ #include "arrow/acero/test_util_internal.h"
28
+ #include "arrow/compute/exec.h"
29
+
30
+ namespace arrow {
31
+
32
+ namespace acero {
33
+
34
+ Status BenchmarkNodeOverhead(benchmark::State& state, int32_t num_batches,
35
+ int32_t batch_size, arrow::acero::BatchesWithSchema data,
36
+ std::vector<arrow::acero::Declaration>& node_declarations,
37
+ arrow::MemoryPool* pool = default_memory_pool());
38
+
39
+ Status BenchmarkIsolatedNodeOverhead(benchmark::State& state,
40
+ arrow::compute::Expression expr, int32_t num_batches,
41
+ int32_t batch_size,
42
+ arrow::acero::BatchesWithSchema data,
43
+ std::string factory_name,
44
+ arrow::acero::ExecNodeOptions& options,
45
+ arrow::MemoryPool* pool = default_memory_pool());
46
+
47
+ } // namespace acero
48
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/bloom_filter.h ADDED
@@ -0,0 +1,326 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #if defined(ARROW_HAVE_RUNTIME_AVX2)
21
+ #include <immintrin.h>
22
+ #endif
23
+
24
+ #include <atomic>
25
+ #include <cstdint>
26
+ #include <memory>
27
+
28
+ #include "arrow/acero/partition_util.h"
29
+ #include "arrow/acero/util.h"
30
+ #include "arrow/memory_pool.h"
31
+ #include "arrow/result.h"
32
+ #include "arrow/status.h"
33
+
34
+ namespace arrow {
35
+ namespace acero {
36
+
37
+ // A set of pre-generated bit masks from a 64-bit word.
38
+ //
39
+ // It is used to map selected bits of hash to a bit mask that will be used in
40
+ // a Bloom filter.
41
+ //
42
+ // These bit masks need to look random and need to have a similar fractions of
43
+ // bits set in order for a Bloom filter to have a low false positives rate.
44
+ //
45
+ struct ARROW_ACERO_EXPORT BloomFilterMasks {
46
+ // Generate all masks as a single bit vector. Each bit offset in this bit
47
+ // vector corresponds to a single mask.
48
+ // In each consecutive kBitsPerMask bits, there must be between
49
+ // kMinBitsSet and kMaxBitsSet bits set.
50
+ //
51
+ BloomFilterMasks();
52
+
53
+ inline uint64_t mask(int bit_offset) {
54
+ #if ARROW_LITTLE_ENDIAN
55
+ return (arrow::util::SafeLoadAs<uint64_t>(masks_ + bit_offset / 8) >>
56
+ (bit_offset % 8)) &
57
+ kFullMask;
58
+ #else
59
+ return (BYTESWAP(arrow::util::SafeLoadAs<uint64_t>(masks_ + bit_offset / 8)) >>
60
+ (bit_offset % 8)) &
61
+ kFullMask;
62
+ #endif
63
+ }
64
+
65
+ // Masks are 57 bits long because then they can be accessed at an
66
+ // arbitrary bit offset using a single unaligned 64-bit load instruction.
67
+ //
68
+ static constexpr int kBitsPerMask = 57;
69
+ static constexpr uint64_t kFullMask = (1ULL << kBitsPerMask) - 1;
70
+
71
+ // Minimum and maximum number of bits set in each mask.
72
+ // This constraint is enforced when generating the bit masks.
73
+ // Values should be close to each other and chosen as to minimize a Bloom
74
+ // filter false positives rate.
75
+ //
76
+ static constexpr int kMinBitsSet = 4;
77
+ static constexpr int kMaxBitsSet = 5;
78
+
79
+ // Number of generated masks.
80
+ // Having more masks to choose will improve false positives rate of Bloom
81
+ // filter but will also use more memory, which may lead to more CPU cache
82
+ // misses.
83
+ // The chosen value results in using only a few cache-lines for mask lookups,
84
+ // while providing a good variety of available bit masks.
85
+ //
86
+ static constexpr int kLogNumMasks = 10;
87
+ static constexpr int kNumMasks = 1 << kLogNumMasks;
88
+
89
+ // Data of masks. Masks are stored in a single bit vector. Nth mask is
90
+ // kBitsPerMask bits starting at bit offset N.
91
+ //
92
+ static constexpr int kTotalBytes = (kNumMasks + 64) / 8;
93
+ uint8_t masks_[kTotalBytes];
94
+ };
95
+
96
+ // A variant of a blocked Bloom filter implementation.
97
+ // A Bloom filter is a data structure that provides approximate membership test
98
+ // functionality based only on the hash of the key. Membership test may return
99
+ // false positives but not false negatives. Approximation of the result allows
100
+ // in general case (for arbitrary data types of keys) to save on both memory and
101
+ // lookup cost compared to the accurate membership test.
102
+ // The accurate test may sometimes still be cheaper for a specific data types
103
+ // and inputs, e.g. integers from a small range.
104
+ //
105
+ // This blocked Bloom filter is optimized for use in hash joins, to achieve a
106
+ // good balance between the size of the filter, the cost of its building and
107
+ // querying and the rate of false positives.
108
+ //
109
+ class ARROW_ACERO_EXPORT BlockedBloomFilter {
110
+ friend class BloomFilterBuilder_SingleThreaded;
111
+ friend class BloomFilterBuilder_Parallel;
112
+
113
+ public:
114
+ BlockedBloomFilter() : log_num_blocks_(0), num_blocks_(0), blocks_(NULLPTR) {}
115
+
116
+ inline bool Find(uint64_t hash) const {
117
+ uint64_t m = mask(hash);
118
+ uint64_t b = blocks_[block_id(hash)];
119
+ return (b & m) == m;
120
+ }
121
+
122
+ // Uses SIMD if available for smaller Bloom filters.
123
+ // Uses memory prefetching for larger Bloom filters.
124
+ //
125
+ void Find(int64_t hardware_flags, int64_t num_rows, const uint32_t* hashes,
126
+ uint8_t* result_bit_vector, bool enable_prefetch = true) const;
127
+ void Find(int64_t hardware_flags, int64_t num_rows, const uint64_t* hashes,
128
+ uint8_t* result_bit_vector, bool enable_prefetch = true) const;
129
+
130
+ int log_num_blocks() const { return log_num_blocks_; }
131
+
132
+ int NumHashBitsUsed() const;
133
+
134
+ bool IsSameAs(const BlockedBloomFilter* other) const;
135
+
136
+ int64_t NumBitsSet() const;
137
+
138
+ // Folding of a block Bloom filter after the initial version
139
+ // has been built.
140
+ //
141
+ // One of the parameters for creation of Bloom filter is the number
142
+ // of bits allocated for it. The more bits allocated, the lower the
143
+ // probability of false positives. A good heuristic is to aim for
144
+ // half of the bits set in the constructed Bloom filter. This should
145
+ // result in a good trade off between size (and following cost of
146
+ // memory accesses) and false positives rate.
147
+ //
148
+ // There might have been many duplicate keys in the input provided
149
+ // to Bloom filter builder. In that case the resulting bit vector
150
+ // would be more sparse then originally intended. It is possible to
151
+ // easily correct that and cut in half the size of Bloom filter
152
+ // after it has already been constructed. The process to do that is
153
+ // approximately equal to OR-ing bits from upper and lower half (the
154
+ // way we address these bits when inserting or querying a hash makes
155
+ // such folding in half possible).
156
+ //
157
+ // We will keep folding as long as the fraction of bits set is less
158
+ // than 1/4. The resulting bit vector density should be in the [1/4,
159
+ // 1/2) range.
160
+ //
161
+ void Fold();
162
+
163
+ private:
164
+ Status CreateEmpty(int64_t num_rows_to_insert, MemoryPool* pool);
165
+
166
+ inline void Insert(uint64_t hash) {
167
+ uint64_t m = mask(hash);
168
+ uint64_t& b = blocks_[block_id(hash)];
169
+ b |= m;
170
+ }
171
+
172
+ void Insert(int64_t hardware_flags, int64_t num_rows, const uint32_t* hashes);
173
+ void Insert(int64_t hardware_flags, int64_t num_rows, const uint64_t* hashes);
174
+
175
+ inline uint64_t mask(uint64_t hash) const {
176
+ // The lowest bits of hash are used to pick mask index.
177
+ //
178
+ int mask_id = static_cast<int>(hash & (BloomFilterMasks::kNumMasks - 1));
179
+ uint64_t result = masks_.mask(mask_id);
180
+
181
+ // The next set of hash bits is used to pick the amount of bit
182
+ // rotation of the mask.
183
+ //
184
+ int rotation = (hash >> BloomFilterMasks::kLogNumMasks) & 63;
185
+ result = ROTL64(result, rotation);
186
+
187
+ return result;
188
+ }
189
+
190
+ inline int64_t block_id(uint64_t hash) const {
191
+ // The next set of hash bits following the bits used to select a
192
+ // mask is used to pick block id (index of 64-bit word in a bit
193
+ // vector).
194
+ //
195
+ return (hash >> (BloomFilterMasks::kLogNumMasks + 6)) & (num_blocks_ - 1);
196
+ }
197
+
198
  // Scalar implementation shared by the 32-bit and 64-bit Insert overloads.
  template <typename T>
  inline void InsertImp(int64_t num_rows, const T* hashes);

  // Scalar implementation of batch membership lookup, writing results into
  // `result_bit_vector`; `enable_prefetch` toggles software prefetching
  // (see UsePrefetch()).
  template <typename T>
  inline void FindImp(int64_t num_rows, const T* hashes, uint8_t* result_bit_vector,
                      bool enable_prefetch) const;

  // Perform `num_folds` halving steps of the bit vector (see Fold()).
  void SingleFold(int num_folds);
206
+
207
#if defined(ARROW_HAVE_RUNTIME_AVX2)
  // AVX2 specializations processing several hashes per iteration.
  // The int64_t return value is presumably the number of rows processed,
  // with any remainder handled by the scalar paths — confirm in
  // bloom_filter_avx2.cc.
  inline __m256i mask_avx2(__m256i hash) const;
  inline __m256i block_id_avx2(__m256i hash) const;
  int64_t Insert_avx2(int64_t num_rows, const uint32_t* hashes);
  int64_t Insert_avx2(int64_t num_rows, const uint64_t* hashes);
  template <typename T>
  int64_t InsertImp_avx2(int64_t num_rows, const T* hashes);
  int64_t Find_avx2(int64_t num_rows, const uint32_t* hashes,
                    uint8_t* result_bit_vector) const;
  int64_t Find_avx2(int64_t num_rows, const uint64_t* hashes,
                    uint8_t* result_bit_vector) const;
  template <typename T>
  int64_t FindImp_avx2(int64_t num_rows, const T* hashes,
                       uint8_t* result_bit_vector) const;
#endif
222
+
223
+ bool UsePrefetch() const {
224
+ return num_blocks_ * sizeof(uint64_t) > kPrefetchLimitBytes;
225
+ }
226
+
227
  // Below this filter size prefetching is not used (see UsePrefetch()).
  static constexpr int64_t kPrefetchLimitBytes = 256 * 1024;

  // Precomputed masks shared by every filter instance (static).
  static BloomFilterMasks masks_;

  // Total number of bits used by block Bloom filter must be a power
  // of 2.
  //
  int log_num_blocks_;
  int64_t num_blocks_;

  // Buffer allocated to store an array of power of 2 64-bit blocks.
  //
  std::shared_ptr<Buffer> buf_;
  // Pointer to mutable data owned by Buffer
  //
  uint64_t* blocks_;
};
244
+
245
// We have two separate implementations of building a Bloom filter, multi-threaded and
// single-threaded.
//
// Single threaded version is useful in two ways:
// a) It allows to verify parallel implementation in tests (the single threaded one is
// simpler and can be used as the source of truth).
// b) It is preferred for small and medium size Bloom filters, because it skips extra
// synchronization related steps from parallel variant (partitioning and taking locks).
//
enum class BloomFilterBuildStrategy {
  // All batches are pushed from a single thread; no partitioning or locking.
  SINGLE_THREADED = 0,
  // Batches may be pushed concurrently; hashes are partitioned per thread
  // and partitions of the target are guarded by locks.
  PARALLEL = 1,
};
258
+
259
// Interface for building a BlockedBloomFilter incrementally from batches of
// precomputed hash values: call Begin() once, push every batch via
// PushNextBatch(), then CleanUp() when finished.
class ARROW_ACERO_EXPORT BloomFilterBuilder {
 public:
  virtual ~BloomFilterBuilder() = default;
  // Prepare `build_target` to receive `num_rows` hashes delivered in
  // `num_batches` calls to PushNextBatch from up to `num_threads` threads.
  virtual Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
                       int64_t num_rows, int64_t num_batches,
                       BlockedBloomFilter* build_target) = 0;
  // Number of parallel tasks involved in the build; 0 by default
  // (NOTE(review): exact meaning depends on callers — confirm).
  virtual int64_t num_tasks() const { return 0; }
  // Insert a batch of 32-bit hashes into the filter being built.
  virtual Status PushNextBatch(size_t thread_index, int64_t num_rows,
                               const uint32_t* hashes) = 0;
  // Insert a batch of 64-bit hashes into the filter being built.
  virtual Status PushNextBatch(size_t thread_index, int64_t num_rows,
                               const uint64_t* hashes) = 0;
  // Release any scratch state retained during the build (no-op by default).
  virtual void CleanUp() {}
  // Factory: create a builder implementing the given strategy.
  static std::unique_ptr<BloomFilterBuilder> Make(BloomFilterBuildStrategy strategy);
};
273
+
274
+ class ARROW_ACERO_EXPORT BloomFilterBuilder_SingleThreaded : public BloomFilterBuilder {
275
+ public:
276
+ Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
277
+ int64_t num_rows, int64_t num_batches,
278
+ BlockedBloomFilter* build_target) override;
279
+
280
+ Status PushNextBatch(size_t /*thread_index*/, int64_t num_rows,
281
+ const uint32_t* hashes) override;
282
+
283
+ Status PushNextBatch(size_t /*thread_index*/, int64_t num_rows,
284
+ const uint64_t* hashes) override;
285
+
286
+ private:
287
+ template <typename T>
288
+ void PushNextBatchImp(int64_t num_rows, const T* hashes);
289
+
290
+ int64_t hardware_flags_;
291
+ BlockedBloomFilter* build_target_;
292
+ };
293
+
294
+ class ARROW_ACERO_EXPORT BloomFilterBuilder_Parallel : public BloomFilterBuilder {
295
+ public:
296
+ Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
297
+ int64_t num_rows, int64_t num_batches,
298
+ BlockedBloomFilter* build_target) override;
299
+
300
+ Status PushNextBatch(size_t thread_id, int64_t num_rows,
301
+ const uint32_t* hashes) override;
302
+
303
+ Status PushNextBatch(size_t thread_id, int64_t num_rows,
304
+ const uint64_t* hashes) override;
305
+
306
+ void CleanUp() override;
307
+
308
+ private:
309
+ template <typename T>
310
+ void PushNextBatchImp(size_t thread_id, int64_t num_rows, const T* hashes);
311
+
312
+ int64_t hardware_flags_;
313
+ BlockedBloomFilter* build_target_;
314
+ int log_num_prtns_;
315
+ struct ThreadLocalState {
316
+ std::vector<uint32_t> partitioned_hashes_32;
317
+ std::vector<uint64_t> partitioned_hashes_64;
318
+ std::vector<uint16_t> partition_ranges;
319
+ std::vector<int> unprocessed_partition_ids;
320
+ };
321
+ std::vector<ThreadLocalState> thread_local_states_;
322
+ PartitionLocks prtn_locks_;
323
+ };
324
+
325
+ } // namespace acero
326
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h ADDED
@@ -0,0 +1,819 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstddef>
21
+ #include <cstdint>
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <optional>
25
+ #include <string>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/acero/type_fwd.h"
30
+ #include "arrow/acero/visibility.h"
31
+ #include "arrow/compute/api_vector.h"
32
+ #include "arrow/compute/exec.h"
33
+ #include "arrow/compute/ordering.h"
34
+ #include "arrow/type_fwd.h"
35
+ #include "arrow/util/future.h"
36
+ #include "arrow/util/macros.h"
37
+ #include "arrow/util/tracing.h"
38
+ #include "arrow/util/type_fwd.h"
39
+
40
+ namespace arrow {
41
+
42
+ using compute::ExecBatch;
43
+ using compute::ExecContext;
44
+ using compute::FunctionRegistry;
45
+ using compute::GetFunctionRegistry;
46
+ using compute::Ordering;
47
+ using compute::threaded_exec_context;
48
+
49
+ namespace acero {
50
+
51
+ /// \addtogroup acero-internals
52
+ /// @{
53
+
54
/// \brief a graph of connected ExecNodes that can be started and run to completion
class ARROW_ACERO_EXPORT ExecPlan : public std::enable_shared_from_this<ExecPlan> {
 public:
  // This allows operators to rely on signed 16-bit indices
  static const uint32_t kMaxBatchSize = 1 << 15;
  using NodeVector = std::vector<ExecNode*>;

  virtual ~ExecPlan() = default;

  /// \brief shared per-query state and configuration for this plan
  QueryContext* query_context();

  /// \brief retrieve the nodes in the plan
  const NodeVector& nodes() const;

  /// Make an empty exec plan
  static Result<std::shared_ptr<ExecPlan>> Make(
      QueryOptions options, ExecContext exec_context = *threaded_exec_context(),
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  /// Make an empty exec plan with default query options
  static Result<std::shared_ptr<ExecPlan>> Make(
      ExecContext exec_context = *threaded_exec_context(),
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  /// Overload of Make taking the exec context by pointer
  static Result<std::shared_ptr<ExecPlan>> Make(
      QueryOptions options, ExecContext* exec_context,
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  /// Overload of Make taking the exec context by pointer, with default query options
  static Result<std::shared_ptr<ExecPlan>> Make(
      ExecContext* exec_context,
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  /// \brief add a node to the plan, transferring ownership to the plan
  /// \return a non-owning pointer to the added node
  ExecNode* AddNode(std::unique_ptr<ExecNode> node);

  /// \brief construct a node of type Node in place and add it to the plan
  /// \return a non-owning pointer to the added node
  template <typename Node, typename... Args>
  Node* EmplaceNode(Args&&... args) {
    std::unique_ptr<Node> node{new Node{std::forward<Args>(args)...}};
    auto out = node.get();
    AddNode(std::move(node));
    return out;
  }

  /// \brief validate the plan (see also ExecNode::Validate)
  Status Validate();

  /// \brief Start producing on all nodes
  ///
  /// Nodes are started in reverse topological order, such that any node
  /// is started before all of its inputs.
  void StartProducing();

  /// \brief Stop producing on all nodes
  ///
  /// Triggers all sources to stop producing new data. In order to cleanly stop the plan
  /// will continue to run any tasks that are already in progress. The caller should
  /// still wait for `finished` to complete before destroying the plan.
  void StopProducing();

  /// \brief A future which will be marked finished when all tasks have finished.
  Future<> finished();

  /// \brief Return whether the plan has non-empty metadata
  bool HasMetadata() const;

  /// \brief Return the plan's attached metadata
  std::shared_ptr<const KeyValueMetadata> metadata() const;

  /// \brief a human-readable representation of the plan, for display and debugging
  std::string ToString() const;
};
120
+
121
// Acero can be extended by providing custom implementations of ExecNode. The methods
// below are documented in detail and provide careful instruction on how to fulfill the
// ExecNode contract. It's suggested you familiarize yourself with the Acero
// documentation in the C++ user guide.
class ARROW_ACERO_EXPORT ExecNode {
 public:
  using NodeVector = std::vector<ExecNode*>;

  virtual ~ExecNode() = default;

  /// \brief a short name identifying the kind of this node, for display and debugging
  virtual const char* kind_name() const = 0;

  // The number of inputs expected by this node
  int num_inputs() const { return static_cast<int>(inputs_.size()); }

  /// This node's predecessors in the exec plan
  const NodeVector& inputs() const { return inputs_; }

  /// True if the plan has no output schema (is a sink)
  bool is_sink() const { return !output_schema_; }

  /// \brief Labels identifying the function of each input.
  const std::vector<std::string>& input_labels() const { return input_labels_; }

  /// This node's successor in the exec plan
  const ExecNode* output() const { return output_; }

  /// The datatypes for batches produced by this node
  const std::shared_ptr<Schema>& output_schema() const { return output_schema_; }

  /// This node's exec plan
  ExecPlan* plan() { return plan_; }

  /// \brief An optional label, for display and debugging
  ///
  /// There is no guarantee that this value is non-empty or unique.
  const std::string& label() const { return label_; }
  void SetLabel(std::string label) { label_ = std::move(label); }

  /// \brief check this node's invariants (e.g. number of inputs)
  virtual Status Validate() const;

  /// \brief the ordering of the output batches
  ///
  /// This does not guarantee the batches will be emitted by this node
  /// in order. Instead it guarantees that the batches will have their
  /// ExecBatch::index property set in a way that respects this ordering.
  ///
  /// In other words, given the ordering {{"x", SortOrder::Ascending}} we
  /// know that all values of x in a batch with index N will be less than
  /// or equal to all values of x in a batch with index N+k (assuming k > 0).
  /// Furthermore, we also know that values will be sorted within a batch.
  /// Any row N will have a value of x that is less than the value for
  /// any row N+k.
  ///
  /// Note that an ordering can be both Ordering::Unordered and Ordering::Implicit.
  /// A node's output should be marked Ordering::Unordered if the order is
  /// non-deterministic. For example, a hash-join has no predictable output order.
  ///
  /// If the ordering is Ordering::Implicit then there is a meaningful order but that
  /// ordering is not represented by any column in the data. The most common case for
  /// this is when reading data from an in-memory table. The data has an implicit "row
  /// order" which is not necessarily represented in the data set.
  ///
  /// A filter or project node will not modify the ordering. Nothing needs to be done
  /// other than ensure the index assigned to output batches is the same as the
  /// input batch that was mapped.
  ///
  /// Other nodes may introduce order. For example, an order-by node will emit
  /// a brand new ordering independent of the input ordering.
  ///
  /// Finally, as described above, a node such as a hash-join or aggregation may
  /// destroy ordering (although these nodes could also choose to establish a
  /// new ordering based on the hash keys).
  ///
  /// Some nodes will require an ordering. For example, a fetch node or an
  /// asof join node will only function if the input data is ordered (for fetch
  /// it is enough to be implicitly ordered. For an asof join the ordering must
  /// be explicit and compatible with the on key.)
  ///
  /// Nodes that maintain ordering should be careful to avoid introducing gaps
  /// in the batch index. This may require emitting empty batches in order to
  /// maintain continuity.
  virtual const Ordering& ordering() const;

  /// Upstream API:
  /// These functions are called by input nodes that want to inform this node
  /// about an updated condition (a new input batch or an impending
  /// end of stream).
  ///
  /// Implementation rules:
  /// - these may be called anytime after StartProducing() has succeeded
  ///   (and even during or after StopProducing())
  /// - these may be called concurrently
  /// - these are allowed to call back into PauseProducing(), ResumeProducing()
  ///   and StopProducing()

  /// Transfer input batch to ExecNode
  ///
  /// A node will typically perform some kind of operation on the batch
  /// and then call InputReceived on its outputs with the result.
  ///
  /// Other nodes may need to accumulate some number of inputs before any
  /// output can be produced. These nodes will add the batch to some kind
  /// of in-memory accumulation queue and return.
  virtual Status InputReceived(ExecNode* input, ExecBatch batch) = 0;

  /// Mark the inputs finished after the given number of batches.
  ///
  /// This may be called before all inputs are received. This simply fixes
  /// the total number of incoming batches for an input, so that the ExecNode
  /// knows when it has received all input, regardless of order.
  virtual Status InputFinished(ExecNode* input, int total_batches) = 0;

  /// \brief Perform any needed initialization
  ///
  /// This hook performs any actions in between creation of ExecPlan and the call to
  /// StartProducing. An example could be Bloom filter pushdown. The order of ExecNodes
  /// that executes this method is undefined, but the calls are made synchronously.
  ///
  /// At this point a node can rely on all inputs & outputs (and the input schemas)
  /// being well defined.
  virtual Status Init();

  /// Lifecycle API:
  /// - start / stop to initiate and terminate production
  /// - pause / resume to apply backpressure
  ///
  /// Implementation rules:
  /// - StartProducing() should not recurse into the inputs, as it is
  ///   handled by ExecPlan::StartProducing()
  /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
  ///   concurrently, potentially even before the call to StartProducing
  ///   has finished.
  /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
  ///   by the downstream nodes' InputReceived(), InputFinished() methods
  ///
  /// StopProducing may be called due to an error, by the user (e.g. cancel), or
  /// because a node has all the data it needs (e.g. limit, top-k on sorted data).
  /// This means the method may be called multiple times and we have the following
  /// additional rules
  /// - StopProducing() must be idempotent
  /// - StopProducing() must be forwarded to inputs (this is needed for the limit/top-k
  ///   case because we may not be stopping the entire plan)

  // Right now, since synchronous calls happen in both directions (input to
  // output and then output to input), a node must be careful to be reentrant
  // against synchronous calls from its output, *and* also concurrent calls from
  // other threads. The most reliable solution is to update the internal state
  // first, and notify outputs only at the end.
  //
  // Concurrent calls to PauseProducing and ResumeProducing can be hard to sequence
  // as they may travel at different speeds through the plan.
  //
  // For example, consider a resume that comes quickly after a pause. If the source
  // receives the resume before the pause the source may think the destination is full
  // and halt production which would lead to deadlock.
  //
  // To resolve this a counter is sent for all calls to pause/resume. Only the call with
  // the highest counter value is valid. So if a call to PauseProducing(5) comes after
  // a call to ResumeProducing(6) then the source should continue producing.

  /// \brief Start producing
  ///
  /// This must only be called once.
  ///
  /// This is typically called automatically by ExecPlan::StartProducing().
  virtual Status StartProducing() = 0;

  /// \brief Pause producing temporarily
  ///
  /// \param output Pointer to the output that is full
  /// \param counter Counter used to sequence calls to pause/resume
  ///
  /// This call is a hint that an output node is currently not willing
  /// to receive data.
  ///
  /// This may be called any number of times.
  /// However, the node is still free to produce data (which may be difficult
  /// to prevent anyway if data is produced using multiple threads).
  virtual void PauseProducing(ExecNode* output, int32_t counter) = 0;

  /// \brief Resume producing after a temporary pause
  ///
  /// \param output Pointer to the output that is now free
  /// \param counter Counter used to sequence calls to pause/resume
  ///
  /// This call is a hint that an output node is willing to receive data again.
  ///
  /// This may be called any number of times.
  virtual void ResumeProducing(ExecNode* output, int32_t counter) = 0;

  /// \brief Stop producing new data
  ///
  /// If this node is a source then the source should stop generating data
  /// as quickly as possible. If this node is not a source then there is typically
  /// nothing that needs to be done although a node may choose to start ignoring incoming
  /// data.
  ///
  /// This method will be called when an error occurs in the plan
  /// This method may also be called by the user if they wish to end a plan early
  /// Finally, this method may be called if a node determines it no longer needs any more
  /// input (for example, a limit node).
  ///
  /// This method may be called multiple times.
  ///
  /// This is not a pause. There will be no way to start the source again after this has
  /// been called.
  virtual Status StopProducing();

  /// \brief a human-readable representation of this node, for display and debugging
  std::string ToString(int indent = 0) const;

 protected:
  ExecNode(ExecPlan* plan, NodeVector inputs, std::vector<std::string> input_labels,
           std::shared_ptr<Schema> output_schema);

  /// \brief node-specific implementation of StopProducing
  ///
  /// NOTE(review): presumably invoked by the public (idempotent) StopProducing
  /// at most once, guarded by stopped_ — confirm in exec_plan.cc.
  virtual Status StopProducingImpl() = 0;

  /// Provide extra info to include in the string representation.
  virtual std::string ToStringExtra(int indent = 0) const;

  // Atomic because StopProducing may be invoked concurrently (see the
  // lifecycle rules above).
  std::atomic<bool> stopped_;
  ExecPlan* plan_;
  std::string label_;

  NodeVector inputs_;
  std::vector<std::string> input_labels_;

  // Null for sink nodes (see is_sink()).
  std::shared_ptr<Schema> output_schema_;
  ExecNode* output_ = NULLPTR;
};
351
+
352
/// \brief An extensible registry for factories of ExecNodes
class ARROW_ACERO_EXPORT ExecFactoryRegistry {
 public:
  /// \brief a function that creates a node from a plan, its inputs, and options
  using Factory = std::function<Result<ExecNode*>(ExecPlan*, std::vector<ExecNode*>,
                                                  const ExecNodeOptions&)>;

  virtual ~ExecFactoryRegistry() = default;

  /// \brief Get the named factory from this registry
  ///
  /// will raise if factory_name is not found
  virtual Result<Factory> GetFactory(const std::string& factory_name) = 0;

  /// \brief Add a factory to this registry with the provided name
  ///
  /// will raise if factory_name is already in the registry
  virtual Status AddFactory(std::string factory_name, Factory factory) = 0;
};
369
+ };
370
+
371
+ /// The default registry, which includes built-in factories.
372
+ ARROW_ACERO_EXPORT
373
+ ExecFactoryRegistry* default_exec_factory_registry();
374
+
375
+ /// \brief Construct an ExecNode using the named factory
376
+ inline Result<ExecNode*> MakeExecNode(
377
+ const std::string& factory_name, ExecPlan* plan, std::vector<ExecNode*> inputs,
378
+ const ExecNodeOptions& options,
379
+ ExecFactoryRegistry* registry = default_exec_factory_registry()) {
380
+ ARROW_ASSIGN_OR_RAISE(auto factory, registry->GetFactory(factory_name));
381
+ return factory(plan, std::move(inputs), options);
382
+ }
383
+
384
+ /// @}
385
+
386
+ /// \addtogroup acero-api
387
+ /// @{
388
+
389
+ /// \brief Helper class for declaring execution nodes
390
+ ///
391
+ /// A Declaration represents an unconstructed ExecNode (and potentially an entire graph
392
+ /// since its inputs may also be Declarations)
393
+ ///
394
+ /// A Declaration can be converted to a plan and executed using one of the
395
+ /// DeclarationToXyz methods.
396
+ ///
397
+ /// For more direct control, a Declaration can be added to an existing execution
398
+ /// plan with Declaration::AddToPlan, which will recursively construct any inputs as
399
+ /// necessary.
400
+ struct ARROW_ACERO_EXPORT Declaration {
401
+ using Input = std::variant<ExecNode*, Declaration>;
402
+
403
+ Declaration() {}
404
+
405
+ /// \brief construct a declaration
406
+ /// \param factory_name the name of the exec node to construct. The node must have
407
+ /// been added to the exec node registry with this name.
408
+ /// \param inputs the inputs to the node, these should be other declarations
409
+ /// \param options options that control the behavior of the node. You must use
410
+ /// the appropriate subclass. For example, if `factory_name` is
411
+ /// "project" then `options` should be ProjectNodeOptions.
412
+ /// \param label a label to give the node. Can be used to distinguish it from other
413
+ /// nodes of the same type in the plan.
414
+ Declaration(std::string factory_name, std::vector<Input> inputs,
415
+ std::shared_ptr<ExecNodeOptions> options, std::string label)
416
+ : factory_name{std::move(factory_name)},
417
+ inputs{std::move(inputs)},
418
+ options{std::move(options)},
419
+ label{std::move(label)} {}
420
+
421
+ template <typename Options>
422
+ Declaration(std::string factory_name, std::vector<Input> inputs, Options options,
423
+ std::string label)
424
+ : Declaration{std::move(factory_name), std::move(inputs),
425
+ std::shared_ptr<ExecNodeOptions>(
426
+ std::make_shared<Options>(std::move(options))),
427
+ std::move(label)} {}
428
+
429
+ template <typename Options>
430
+ Declaration(std::string factory_name, std::vector<Input> inputs, Options options)
431
+ : Declaration{std::move(factory_name), std::move(inputs), std::move(options),
432
+ /*label=*/""} {}
433
+
434
+ template <typename Options>
435
+ Declaration(std::string factory_name, Options options)
436
+ : Declaration{std::move(factory_name), {}, std::move(options), /*label=*/""} {}
437
+
438
+ template <typename Options>
439
+ Declaration(std::string factory_name, Options options, std::string label)
440
+ : Declaration{std::move(factory_name), {}, std::move(options), std::move(label)} {}
441
+
442
+ /// \brief Convenience factory for the common case of a simple sequence of nodes.
443
+ ///
444
+ /// Each of decls will be appended to the inputs of the subsequent declaration,
445
+ /// and the final modified declaration will be returned.
446
+ ///
447
+ /// Without this convenience factory, constructing a sequence would require explicit,
448
+ /// difficult-to-read nesting:
449
+ ///
450
+ /// Declaration{"n3",
451
+ /// {
452
+ /// Declaration{"n2",
453
+ /// {
454
+ /// Declaration{"n1",
455
+ /// {
456
+ /// Declaration{"n0", N0Opts{}},
457
+ /// },
458
+ /// N1Opts{}},
459
+ /// },
460
+ /// N2Opts{}},
461
+ /// },
462
+ /// N3Opts{}};
463
+ ///
464
+ /// An equivalent Declaration can be constructed more tersely using Sequence:
465
+ ///
466
+ /// Declaration::Sequence({
467
+ /// {"n0", N0Opts{}},
468
+ /// {"n1", N1Opts{}},
469
+ /// {"n2", N2Opts{}},
470
+ /// {"n3", N3Opts{}},
471
+ /// });
472
+ static Declaration Sequence(std::vector<Declaration> decls);
473
+
474
+ /// \brief add the declaration to an already created execution plan
475
+ /// \param plan the plan to add the node to
476
+ /// \param registry the registry to use to lookup the node factory
477
+ ///
478
+ /// This method will recursively call AddToPlan on all of the declaration's inputs.
479
+ /// This method is only for advanced use when the DeclarationToXyz methods are not
480
+ /// sufficient.
481
+ ///
482
+ /// \return the instantiated execution node
483
+ Result<ExecNode*> AddToPlan(ExecPlan* plan, ExecFactoryRegistry* registry =
484
+ default_exec_factory_registry()) const;
485
+
486
+ // Validate a declaration
487
+ bool IsValid(ExecFactoryRegistry* registry = default_exec_factory_registry()) const;
488
+
489
+ /// \brief the name of the factory to use when creating a node
490
+ std::string factory_name;
491
+ /// \brief the declarations's inputs
492
+ std::vector<Input> inputs;
493
+ /// \brief options to control the behavior of the node
494
+ std::shared_ptr<ExecNodeOptions> options;
495
+ /// \brief a label to give the node in the plan
496
+ std::string label;
497
+ };
498
+
499
/// \brief How to handle unaligned buffers
enum class UnalignedBufferHandling {
  // Emit a warning when an unaligned source buffer is detected
  kWarn,
  // Do not check source buffers for alignment at all
  kIgnore,
  // Copy unaligned source buffers into newly allocated aligned buffers
  kReallocate,
  // Gracefully abort the plan when an unaligned source buffer is detected
  kError
};
501
+
502
/// \brief get the default behavior of unaligned buffer handling
///
/// This is configurable via the ACERO_ALIGNMENT_HANDLING environment variable which
/// can be set to "warn", "ignore", "reallocate", or "error".  If the environment
/// variable is not set, or is set to an invalid value, this will return kWarn
UnalignedBufferHandling GetDefaultUnalignedBufferHandling();
508
+
509
/// \brief plan-wide options that can be specified when executing an execution plan
struct ARROW_ACERO_EXPORT QueryOptions {
  /// \brief Should the plan use a legacy batching strategy
  ///
  /// This is currently in place only to support the Scanner::ToTable
  /// method.  This method relies on batch indices from the scanner
  /// remaining consistent.  This is impractical in the ExecPlan which
  /// might slice batches as needed (e.g. for a join)
  ///
  /// However, it still works for simple plans and this is the only way
  /// we have at the moment for maintaining implicit order.
  bool use_legacy_batching = false;

  /// If the output has a meaningful order then sequence the output of the plan
  ///
  /// The default behavior (std::nullopt) will sequence output batches if there
  /// is a meaningful ordering in the final node and will emit batches immediately
  /// otherwise.
  ///
  /// If explicitly set to true then plan execution will fail if there is no
  /// meaningful ordering.  This can be useful to validate a query that should
  /// be emitting ordered results.
  ///
  /// If explicitly set to false then batches will be emit immediately even if there
  /// is a meaningful ordering.  This could cause batches to be emit out of order but
  /// may offer a small decrease to latency.
  std::optional<bool> sequence_output = std::nullopt;

  /// \brief should the plan use multiple background threads for CPU-intensive work
  ///
  /// If this is false then all CPU work will be done on the calling thread.  I/O tasks
  /// will still happen on the I/O executor and may be multi-threaded (but should not use
  /// significant CPU resources).
  ///
  /// Will be ignored if custom_cpu_executor is set
  bool use_threads = true;

  /// \brief custom executor to use for CPU-intensive work
  ///
  /// Must be null or remain valid for the duration of the plan.  If this is null then
  /// a default thread pool will be chosen whose behavior will be controlled by
  /// the `use_threads` option.
  ::arrow::internal::Executor* custom_cpu_executor = NULLPTR;

  /// \brief custom executor to use for IO work
  ///
  /// Must be null or remain valid for the duration of the plan.  If this is null then
  /// the global io thread pool will be chosen whose behavior will be controlled by
  /// the "ARROW_IO_THREADS" environment.
  ::arrow::internal::Executor* custom_io_executor = NULLPTR;

  /// \brief a memory pool to use for allocations
  ///
  /// Must remain valid for the duration of the plan.
  MemoryPool* memory_pool = default_memory_pool();

  /// \brief a function registry to use for the plan
  ///
  /// Must remain valid for the duration of the plan.
  FunctionRegistry* function_registry = GetFunctionRegistry();

  /// \brief the names of the output columns
  ///
  /// If this is empty then names will be generated based on the input columns
  ///
  /// If set then the number of names must equal the number of output columns
  std::vector<std::string> field_names;

  /// \brief Policy for unaligned buffers in source data
  ///
  /// Various compute functions and acero internals will type pun array
  /// buffers from uint8_t* to some kind of value type (e.g. we might
  /// cast to int32_t* to add two int32 arrays)
  ///
  /// If the buffer is poorly aligned (e.g. an int32 array is not aligned
  /// on a 4-byte boundary) then this is technically undefined behavior in C++.
  /// However, most modern compilers and CPUs are fairly tolerant of this
  /// behavior and nothing bad (beyond a small hit to performance) is likely
  /// to happen.
  ///
  /// Note that this only applies to source buffers.  All buffers allocated internally
  /// by Acero will be suitably aligned.
  ///
  /// If this field is set to kWarn then Acero will check if any buffers are unaligned
  /// and, if they are, will emit a warning.
  ///
  /// If this field is set to kReallocate then Acero will allocate a new, suitably aligned
  /// buffer and copy the contents from the old buffer into this new buffer.
  ///
  /// If this field is set to kError then Acero will gracefully abort the plan instead.
  ///
  /// If this field is set to kIgnore then Acero will not even check if the buffers are
  /// unaligned.
  ///
  /// If this field is not set then it will be treated as kWarn unless overridden
  /// by the ACERO_ALIGNMENT_HANDLING environment variable
  std::optional<UnalignedBufferHandling> unaligned_buffer_handling;
};
606
+
607
+ /// \brief Calculate the output schema of a declaration
608
+ ///
609
+ /// This does not actually execute the plan. This operation may fail if the
610
+ /// declaration represents an invalid plan (e.g. a project node with multiple inputs)
611
+ ///
612
+ /// \param declaration A declaration describing an execution plan
613
+ /// \param function_registry The function registry to use for function execution. If null
614
+ /// then the default function registry will be used.
615
+ ///
616
+ /// \return the schema that batches would have after going through the execution plan
617
+ ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> DeclarationToSchema(
618
+ const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR);
619
+
620
+ /// \brief Create a string representation of a plan
621
+ ///
622
+ /// This representation is for debug purposes only.
623
+ ///
624
+ /// Conversion to a string may fail if the declaration represents an
625
+ /// invalid plan.
626
+ ///
627
+ /// Use Substrait for complete serialization of plans
628
+ ///
629
+ /// \param declaration A declaration describing an execution plan
630
+ /// \param function_registry The function registry to use for function execution. If null
631
+ /// then the default function registry will be used.
632
+ ///
633
+ /// \return a string representation of the plan suitable for debugging output
634
+ ARROW_ACERO_EXPORT Result<std::string> DeclarationToString(
635
+ const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR);
636
+
637
+ /// \brief Utility method to run a declaration and collect the results into a table
638
+ ///
639
+ /// \param declaration A declaration describing the plan to run
640
+ /// \param use_threads If `use_threads` is false then all CPU work will be done on the
641
+ /// calling thread. I/O tasks will still happen on the I/O executor
642
+ /// and may be multi-threaded (but should not use significant CPU
643
+ /// resources).
644
+ /// \param memory_pool The memory pool to use for allocations made while running the plan.
645
+ /// \param function_registry The function registry to use for function execution. If null
646
+ /// then the default function registry will be used.
647
+ ///
648
+ /// This method will add a sink node to the declaration to collect results into a
649
+ /// table. It will then create an ExecPlan from the declaration, start the exec plan,
650
+ /// block until the plan has finished, and return the created table.
651
+ ARROW_ACERO_EXPORT Result<std::shared_ptr<Table>> DeclarationToTable(
652
+ Declaration declaration, bool use_threads = true,
653
+ MemoryPool* memory_pool = default_memory_pool(),
654
+ FunctionRegistry* function_registry = NULLPTR);
655
+
656
+ ARROW_ACERO_EXPORT Result<std::shared_ptr<Table>> DeclarationToTable(
657
+ Declaration declaration, QueryOptions query_options);
658
+
659
+ /// \brief Asynchronous version of \see DeclarationToTable
660
+ ///
661
+ /// \param declaration A declaration describing the plan to run
662
+ /// \param use_threads The behavior of use_threads is slightly different than the
663
+ /// synchronous version since we cannot run synchronously on the
664
+ /// calling thread. Instead, if use_threads=false then a new thread
665
+ /// pool will be created with a single thread and this will be used for
666
+ /// all compute work.
667
+ /// \param memory_pool The memory pool to use for allocations made while running the plan.
668
+ /// \param function_registry The function registry to use for function execution. If null
669
+ /// then the default function registry will be used.
670
+ ARROW_ACERO_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
671
+ Declaration declaration, bool use_threads = true,
672
+ MemoryPool* memory_pool = default_memory_pool(),
673
+ FunctionRegistry* function_registry = NULLPTR);
674
+
675
+ /// \brief Overload of \see DeclarationToTableAsync accepting a custom exec context
676
+ ///
677
+ /// The executor must be specified (cannot be null) and must be kept alive until the
678
+ /// returned future finishes.
679
+ ARROW_ACERO_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
680
+ Declaration declaration, ExecContext custom_exec_context);
681
+
682
+ /// \brief a collection of exec batches with a common schema
683
+ struct BatchesWithCommonSchema {
684
+ std::vector<ExecBatch> batches;
685
+ std::shared_ptr<Schema> schema;
686
+ };
687
+
688
+ /// \brief Utility method to run a declaration and collect the results into ExecBatch
689
+ /// vector
690
+ ///
691
+ /// \see DeclarationToTable for details on threading & execution
692
+ ARROW_ACERO_EXPORT Result<BatchesWithCommonSchema> DeclarationToExecBatches(
693
+ Declaration declaration, bool use_threads = true,
694
+ MemoryPool* memory_pool = default_memory_pool(),
695
+ FunctionRegistry* function_registry = NULLPTR);
696
+
697
+ ARROW_ACERO_EXPORT Result<BatchesWithCommonSchema> DeclarationToExecBatches(
698
+ Declaration declaration, QueryOptions query_options);
699
+
700
+ /// \brief Asynchronous version of \see DeclarationToExecBatches
701
+ ///
702
+ /// \see DeclarationToTableAsync for details on threading & execution
703
+ ARROW_ACERO_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
704
+ Declaration declaration, bool use_threads = true,
705
+ MemoryPool* memory_pool = default_memory_pool(),
706
+ FunctionRegistry* function_registry = NULLPTR);
707
+
708
+ /// \brief Overload of \see DeclarationToExecBatchesAsync accepting a custom exec context
709
+ ///
710
+ /// \see DeclarationToTableAsync for details on threading & execution
711
+ ARROW_ACERO_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
712
+ Declaration declaration, ExecContext custom_exec_context);
713
+
714
+ /// \brief Utility method to run a declaration and collect the results into a vector
715
+ ///
716
+ /// \see DeclarationToTable for details on threading & execution
717
+ ARROW_ACERO_EXPORT Result<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatches(
718
+ Declaration declaration, bool use_threads = true,
719
+ MemoryPool* memory_pool = default_memory_pool(),
720
+ FunctionRegistry* function_registry = NULLPTR);
721
+
722
+ ARROW_ACERO_EXPORT Result<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatches(
723
+ Declaration declaration, QueryOptions query_options);
724
+
725
+ /// \brief Asynchronous version of \see DeclarationToBatches
726
+ ///
727
+ /// \see DeclarationToTableAsync for details on threading & execution
728
+ ARROW_ACERO_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>>
729
+ DeclarationToBatchesAsync(Declaration declaration, bool use_threads = true,
730
+ MemoryPool* memory_pool = default_memory_pool(),
731
+ FunctionRegistry* function_registry = NULLPTR);
732
+
733
+ /// \brief Overload of \see DeclarationToBatchesAsync accepting a custom exec context
734
+ ///
735
+ /// \see DeclarationToTableAsync for details on threading & execution
736
+ ARROW_ACERO_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>>
737
+ DeclarationToBatchesAsync(Declaration declaration, ExecContext exec_context);
738
+
739
+ /// \brief Utility method to run a declaration and return results as a RecordBatchReader
740
+ ///
741
+ /// If an exec context is not provided then a default exec context will be used based
742
+ /// on the value of `use_threads`. If `use_threads` is false then the CPU executor will
743
+ /// be a serial executor and all CPU work will be done on the calling thread. I/O tasks
744
+ /// will still happen on the I/O executor and may be multi-threaded.
745
+ ///
746
+ /// If `use_threads` is false then all CPU work will happen during the calls to
747
+ /// RecordBatchReader::Next and no CPU work will happen in the background. If
748
+ /// `use_threads` is true then CPU work will happen on the CPU thread pool and tasks may
749
+ /// run in between calls to RecordBatchReader::Next. If the returned reader is not
750
+ /// consumed quickly enough then the plan will eventually pause as the backpressure queue
751
+ /// fills up.
752
+ ///
753
+ /// If a custom exec context is provided then the value of `use_threads` will be ignored.
754
+ ///
755
+ /// The returned RecordBatchReader can be closed early to cancel the computation of record
756
+ /// batches. In this case, only errors encountered by the computation may be reported. In
757
+ /// particular, no cancellation error may be reported.
758
+ ARROW_ACERO_EXPORT Result<std::unique_ptr<RecordBatchReader>> DeclarationToReader(
759
+ Declaration declaration, bool use_threads = true,
760
+ MemoryPool* memory_pool = default_memory_pool(),
761
+ FunctionRegistry* function_registry = NULLPTR);
762
+
763
+ ARROW_ACERO_EXPORT Result<std::unique_ptr<RecordBatchReader>> DeclarationToReader(
764
+ Declaration declaration, QueryOptions query_options);
765
+
766
+ /// \brief Utility method to run a declaration and ignore results
767
+ ///
768
+ /// This can be useful when the data are consumed as part of the plan itself, for
769
+ /// example, when the plan ends with a write node.
770
+ ///
771
+ /// \see DeclarationToTable for details on threading & execution
772
+ ARROW_ACERO_EXPORT Status
773
+ DeclarationToStatus(Declaration declaration, bool use_threads = true,
774
+ MemoryPool* memory_pool = default_memory_pool(),
775
+ FunctionRegistry* function_registry = NULLPTR);
776
+
777
+ ARROW_ACERO_EXPORT Status DeclarationToStatus(Declaration declaration,
778
+ QueryOptions query_options);
779
+
780
+ /// \brief Asynchronous version of \see DeclarationToStatus
781
+ ///
782
+ /// This can be useful when the data are consumed as part of the plan itself, for
783
+ /// example, when the plan ends with a write node.
784
+ ///
785
+ /// \see DeclarationToTableAsync for details on threading & execution
786
+ ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync(
787
+ Declaration declaration, bool use_threads = true,
788
+ MemoryPool* memory_pool = default_memory_pool(),
789
+ FunctionRegistry* function_registry = NULLPTR);
790
+
791
+ /// \brief Overload of \see DeclarationToStatusAsync accepting a custom exec context
792
+ ///
793
+ /// \see DeclarationToTableAsync for details on threading & execution
794
+ ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync(Declaration declaration,
795
+ ExecContext exec_context);
796
+
797
+ /// @}
798
+
799
+ /// \brief Wrap an ExecBatch generator in a RecordBatchReader.
800
+ ///
801
+ /// The RecordBatchReader does not impose any ordering on emitted batches.
802
+ ARROW_ACERO_EXPORT
803
+ std::shared_ptr<RecordBatchReader> MakeGeneratorReader(
804
+ std::shared_ptr<Schema>, std::function<Future<std::optional<ExecBatch>>()>,
805
+ MemoryPool*);
806
+
807
+ constexpr int kDefaultBackgroundMaxQ = 32;
808
+ constexpr int kDefaultBackgroundQRestart = 16;
809
+
810
+ /// \brief Make a generator of RecordBatchReaders
811
+ ///
812
+ /// Useful as a source node for an Exec plan
813
+ ARROW_ACERO_EXPORT
814
+ Result<std::function<Future<std::optional<ExecBatch>>()>> MakeReaderGenerator(
815
+ std::shared_ptr<RecordBatchReader> reader, arrow::internal::Executor* io_executor,
816
+ int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart);
817
+
818
+ } // namespace acero
819
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/acero/accumulation_queue.h"
25
+ #include "arrow/acero/bloom_filter.h"
26
+ #include "arrow/acero/options.h"
27
+ #include "arrow/acero/query_context.h"
28
+ #include "arrow/acero/schema_util.h"
29
+ #include "arrow/acero/task_util.h"
30
+ #include "arrow/result.h"
31
+ #include "arrow/status.h"
32
+ #include "arrow/type.h"
33
+ #include "arrow/util/tracing.h"
34
+
35
+ namespace arrow {
36
+ namespace acero {
37
+
38
+ using util::AccumulationQueue;
39
+
40
+ class HashJoinImpl {
41
+ public:
42
+ using OutputBatchCallback = std::function<Status(int64_t, ExecBatch)>;
43
+ using BuildFinishedCallback = std::function<Status(size_t)>;
44
+ using FinishedCallback = std::function<Status(int64_t)>;
45
+ using RegisterTaskGroupCallback = std::function<int(
46
+ std::function<Status(size_t, int64_t)>, std::function<Status(size_t)>)>;
47
+ using StartTaskGroupCallback = std::function<Status(int, int64_t)>;
48
+ using AbortContinuationImpl = std::function<void()>;
49
+
50
+ virtual ~HashJoinImpl() = default;
51
+ virtual Status Init(QueryContext* ctx, JoinType join_type, size_t num_threads,
52
+ const HashJoinProjectionMaps* proj_map_left,
53
+ const HashJoinProjectionMaps* proj_map_right,
54
+ std::vector<JoinKeyCmp> key_cmp, Expression filter,
55
+ RegisterTaskGroupCallback register_task_group_callback,
56
+ StartTaskGroupCallback start_task_group_callback,
57
+ OutputBatchCallback output_batch_callback,
58
+ FinishedCallback finished_callback) = 0;
59
+
60
+ virtual Status BuildHashTable(size_t thread_index, AccumulationQueue batches,
61
+ BuildFinishedCallback on_finished) = 0;
62
+ virtual Status ProbeSingleBatch(size_t thread_index, ExecBatch batch) = 0;
63
+ virtual Status ProbingFinished(size_t thread_index) = 0;
64
+ virtual void Abort(TaskScheduler::AbortContinuationImpl pos_abort_callback) = 0;
65
+ virtual std::string ToString() const = 0;
66
+
67
+ static Result<std::unique_ptr<HashJoinImpl>> MakeBasic();
68
+ static Result<std::unique_ptr<HashJoinImpl>> MakeSwiss();
69
+
70
+ protected:
71
+ arrow::util::tracing::Span span_;
72
+ };
73
+
74
+ } // namespace acero
75
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_dict.h ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <unordered_map>
22
+
23
+ #include "arrow/acero/schema_util.h"
24
+ #include "arrow/compute/exec.h"
25
+ #include "arrow/compute/kernels/row_encoder_internal.h"
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type.h"
29
+
30
+ // This file contains hash join logic related to handling of dictionary encoded key
31
+ // columns.
32
+ //
33
+ // A key column from probe side of the join can be matched against a key column from build
34
+ // side of the join, as long as the underlying value types are equal. That means that:
35
+ // - both scalars and arrays can be used and even mixed in the same column
36
+ // - dictionary column can be matched against non-dictionary column if underlying value
37
+ // types are equal
38
+ // - dictionary column can be matched against dictionary column with a different index
39
+ // type, and potentially using a different dictionary, if underlying value types are equal
40
+ //
41
+ // We currently require in hash join that for all dictionary encoded columns, the same
42
+ // dictionary is used in all input exec batches.
43
+ //
44
+ // In order to allow matching columns with different dictionaries, different dictionary
45
+ // index types, and dictionary key against non-dictionary key, internally comparisons will
46
+ // be evaluated after remapping values on both sides of the join to a common
47
+ // representation (which will be called "unified representation"). This common
48
+ // representation is a column of int32() type (not a dictionary column). It represents an
49
+ // index in the unified dictionary computed for the (only) dictionary present on build
50
+ // side (an empty dictionary is still created for an empty build side). Null value is
51
+ // always represented in this common representation as null int32 value, unified
52
+ // dictionary will never contain a null value (so there is no ambiguity of representing
53
+ // nulls as either index to a null entry in the dictionary or null index).
54
+ //
55
+ // Unified dictionary represents values present on build side. There may be values on
56
+ // probe side that are not present in it. All such values, that are not null, are mapped
57
+ // in the common representation to a special constant kMissingValueId.
58
+ //
59
+
60
+ namespace arrow {
61
+
62
+ using compute::ExecBatch;
63
+ using compute::ExecContext;
64
+ using compute::internal::RowEncoder;
65
+
66
+ namespace acero {
67
+
68
+ /// Helper class with operations that are stateless and common to processing of dictionary
69
+ /// keys on both build and probe side.
70
+ class HashJoinDictUtil {
71
+ public:
72
+ // Null values in unified representation are always represented as null that has
73
+ // corresponding integer set to this constant
74
+ static constexpr int32_t kNullId = 0;
75
+ // Constant representing a value, that is not null, missing on the build side, in
76
+ // unified representation.
77
+ static constexpr int32_t kMissingValueId = -1;
78
+
79
+ // Check if data types of corresponding pair of key column on build and probe side are
80
+ // compatible
81
+ static bool KeyDataTypesValid(const std::shared_ptr<DataType>& probe_data_type,
82
+ const std::shared_ptr<DataType>& build_data_type);
83
+
84
+ // Input must be dictionary array or dictionary scalar.
85
+ // A precomputed and provided here lookup table in the form of int32() array will be
86
+ // used to remap input indices to unified representation.
87
+ //
88
+ static Result<std::shared_ptr<ArrayData>> IndexRemapUsingLUT(
89
+ ExecContext* ctx, const Datum& indices, int64_t batch_length,
90
+ const std::shared_ptr<ArrayData>& map_array,
91
+ const std::shared_ptr<DataType>& data_type);
92
+
93
+ // Return int32() array that contains indices of input dictionary array or scalar after
94
+ // type casting.
95
+ static Result<std::shared_ptr<ArrayData>> ConvertToInt32(
96
+ const std::shared_ptr<DataType>& from_type, const Datum& input,
97
+ int64_t batch_length, ExecContext* ctx);
98
+
99
+ // Return an array that contains elements of input int32() array after casting to a
100
+ // given integer type. This is used for mapping unified representation stored in the
101
+ // hash table on build side back to original input data type of hash join, when
102
+ // outputting hash join results to parent exec node.
103
+ //
104
+ static Result<std::shared_ptr<ArrayData>> ConvertFromInt32(
105
+ const std::shared_ptr<DataType>& to_type, const Datum& input, int64_t batch_length,
106
+ ExecContext* ctx);
107
+
108
+ // Return dictionary referenced in either dictionary array or dictionary scalar
109
+ static std::shared_ptr<Array> ExtractDictionary(const Datum& data);
110
+ };
111
+
112
+ /// Implements processing of dictionary arrays/scalars in key columns on the build side of
113
+ /// a hash join.
114
+ /// Each instance of this class corresponds to a single column and stores and
115
+ /// processes only the information related to that column.
116
+ /// Const methods are thread-safe, non-const methods are not (the caller must make sure
117
+ /// that only one thread at any time will access them).
118
+ ///
119
+ class HashJoinDictBuild {
120
+ public:
121
+ // Returns true if the key column (described in input by its data type) requires any
122
+ // pre- or post-processing related to handling dictionaries.
123
+ //
124
+ static bool KeyNeedsProcessing(const std::shared_ptr<DataType>& build_data_type) {
125
+ return (build_data_type->id() == Type::DICTIONARY);
126
+ }
127
+
128
+ // Data type of unified representation
129
+ static std::shared_ptr<DataType> DataTypeAfterRemapping() { return int32(); }
130
+
131
+ // Should be called only once in hash join, before processing any build or probe
132
+ // batches.
133
+ //
134
+ // Takes a pointer to the dictionary for a corresponding key column on the build side as
135
+ // an input. If the build side is empty, it still needs to be called, but with
136
+ // dictionary pointer set to null.
137
+ //
138
+ // Currently it is required that all input batches on build side share the same
139
+ // dictionary. For each input batch during its pre-processing, dictionary will be
140
+ // checked and error will be returned if it is different then the one provided in the
141
+ // call to this method.
142
+ //
143
+ // Unifies the dictionary. The order of the values is still preserved.
144
+ // Null and duplicate entries are removed. If the dictionary is already unified, its
145
+ // copy will be produced and stored within this class.
146
+ //
147
+ // Prepares the mapping from ids within original dictionary to the ids in the resulting
148
+ // dictionary. This is used later on to pre-process (map to unified representation) key
149
+ // column on build side.
150
+ //
151
+ // Prepares the reverse mapping (in the form of hash table) from values to the ids in
152
+ // the resulting dictionary. This will be used later on to pre-process (map to unified
153
+ // representation) key column on probe side. Values on probe side that are not present
154
+ // in the original dictionary will be mapped to a special constant kMissingValueId. The
155
+ // exception is made for nulls, which get always mapped to nulls (both when null is
156
+ // represented as a dictionary id pointing to a null and a null dictionary id).
157
+ //
158
+ Status Init(ExecContext* ctx, std::shared_ptr<Array> dictionary,
159
+ std::shared_ptr<DataType> index_type, std::shared_ptr<DataType> value_type);
160
+
161
+ // Remap array or scalar values into unified representation (array of int32()).
162
+ // Outputs kMissingValueId if input value is not found in the unified dictionary.
163
+ // Outputs null for null input value (with corresponding data set to kNullId).
164
+ //
165
+ Result<std::shared_ptr<ArrayData>> RemapInputValues(ExecContext* ctx,
166
+ const Datum& values,
167
+ int64_t batch_length) const;
168
+
169
+ // Remap dictionary array or dictionary scalar on build side to unified representation.
170
+ // Dictionary referenced in the input must match the dictionary that was
171
+ // given during initialization.
172
+ // The output is a dictionary array that references unified dictionary.
173
+ //
174
+ Result<std::shared_ptr<ArrayData>> RemapInput(
175
+ ExecContext* ctx, const Datum& indices, int64_t batch_length,
176
+ const std::shared_ptr<DataType>& data_type) const;
177
+
178
+ // Outputs dictionary array referencing unified dictionary, given an array with 32-bit
179
+ // ids.
180
+ // Used to post-process values looked up in a hash table on build side of the hash join
181
+ // before outputting to the parent exec node.
182
+ //
183
+ Result<std::shared_ptr<ArrayData>> RemapOutput(const ArrayData& indices32Bit,
184
+ ExecContext* ctx) const;
185
+
186
+ // Release shared pointers and memory
187
+ void CleanUp();
188
+
189
+ private:
190
+ // Data type of dictionary ids for the input dictionary on build side
191
+ std::shared_ptr<DataType> index_type_;
192
+ // Data type of values for the input dictionary on build side
193
+ std::shared_ptr<DataType> value_type_;
194
+ // Mapping from (encoded as string) values to the ids in unified dictionary
195
+ std::unordered_map<std::string, int32_t> hash_table_;
196
+ // Mapping from input dictionary ids to unified dictionary ids
197
+ std::shared_ptr<ArrayData> remapped_ids_;
198
+ // Input dictionary
199
+ std::shared_ptr<Array> dictionary_;
200
+ // Unified dictionary
201
+ std::shared_ptr<ArrayData> unified_dictionary_;
202
+ };
203
+
204
+ /// Implements processing of dictionary arrays/scalars in key columns on the probe side of
205
+ /// a hash join.
206
+ /// Each instance of this class corresponds to a single column and stores and
207
+ /// processes only the information related to that column.
208
+ /// It is not thread-safe - every participating thread should use its own instance of
209
+ /// this class.
210
+ ///
211
+ class HashJoinDictProbe {
212
+ public:
213
+ static bool KeyNeedsProcessing(const std::shared_ptr<DataType>& probe_data_type,
214
+ const std::shared_ptr<DataType>& build_data_type);
215
+
216
+ // Data type of the result of remapping input key column.
217
+ //
218
+ // The result of remapping is what is used in hash join for matching keys on build and
219
+ // probe side. The exact data types may be different, as described below, and therefore
220
+ // a common representation is needed for simplifying comparisons of pairs of keys on
221
+ // both sides.
222
+ //
223
+ // We support matching key that is of non-dictionary type with key that is of dictionary
224
+ // type, as long as the underlying value types are equal. We support matching when both
225
+ // keys are of dictionary type, regardless whether underlying dictionary index types are
226
+ // the same or not.
227
+ //
228
+ static std::shared_ptr<DataType> DataTypeAfterRemapping(
229
+ const std::shared_ptr<DataType>& build_data_type);
230
+
231
+ // Should only be called if KeyNeedsProcessing method returns true for a pair of
232
+ // corresponding key columns from build and probe side.
233
+ // Converts values in order to match the common representation for
234
+ // both build and probe side used in hash table comparison.
235
+ // Supports arrays and scalars as input.
236
+ // Argument opt_build_side should be null if dictionary key on probe side is matched
237
+ // with non-dictionary key on build side.
238
+ //
239
+ Result<std::shared_ptr<ArrayData>> RemapInput(
240
+ const HashJoinDictBuild* opt_build_side, const Datum& data, int64_t batch_length,
241
+ const std::shared_ptr<DataType>& probe_data_type,
242
+ const std::shared_ptr<DataType>& build_data_type, ExecContext* ctx);
243
+
244
+ void CleanUp();
245
+
246
+ private:
247
+ // May be null if probe side key is non-dictionary. Otherwise it is used to verify that
248
+ // only a single dictionary is referenced in exec batch on probe side of hash join.
249
+ std::shared_ptr<Array> dictionary_;
250
+ // Mapping from dictionary on probe side of hash join (if it is used) to unified
251
+ // representation.
252
+ std::shared_ptr<ArrayData> remapped_ids_;
253
+ // Encoder of key columns that uses unified representation instead of original data type
254
+ // for key columns that need to use it (have dictionaries on either side of the join).
255
+ RowEncoder encoder_;
256
+ };
257
+
258
+ // Encapsulates dictionary handling logic for build side of hash join.
259
+ //
260
+ class HashJoinDictBuildMulti {
261
+ public:
262
+ Status Init(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
263
+ const ExecBatch* opt_non_empty_batch, ExecContext* ctx);
264
+ static void InitEncoder(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
265
+ RowEncoder* encoder, ExecContext* ctx);
266
+ Status EncodeBatch(size_t thread_index,
267
+ const SchemaProjectionMaps<HashJoinProjection>& proj_map,
268
+ const ExecBatch& batch, RowEncoder* encoder, ExecContext* ctx) const;
269
+ Status PostDecode(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
270
+ ExecBatch* decoded_key_batch, ExecContext* ctx);
271
+ const HashJoinDictBuild& get_dict_build(int icol) const { return remap_imp_[icol]; }
272
+
273
+ private:
274
+ std::vector<bool> needs_remap_;
275
+ std::vector<HashJoinDictBuild> remap_imp_;
276
+ };
277
+
278
+ // Encapsulates dictionary handling logic for probe side of hash join
279
+ //
280
+ class HashJoinDictProbeMulti {
281
+ public:
282
+ void Init(size_t num_threads);
283
+ bool BatchRemapNeeded(size_t thread_index,
284
+ const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
285
+ const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
286
+ ExecContext* ctx);
287
+ Status EncodeBatch(size_t thread_index,
288
+ const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
289
+ const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
290
+ const HashJoinDictBuildMulti& dict_build, const ExecBatch& batch,
291
+ RowEncoder** out_encoder, ExecBatch* opt_out_key_batch,
292
+ ExecContext* ctx);
293
+
294
+ private:
295
+ void InitLocalStateIfNeeded(
296
+ size_t thread_index, const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
297
+ const SchemaProjectionMaps<HashJoinProjection>& proj_map_build, ExecContext* ctx);
298
+ static void InitEncoder(const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
299
+ const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
300
+ RowEncoder* encoder, ExecContext* ctx);
301
+ struct ThreadLocalState {
302
+ bool is_initialized;
303
+ // Whether any key column needs remapping (because of dictionaries used) before doing
304
+ // join hash table lookups
305
+ bool any_needs_remap;
306
+ // Whether each key column needs remapping before doing join hash table lookups
307
+ std::vector<bool> needs_remap;
308
+ std::vector<HashJoinDictProbe> remap_imp;
309
+ // Encoder of key columns that uses unified representation instead of original data
310
+ // type for key columns that need to use it (have dictionaries on either side of the
311
+ // join).
312
+ RowEncoder post_remap_encoder;
313
+ };
314
+ std::vector<ThreadLocalState> local_states_;
315
+ };
316
+
317
+ } // namespace acero
318
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <vector>
21
+
22
+ #include "arrow/acero/options.h"
23
+ #include "arrow/acero/schema_util.h"
24
+ #include "arrow/result.h"
25
+ #include "arrow/status.h"
26
+
27
+ namespace arrow {
28
+
29
+ using compute::ExecContext;
30
+
31
+ namespace acero {
32
+
33
// Manages the schemas involved in a hash join: input (left/right), key,
// payload, filter, and output projections, exposed through proj_maps.
class ARROW_ACERO_EXPORT HashJoinSchema {
 public:
  // Initialize projections with all non-key fields used as output.
  Status Init(JoinType join_type, const Schema& left_schema,
              const std::vector<FieldRef>& left_keys, const Schema& right_schema,
              const std::vector<FieldRef>& right_keys, const Expression& filter,
              const std::string& left_field_name_prefix,
              const std::string& right_field_name_prefix);

  // Initialize projections with explicitly selected output fields per side.
  Status Init(JoinType join_type, const Schema& left_schema,
              const std::vector<FieldRef>& left_keys,
              const std::vector<FieldRef>& left_output, const Schema& right_schema,
              const std::vector<FieldRef>& right_keys,
              const std::vector<FieldRef>& right_output, const Expression& filter,
              const std::string& left_field_name_prefix,
              const std::string& right_field_name_prefix);

  // Validate that keys/outputs resolve against the schemas and are usable for
  // the given join type.
  static Status ValidateSchemas(JoinType join_type, const Schema& left_schema,
                                const std::vector<FieldRef>& left_keys,
                                const std::vector<FieldRef>& left_output,
                                const Schema& right_schema,
                                const std::vector<FieldRef>& right_keys,
                                const std::vector<FieldRef>& right_output,
                                const std::string& left_field_name_prefix,
                                const std::string& right_field_name_prefix);

  // True if any participating column is dictionary-encoded.
  bool HasDictionaries() const;

  // True if any participating column uses a large binary-like type.
  bool HasLargeBinary() const;

  // Bind `filter` against the combined filter schema of both inputs.
  Result<Expression> BindFilter(Expression filter, const Schema& left_schema,
                                const Schema& right_schema, ExecContext* exec_context);
  // Build the joined output schema, disambiguating duplicate names with the
  // given per-side suffixes.
  std::shared_ptr<Schema> MakeOutputSchema(const std::string& left_field_name_suffix,
                                           const std::string& right_field_name_suffix);

  // True if the left side has no payload (non-key) columns.
  bool LeftPayloadIsEmpty() { return PayloadIsEmpty(0); }

  // True if the right side has no payload (non-key) columns.
  bool RightPayloadIsEmpty() { return PayloadIsEmpty(1); }

  // Sentinel used by projection maps for a field that is not present.
  static int kMissingField() {
    return SchemaProjectionMaps<HashJoinProjection>::kMissingField;
  }

  // Projection maps for the two inputs: index 0 = left, index 1 = right
  // (see PayloadIsEmpty below for the side convention).
  SchemaProjectionMaps<HashJoinProjection> proj_maps[2];

 private:
  static bool IsTypeSupported(const DataType& type);

  // Collect the fields referenced by `filter`, split per input side.
  Status CollectFilterColumns(std::vector<FieldRef>& left_filter,
                              std::vector<FieldRef>& right_filter,
                              const Expression& filter, const Schema& left_schema,
                              const Schema& right_schema);

  // Rewrite `filter` so its field references index into the filter schema
  // (right-side fields offset by `right_filter_offset`).
  Expression RewriteFilterToUseFilterSchema(int right_filter_offset,
                                            const SchemaProjectionMap& left_to_filter,
                                            const SchemaProjectionMap& right_to_filter,
                                            const Expression& filter);

  // side: 0 = left, 1 = right
  bool PayloadIsEmpty(int side) {
    ARROW_DCHECK(side == 0 || side == 1);
    return proj_maps[side].num_cols(HashJoinProjection::PAYLOAD) == 0;
  }

  // Compute the payload field set: fields of `schema` needed for output or
  // filtering that are not key fields.
  static Result<std::vector<FieldRef>> ComputePayload(const Schema& schema,
                                                      const std::vector<FieldRef>& output,
                                                      const std::vector<FieldRef>& filter,
                                                      const std::vector<FieldRef>& key);
};
100
+
101
+ } // namespace acero
102
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/map_node.h ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <vector>
24
+
25
+ #include "arrow/acero/exec_plan.h"
26
+ #include "arrow/acero/util.h"
27
+ #include "arrow/acero/visibility.h"
28
+ #include "arrow/compute/type_fwd.h"
29
+ #include "arrow/status.h"
30
+ #include "arrow/type_fwd.h"
31
+ #include "arrow/util/cancel.h"
32
+ #include "arrow/util/type_fwd.h"
33
+
34
+ namespace arrow {
35
+ namespace acero {
36
+
37
/// A utility base class for simple exec nodes with one input
///
/// Pause/Resume Producing are forwarded appropriately
/// There is nothing to do in StopProducingImpl
///
/// An AtomicCounter is used to keep track of when all data has arrived. When it
/// has the Finish() method will be invoked
class ARROW_ACERO_EXPORT MapNode : public ExecNode, public TracedNode {
 public:
  MapNode(ExecPlan* plan, std::vector<ExecNode*> inputs,
          std::shared_ptr<Schema> output_schema);

  /// \brief Called by the input to report the total number of batches it will send
  Status InputFinished(ExecNode* input, int total_batches) override;

  Status StartProducing() override;

  /// Backpressure is forwarded to this node's input
  void PauseProducing(ExecNode* output, int32_t counter) override;

  /// Backpressure release is forwarded to this node's input
  void ResumeProducing(ExecNode* output, int32_t counter) override;

  /// \brief Receive a batch, transform it via ProcessBatch, and push it downstream
  Status InputReceived(ExecNode* input, ExecBatch batch) override;

  const Ordering& ordering() const override;

 protected:
  Status StopProducingImpl() override;

  /// Transform a batch
  ///
  /// The output batch will have the same guarantee as the input batch
  /// If this was the last batch this call may trigger Finish()
  virtual Result<ExecBatch> ProcessBatch(ExecBatch batch) = 0;

  /// Function called after all data has been received
  ///
  /// By default this does nothing. Override this to provide a custom implementation.
  virtual void Finish();

 protected:
  // Counter for the number of batches received
  AtomicCounter input_counter_;
};
79
+
80
+ } // namespace acero
81
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/options.h ADDED
@@ -0,0 +1,866 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <optional>
23
+ #include <string>
24
+ #include <vector>
25
+
26
+ #include "arrow/acero/type_fwd.h"
27
+ #include "arrow/acero/visibility.h"
28
+ #include "arrow/compute/api_aggregate.h"
29
+ #include "arrow/compute/api_vector.h"
30
+ #include "arrow/compute/exec.h"
31
+ #include "arrow/compute/expression.h"
32
+ #include "arrow/record_batch.h"
33
+ #include "arrow/result.h"
34
+ #include "arrow/util/async_generator.h"
35
+ #include "arrow/util/async_util.h"
36
+
37
+ namespace arrow {
38
+
39
+ using compute::Aggregate;
40
+ using compute::ExecBatch;
41
+ using compute::Expression;
42
+ using compute::literal;
43
+ using compute::Ordering;
44
+ using compute::SelectKOptions;
45
+ using compute::SortOptions;
46
+
47
+ namespace internal {
48
+
49
+ class Executor;
50
+
51
+ } // namespace internal
52
+
53
+ namespace acero {
54
+
55
+ /// \brief This must not be used in release-mode
56
+ struct DebugOptions;
57
+
58
+ using AsyncExecBatchGenerator = AsyncGenerator<std::optional<ExecBatch>>;
59
+
60
+ /// \addtogroup acero-nodes
61
+ /// @{
62
+
63
/// \brief A base class for all options objects
///
/// The only time this is used directly is when a node has no configuration
class ARROW_ACERO_EXPORT ExecNodeOptions {
 public:
  virtual ~ExecNodeOptions() = default;

  /// \brief Debug-only hooks; this must not be used in release-mode
  std::shared_ptr<DebugOptions> debug_opts;
};
73
+
74
/// \brief A node representing a generic source of data for Acero
///
/// The source node will start calling `generator` during StartProducing. An initial
/// task will be created that will call `generator`. It will not call `generator`
/// reentrantly. If the source can be read in parallel then those details should be
/// encapsulated within `generator`.
///
/// For each batch received a new task will be created to push that batch downstream.
/// This task will slice smaller units of size `ExecPlan::kMaxBatchSize` from the
/// parent batch and call InputReceived. Thus, if the `generator` yields a large
/// batch it may result in several calls to InputReceived.
///
/// The SourceNode will, by default, assign an implicit ordering to outgoing batches.
/// This is valid as long as the generator generates batches in a deterministic fashion.
/// Currently, the only way to override this is to subclass the SourceNode.
///
/// This node is not generally used directly but can serve as the basis for various
/// specialized nodes.
class ARROW_ACERO_EXPORT SourceNodeOptions : public ExecNodeOptions {
 public:
  /// Create an instance from values
  SourceNodeOptions(std::shared_ptr<Schema> output_schema,
                    std::function<Future<std::optional<ExecBatch>>()> generator)
      : output_schema(std::move(output_schema)), generator(std::move(generator)) {}

  /// \brief the schema for batches that will be generated by this source
  std::shared_ptr<Schema> output_schema;
  /// \brief an asynchronous stream of batches ending with std::nullopt
  std::function<Future<std::optional<ExecBatch>>()> generator;
};
104
+
105
/// \brief a node that generates data from a table already loaded in memory
///
/// The table source node will slice off chunks, defined by `max_batch_size`
/// for parallel processing. The source node extends source node and so these
/// chunks will be iteratively processed in small batches. \see SourceNode
/// for details.
class ARROW_ACERO_EXPORT TableSourceNodeOptions : public ExecNodeOptions {
 public:
  static constexpr int64_t kDefaultMaxBatchSize = 1 << 20;

  /// Create an instance from values
  TableSourceNodeOptions(std::shared_ptr<Table> table,
                         int64_t max_batch_size = kDefaultMaxBatchSize)
      : table(std::move(table)), max_batch_size(max_batch_size) {}

  /// \brief a table which acts as the data source
  std::shared_ptr<Table> table;
  /// \brief size of batches to emit from this node
  /// If the table is larger the node will emit multiple batches from
  /// the table to be processed in parallel.
  int64_t max_batch_size;
};
127
+
128
/// \brief define a lazily resolved Arrow table.
///
/// The table uniquely identified by the names can typically be resolved at the time when
/// the plan is to be consumed.
///
/// This node is for serialization purposes only and can never be executed.
class ARROW_ACERO_EXPORT NamedTableNodeOptions : public ExecNodeOptions {
 public:
  /// Create an instance from values
  NamedTableNodeOptions(std::vector<std::string> names, std::shared_ptr<Schema> schema)
      : names(std::move(names)), schema(std::move(schema)) {}

  /// \brief the names to put in the serialized plan
  std::vector<std::string> names;
  /// \brief the output schema of the table
  std::shared_ptr<Schema> schema;
};
145
+
146
/// \brief a source node which feeds data from a synchronous iterator of batches
///
/// ItMaker is a maker of an iterator of tabular data.
///
/// The node can be configured to use an I/O executor. If set then each time the
/// iterator is polled a new I/O thread task will be created to do the polling. This
/// allows a blocking iterator to stay off the CPU thread pool.
template <typename ItMaker>
class ARROW_ACERO_EXPORT SchemaSourceNodeOptions : public ExecNodeOptions {
 public:
  /// Create an instance that will create a new task on io_executor for each iteration
  SchemaSourceNodeOptions(std::shared_ptr<Schema> schema, ItMaker it_maker,
                          arrow::internal::Executor* io_executor)
      : schema(std::move(schema)),
        it_maker(std::move(it_maker)),
        io_executor(io_executor),
        requires_io(true) {}

  /// Create an instance that will either iterate synchronously or use the default I/O
  /// executor
  SchemaSourceNodeOptions(std::shared_ptr<Schema> schema, ItMaker it_maker,
                          bool requires_io = false)
      : schema(std::move(schema)),
        it_maker(std::move(it_maker)),
        io_executor(NULLPTR),
        requires_io(requires_io) {}

  /// \brief The schema of the record batches from the iterator
  std::shared_ptr<Schema> schema;

  /// \brief A maker of an iterator which acts as the data source
  ItMaker it_maker;

  /// \brief The executor to use for scanning the iterator
  ///
  /// Defaults to the default I/O executor. Only used if requires_io is true.
  /// If requires_io is false then this MUST be nullptr.
  arrow::internal::Executor* io_executor;

  /// \brief If true then items will be fetched from the iterator on a dedicated I/O
  /// thread to keep I/O off the CPU thread
  bool requires_io;
};
189
+
190
/// a source node that reads from a RecordBatchReader
///
/// Each iteration of the RecordBatchReader will be run on a new thread task created
/// on the I/O thread pool.
class ARROW_ACERO_EXPORT RecordBatchReaderSourceNodeOptions : public ExecNodeOptions {
 public:
  /// Create an instance from values
  RecordBatchReaderSourceNodeOptions(std::shared_ptr<RecordBatchReader> reader,
                                     arrow::internal::Executor* io_executor = NULLPTR)
      : reader(std::move(reader)), io_executor(io_executor) {}

  /// \brief The RecordBatchReader which acts as the data source
  std::shared_ptr<RecordBatchReader> reader;

  /// \brief The executor to use for the reader
  ///
  /// Defaults to the default I/O executor.
  arrow::internal::Executor* io_executor;
};
209
+
210
/// a source node that reads from an iterator of array vectors
using ArrayVectorIteratorMaker = std::function<Iterator<std::shared_ptr<ArrayVector>>()>;
/// \brief An extended Source node which accepts a schema and array-vectors
class ARROW_ACERO_EXPORT ArrayVectorSourceNodeOptions
    : public SchemaSourceNodeOptions<ArrayVectorIteratorMaker> {
  // Inherited constructors keep the accessibility of the base class ctors
  using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
};
217
+
218
/// a source node that reads from an iterator of ExecBatch
using ExecBatchIteratorMaker = std::function<Iterator<std::shared_ptr<ExecBatch>>()>;
/// \brief An extended Source node which accepts a schema and exec-batches
class ARROW_ACERO_EXPORT ExecBatchSourceNodeOptions
    : public SchemaSourceNodeOptions<ExecBatchIteratorMaker> {
 public:
  using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
  /// Convenience constructors taking a pre-materialized vector of batches
  ExecBatchSourceNodeOptions(std::shared_ptr<Schema> schema,
                             std::vector<ExecBatch> batches,
                             ::arrow::internal::Executor* io_executor);
  ExecBatchSourceNodeOptions(std::shared_ptr<Schema> schema,
                             std::vector<ExecBatch> batches, bool requires_io = false);
};
231
+
232
using RecordBatchIteratorMaker = std::function<Iterator<std::shared_ptr<RecordBatch>>()>;
/// a source node that reads from an iterator of RecordBatch
class ARROW_ACERO_EXPORT RecordBatchSourceNodeOptions
    : public SchemaSourceNodeOptions<RecordBatchIteratorMaker> {
  // Inherited constructors keep the accessibility of the base class ctors
  using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
};
238
+
239
/// \brief a node which excludes some rows from batches passed through it
///
/// filter_expression will be evaluated against each batch which is pushed to
/// this node. Any rows for which filter_expression does not evaluate to `true` will be
/// excluded in the batch emitted by this node.
///
/// This node will emit empty batches if all rows are excluded. This is done
/// to avoid gaps in the ordering.
class ARROW_ACERO_EXPORT FilterNodeOptions : public ExecNodeOptions {
 public:
  /// \brief create an instance from values
  explicit FilterNodeOptions(Expression filter_expression)
      : filter_expression(std::move(filter_expression)) {}

  /// \brief the expression to filter batches
  ///
  /// The return type of this expression must be boolean
  Expression filter_expression;
};
258
+
259
/// \brief a node which selects a specified subset from the input
class ARROW_ACERO_EXPORT FetchNodeOptions : public ExecNodeOptions {
 public:
  /// factory name used to register/look up this node
  static constexpr std::string_view kName = "fetch";
  /// \brief create an instance from values
  FetchNodeOptions(int64_t offset, int64_t count) : offset(offset), count(count) {}
  /// \brief the number of rows to skip
  int64_t offset;
  /// \brief the number of rows to keep (not counting skipped rows)
  int64_t count;
};
270
+
271
/// \brief a node which executes expressions on input batches, producing batches
/// of the same length with new columns.
///
/// Each expression will be evaluated against each batch which is pushed to
/// this node to produce a corresponding output column.
///
/// If names are not provided, the string representations of exprs will be used.
class ARROW_ACERO_EXPORT ProjectNodeOptions : public ExecNodeOptions {
 public:
  /// \brief create an instance from values
  explicit ProjectNodeOptions(std::vector<Expression> expressions,
                              std::vector<std::string> names = {})
      : expressions(std::move(expressions)), names(std::move(names)) {}

  /// \brief the expressions to run on the batches
  ///
  /// The output will have one column for each expression. If you wish to keep any of
  /// the columns from the input then you should create a simple field_ref expression
  /// for that column.
  std::vector<Expression> expressions;
  /// \brief the names of the output columns
  ///
  /// If this is not specified then the result of calling ToString on the expression will
  /// be used instead
  ///
  /// This list should either be empty or have the same length as `expressions`
  std::vector<std::string> names;
};
299
+
300
/// \brief a node which aggregates input batches and calculates summary statistics
///
/// The node can summarize the entire input or it can group the input with grouping keys
/// and segment keys.
///
/// By default, the aggregate node is a pipeline breaker. It must accumulate all input
/// before any output is produced. Segment keys are a performance optimization. If
/// you know your input is already partitioned by one or more columns then you can
/// specify these as segment keys. At each change in the segment keys the node will
/// emit values for all data seen so far.
///
/// Segment keys are currently limited to single-threaded mode.
///
/// Both keys and segment-keys determine the group. However segment-keys are also used
/// for determining grouping segments, which should be large, and allow streaming a
/// partial aggregation result after processing each segment. One common use-case for
/// segment-keys is ordered aggregation, in which the segment-key attribute specifies a
/// column with non-decreasing values or a lexicographically-ordered set of such columns.
///
/// If the keys attribute is a non-empty vector, then each aggregate in `aggregates` is
/// expected to be a HashAggregate function. If the keys attribute is an empty vector,
/// then each aggregate is assumed to be a ScalarAggregate function.
///
/// If the segment_keys attribute is a non-empty vector, then segmented aggregation, as
/// described above, applies.
///
/// The keys and segment_keys vectors must be disjoint.
///
/// If no measures are provided then you will simply get the list of unique keys.
///
/// This node outputs segment keys first, followed by regular keys, followed by one
/// column for each aggregate.
class ARROW_ACERO_EXPORT AggregateNodeOptions : public ExecNodeOptions {
 public:
  /// \brief create an instance from values
  explicit AggregateNodeOptions(std::vector<Aggregate> aggregates,
                                std::vector<FieldRef> keys = {},
                                std::vector<FieldRef> segment_keys = {})
      : aggregates(std::move(aggregates)),
        keys(std::move(keys)),
        segment_keys(std::move(segment_keys)) {}

  /// \brief aggregations which will be applied to the targeted fields
  std::vector<Aggregate> aggregates;
  /// \brief keys by which aggregations will be grouped (optional)
  std::vector<FieldRef> keys;
  /// \brief keys by which aggregations will be segmented (optional)
  std::vector<FieldRef> segment_keys;
};
349
+
350
/// \brief a default value at which backpressure will be applied
constexpr int32_t kDefaultBackpressureHighBytes = 1 << 30;  // 1GiB
/// \brief a default value at which backpressure will be removed
constexpr int32_t kDefaultBackpressureLowBytes = 1 << 28;  // 256MiB
354
+
355
/// \brief an interface that can be queried for backpressure statistics
class ARROW_ACERO_EXPORT BackpressureMonitor {
 public:
  virtual ~BackpressureMonitor() = default;
  /// \brief fetches the number of bytes currently queued up
  virtual uint64_t bytes_in_use() = 0;
  /// \brief checks to see if backpressure is currently applied
  virtual bool is_paused() = 0;
};
364
+
365
/// \brief Options to control backpressure behavior
struct ARROW_ACERO_EXPORT BackpressureOptions {
  /// \brief Create default options that perform no backpressure
  BackpressureOptions() : resume_if_below(0), pause_if_above(0) {}
  /// \brief Create options that will perform backpressure
  ///
  /// \param resume_if_below The producer should resume producing if the backpressure
  /// queue has fewer than resume_if_below bytes.
  /// \param pause_if_above The producer should pause producing if the backpressure
  /// queue has more than pause_if_above bytes
  BackpressureOptions(uint64_t resume_if_below, uint64_t pause_if_above)
      : resume_if_below(resume_if_below), pause_if_above(pause_if_above) {}

  /// \brief create an instance using default values for backpressure limits
  static BackpressureOptions DefaultBackpressure() {
    return BackpressureOptions(kDefaultBackpressureLowBytes,
                               kDefaultBackpressureHighBytes);
  }

  /// \brief helper method to determine if backpressure is disabled
  /// \return true if pause_if_above is greater than zero, false otherwise
  bool should_apply_backpressure() const { return pause_if_above > 0; }

  /// \brief the number of bytes at which the producer should resume producing
  uint64_t resume_if_below;
  /// \brief the number of bytes at which the producer should pause producing
  ///
  /// If this is 0 then backpressure will be disabled
  uint64_t pause_if_above;
};
395
+
396
/// \brief a sink node which collects results in a queue
///
/// Emitted batches will only be ordered if there is a meaningful ordering
/// and sequence_output is not set to false.
class ARROW_ACERO_EXPORT SinkNodeOptions : public ExecNodeOptions {
 public:
  /// Create an instance that also reports the output schema through `schema`
  explicit SinkNodeOptions(std::function<Future<std::optional<ExecBatch>>()>* generator,
                           std::shared_ptr<Schema>* schema,
                           BackpressureOptions backpressure = {},
                           BackpressureMonitor** backpressure_monitor = NULLPTR,
                           std::optional<bool> sequence_output = std::nullopt)
      : generator(generator),
        schema(schema),
        backpressure(backpressure),
        backpressure_monitor(backpressure_monitor),
        sequence_output(sequence_output) {}

  /// Create an instance without schema reporting
  explicit SinkNodeOptions(std::function<Future<std::optional<ExecBatch>>()>* generator,
                           BackpressureOptions backpressure = {},
                           BackpressureMonitor** backpressure_monitor = NULLPTR,
                           std::optional<bool> sequence_output = std::nullopt)
      : generator(generator),
        schema(NULLPTR),
        backpressure(std::move(backpressure)),
        backpressure_monitor(backpressure_monitor),
        sequence_output(sequence_output) {}

  /// \brief A pointer to a generator of batches.
  ///
  /// This will be set when the node is added to the plan and should be used to consume
  /// data from the plan. If this function is not called frequently enough then the sink
  /// node will start to accumulate data and may apply backpressure.
  std::function<Future<std::optional<ExecBatch>>()>* generator;
  /// \brief A pointer which will be set to the schema of the generated batches
  ///
  /// This is optional, if nullptr is passed in then it will be ignored.
  /// This will be set when the node is added to the plan, before StartProducing is called
  std::shared_ptr<Schema>* schema;
  /// \brief Options to control when to apply backpressure
  ///
  /// This is optional, the default is to never apply backpressure. If the plan is not
  /// consumed quickly enough the system may eventually run out of memory.
  BackpressureOptions backpressure;
  /// \brief A pointer to a backpressure monitor
  ///
  /// This will be set when the node is added to the plan. This can be used to inspect
  /// the amount of data currently queued in the sink node. This is an optional utility
  /// and backpressure can be applied even if this is not used.
  BackpressureMonitor** backpressure_monitor;
  /// \brief Controls whether batches should be emitted immediately or sequenced in order
  ///
  /// \see QueryOptions for more details
  std::optional<bool> sequence_output;
};
450
+
451
/// \brief Control used by a SinkNodeConsumer to pause & resume
///
/// Callers should ensure that they do not call Pause and Resume simultaneously and they
/// should sequence things so that a call to Pause() is always followed by an eventual
/// call to Resume()
class ARROW_ACERO_EXPORT BackpressureControl {
 public:
  virtual ~BackpressureControl() = default;
  /// \brief Ask the input to pause
  ///
  /// This is best effort, batches may continue to arrive
  /// Must eventually be followed by a call to Resume() or deadlock will occur
  virtual void Pause() = 0;
  /// \brief Ask the input to resume
  virtual void Resume() = 0;
};
467
+
468
/// \brief a sink node that consumes the data as part of the plan using callbacks
class ARROW_ACERO_EXPORT SinkNodeConsumer {
 public:
  virtual ~SinkNodeConsumer() = default;
  /// \brief Prepare any consumer state
  ///
  /// This will be run once the schema is finalized as the plan is starting and
  /// before any calls to Consume. A common use is to save off the schema so that
  /// batches can be interpreted.
  virtual Status Init(const std::shared_ptr<Schema>& schema,
                      BackpressureControl* backpressure_control, ExecPlan* plan) = 0;
  /// \brief Consume a batch of data
  virtual Status Consume(ExecBatch batch) = 0;
  /// \brief Signal to the consumer that the last batch has been delivered
  ///
  /// The returned future should only finish when all outstanding tasks have completed
  ///
  /// If the plan is ended early or aborts due to an error then this will not be
  /// called.
  virtual Future<> Finish() = 0;
};
489
+
490
/// \brief Add a sink node which consumes data within the exec plan run
class ARROW_ACERO_EXPORT ConsumingSinkNodeOptions : public ExecNodeOptions {
 public:
  explicit ConsumingSinkNodeOptions(std::shared_ptr<SinkNodeConsumer> consumer,
                                    std::vector<std::string> names = {},
                                    std::optional<bool> sequence_output = std::nullopt)
      : consumer(std::move(consumer)),
        names(std::move(names)),
        sequence_output(sequence_output) {}

  /// \brief the consumer that will receive batches from the plan
  std::shared_ptr<SinkNodeConsumer> consumer;
  /// \brief Names to rename the sink's schema fields to
  ///
  /// If specified then names must be provided for all fields. Currently, only a flat
  /// schema is supported (see GH-31875).
  ///
  /// If not specified then names will be generated based on the source data.
  std::vector<std::string> names;
  /// \brief Controls whether batches should be emitted immediately or sequenced in order
  ///
  /// \see QueryOptions for more details
  std::optional<bool> sequence_output;
};
513
+
514
/// \brief Make a node which sorts rows passed through it
///
/// All batches pushed to this node will be accumulated, then sorted, by the given
/// fields. Then sorted batches will be forwarded to the generator in sorted order.
class ARROW_ACERO_EXPORT OrderBySinkNodeOptions : public SinkNodeOptions {
 public:
  /// \brief create an instance from values
  ///
  /// \param sort_options which columns to sort on and in which direction
  /// \param generator an "out parameter" (see SinkNodeOptions) that will produce the
  ///        sorted batches
  explicit OrderBySinkNodeOptions(
      SortOptions sort_options,
      std::function<Future<std::optional<ExecBatch>>()>* generator)
      : SinkNodeOptions(generator), sort_options(std::move(sort_options)) {}

  /// \brief options describing which columns and direction to sort
  SortOptions sort_options;
};
529
+
530
/// \brief Apply a new ordering to data
///
/// Currently this node works by accumulating all data, sorting, and then emitting
/// the new data with an updated batch index.
///
/// Larger-than-memory sort is not currently supported.
class ARROW_ACERO_EXPORT OrderByNodeOptions : public ExecNodeOptions {
 public:
  /// \brief the name used to register/look up this node factory
  static constexpr std::string_view kName = "order_by";
  /// \brief create an instance from the ordering to apply
  explicit OrderByNodeOptions(Ordering ordering) : ordering(std::move(ordering)) {}

  /// \brief The new ordering to apply to outgoing data
  Ordering ordering;
};
544
+
545
/// \brief the type of a hash join, following standard SQL join semantics
enum class JoinType {
  LEFT_SEMI,   ///< left rows that have at least one match on the right
  RIGHT_SEMI,  ///< right rows that have at least one match on the left
  LEFT_ANTI,   ///< left rows that have no match on the right
  RIGHT_ANTI,  ///< right rows that have no match on the left
  INNER,       ///< matching pairs of left and right rows
  LEFT_OUTER,  ///< inner results plus unmatched left rows (right side null-padded)
  RIGHT_OUTER, ///< inner results plus unmatched right rows (left side null-padded)
  FULL_OUTER   ///< inner results plus unmatched rows from both sides
};

/// \brief a human-readable name for the given join type
std::string ToString(JoinType t);

/// \brief how a pair of (possibly null) key values is compared
///
/// EQ: null keys never match. IS: a null key matches a null key
/// (SQL's IS NOT DISTINCT FROM) — see HashJoinNodeOptions::key_cmp.
enum class JoinKeyCmp { EQ, IS };
559
+
560
+ /// \brief a node which implements a join operation using a hash table
561
+ class ARROW_ACERO_EXPORT HashJoinNodeOptions : public ExecNodeOptions {
562
+ public:
563
+ static constexpr const char* default_output_suffix_for_left = "";
564
+ static constexpr const char* default_output_suffix_for_right = "";
565
+ /// \brief create an instance from values that outputs all columns
566
+ HashJoinNodeOptions(
567
+ JoinType in_join_type, std::vector<FieldRef> in_left_keys,
568
+ std::vector<FieldRef> in_right_keys, Expression filter = literal(true),
569
+ std::string output_suffix_for_left = default_output_suffix_for_left,
570
+ std::string output_suffix_for_right = default_output_suffix_for_right,
571
+ bool disable_bloom_filter = false)
572
+ : join_type(in_join_type),
573
+ left_keys(std::move(in_left_keys)),
574
+ right_keys(std::move(in_right_keys)),
575
+ output_all(true),
576
+ output_suffix_for_left(std::move(output_suffix_for_left)),
577
+ output_suffix_for_right(std::move(output_suffix_for_right)),
578
+ filter(std::move(filter)),
579
+ disable_bloom_filter(disable_bloom_filter) {
580
+ this->key_cmp.resize(this->left_keys.size());
581
+ for (size_t i = 0; i < this->left_keys.size(); ++i) {
582
+ this->key_cmp[i] = JoinKeyCmp::EQ;
583
+ }
584
+ }
585
+ /// \brief create an instance from keys
586
+ ///
587
+ /// This will create an inner join that outputs all columns and has no post join filter
588
+ ///
589
+ /// `in_left_keys` should have the same length and types as `in_right_keys`
590
+ /// @param in_left_keys the keys in the left input
591
+ /// @param in_right_keys the keys in the right input
592
+ HashJoinNodeOptions(std::vector<FieldRef> in_left_keys,
593
+ std::vector<FieldRef> in_right_keys)
594
+ : left_keys(std::move(in_left_keys)), right_keys(std::move(in_right_keys)) {
595
+ this->join_type = JoinType::INNER;
596
+ this->output_all = true;
597
+ this->output_suffix_for_left = default_output_suffix_for_left;
598
+ this->output_suffix_for_right = default_output_suffix_for_right;
599
+ this->key_cmp.resize(this->left_keys.size());
600
+ for (size_t i = 0; i < this->left_keys.size(); ++i) {
601
+ this->key_cmp[i] = JoinKeyCmp::EQ;
602
+ }
603
+ this->filter = literal(true);
604
+ }
605
+ /// \brief create an instance from values using JoinKeyCmp::EQ for all comparisons
606
+ HashJoinNodeOptions(
607
+ JoinType join_type, std::vector<FieldRef> left_keys,
608
+ std::vector<FieldRef> right_keys, std::vector<FieldRef> left_output,
609
+ std::vector<FieldRef> right_output, Expression filter = literal(true),
610
+ std::string output_suffix_for_left = default_output_suffix_for_left,
611
+ std::string output_suffix_for_right = default_output_suffix_for_right,
612
+ bool disable_bloom_filter = false)
613
+ : join_type(join_type),
614
+ left_keys(std::move(left_keys)),
615
+ right_keys(std::move(right_keys)),
616
+ output_all(false),
617
+ left_output(std::move(left_output)),
618
+ right_output(std::move(right_output)),
619
+ output_suffix_for_left(std::move(output_suffix_for_left)),
620
+ output_suffix_for_right(std::move(output_suffix_for_right)),
621
+ filter(std::move(filter)),
622
+ disable_bloom_filter(disable_bloom_filter) {
623
+ this->key_cmp.resize(this->left_keys.size());
624
+ for (size_t i = 0; i < this->left_keys.size(); ++i) {
625
+ this->key_cmp[i] = JoinKeyCmp::EQ;
626
+ }
627
+ }
628
+ /// \brief create an instance from values
629
+ HashJoinNodeOptions(
630
+ JoinType join_type, std::vector<FieldRef> left_keys,
631
+ std::vector<FieldRef> right_keys, std::vector<FieldRef> left_output,
632
+ std::vector<FieldRef> right_output, std::vector<JoinKeyCmp> key_cmp,
633
+ Expression filter = literal(true),
634
+ std::string output_suffix_for_left = default_output_suffix_for_left,
635
+ std::string output_suffix_for_right = default_output_suffix_for_right,
636
+ bool disable_bloom_filter = false)
637
+ : join_type(join_type),
638
+ left_keys(std::move(left_keys)),
639
+ right_keys(std::move(right_keys)),
640
+ output_all(false),
641
+ left_output(std::move(left_output)),
642
+ right_output(std::move(right_output)),
643
+ key_cmp(std::move(key_cmp)),
644
+ output_suffix_for_left(std::move(output_suffix_for_left)),
645
+ output_suffix_for_right(std::move(output_suffix_for_right)),
646
+ filter(std::move(filter)),
647
+ disable_bloom_filter(disable_bloom_filter) {}
648
+
649
+ HashJoinNodeOptions() = default;
650
+
651
+ // type of join (inner, left, semi...)
652
+ JoinType join_type = JoinType::INNER;
653
+ // key fields from left input
654
+ std::vector<FieldRef> left_keys;
655
+ // key fields from right input
656
+ std::vector<FieldRef> right_keys;
657
+ // if set all valid fields from both left and right input will be output
658
+ // (and field ref vectors for output fields will be ignored)
659
+ bool output_all = false;
660
+ // output fields passed from left input
661
+ std::vector<FieldRef> left_output;
662
+ // output fields passed from right input
663
+ std::vector<FieldRef> right_output;
664
+ // key comparison function (determines whether a null key is equal another null
665
+ // key or not)
666
+ std::vector<JoinKeyCmp> key_cmp;
667
+ // suffix added to names of output fields coming from left input (used to distinguish,
668
+ // if necessary, between fields of the same name in left and right input and can be left
669
+ // empty if there are no name collisions)
670
+ std::string output_suffix_for_left;
671
+ // suffix added to names of output fields coming from right input
672
+ std::string output_suffix_for_right;
673
+ // residual filter which is applied to matching rows. Rows that do not match
674
+ // the filter are not included. The filter is applied against the
675
+ // concatenated input schema (left fields then right fields) and can reference
676
+ // fields that are not included in the output.
677
+ Expression filter = literal(true);
678
+ // whether or not to disable Bloom filters in this join
679
+ bool disable_bloom_filter = false;
680
+ };
681
+
682
/// \brief a node which implements the asof join operation
///
/// Note, this API is experimental and will change in the future
///
/// This node takes one left table and any number of right tables, and asof joins them
/// together. Batches produced by each input must be ordered by the "on" key.
/// This node will output one row for each row in the left table.
class ARROW_ACERO_EXPORT AsofJoinNodeOptions : public ExecNodeOptions {
 public:
  /// \brief Keys for one input table of the AsofJoin operation
  ///
  /// The keys must be consistent across the input tables:
  /// Each "on" key must refer to a field of the same type and units across the tables.
  /// Each "by" key must refer to a list of fields of the same types across the tables.
  struct Keys {
    /// \brief "on" key for the join.
    ///
    /// The input table must be sorted by the "on" key. Must be a single field of a common
    /// type. Inexact match is used on the "on" key. i.e., a row is considered a match iff
    /// left_on - tolerance <= right_on <= left_on.
    /// Currently, the "on" key must be of an integer, date, or timestamp type.
    FieldRef on_key;
    /// \brief "by" key for the join.
    ///
    /// Each input table must have each field of the "by" key. Exact equality is used for
    /// each field of the "by" key.
    /// Currently, each field of the "by" key must be of an integer, date, timestamp, or
    /// base-binary type.
    std::vector<FieldRef> by_key;
  };

  /// \brief create an instance from per-input keys and a matching tolerance
  AsofJoinNodeOptions(std::vector<Keys> input_keys, int64_t tolerance)
      : input_keys(std::move(input_keys)), tolerance(tolerance) {}

  /// \brief AsofJoin keys per input table. At least two keys must be given. The first key
  /// corresponds to a left table and all other keys correspond to right tables for the
  /// as-of-join.
  ///
  /// \see `Keys` for details.
  std::vector<Keys> input_keys;
  /// \brief Tolerance for inexact "on" key matching. A right row is considered a match
  /// with the left row if `right.on - left.on <= tolerance`. The `tolerance` may be:
  /// - negative, in which case a past-as-of-join occurs;
  /// - or positive, in which case a future-as-of-join occurs;
  /// - or zero, in which case an exact-as-of-join occurs.
  ///
  /// The tolerance is interpreted in the same units as the "on" key.
  int64_t tolerance;
};
731
+
732
/// \brief a node which select top_k/bottom_k rows passed through it
///
/// All batches pushed to this node will be accumulated, then selected, by the given
/// fields. Then sorted batches will be forwarded to the generator in sorted order.
class ARROW_ACERO_EXPORT SelectKSinkNodeOptions : public SinkNodeOptions {
 public:
  /// \brief create an instance from values
  ///
  /// \param select_k_options which columns to rank on and how many rows to keep
  /// \param generator an "out parameter" (see SinkNodeOptions) that will produce the
  ///        selected batches
  explicit SelectKSinkNodeOptions(
      SelectKOptions select_k_options,
      std::function<Future<std::optional<ExecBatch>>()>* generator)
      : SinkNodeOptions(generator), select_k_options(std::move(select_k_options)) {}

  /// SelectK options
  SelectKOptions select_k_options;
};
746
+
747
/// \brief a sink node which accumulates all output into a table
class ARROW_ACERO_EXPORT TableSinkNodeOptions : public ExecNodeOptions {
 public:
  /// \brief create an instance from values
  ///
  /// \param output_table where the accumulated table will be stored (see member docs)
  /// \param sequence_output whether to sequence batches before accumulating them
  explicit TableSinkNodeOptions(std::shared_ptr<Table>* output_table,
                                std::optional<bool> sequence_output = std::nullopt)
      : output_table(output_table), sequence_output(sequence_output) {}

  /// \brief an "out parameter" specifying the table that will be created
  ///
  /// Must not be null and remain valid for the entirety of the plan execution. After the
  /// plan has completed this will be set to point to the result table
  std::shared_ptr<Table>* output_table;
  /// \brief Controls whether batches should be emitted immediately or sequenced in order
  ///
  /// \see QueryOptions for more details
  std::optional<bool> sequence_output;
  /// \brief Custom names to use for the columns.
  ///
  /// If specified then names must be provided for all fields. Currently, only a flat
  /// schema is supported (see GH-31875).
  ///
  /// If not specified then names will be generated based on the source data.
  std::vector<std::string> names;
};
772
+
773
/// \brief a row template that describes one row that will be generated for each input row
struct ARROW_ACERO_EXPORT PivotLongerRowTemplate {
  /// \brief create an instance from the feature values and measurement refs
  PivotLongerRowTemplate(std::vector<std::string> feature_values,
                         std::vector<std::optional<FieldRef>> measurement_values)
      : feature_values(std::move(feature_values)),
        measurement_values(std::move(measurement_values)) {}
  /// A (typically unique) set of feature values for the template, usually derived from a
  /// column name
  ///
  /// These will be used to populate the feature columns
  std::vector<std::string> feature_values;
  /// The fields containing the measurements to use for this row
  ///
  /// These will be used to populate the measurement columns. If nullopt then nulls
  /// will be inserted for the given value.
  std::vector<std::optional<FieldRef>> measurement_values;
};
790
+
791
/// \brief Reshape a table by turning some columns into additional rows
///
/// This operation is sometimes also referred to as UNPIVOT
///
/// This is typically done when there are multiple observations in each row in order to
/// transform to a table containing a single observation per row.
///
/// For example:
///
/// | time | left_temp | right_temp |
/// | ---- | --------- | ---------- |
/// | 1    | 10        | 20         |
/// | 2    | 15        | 18         |
///
/// The above table contains two observations per row. There is an implicit feature
/// "location" (left vs right) and a measurement "temp". What we really want is:
///
/// | time | location | temp |
/// | --- | --- | --- |
/// | 1 | left | 10 |
/// | 1 | right | 20 |
/// | 2 | left | 15 |
/// | 2 | right | 18 |
///
/// For a more complex example consider:
///
/// | time | ax1 | ay1 | bx1 | ay2 |
/// | ---- | --- | --- | --- | --- |
/// | 0 | 1 | 2 | 3 | 4 |
///
/// We can pretend a vs b and x vs y are features while 1 and 2 are two different
/// kinds of measurements. We thus want to pivot to
///
/// | time | a/b | x/y | f1   | f2   |
/// | ---- | --- | --- | ---- | ---- |
/// | 0    | a   | x   | 1    | null |
/// | 0    | a   | y   | 2    | 4    |
/// | 0    | b   | x   | 3    | null |
///
/// To do this we create a row template for each combination of features. One should
/// be able to do this purely by looking at the column names. For example, given the
/// above columns "ax1", "ay1", "bx1", and "ay2" we know we have three feature
/// combinations (a, x), (a, y), and (b, x). Similarly, we know we have two possible
/// measurements, "1" and "2".
///
/// For each combination of features we create a row template. In each row template we
/// describe the combination and then list which columns to use for the measurements.
/// If a measurement doesn't exist for a given combination then we use nullopt.
///
/// So, for our above example, we have:
///
/// (a, x): names={"a", "x"}, values={"ax1", nullopt}
/// (a, y): names={"a", "y"}, values={"ay1", "ay2"}
/// (b, x): names={"b", "x"}, values={"bx1", nullopt}
///
/// Finishing it off we name our new columns:
/// feature_field_names={"a/b","x/y"}
/// measurement_field_names={"f1", "f2"}
class ARROW_ACERO_EXPORT PivotLongerNodeOptions : public ExecNodeOptions {
 public:
  /// \brief the name used to register/look up this node factory
  static constexpr std::string_view kName = "pivot_longer";
  /// One or more row templates to create new output rows
  ///
  /// Normally there are at least two row templates. The output # of rows
  /// will be the input # of rows * the number of row templates
  std::vector<PivotLongerRowTemplate> row_templates;
  /// The names of the columns which describe the new features
  std::vector<std::string> feature_field_names;
  /// The names of the columns which represent the measurements
  std::vector<std::string> measurement_field_names;
};
862
+
863
+ /// @}
864
+
865
+ } // namespace acero
866
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/order_by_impl.h ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/acero/options.h"
25
+ #include "arrow/record_batch.h"
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type.h"
29
+
30
+ namespace arrow {
31
+
32
+ using compute::ExecContext;
33
+
34
+ namespace acero {
35
+
36
/// \brief Interface for the accumulate-then-order implementations backing the
/// order-by and select-k sink nodes
class OrderByImpl {
 public:
  virtual ~OrderByImpl() = default;

  /// \brief Accumulate one input batch (called once per arriving batch)
  virtual void InputReceived(const std::shared_ptr<RecordBatch>& batch) = 0;

  /// \brief Sort/select the accumulated data and return the result
  virtual Result<Datum> DoFinish() = 0;

  /// \brief A description of this operation for debugging/tracing
  virtual std::string ToString() const = 0;

  /// \brief Create an implementation that fully sorts the input by `options`
  static Result<std::unique_ptr<OrderByImpl>> MakeSort(
      ExecContext* ctx, const std::shared_ptr<Schema>& output_schema,
      const SortOptions& options);

  /// \brief Create an implementation that keeps only the top/bottom K rows
  static Result<std::unique_ptr<OrderByImpl>> MakeSelectK(
      ExecContext* ctx, const std::shared_ptr<Schema>& output_schema,
      const SelectKOptions& options);
};
54
+
55
+ } // namespace acero
56
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/partition_util.h ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cassert>
22
+ #include <cstdint>
23
+ #include <functional>
24
+ #include <random>
25
+ #include "arrow/acero/util.h"
26
+ #include "arrow/buffer.h"
27
+ #include "arrow/util/pcg_random.h"
28
+
29
+ namespace arrow {
30
+ namespace acero {
31
+
32
+ class PartitionSort {
33
+ public:
34
+ /// \brief Bucket sort rows on partition ids in O(num_rows) time.
35
+ ///
36
+ /// Include in the output exclusive cumulative sum of bucket sizes.
37
+ /// This corresponds to ranges in the sorted array containing all row ids for
38
+ /// each of the partitions.
39
+ ///
40
+ /// prtn_ranges must be initialized and have at least prtn_ranges + 1 elements
41
+ /// when this method returns prtn_ranges[i] will contains the total number of
42
+ /// elements in partitions 0 through i. prtn_ranges[0] will be 0.
43
+ ///
44
+ /// prtn_id_impl must be a function that takes in a row id (int) and returns
45
+ /// a partition id (int). The returned partition id must be between 0 and
46
+ /// num_prtns (exclusive).
47
+ ///
48
+ /// output_pos_impl is a function that takes in a row id (int) and a position (int)
49
+ /// in the bucket sorted output. The function should insert the row in the
50
+ /// output.
51
+ ///
52
+ /// For example:
53
+ ///
54
+ /// in_arr: [5, 7, 2, 3, 5, 4]
55
+ /// num_prtns: 3
56
+ /// prtn_id_impl: [&in_arr] (int row_id) { return in_arr[row_id] / 3; }
57
+ /// output_pos_impl: [&out_arr] (int row_id, int pos) { out_arr[pos] = row_id; }
58
+ ///
59
+ /// After Execution
60
+ /// out_arr: [2, 5, 3, 5, 4, 7]
61
+ /// prtn_ranges: [0, 1, 5, 6]
62
+ template <class INPUT_PRTN_ID_FN, class OUTPUT_POS_FN>
63
+ static void Eval(int64_t num_rows, int num_prtns, uint16_t* prtn_ranges,
64
+ INPUT_PRTN_ID_FN prtn_id_impl, OUTPUT_POS_FN output_pos_impl) {
65
+ ARROW_DCHECK(num_rows > 0 && num_rows <= (1 << 15));
66
+ ARROW_DCHECK(num_prtns >= 1 && num_prtns <= (1 << 15));
67
+
68
+ memset(prtn_ranges, 0, (num_prtns + 1) * sizeof(uint16_t));
69
+
70
+ for (int64_t i = 0; i < num_rows; ++i) {
71
+ int prtn_id = static_cast<int>(prtn_id_impl(i));
72
+ ++prtn_ranges[prtn_id + 1];
73
+ }
74
+
75
+ uint16_t sum = 0;
76
+ for (int i = 0; i < num_prtns; ++i) {
77
+ uint16_t sum_next = sum + prtn_ranges[i + 1];
78
+ prtn_ranges[i + 1] = sum;
79
+ sum = sum_next;
80
+ }
81
+
82
+ for (int64_t i = 0; i < num_rows; ++i) {
83
+ int prtn_id = static_cast<int>(prtn_id_impl(i));
84
+ int pos = prtn_ranges[prtn_id + 1]++;
85
+ output_pos_impl(i, pos);
86
+ }
87
+ }
88
+ };
89
+
90
/// \brief A control for synchronizing threads on a partitionable workload
class PartitionLocks {
 public:
  PartitionLocks();
  ~PartitionLocks();
  /// \brief Initializes the control, must be called before use
  ///
  /// \param num_threads Maximum number of threads that will access the partitions
  /// \param num_prtns Number of partitions to synchronize
  void Init(size_t num_threads, int num_prtns);
  /// \brief Cleans up the control, it should not be used after this call
  void CleanUp();
  /// \brief Acquire a partition to work on one
  ///
  /// \param thread_id The index of the thread trying to acquire the partition lock
  /// \param num_prtns Length of prtns_to_try, must be <= num_prtns used in Init
  /// \param prtns_to_try An array of partitions that still have remaining work
  /// \param limit_retries If false, this method will spinwait forever until success
  /// \param max_retries Max times to attempt checking out work before returning false
  /// \param[out] locked_prtn_id The id of the partition locked
  /// \param[out] locked_prtn_id_pos The index of the partition locked in prtns_to_try
  /// \return True if a partition was locked, false if max_retries was attempted
  ///         without successfully acquiring a lock
  ///
  /// This method is thread safe
  bool AcquirePartitionLock(size_t thread_id, int num_prtns, const int* prtns_to_try,
                            bool limit_retries, int max_retries, int* locked_prtn_id,
                            int* locked_prtn_id_pos);
  /// \brief Release a partition so that other threads can work on it
  void ReleasePartitionLock(int prtn_id);

  // Executes (synchronously and using current thread) the same operation on a set of
  // multiple partitions. Tries to minimize partition locking overhead by randomizing and
  // adjusting order in which partitions are processed.
  //
  // PROCESS_PRTN_FN is a callback which will be executed for each partition after
  // acquiring the lock for that partition. It gets partition id as an argument.
  // IS_PRTN_EMPTY_FN is a callback which filters out (when returning true) partitions
  // with specific ids from processing.
  //
  template <typename IS_PRTN_EMPTY_FN, typename PROCESS_PRTN_FN>
  Status ForEachPartition(size_t thread_id,
                          /*scratch space buffer with space for one element per partition;
                          dirty in and dirty out*/
                          int* temp_unprocessed_prtns, IS_PRTN_EMPTY_FN is_prtn_empty_fn,
                          PROCESS_PRTN_FN process_prtn_fn) {
    // Collect the ids of the non-empty partitions into the scratch buffer
    int num_unprocessed_partitions = 0;
    for (int i = 0; i < num_prtns_; ++i) {
      bool is_prtn_empty = is_prtn_empty_fn(i);
      if (!is_prtn_empty) {
        temp_unprocessed_prtns[num_unprocessed_partitions++] = i;
      }
    }
    while (num_unprocessed_partitions > 0) {
      int locked_prtn_id;
      int locked_prtn_id_pos;
      // Return value can be ignored: with limit_retries=false this call only
      // returns once a lock has been acquired
      AcquirePartitionLock(thread_id, num_unprocessed_partitions, temp_unprocessed_prtns,
                           /*limit_retries=*/false, /*max_retries=*/-1, &locked_prtn_id,
                           &locked_prtn_id_pos);
      {
        // Scoped guard so the partition lock is released even if
        // process_prtn_fn returns an error
        class AutoReleaseLock {
         public:
          AutoReleaseLock(PartitionLocks* locks, int prtn_id)
              : locks(locks), prtn_id(prtn_id) {}
          ~AutoReleaseLock() { locks->ReleasePartitionLock(prtn_id); }
          PartitionLocks* locks;
          int prtn_id;
        } auto_release_lock(this, locked_prtn_id);
        ARROW_RETURN_NOT_OK(process_prtn_fn(locked_prtn_id));
      }
      // Swap-remove the processed partition from the unprocessed set
      if (locked_prtn_id_pos < num_unprocessed_partitions - 1) {
        temp_unprocessed_prtns[locked_prtn_id_pos] =
            temp_unprocessed_prtns[num_unprocessed_partitions - 1];
      }
      --num_unprocessed_partitions;
    }
    return Status::OK();
  }

 private:
  std::atomic<bool>* lock_ptr(int prtn_id);
  int random_int(size_t thread_id, int num_values);

  struct PartitionLock {
    static constexpr int kCacheLineBytes = 64;
    std::atomic<bool> lock;
    // Keeps each lock on its own cache line to avoid false sharing between
    // threads spinning on different partitions
    uint8_t padding[kCacheLineBytes];
  };
  int num_prtns_;
  std::unique_ptr<PartitionLock[]> locks_;
  std::unique_ptr<arrow::random::pcg32_fast[]> rngs_;
};
182
+
183
+ } // namespace acero
184
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Often-used headers, for precompiling.
19
+ // If updating this header, please make sure you check compilation speed
20
+ // before checking in. Adding headers which are not used extremely often
21
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
22
+
23
+ #include "arrow/pch.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/query_context.h ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+ #pragma once
18
+
19
+ #include <string_view>
20
+
21
+ #include "arrow/acero/exec_plan.h"
22
+ #include "arrow/acero/task_util.h"
23
+ #include "arrow/acero/util.h"
24
+ #include "arrow/compute/exec.h"
25
+ #include "arrow/io/interfaces.h"
26
+ #include "arrow/util/async_util.h"
27
+ #include "arrow/util/type_fwd.h"
28
+
29
+ namespace arrow {
30
+
31
+ using compute::default_exec_context;
32
+ using io::IOContext;
33
+
34
+ namespace acero {
35
+
36
/// \brief Shared per-query state used by all nodes of a single plan
///
/// Bundles the query options, the compute ExecContext used by kernels, an
/// IOContext, the legacy TaskScheduler (task groups) and a borrowed
/// AsyncTaskScheduler (set in Init).
class ARROW_ACERO_EXPORT QueryContext {
 public:
  // NOTE(review): not `explicit`, so a QueryOptions can implicitly convert to a
  // QueryContext -- confirm this is intentional.
  QueryContext(QueryOptions opts = {},
               ExecContext exec_context = *default_exec_context());

  /// \brief Prepare the context for execution
  ///
  /// \param max_num_threads presumably sizes per-thread state (see
  ///        GetTempStack) -- TODO confirm against the implementation
  /// \param scheduler async scheduler used by ScheduleTask et al.; borrowed,
  ///        must outlive this context
  Status Init(size_t max_num_threads, arrow::util::AsyncTaskScheduler* scheduler);

  const ::arrow::internal::CpuInfo* cpu_info() const;
  int64_t hardware_flags() const;
  /// \brief The options this query was created with
  const QueryOptions& options() const { return options_; }
  /// \brief Memory pool of the wrapped ExecContext
  MemoryPool* memory_pool() const { return exec_context_.memory_pool(); }
  /// \brief CPU executor of the wrapped ExecContext
  ::arrow::internal::Executor* executor() const { return exec_context_.executor(); }
  ExecContext* exec_context() { return &exec_context_; }
  IOContext* io_context() { return &io_context_; }
  /// \brief The legacy task-group scheduler (see RegisterTaskGroup)
  TaskScheduler* scheduler() { return task_scheduler_.get(); }
  /// \brief The async scheduler supplied to Init (not owned)
  arrow::util::AsyncTaskScheduler* async_scheduler() { return async_scheduler_; }

  /// \brief Index identifying the calling thread (backed by thread_indexer_)
  size_t GetThreadIndex();
  size_t max_concurrency() const;
  /// \brief Thread-local scratch stack for temporary vector allocations
  Result<arrow::util::TempVectorStack*> GetTempStack(size_t thread_index);

  /// \brief Start an external task
  ///
  /// This should be avoided if possible. It is kept in for now for legacy
  /// purposes. This should be called before the external task is started. If
  /// a valid future is returned then it should be marked complete when the
  /// external task has finished.
  ///
  /// \param name A name to give the task for traceability and debugging
  ///
  /// \return an invalid future if the plan has already ended, otherwise this
  ///         returns a future that must be completed when the external task
  ///         finishes.
  Result<Future<>> BeginExternalTask(std::string_view name);

  /// \brief Add a single function as a task to the query's task group
  /// on the compute threadpool.
  ///
  /// \param fn The task to run. Takes no arguments and returns a Status.
  /// \param name A name to give the task for traceability and debugging
  void ScheduleTask(std::function<Status()> fn, std::string_view name);
  /// \brief Add a single function as a task to the query's task group
  /// on the compute threadpool.
  ///
  /// \param fn The task to run. Takes the thread index and returns a Status.
  /// \param name A name to give the task for traceability and debugging
  void ScheduleTask(std::function<Status(size_t)> fn, std::string_view name);
  /// \brief Add a single function as a task to the query's task group on
  /// the IO thread pool
  ///
  /// \param fn The task to run. Returns a status.
  /// \param name A name to give the task for traceability and debugging
  void ScheduleIOTask(std::function<Status()> fn, std::string_view name);

  // Register/Start TaskGroup is a way of performing a "Parallel For" pattern:
  // - The task function takes the thread index and the index of the task
  // - The on_finished function takes the thread index
  // Returns an integer ID that will be used to reference the task group in
  // StartTaskGroup. At runtime, call StartTaskGroup with the ID and the number of times
  // you'd like the task to be executed. The need to register a task group before use will
  // be removed after we rewrite the scheduler.
  /// \brief Register a "parallel for" task group with the scheduler
  ///
  /// \param task The function implementing the task. Takes the thread_index and
  ///             the task index.
  /// \param on_finished The function that gets run once all tasks have been completed.
  ///                    Takes the thread_index.
  ///
  /// Must be called inside of ExecNode::Init.
  int RegisterTaskGroup(std::function<Status(size_t, int64_t)> task,
                        std::function<Status(size_t)> on_finished);

  /// \brief Start the task group with the specified ID. This can only
  /// be called once per task_group_id.
  ///
  /// \param task_group_id The ID of the task group to run
  /// \param num_tasks The number of times to run the task
  Status StartTaskGroup(int task_group_id, int64_t num_tasks);

  // This is an RAII class for keeping track of in-flight file IO. Useful for getting
  // an estimate of memory use, and how much memory we expect to be freed soon.
  // Returned by ReportTempFileIO.
  struct [[nodiscard]] TempFileIOMark {
    QueryContext* ctx_;
    size_t bytes_;

    // Adds `bytes` to the context's in-flight counter; the destructor removes
    // it again, so the counter tracks only live marks.
    // NOTE(review): acquire-on-add / release-on-sub is unusual for a pure
    // statistics counter (relaxed would normally suffice) -- confirm intent.
    TempFileIOMark(QueryContext* ctx, size_t bytes) : ctx_(ctx), bytes_(bytes) {
      ctx_->in_flight_bytes_to_disk_.fetch_add(bytes_, std::memory_order_acquire);
    }

    ARROW_DISALLOW_COPY_AND_ASSIGN(TempFileIOMark);

    ~TempFileIOMark() {
      ctx_->in_flight_bytes_to_disk_.fetch_sub(bytes_, std::memory_order_release);
    }
  };

  /// \brief Record `bytes` of in-flight temp-file IO for the lifetime of the
  ///        returned mark (see TempFileIOMark)
  TempFileIOMark ReportTempFileIO(size_t bytes) { return {this, bytes}; }

  /// \brief Current total of bytes reported via ReportTempFileIO whose marks
  ///        are still alive
  size_t GetCurrentTempFileIO() { return in_flight_bytes_to_disk_.load(); }

 private:
  QueryOptions options_;
  // To be replaced with Acero-specific context once scheduler is done and
  // we don't need ExecContext for kernels
  ExecContext exec_context_;
  IOContext io_context_;

  // Borrowed from Init(); not owned by this context.
  arrow::util::AsyncTaskScheduler* async_scheduler_ = NULLPTR;
  std::unique_ptr<TaskScheduler> task_scheduler_ = TaskScheduler::Make();

  // Hands out stable per-thread indices (see GetThreadIndex).
  ThreadIndexer thread_indexer_;
  // Per-thread scratch state; lazily initialized (see is_init flag).
  struct ThreadLocalData {
    bool is_init = false;
    arrow::util::TempVectorStack stack;
  };
  std::vector<ThreadLocalData> tld_;

  // Sum of bytes tracked by live TempFileIOMark instances.
  std::atomic<size_t> in_flight_bytes_to_disk_{0};
};
156
+ } // namespace acero
157
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "arrow/compute/light_array.h" // for KeyColumnMetadata
26
+ #include "arrow/type.h" // for DataType, FieldRef, Field and Schema
27
+
28
+ namespace arrow {
29
+
30
+ using internal::checked_cast;
31
+
32
+ namespace acero {
33
+
34
// Identifiers for all different row schemas that are used in a join
//
// Used as the handle type for SchemaProjectionMaps (see HashJoinProjectionMaps
// below). Enumerator names suggest: full input, join keys, non-key payload,
// filter columns, and output columns -- exact semantics are defined by the
// join implementation that registers the projections.
enum class HashJoinProjection : int {
  INPUT = 0,
  KEY = 1,
  PAYLOAD = 2,
  FILTER = 3,
  OUTPUT = 4
};
43
+
44
// A lightweight, non-owning view of a field mapping between two projections.
// Produced by SchemaProjectionMaps::map(); the pointers alias vectors owned by
// the SchemaProjectionMaps instance, which must outlive this view.
struct SchemaProjectionMap {
  static constexpr int kMissingField = -1;
  // Number of columns in the source projection.
  int num_cols;
  // source field index -> index in the base (full) schema
  const int* source_to_base;
  // base (full) schema index -> index in the target projection
  const int* base_to_target;
  // Map source field `i` to the corresponding field in the target projection.
  // Debug-checked: `i` must be in range and must exist in the base schema.
  inline int get(int i) const {
    ARROW_DCHECK(i >= 0 && i < num_cols);
    ARROW_DCHECK(source_to_base[i] != kMissingField);
    return base_to_target[source_to_base[i]];
  }
};
55
+
56
/// Helper class for managing different projections of the same row schema.
/// Used to efficiently map any field in one projection to a corresponding field in
/// another projection.
/// Materialized mappings are generated eagerly during Init (in RegisterEnd),
/// not on first access.
/// Thread-safe apart from initialization.
template <typename ProjectionIdEnum>
class SchemaProjectionMaps {
 public:
  static constexpr int kMissingField = -1;

  /// \brief Register the full schema plus a set of projections of it
  ///
  /// The full schema must be registered first: mapping generation treats the
  /// first registered schema (index 0) as the base (see GenerateMapForProjection).
  ///
  /// \param full_schema_handle handle identifying the full (base) schema
  /// \param schema the full schema
  /// \param projection_handles one handle per projection, parallel to `projections`
  /// \param projections field selections (each resolved against `schema`)
  Status Init(ProjectionIdEnum full_schema_handle, const Schema& schema,
              const std::vector<ProjectionIdEnum>& projection_handles,
              const std::vector<const std::vector<FieldRef>*>& projections) {
    ARROW_DCHECK(projection_handles.size() == projections.size());
    ARROW_RETURN_NOT_OK(RegisterSchema(full_schema_handle, schema));
    for (size_t i = 0; i < projections.size(); ++i) {
      ARROW_RETURN_NOT_OK(
          RegisterProjectedSchema(projection_handles[i], *(projections[i]), schema));
    }
    RegisterEnd();
    return Status::OK();
  }

  /// \brief Number of columns in the given projection
  int num_cols(ProjectionIdEnum schema_handle) const {
    int id = schema_id(schema_handle);
    return static_cast<int>(schemas_[id].second.data_types.size());
  }

  /// \brief True if the given projection selects no columns
  bool is_empty(ProjectionIdEnum schema_handle) const {
    return num_cols(schema_handle) == 0;
  }

  /// \brief Name of field `field_id` within the given projection
  const std::string& field_name(ProjectionIdEnum schema_handle, int field_id) const {
    int id = schema_id(schema_handle);
    return schemas_[id].second.field_names[field_id];
  }

  /// \brief Data type of field `field_id` within the given projection
  const std::shared_ptr<DataType>& data_type(ProjectionIdEnum schema_handle,
                                             int field_id) const {
    int id = schema_id(schema_handle);
    return schemas_[id].second.data_types[field_id];
  }

  /// \brief All data types of the given projection, in projection order
  const std::vector<std::shared_ptr<DataType>>& data_types(
      ProjectionIdEnum schema_handle) const {
    int id = schema_id(schema_handle);
    return schemas_[id].second.data_types;
  }

  /// \brief Build a non-owning field mapping from projection `from` to `to`
  ///
  /// The returned view aliases vectors owned by this object; it remains valid
  /// only while this object is alive.
  SchemaProjectionMap map(ProjectionIdEnum from, ProjectionIdEnum to) const {
    int id_from = schema_id(from);
    int id_to = schema_id(to);
    SchemaProjectionMap result;
    result.num_cols = num_cols(from);
    result.source_to_base = mappings_[id_from].data();
    result.base_to_target = inverse_mappings_[id_to].data();
    return result;
  }

 protected:
  // Per-projection field metadata, parallel across the three vectors.
  struct FieldInfos {
    // Index of each selected field within the full (base) schema.
    std::vector<int> field_paths;
    std::vector<std::string> field_names;
    std::vector<std::shared_ptr<DataType>> data_types;
  };

  // Register the full schema; every field maps to itself (path == index).
  Status RegisterSchema(ProjectionIdEnum handle, const Schema& schema) {
    FieldInfos out_fields;
    const FieldVector& in_fields = schema.fields();
    out_fields.field_paths.resize(in_fields.size());
    out_fields.field_names.resize(in_fields.size());
    out_fields.data_types.resize(in_fields.size());
    for (size_t i = 0; i < in_fields.size(); ++i) {
      const std::string& name = in_fields[i]->name();
      const std::shared_ptr<DataType>& type = in_fields[i]->type();
      out_fields.field_paths[i] = static_cast<int>(i);
      out_fields.field_names[i] = name;
      out_fields.data_types[i] = type;
    }
    schemas_.push_back(std::make_pair(handle, out_fields));
    return Status::OK();
  }

  // Register a projection by resolving each FieldRef against the full schema.
  // Fails if any reference is missing or ambiguous (FindOne).
  Status RegisterProjectedSchema(ProjectionIdEnum handle,
                                 const std::vector<FieldRef>& selected_fields,
                                 const Schema& full_schema) {
    FieldInfos out_fields;
    const FieldVector& in_fields = full_schema.fields();
    out_fields.field_paths.resize(selected_fields.size());
    out_fields.field_names.resize(selected_fields.size());
    out_fields.data_types.resize(selected_fields.size());
    for (size_t i = 0; i < selected_fields.size(); ++i) {
      // All fields must be found in schema without ambiguity
      ARROW_ASSIGN_OR_RAISE(auto match, selected_fields[i].FindOne(full_schema));
      const std::string& name = in_fields[match[0]]->name();
      const std::shared_ptr<DataType>& type = in_fields[match[0]]->type();
      out_fields.field_paths[i] = match[0];
      out_fields.field_names[i] = name;
      out_fields.data_types[i] = type;
    }
    schemas_.push_back(std::make_pair(handle, out_fields));
    return Status::OK();
  }

  // Materialize forward and inverse mappings for every registered projection.
  // Uses schema index 0 (the first registered schema) as the base.
  void RegisterEnd() {
    size_t size = schemas_.size();
    mappings_.resize(size);
    inverse_mappings_.resize(size);
    int id_base = 0;
    for (size_t i = 0; i < size; ++i) {
      GenerateMapForProjection(static_cast<int>(i), id_base);
    }
  }

  // Linear search for the internal index of a projection handle.
  int schema_id(ProjectionIdEnum schema_handle) const {
    for (size_t i = 0; i < schemas_.size(); ++i) {
      if (schemas_[i].first == schema_handle) {
        return static_cast<int>(i);
      }
    }
    // We should never get here
    ARROW_DCHECK(false);
    return -1;
  }

  // Fill mappings_[id_proj] (projection index -> base index) and
  // inverse_mappings_[id_proj] (base index -> projection index, or
  // kMissingField for base columns not present in the projection).
  void GenerateMapForProjection(int id_proj, int id_base) {
    int num_cols_proj = static_cast<int>(schemas_[id_proj].second.data_types.size());
    int num_cols_base = static_cast<int>(schemas_[id_base].second.data_types.size());

    std::vector<int>& mapping = mappings_[id_proj];
    std::vector<int>& inverse_mapping = inverse_mappings_[id_proj];
    mapping.resize(num_cols_proj);
    inverse_mapping.resize(num_cols_base);

    if (id_proj == id_base) {
      // The base maps to itself: identity in both directions.
      for (int i = 0; i < num_cols_base; ++i) {
        mapping[i] = inverse_mapping[i] = i;
      }
    } else {
      const FieldInfos& fields_proj = schemas_[id_proj].second;
      const FieldInfos& fields_base = schemas_[id_base].second;
      for (int i = 0; i < num_cols_base; ++i) {
        inverse_mapping[i] = SchemaProjectionMap::kMissingField;
      }
      for (int i = 0; i < num_cols_proj; ++i) {
        int field_id = SchemaProjectionMap::kMissingField;
        for (int j = 0; j < num_cols_base; ++j) {
          if (fields_proj.field_paths[i] == fields_base.field_paths[j]) {
            field_id = j;
            // If there are multiple matches for the same input field,
            // it will be mapped to the first match.
            break;
          }
        }
        ARROW_DCHECK(field_id != SchemaProjectionMap::kMissingField);
        mapping[i] = field_id;
        inverse_mapping[field_id] = i;
      }
    }
  }

  // vector used as a mapping from ProjectionIdEnum to fields
  std::vector<std::pair<ProjectionIdEnum, FieldInfos>> schemas_;
  // mappings_[p][i]: index in the base schema of field i of projection p
  std::vector<std::vector<int>> mappings_;
  // inverse_mappings_[p][b]: index in projection p of base field b (or kMissingField)
  std::vector<std::vector<int>> inverse_mappings_;
};
222
+
223
+ using HashJoinProjectionMaps = SchemaProjectionMaps<HashJoinProjection>;
224
+
225
+ } // namespace acero
226
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/task_util.h ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cstdint>
22
+ #include <functional>
23
+ #include <vector>
24
+
25
+ #include "arrow/acero/visibility.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/util/config.h"
28
+ #include "arrow/util/logging.h"
29
+
30
+ namespace arrow {
31
+ namespace acero {
32
+
33
// Wraps an atomic between two cache-line-sized pads so that concurrent
// writers on other cores do not invalidate neighboring data that would
// otherwise share a cache line with the atomic (false sharing).
//
template <typename T>
class AtomicWithPadding {
 private:
  static constexpr int kCacheLineSize = 64;
  // Pad placed before the atomic; never read or written.
  uint8_t leading_pad_[kCacheLineSize];

 public:
  std::atomic<T> value;

 private:
  // Pad placed after the atomic; never read or written.
  uint8_t trailing_pad_[kCacheLineSize];
};
48
+
49
/// Used for asynchronous execution of operations that can be broken into
/// a fixed number of symmetric tasks that can be executed concurrently.
///
/// Implements priorities between multiple such operations, called task groups.
///
/// Allows to specify the maximum number of in-flight tasks at any moment.
///
/// Also allows for executing next pending tasks immediately using a caller thread.
///
class ARROW_ACERO_EXPORT TaskScheduler {
 public:
  /// A single task: takes (thread_id, task_id) and returns a Status.
  using TaskImpl = std::function<Status(size_t, int64_t)>;
  /// Run once per task group after all of its tasks finish; takes thread_id.
  using TaskGroupContinuationImpl = std::function<Status(size_t)>;
  /// Callback that submits one unit of work to an executor.
  using ScheduleImpl = std::function<Status(TaskGroupContinuationImpl)>;
  /// Callback invoked once an Abort has taken effect.
  using AbortContinuationImpl = std::function<void()>;

  virtual ~TaskScheduler() = default;

  /// Order in which task groups are registered represents priorities of their tasks
  /// (the first group has the highest priority).
  ///
  /// Returns task group identifier that is used to request operations on the task group.
  virtual int RegisterTaskGroup(TaskImpl task_impl,
                                TaskGroupContinuationImpl cont_impl) = 0;

  /// Signal that no further task groups will be registered.
  virtual void RegisterEnd() = 0;

  /// total_num_tasks may be zero, in which case task group continuation will be executed
  /// immediately
  virtual Status StartTaskGroup(size_t thread_id, int group_id,
                                int64_t total_num_tasks) = 0;

  /// Execute given number of tasks immediately using caller thread
  virtual Status ExecuteMore(size_t thread_id, int num_tasks_to_execute,
                             bool execute_all) = 0;

  /// Begin scheduling tasks using provided callback and
  /// the limit on the number of in-flight tasks at any moment.
  ///
  /// Scheduling will continue as long as there are waiting tasks.
  ///
  /// It will automatically resume whenever new task group gets started.
  virtual Status StartScheduling(size_t thread_id, ScheduleImpl schedule_impl,
                                 int num_concurrent_tasks, bool use_sync_execution) = 0;

  /// Abort scheduling and execution.
  /// Used in case of being notified about unrecoverable error for the entire query.
  virtual void Abort(AbortContinuationImpl impl) = 0;

  /// Create the default implementation (defined in the .cc file).
  static std::unique_ptr<TaskScheduler> Make();
};
100
+
101
+ } // namespace acero
102
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/test_nodes.h ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+
22
+ #include "arrow/acero/options.h"
23
+ #include "arrow/acero/test_util_internal.h"
24
+ #include "arrow/testing/random.h"
25
+
26
+ namespace arrow {
27
+ namespace acero {
28
+
29
/// \brief Make a delaying source that is optionally noisy (prints when it emits)
AsyncGenerator<std::optional<ExecBatch>> MakeDelayedGen(
    Iterator<std::optional<ExecBatch>> src, std::string label, double delay_sec,
    bool noisy = false);

/// \brief Make a delaying source that is optionally noisy (prints when it emits)
AsyncGenerator<std::optional<ExecBatch>> MakeDelayedGen(
    AsyncGenerator<std::optional<ExecBatch>> src, std::string label, double delay_sec,
    bool noisy = false);

/// \brief Make a delaying source that is optionally noisy (prints when it emits)
AsyncGenerator<std::optional<ExecBatch>> MakeDelayedGen(BatchesWithSchema src,
                                                        std::string label,
                                                        double delay_sec,
                                                        bool noisy = false);
44
+
45
/// A node that slightly resequences the input at random
struct JitterNodeOptions : public ExecNodeOptions {
  /// Seed for the random resequencing, for reproducibility.
  random::SeedType seed;
  /// The max amount to add to a node's "cost".
  int max_jitter_modifier;

  explicit JitterNodeOptions(random::SeedType seed, int max_jitter_modifier = 5)
      : seed(seed), max_jitter_modifier(max_jitter_modifier) {}
  /// Factory name used to register/look up this node.
  static constexpr std::string_view kName = "jitter";
};
55
+
56
// Opaque implementation class; defined out of line.
class GateImpl;

/// Test helper used to hold batches in a GatedNode until released.
class Gate {
 public:
  /// Create a new gate.
  static std::shared_ptr<Gate> Make();

  Gate();
  virtual ~Gate();

  /// Release every batch currently held behind the gate.
  void ReleaseAllBatches();
  /// Release a single held batch.
  void ReleaseOneBatch();
  /// Future that completes when the next batch is released.
  Future<> WaitForNextReleasedBatch();

 private:
  ARROW_DISALLOW_COPY_AND_ASSIGN(Gate);

  // Raw pointer to the implementation; presumably owned and freed by the
  // destructor -- confirm in the corresponding .cc file.
  GateImpl* impl_;
};
74
+
75
// A node that holds all input batches until a given gate is released
struct GatedNodeOptions : public ExecNodeOptions {
  explicit GatedNodeOptions(Gate* gate) : gate(gate) {}
  // Borrowed; the gate must outlive the node using these options.
  Gate* gate;

  // Factory name used to register/look up this node.
  static constexpr std::string_view kName = "gated";
};
82
+
83
+ void RegisterTestNodes();
84
+
85
+ } // namespace acero
86
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/record_batch.h"
21
+ #include "arrow/type_traits.h"
22
+
23
+ namespace arrow::acero {
24
+
25
// normalize the value to unsigned 64-bits while preserving ordering of values
// (i.e. a < b implies NormalizeTime(a) < NormalizeTime(b) for same-typed inputs)
template <typename T, enable_if_t<std::is_integral<T>::value, bool> = true>
uint64_t NormalizeTime(T t);

// Read the time value at (col, row) of `batch` and normalize it to uint64_t.
// `time_type` selects how the column's values are interpreted -- see the
// implementation for the supported Type::type ids.
uint64_t GetTime(const RecordBatch* batch, Type::type time_type, int col, uint64_t row);
30
+
31
+ } // namespace arrow::acero
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/tpch_node.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <optional>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "arrow/acero/type_fwd.h"
26
+ #include "arrow/acero/visibility.h"
27
+ #include "arrow/result.h"
28
+ #include "arrow/status.h"
29
+
30
+ namespace arrow {
31
+ namespace acero {
32
+ namespace internal {
33
+
34
/// Factory for ExecNodes that generate TPC-H benchmark data.
class ARROW_ACERO_EXPORT TpchGen {
 public:
  virtual ~TpchGen() = default;

  /// \brief Create a factory for nodes that generate TPC-H data
  ///
  /// Note: Individual tables will reference each other. It is important that you only
  /// create a single TpchGen instance for each plan and then you can create nodes for
  /// each table from that single TpchGen instance. Note: Every batch will be scheduled
  /// as a new task using the ExecPlan's scheduler.
  ///
  /// \param plan the plan the generator nodes will be added to
  /// \param scale_factor TPC-H scale factor (1.0 by default)
  /// \param batch_size number of rows per generated batch
  /// \param seed optional RNG seed for reproducible data
  static Result<std::unique_ptr<TpchGen>> Make(
      ExecPlan* plan, double scale_factor = 1.0, int64_t batch_size = 4096,
      std::optional<int64_t> seed = std::nullopt);

  // The below methods will create and add an ExecNode to the plan that generates
  // data for the desired table. If columns is empty, all columns will be generated.
  // The methods return the added ExecNode, which should be used for inputs.
  virtual Result<ExecNode*> Supplier(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Part(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> PartSupp(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Customer(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Orders(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Lineitem(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Nation(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Region(std::vector<std::string> columns = {}) = 0;
};
62
+
63
+ } // namespace internal
64
+ } // namespace acero
65
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/type_fwd.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/compute/type_fwd.h"
21
+
22
+ namespace arrow {
23
+
24
+ namespace acero {
25
+
26
+ class ExecNode;
27
+ class ExecPlan;
28
+ class ExecNodeOptions;
29
+ class ExecFactoryRegistry;
30
+ class QueryContext;
31
+ struct QueryOptions;
32
+ struct Declaration;
33
+ class SinkNodeConsumer;
34
+
35
+ } // namespace acero
36
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/unmaterialized_table.h ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <optional>
21
+ #include <vector>
22
+ #include "arrow/array/builder_base.h"
23
+ #include "arrow/array/builder_binary.h"
24
+ #include "arrow/array/builder_primitive.h"
25
+ #include "arrow/memory_pool.h"
26
+ #include "arrow/record_batch.h"
27
+ #include "arrow/type_traits.h"
28
+ #include "arrow/util/logging.h"
29
+
30
+ namespace arrow::acero {
31
+
32
/// Lightweight representation of a cell of an unmaterialized table.
///
/// Non-owning: `batch` points at a record batch whose lifetime is kept alive
/// by the enclosing UnmaterializedCompositeTable (via its shared_ptr map).
struct CompositeEntry {
  RecordBatch* batch;
  // Row range within `batch`; sized as end - start by the slice code, so
  // presumably half-open [start, end) -- confirm against the builder.
  uint64_t start;
  uint64_t end;
};
39
+
40
+ // Forward declare the builder
41
+ template <size_t MAX_COMPOSITE_TABLES>
42
+ class UnmaterializedSliceBuilder;
43
+
44
+ /// A table of composite reference rows. Rows maintain pointers to the
45
+ /// constituent record batches, but the overall table retains shared_ptr
46
+ /// references to ensure memory remains resident while the table is live.
47
+ ///
48
+ /// The main reason for this is that, especially for wide tables, some operations
49
+ /// such as sorted_merge or asof_join are effectively row-oriented, rather than
50
+ /// column-oriented. Separating the join part from the columnar materialization
51
+ /// part simplifies the logic around data types and increases efficiency.
52
+ ///
53
+ /// We don't put the shared_ptr's into the rows for efficiency reasons. Use
54
+ /// UnmaterializedSliceBuilder to add ranges of record batches to this table
55
+ template <size_t MAX_COMPOSITE_TABLES>
56
+ class UnmaterializedCompositeTable {
57
+ public:
58
+ UnmaterializedCompositeTable(
59
+ const std::shared_ptr<arrow::Schema>& output_schema, size_t num_composite_tables,
60
+ std::unordered_map<int, std::pair<int, int>> output_col_to_src_,
61
+ arrow::MemoryPool* pool_ = arrow::default_memory_pool())
62
+ : schema(output_schema),
63
+ num_composite_tables(num_composite_tables),
64
+ output_col_to_src(std::move(output_col_to_src_)),
65
+ pool{pool_} {}
66
+
67
+ // Shallow wrappers around std::vector for performance
68
+ inline size_t capacity() { return slices.capacity(); }
69
+ inline void reserve(size_t num_slices) { slices.reserve(num_slices); }
70
+
71
+ inline size_t Size() const { return num_rows; }
72
+ inline size_t Empty() const { return num_rows == 0; }
73
+
74
  /// \brief Build a single RecordBatch from all accumulated slices.
  ///
  /// Returns std::nullopt when no rows were added (empty batches are never
  /// built). Each output column is rebuilt row-by-row from the source
  /// batches referenced by the slices. Returns Status::Invalid for field
  /// types not covered by the dispatch table below.
  Result<std::optional<std::shared_ptr<RecordBatch>>> Materialize() {
    // Don't build empty batches
    if (Empty()) {
      return std::nullopt;
    }
    // RecordBatch row counts are int64_t; num_rows is size_t.
    DCHECK_LE(Size(), (uint64_t)std::numeric_limits<int64_t>::max());
    std::vector<std::shared_ptr<arrow::Array>> arrays(schema->num_fields());

// Maps a runtime Type::id to a call of the statically-typed column
// materializer for that type.
#define MATERIALIZE_CASE(id)                                                          \
  case arrow::Type::id: {                                                             \
    using T = typename arrow::TypeIdTraits<arrow::Type::id>::Type;                    \
    ARROW_ASSIGN_OR_RAISE(arrays.at(i_col), materializeColumn<T>(field_type, i_col)); \
    break;                                                                            \
  }

    // Build the arrays column-by-column from the rows
    for (int i_col = 0; i_col < schema->num_fields(); ++i_col) {
      const std::shared_ptr<arrow::Field>& field = schema->field(i_col);
      const auto& field_type = field->type();

      switch (field_type->id()) {
        MATERIALIZE_CASE(BOOL)
        MATERIALIZE_CASE(INT8)
        MATERIALIZE_CASE(INT16)
        MATERIALIZE_CASE(INT32)
        MATERIALIZE_CASE(INT64)
        MATERIALIZE_CASE(UINT8)
        MATERIALIZE_CASE(UINT16)
        MATERIALIZE_CASE(UINT32)
        MATERIALIZE_CASE(UINT64)
        MATERIALIZE_CASE(FLOAT)
        MATERIALIZE_CASE(DOUBLE)
        MATERIALIZE_CASE(DATE32)
        MATERIALIZE_CASE(DATE64)
        MATERIALIZE_CASE(TIME32)
        MATERIALIZE_CASE(TIME64)
        MATERIALIZE_CASE(TIMESTAMP)
        MATERIALIZE_CASE(STRING)
        MATERIALIZE_CASE(LARGE_STRING)
        MATERIALIZE_CASE(BINARY)
        MATERIALIZE_CASE(LARGE_BINARY)
        default:
          return arrow::Status::Invalid("Unsupported data type ",
                                        field->type()->ToString(), " for field ",
                                        field->name());
      }
    }

#undef MATERIALIZE_CASE

    std::shared_ptr<arrow::RecordBatch> r =
        arrow::RecordBatch::Make(schema, (int64_t)num_rows, arrays);
    return r;
  }
128
+
129
+ private:
130
  /// \brief One logical row range assembled from up to MAX_COMPOSITE_TABLES
  /// horizontally concatenated source components.
  struct UnmaterializedSlice {
    // Component row ranges, one per participating source table; only the
    // first num_components entries are valid.
    CompositeEntry components[MAX_COMPOSITE_TABLES];
    size_t num_components;

    /// \brief Row count of the slice.
    ///
    /// All components of a slice cover the same number of rows (enforced by
    /// UnmaterializedSliceBuilder), so the first component's extent suffices.
    inline int64_t Size() const {
      if (num_components == 0) {
        return 0;
      }
      return components[0].end - components[0].start;
    }
  };
141
+
142
+ // Mapping from an output column ID to a source table ID and column ID
143
+ std::shared_ptr<arrow::Schema> schema;
144
+ size_t num_composite_tables;
145
+ std::unordered_map<int, std::pair<int, int>> output_col_to_src;
146
+
147
+ arrow::MemoryPool* pool;
148
+
149
+ /// A map from address of a record batch to the record batch. Used to
150
+ /// maintain the lifetime of the record batch in case it goes out of scope
151
+ /// by the main exec node thread
152
+ std::unordered_map<uintptr_t, std::shared_ptr<arrow::RecordBatch>> ptr2Ref = {};
153
+ std::vector<UnmaterializedSlice> slices;
154
+
155
+ size_t num_rows = 0;
156
+
157
+ // for AddRecordBatchRef/AddSlice and access to UnmaterializedSlice
158
+ friend class UnmaterializedSliceBuilder<MAX_COMPOSITE_TABLES>;
159
+
160
  // Pin `ref` for the lifetime of this table so that the raw RecordBatch
  // pointers stored inside slices remain valid until materialization.
  void AddRecordBatchRef(const std::shared_ptr<arrow::RecordBatch>& ref) {
    ptr2Ref[(uintptr_t)ref.get()] = ref;
  }
163
+ void AddSlice(const UnmaterializedSlice& slice) {
164
+ slices.push_back(slice);
165
+ num_rows += slice.Size();
166
+ }
167
+
168
  /// \brief Append one boolean value (or null) from `source` at `row`.
  ///
  /// Uses the builder's Unsafe* API, so the caller must have Reserve()d
  /// enough capacity beforehand (materializeColumn does).
  template <class Type, class Builder = typename TypeTraits<Type>::BuilderType>
  enable_if_boolean<Type, Status> static BuilderAppend(
      Builder& builder, const std::shared_ptr<ArrayData>& source, uint64_t row) {
    if (source->IsNull(row)) {
      builder.UnsafeAppendNull();
      return Status::OK();
    }
    // Boolean values are bit-packed; read the bit out of the values buffer
    // (buffer index 1).
    builder.UnsafeAppend(bit_util::GetBit(source->template GetValues<uint8_t>(1), row));
    return Status::OK();
  }
178
+
179
  /// \brief Append one fixed-width (non-boolean) value or null from `source`
  /// at `row`.
  ///
  /// Uses the builder's Unsafe* API, so the caller must have Reserve()d
  /// enough capacity beforehand (materializeColumn does).
  template <class Type, class Builder = typename TypeTraits<Type>::BuilderType>
  enable_if_t<is_fixed_width_type<Type>::value && !is_boolean_type<Type>::value,
              Status> static BuilderAppend(Builder& builder,
                                           const std::shared_ptr<ArrayData>& source,
                                           uint64_t row) {
    if (source->IsNull(row)) {
      builder.UnsafeAppendNull();
      return Status::OK();
    }
    // Fixed-width values can be read directly from the values buffer.
    using CType = typename TypeTraits<Type>::CType;
    builder.UnsafeAppend(source->template GetValues<CType>(1)[row]);
    return Status::OK();
  }
192
+
193
  /// \brief Append one variable-length binary/string value or null from
  /// `source` at `row`.
  ///
  /// Unlike the fixed-width overloads this uses the checked Append()
  /// (which may grow the data buffer), since Reserve() only pre-sizes the
  /// offsets, not the value bytes.
  template <class Type, class Builder = typename TypeTraits<Type>::BuilderType>
  enable_if_base_binary<Type, Status> static BuilderAppend(
      Builder& builder, const std::shared_ptr<ArrayData>& source, uint64_t row) {
    if (source->IsNull(row)) {
      return builder.AppendNull();
    }
    // For binary layouts, buffers[1] holds offsets and buffers[2] the bytes;
    // the value is the byte range [offsets[row], offsets[row + 1]).
    using offset_type = typename Type::offset_type;
    const uint8_t* data = source->buffers[2]->data();
    const offset_type* offsets = source->GetValues<offset_type>(1);
    const offset_type offset0 = offsets[row];
    const offset_type offset1 = offsets[row + 1];
    return builder.Append(data + offset0, offset1 - offset0);
  }
206
+
207
  /// \brief Materialize output column `i_col` by walking every slice.
  ///
  /// Looks up which (source table, source column) pair feeds this output
  /// column, then appends each slice's row range from that source; slices
  /// whose component batch is null contribute nulls instead.
  template <class Type, class Builder = typename arrow::TypeTraits<Type>::BuilderType>
  arrow::Result<std::shared_ptr<arrow::Array>> materializeColumn(
      const std::shared_ptr<arrow::DataType>& type, int i_col) {
    ARROW_ASSIGN_OR_RAISE(auto builderPtr, arrow::MakeBuilder(type, pool));
    Builder& builder = *arrow::internal::checked_cast<Builder*>(builderPtr.get());
    // Reserve up front so the Unsafe* appends in BuilderAppend are valid.
    ARROW_RETURN_NOT_OK(builder.Reserve(num_rows));

    // NOTE(review): operator[] default-inserts {0, 0} if i_col is absent;
    // presumably every output column is always mapped — confirm, else at().
    const auto& [table_index, column_index] = output_col_to_src[i_col];

    for (const auto& unmaterialized_slice : slices) {
      const auto& [batch, start, end] = unmaterialized_slice.components[table_index];
      if (batch) {
        for (uint64_t rowNum = start; rowNum < end; ++rowNum) {
          arrow::Status st = BuilderAppend<Type, Builder>(
              builder, batch->column_data(column_index), rowNum);
          ARROW_RETURN_NOT_OK(st);
        }
      } else {
        // Null component: this source contributed no rows here; emit nulls.
        for (uint64_t rowNum = start; rowNum < end; ++rowNum) {
          ARROW_RETURN_NOT_OK(builder.AppendNull());
        }
      }
    }
    std::shared_ptr<arrow::Array> result;
    ARROW_RETURN_NOT_OK(builder.Finish(&result));
    return Result{std::move(result)};
  }
234
+ };
235
+
236
/// \brief A builder class that can append blocks of data to a row. A "slice"
/// is built by horizontally concatenating record batches.
///
/// Each AddEntry() call contributes one component (a contiguous row range of
/// one source record batch, or a null placeholder) to the slice under
/// construction; Finalize() commits the slice to the owning
/// UnmaterializedCompositeTable.
template <size_t MAX_COMPOSITE_TABLES>
class UnmaterializedSliceBuilder {
 public:
  /// \brief Construct a builder that appends into `table_`.
  /// The builder does not own the table; `table_` must outlive the builder.
  explicit UnmaterializedSliceBuilder(
      UnmaterializedCompositeTable<MAX_COMPOSITE_TABLES>* table_)
      : table(table_) {}

  /// \brief Append rows [start, end) of `rb` as the next slice component.
  ///
  /// Passing a null `rb` records a placeholder component that materializes
  /// as nulls. Every component of one slice must span the same number of
  /// rows (checked via DCHECK in debug builds only).
  void AddEntry(std::shared_ptr<RecordBatch> rb, uint64_t start, uint64_t end) {
    if (rb) {
      // Keep the batch alive for as long as the table references its rows.
      table->AddRecordBatchRef(rb);
    }
    if (slice.num_components) {
      size_t last_index = slice.num_components - 1;
      DCHECK_EQ(slice.components[last_index].end - slice.components[last_index].start,
                end - start)
          << "Slices should be the same length. ";
    }
    // Store a raw pointer; lifetime is guaranteed by AddRecordBatchRef above.
    slice.components[slice.num_components++] = CompositeEntry{rb.get(), start, end};
  }

  /// \brief Commit the accumulated slice to the table.
  void Finalize() { table->AddSlice(slice); }
  /// \brief Row count of the slice so far (0 if no components added yet).
  int64_t Size() { return slice.Size(); }

 private:
  using TUnmaterializedCompositeTable =
      UnmaterializedCompositeTable<MAX_COMPOSITE_TABLES>;
  using TUnmaterializedSlice =
      typename TUnmaterializedCompositeTable::UnmaterializedSlice;

  // Non-owning pointer to the destination table.
  TUnmaterializedCompositeTable* table;
  // Slice under construction; value-initialized so num_components == 0.
  TUnmaterializedSlice slice{};
};
270
+
271
+ } // namespace arrow::acero
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/util.h ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cstdint>
22
+ #include <optional>
23
+ #include <thread>
24
+ #include <unordered_map>
25
+ #include <vector>
26
+
27
+ #include "arrow/acero/options.h"
28
+ #include "arrow/acero/type_fwd.h"
29
+ #include "arrow/buffer.h"
30
+ #include "arrow/compute/expression.h"
31
+ #include "arrow/compute/util.h"
32
+ #include "arrow/memory_pool.h"
33
+ #include "arrow/result.h"
34
+ #include "arrow/status.h"
35
+ #include "arrow/util/bit_util.h"
36
+ #include "arrow/util/cpu_info.h"
37
+ #include "arrow/util/logging.h"
38
+ #include "arrow/util/mutex.h"
39
+ #include "arrow/util/thread_pool.h"
40
+ #include "arrow/util/type_fwd.h"
41
+
42
+ namespace arrow {
43
+
44
+ namespace acero {
45
+
46
/// \brief Validate an exec node's input list at construction time.
///
/// NOTE(review): declaration only — presumably checks that `inputs` has
/// exactly `expected_num_inputs` elements belonging to `plan`, reporting
/// errors under `kind_name`; confirm against the .cc implementation.
ARROW_ACERO_EXPORT
Status ValidateExecNodeInputs(ExecPlan* plan, const std::vector<ExecNode*>& inputs,
                              int expected_num_inputs, const char* kind_name);

/// \brief Assemble `exec_batches` into a single Table with the given schema.
ARROW_ACERO_EXPORT
Result<std::shared_ptr<Table>> TableFromExecBatches(
    const std::shared_ptr<Schema>& schema, const std::vector<ExecBatch>& exec_batches);
53
+
54
/// \brief A thread-safe counter that detects when an expected number of
/// events has occurred.
///
/// count_ tracks events seen so far; total_ is the expected number
/// (-1 until SetTotal() is called, so the total may be learned late).
/// Whichever of Increment(), SetTotal(), or Cancel() observes completion
/// first "wins" via DoneOnce(): exactly one of those calls ever returns
/// true over the counter's lifetime.
class ARROW_ACERO_EXPORT AtomicCounter {
 public:
  AtomicCounter() = default;

  /// \brief Number of events counted so far.
  int count() const { return count_.load(); }

  /// \brief The expected total, or nullopt if SetTotal() was never called.
  std::optional<int> total() const {
    int total = total_.load();
    if (total == -1) return {};
    return total;
  }

  // return true if the counter is complete
  bool Increment() {
    // Incrementing past a known total indicates a caller bookkeeping bug
    // (debug builds only).
    DCHECK_NE(count_.load(), total_.load());
    int count = count_.fetch_add(1) + 1;
    if (count != total_.load()) return false;
    return DoneOnce();
  }

  // return true if the counter is complete
  bool SetTotal(int total) {
    total_.store(total);
    if (count_.load() != total) return false;
    return DoneOnce();
  }

  // return true if the counter has not already been completed
  bool Cancel() { return DoneOnce(); }

  // return true if the counter has finished or been cancelled
  bool Completed() { return complete_.load(); }

 private:
  // ensure there is only one true return from Increment(), SetTotal(), or Cancel()
  bool DoneOnce() {
    bool expected = false;
    return complete_.compare_exchange_strong(expected, true);
  }

  // count_: events seen so far; total_: expected count (-1 == unknown)
  std::atomic<int> count_{0}, total_{-1};
  // Latches to true exactly once, by whichever caller completes the counter.
  std::atomic<bool> complete_{false};
};
97
+
98
/// \brief Maps each calling thread's id to a stable small index.
///
/// The id_to_index_ map (guarded by mutex_) remembers the index assigned to
/// each std::thread::id, so repeated calls from one thread return the same
/// value. Member functions are implemented in the .cc file.
class ARROW_ACERO_EXPORT ThreadIndexer {
 public:
  /// \brief Return the index for the calling thread, assigning one on first
  /// use.
  size_t operator()();

  /// \brief Maximum number of distinct thread indices available.
  static size_t Capacity();

 private:
  // Declaration only; presumably bounds-checks thread_index against
  // Capacity() — confirm against the .cc implementation.
  static size_t Check(size_t thread_index);

  // Guards id_to_index_ against concurrent first-use insertions.
  arrow::util::Mutex mutex_;
  std::unordered_map<std::thread::id, size_t> id_to_index_;
};
110
+
111
/// \brief A consumer that collects results into an in-memory table
///
/// Batches delivered via Consume() are accumulated into batches_ (guarded
/// by consume_mutex_) and written out as a single Table to *out_.
/// Init/Consume/Finish are implemented in the .cc file.
struct ARROW_ACERO_EXPORT TableSinkNodeConsumer : public SinkNodeConsumer {
 public:
  /// \brief Construct a consumer writing its result into *out.
  /// Neither pointer is owned; both must outlive the consumer.
  TableSinkNodeConsumer(std::shared_ptr<Table>* out, MemoryPool* pool)
      : out_(out), pool_(pool) {}
  Status Init(const std::shared_ptr<Schema>& schema,
              BackpressureControl* backpressure_control, ExecPlan* plan) override;
  Status Consume(ExecBatch batch) override;
  Future<> Finish() override;

 private:
  // Destination for the assembled table (non-owning).
  std::shared_ptr<Table>* out_;
  // Allocation pool used while assembling the output (non-owning).
  MemoryPool* pool_;
  // Output schema — presumably captured in Init(); confirm in the .cc file.
  std::shared_ptr<Schema> schema_;
  // Batches accumulated across Consume() calls.
  std::vector<std::shared_ptr<RecordBatch>> batches_;
  // Serializes concurrent Consume() calls.
  arrow::util::Mutex consume_mutex_;
};
128
+
129
/// \brief A sink consumer that discards every batch it receives.
///
/// Used when a plan is executed for its side effects and its results are
/// not needed.
class ARROW_ACERO_EXPORT NullSinkNodeConsumer : public SinkNodeConsumer {
 public:
  // No state to set up.
  Status Init(const std::shared_ptr<Schema>&, BackpressureControl*,
              ExecPlan* plan) override {
    return Status::OK();
  }
  // Drop the batch.
  Status Consume(ExecBatch exec_batch) override { return Status::OK(); }
  // Nothing buffered; finishes immediately.
  Future<> Finish() override { return Status::OK(); }

 public:
  /// \brief Convenience factory.
  static std::shared_ptr<NullSinkNodeConsumer> Make() {
    return std::make_shared<NullSinkNodeConsumer>();
  }
};
143
+
144
/// \brief Helper that ExecNode implementations use to emit tracing spans and
/// events for the standard node lifecycle (start producing, input received,
/// finish).
///
/// NOTE(review): the original comment called this a "CRTP helper", but as
/// declared it is a plain wrapper around an ExecNode pointer, not a CRTP
/// base — confirm intended usage.
class ARROW_ACERO_EXPORT TracedNode {
 public:
  // All nodes should call TraceStartProducing or NoteStartProducing exactly once
  // Most nodes will be fine with a call to NoteStartProducing since the StartProducing
  // call is usually fairly cheap and simply schedules tasks to fetch the actual data.

  explicit TracedNode(ExecNode* node) : node_(node) {}

  // Create a span to record the StartProducing work
  [[nodiscard]] ::arrow::internal::tracing::Scope TraceStartProducing(
      std::string extra_details) const;

  // Record a call to StartProducing without creating a span
  void NoteStartProducing(std::string extra_details) const;

  // All nodes should call TraceInputReceived for each batch they receive. This call
  // should track the time spent processing the batch. NoteInputReceived is available
  // but usually won't be used unless a node is simply adding batches to a trivial queue.

  // Create a span to record the InputReceived work
  [[nodiscard]] ::arrow::internal::tracing::Scope TraceInputReceived(
      const ExecBatch& batch) const;

  // Record a call to InputReceived without creating a span
  void NoteInputReceived(const ExecBatch& batch) const;

  // Create a span to record any "finish" work. This should NOT be called as part of
  // InputFinished and many nodes may not need to call this at all. This should be used
  // when a node has some extra work that has to be done once it has received all of its
  // data. For example, an aggregation node calculating aggregations. This will
  // typically be called as a result of InputFinished OR InputReceived.
  [[nodiscard]] ::arrow::internal::tracing::Scope TraceFinish() const;

 private:
  // Non-owning; the traced node must outlive this helper.
  ExecNode* node_;
};
182
+
183
+ } // namespace acero
184
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
#if defined(_WIN32) || defined(__CYGWIN__)
// Windows: symbols must be explicitly decorated with __declspec.
#if defined(_MSC_VER)
// MSVC warns (C4251) when an exported class has members of non-exported
// template types (e.g. std::vector); suppress while this header's
// declarations are in scope.
#pragma warning(push)
#pragma warning(disable : 4251)
#else
// MinGW/Cygwin GCC: silence "attributes ignored" noise on dllimport/export.
#pragma GCC diagnostic ignored "-Wattributes"
#endif

#ifdef ARROW_ACERO_STATIC
// Static library: no import/export decoration needed.
#define ARROW_ACERO_EXPORT
#elif defined(ARROW_ACERO_EXPORTING)
// Building the acero shared library itself: export symbols.
#define ARROW_ACERO_EXPORT __declspec(dllexport)
#else
// Consuming the acero shared library: import symbols.
#define ARROW_ACERO_EXPORT __declspec(dllimport)
#endif

#define ARROW_ACERO_NO_EXPORT
#else  // Not Windows
// ELF/Mach-O toolchains: use GCC/Clang symbol-visibility attributes.
#ifndef ARROW_ACERO_EXPORT
#define ARROW_ACERO_EXPORT __attribute__((visibility("default")))
#endif
#ifndef ARROW_ACERO_NO_EXPORT
#define ARROW_ACERO_NO_EXPORT __attribute__((visibility("hidden")))
#endif
#endif  // Not-Windows

#if defined(_MSC_VER)
#pragma warning(pop)
#endif
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/bridge.h ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <string>
23
+
24
+ #include "arrow/c/abi.h"
25
+ #include "arrow/device.h"
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type_fwd.h"
29
+ #include "arrow/util/macros.h"
30
+ #include "arrow/util/visibility.h"
31
+
32
+ namespace arrow {
33
+
34
+ /// \defgroup c-data-interface Functions for working with the C data interface.
35
+ ///
36
+ /// @{
37
+
38
+ /// \brief Export C++ DataType using the C data interface format.
39
+ ///
40
+ /// The root type is considered to have empty name and metadata.
41
+ /// If you want the root type to have a name and/or metadata, pass
42
+ /// a Field instead.
43
+ ///
44
+ /// \param[in] type DataType object to export
45
+ /// \param[out] out C struct where to export the datatype
46
+ ARROW_EXPORT
47
+ Status ExportType(const DataType& type, struct ArrowSchema* out);
48
+
49
+ /// \brief Export C++ Field using the C data interface format.
50
+ ///
51
+ /// \param[in] field Field object to export
52
+ /// \param[out] out C struct where to export the field
53
+ ARROW_EXPORT
54
+ Status ExportField(const Field& field, struct ArrowSchema* out);
55
+
56
+ /// \brief Export C++ Schema using the C data interface format.
57
+ ///
58
+ /// \param[in] schema Schema object to export
59
+ /// \param[out] out C struct where to export the field
60
+ ARROW_EXPORT
61
+ Status ExportSchema(const Schema& schema, struct ArrowSchema* out);
62
+
63
+ /// \brief Export C++ Array using the C data interface format.
64
+ ///
65
+ /// The resulting ArrowArray struct keeps the array data and buffers alive
66
+ /// until its release callback is called by the consumer.
67
+ ///
68
+ /// \param[in] array Array object to export
69
+ /// \param[out] out C struct where to export the array
70
+ /// \param[out] out_schema optional C struct where to export the array type
71
+ ARROW_EXPORT
72
+ Status ExportArray(const Array& array, struct ArrowArray* out,
73
+ struct ArrowSchema* out_schema = NULLPTR);
74
+
75
+ /// \brief Export C++ RecordBatch using the C data interface format.
76
+ ///
77
+ /// The record batch is exported as if it were a struct array.
78
+ /// The resulting ArrowArray struct keeps the record batch data and buffers alive
79
+ /// until its release callback is called by the consumer.
80
+ ///
81
+ /// \param[in] batch Record batch to export
82
+ /// \param[out] out C struct where to export the record batch
83
+ /// \param[out] out_schema optional C struct where to export the record batch schema
84
+ ARROW_EXPORT
85
+ Status ExportRecordBatch(const RecordBatch& batch, struct ArrowArray* out,
86
+ struct ArrowSchema* out_schema = NULLPTR);
87
+
88
+ /// \brief Import C++ DataType from the C data interface.
89
+ ///
90
+ /// The given ArrowSchema struct is released (as per the C data interface
91
+ /// specification), even if this function fails.
92
+ ///
93
+ /// \param[in,out] schema C data interface struct representing the data type
94
+ /// \return Imported type object
95
+ ARROW_EXPORT
96
+ Result<std::shared_ptr<DataType>> ImportType(struct ArrowSchema* schema);
97
+
98
+ /// \brief Import C++ Field from the C data interface.
99
+ ///
100
+ /// The given ArrowSchema struct is released (as per the C data interface
101
+ /// specification), even if this function fails.
102
+ ///
103
+ /// \param[in,out] schema C data interface struct representing the field
104
+ /// \return Imported field object
105
+ ARROW_EXPORT
106
+ Result<std::shared_ptr<Field>> ImportField(struct ArrowSchema* schema);
107
+
108
+ /// \brief Import C++ Schema from the C data interface.
109
+ ///
110
+ /// The given ArrowSchema struct is released (as per the C data interface
111
+ /// specification), even if this function fails.
112
+ ///
113
+ /// \param[in,out] schema C data interface struct representing the field
114
+ /// \return Imported field object
115
+ ARROW_EXPORT
116
+ Result<std::shared_ptr<Schema>> ImportSchema(struct ArrowSchema* schema);
117
+
118
+ /// \brief Import C++ array from the C data interface.
119
+ ///
120
+ /// The ArrowArray struct has its contents moved (as per the C data interface
121
+ /// specification) to a private object held alive by the resulting array.
122
+ ///
123
+ /// \param[in,out] array C data interface struct holding the array data
124
+ /// \param[in] type type of the imported array
125
+ /// \return Imported array object
126
+ ARROW_EXPORT
127
+ Result<std::shared_ptr<Array>> ImportArray(struct ArrowArray* array,
128
+ std::shared_ptr<DataType> type);
129
+
130
+ /// \brief Import C++ array and its type from the C data interface.
131
+ ///
132
+ /// The ArrowArray struct has its contents moved (as per the C data interface
133
+ /// specification) to a private object held alive by the resulting array.
134
+ /// The ArrowSchema struct is released, even if this function fails.
135
+ ///
136
+ /// \param[in,out] array C data interface struct holding the array data
137
+ /// \param[in,out] type C data interface struct holding the array type
138
+ /// \return Imported array object
139
+ ARROW_EXPORT
140
+ Result<std::shared_ptr<Array>> ImportArray(struct ArrowArray* array,
141
+ struct ArrowSchema* type);
142
+
143
+ /// \brief Import C++ record batch from the C data interface.
144
+ ///
145
+ /// The ArrowArray struct has its contents moved (as per the C data interface
146
+ /// specification) to a private object held alive by the resulting record batch.
147
+ ///
148
+ /// \param[in,out] array C data interface struct holding the record batch data
149
+ /// \param[in] schema schema of the imported record batch
150
+ /// \return Imported record batch object
151
+ ARROW_EXPORT
152
+ Result<std::shared_ptr<RecordBatch>> ImportRecordBatch(struct ArrowArray* array,
153
+ std::shared_ptr<Schema> schema);
154
+
155
+ /// \brief Import C++ record batch and its schema from the C data interface.
156
+ ///
157
+ /// The type represented by the ArrowSchema struct must be a struct type array.
158
+ /// The ArrowArray struct has its contents moved (as per the C data interface
159
+ /// specification) to a private object held alive by the resulting record batch.
160
+ /// The ArrowSchema struct is released, even if this function fails.
161
+ ///
162
+ /// \param[in,out] array C data interface struct holding the record batch data
163
+ /// \param[in,out] schema C data interface struct holding the record batch schema
164
+ /// \return Imported record batch object
165
+ ARROW_EXPORT
166
+ Result<std::shared_ptr<RecordBatch>> ImportRecordBatch(struct ArrowArray* array,
167
+ struct ArrowSchema* schema);
168
+
169
+ /// @}
170
+
171
+ /// \defgroup c-data-device-interface Functions for working with the C data device
172
+ /// interface.
173
+ ///
174
+ /// @{
175
+
176
+ /// \brief EXPERIMENTAL: Export C++ Array as an ArrowDeviceArray.
177
+ ///
178
+ /// The resulting ArrowDeviceArray struct keeps the array data and buffers alive
179
+ /// until its release callback is called by the consumer. All buffers in
180
+ /// the provided array MUST have the same device_type, otherwise an error
181
+ /// will be returned.
182
+ ///
183
+ /// If sync is non-null, get_event will be called on it in order to
184
+ /// potentially provide an event for consumers to synchronize on.
185
+ ///
186
+ /// \param[in] array Array object to export
187
+ /// \param[in] sync shared_ptr to object derived from Device::SyncEvent or null
188
+ /// \param[out] out C struct to export the array to
189
+ /// \param[out] out_schema optional C struct to export the array type to
190
+ ARROW_EXPORT
191
+ Status ExportDeviceArray(const Array& array, std::shared_ptr<Device::SyncEvent> sync,
192
+ struct ArrowDeviceArray* out,
193
+ struct ArrowSchema* out_schema = NULLPTR);
194
+
195
+ /// \brief EXPERIMENTAL: Export C++ RecordBatch as an ArrowDeviceArray.
196
+ ///
197
+ /// The record batch is exported as if it were a struct array.
198
+ /// The resulting ArrowDeviceArray struct keeps the record batch data and buffers alive
199
+ /// until its release callback is called by the consumer.
200
+ ///
201
+ /// All buffers of all columns in the record batch must have the same device_type
202
+ /// otherwise an error will be returned. If columns are on different devices,
203
+ /// they should be exported using different ArrowDeviceArray instances.
204
+ ///
205
+ /// If sync is non-null, get_event will be called on it in order to
206
+ /// potentially provide an event for consumers to synchronize on.
207
+ ///
208
+ /// \param[in] batch Record batch to export
209
+ /// \param[in] sync shared_ptr to object derived from Device::SyncEvent or null
210
+ /// \param[out] out C struct where to export the record batch
211
+ /// \param[out] out_schema optional C struct where to export the record batch schema
212
+ ARROW_EXPORT
213
+ Status ExportDeviceRecordBatch(const RecordBatch& batch,
214
+ std::shared_ptr<Device::SyncEvent> sync,
215
+ struct ArrowDeviceArray* out,
216
+ struct ArrowSchema* out_schema = NULLPTR);
217
+
218
+ using DeviceMemoryMapper =
219
+ std::function<Result<std::shared_ptr<MemoryManager>>(ArrowDeviceType, int64_t)>;
220
+
221
+ /// \brief EXPERIMENTAL: Import C++ device array from the C data interface.
222
+ ///
223
+ /// The ArrowArray struct has its contents moved (as per the C data interface
224
+ /// specification) to a private object held alive by the resulting array. The
225
+ /// buffers of the Array are located on the device indicated by the device_type.
226
+ ///
227
+ /// \param[in,out] array C data interface struct holding the array data
228
+ /// \param[in] type type of the imported array
229
+ /// \param[in] mapper A function to map device + id to memory manager
230
+ /// \return Imported array object
231
+ ARROW_EXPORT
232
+ Result<std::shared_ptr<Array>> ImportDeviceArray(struct ArrowDeviceArray* array,
233
+ std::shared_ptr<DataType> type,
234
+ const DeviceMemoryMapper& mapper);
235
+
236
+ /// \brief EXPERIMENTAL: Import C++ device array and its type from the C data interface.
237
+ ///
238
+ /// The ArrowArray struct has its contents moved (as per the C data interface
239
+ /// specification) to a private object held alive by the resulting array.
240
+ /// The ArrowSchema struct is released, even if this function fails. The
241
+ /// buffers of the Array are located on the device indicated by the device_type.
242
+ ///
243
+ /// \param[in,out] array C data interface struct holding the array data
244
+ /// \param[in,out] type C data interface struct holding the array type
245
+ /// \param[in] mapper A function to map device + id to memory manager
246
+ /// \return Imported array object
247
+ ARROW_EXPORT
248
+ Result<std::shared_ptr<Array>> ImportDeviceArray(struct ArrowDeviceArray* array,
249
+ struct ArrowSchema* type,
250
+ const DeviceMemoryMapper& mapper);
251
+
252
+ /// \brief EXPERIMENTAL: Import C++ record batch with buffers on a device from the C data
253
+ /// interface.
254
+ ///
255
+ /// The ArrowArray struct has its contents moved (as per the C data interface
256
+ /// specification) to a private object held alive by the resulting record batch.
257
+ /// The buffers of all columns of the record batch are located on the device
258
+ /// indicated by the device type.
259
+ ///
260
+ /// \param[in,out] array C data interface struct holding the record batch data
261
+ /// \param[in] schema schema of the imported record batch
262
+ /// \param[in] mapper A function to map device + id to memory manager
263
+ /// \return Imported record batch object
264
+ ARROW_EXPORT
265
+ Result<std::shared_ptr<RecordBatch>> ImportDeviceRecordBatch(
266
+ struct ArrowDeviceArray* array, std::shared_ptr<Schema> schema,
267
+ const DeviceMemoryMapper& mapper);
268
+
269
+ /// \brief EXPERIMENTAL: Import C++ record batch with buffers on a device and its schema
270
+ /// from the C data interface.
271
+ ///
272
+ /// The type represented by the ArrowSchema struct must be a struct type array.
273
+ /// The ArrowArray struct has its contents moved (as per the C data interface
274
+ /// specification) to a private object held alive by the resulting record batch.
275
+ /// The ArrowSchema struct is released, even if this function fails. The buffers
276
+ /// of all columns of the record batch are located on the device indicated by the
277
+ /// device type.
278
+ ///
279
+ /// \param[in,out] array C data interface struct holding the record batch data
280
+ /// \param[in,out] schema C data interface struct holding the record batch schema
281
+ /// \param[in] mapper A function to map device + id to memory manager
282
+ /// \return Imported record batch object
283
+ ARROW_EXPORT
284
+ Result<std::shared_ptr<RecordBatch>> ImportDeviceRecordBatch(
285
+ struct ArrowDeviceArray* array, struct ArrowSchema* schema,
286
+ const DeviceMemoryMapper& mapper);
287
+
288
+ /// @}
289
+
290
+ /// \defgroup c-stream-interface Functions for working with the C data interface.
291
+ ///
292
+ /// @{
293
+
294
+ /// \brief Export C++ RecordBatchReader using the C stream interface.
295
+ ///
296
+ /// The resulting ArrowArrayStream struct keeps the record batch reader alive
297
+ /// until its release callback is called by the consumer.
298
+ ///
299
+ /// \param[in] reader RecordBatchReader object to export
300
+ /// \param[out] out C struct where to export the stream
301
+ ARROW_EXPORT
302
+ Status ExportRecordBatchReader(std::shared_ptr<RecordBatchReader> reader,
303
+ struct ArrowArrayStream* out);
304
+
305
+ /// \brief Import C++ RecordBatchReader from the C stream interface.
306
+ ///
307
+ /// The ArrowArrayStream struct has its contents moved to a private object
308
+ /// held alive by the resulting record batch reader.
309
+ ///
310
+ /// \param[in,out] stream C stream interface struct
311
+ /// \return Imported RecordBatchReader object
312
+ ARROW_EXPORT
313
+ Result<std::shared_ptr<RecordBatchReader>> ImportRecordBatchReader(
314
+ struct ArrowArrayStream* stream);
315
+
316
+ /// @}
317
+
318
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/c/helpers.h ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "arrow/c/abi.h"
25
+
26
/// \brief Assert `condition`, printing `msg` with file/line info to stderr and
/// aborting the process on failure.
///
/// Unlike <assert.h>'s assert(), this check is always compiled in (it does not
/// depend on NDEBUG): the release-callback invariants checked below must hold
/// in release builds too, since violating them leads to double-release bugs.
#define ARROW_C_ASSERT(condition, msg) \
  do { \
    if (!(condition)) { \
      fprintf(stderr, "%s:%d:: %s", __FILE__, __LINE__, (msg)); \
      abort(); \
    } \
  } while (0)
33
+
34
+ #ifdef __cplusplus
35
+ extern "C" {
36
+ #endif
37
+
38
+ /// Query whether the C schema is released
39
+ inline int ArrowSchemaIsReleased(const struct ArrowSchema* schema) {
40
+ return schema->release == NULL;
41
+ }
42
+
43
/// Mark the C schema released (for use in release callbacks)
///
/// This only clears the release callback; freeing any resources owned by the
/// schema remains the responsibility of the caller (i.e. the release callback
/// itself).
inline void ArrowSchemaMarkReleased(struct ArrowSchema* schema) {
  schema->release = NULL;
}
47
+
48
/// Move the C schema from `src` to `dest`
///
/// Note `dest` must *not* point to a valid schema already, otherwise there
/// will be a memory leak (its release callback would never be invoked).
///
/// After the move, `src` is marked released, so releasing `src` becomes a
/// harmless no-op; ownership of the schema contents transfers to `dest`.
inline void ArrowSchemaMove(struct ArrowSchema* src, struct ArrowSchema* dest) {
  assert(dest != src);
  assert(!ArrowSchemaIsReleased(src));
  // Bitwise copy transfers the release callback and private_data pointers.
  memcpy(dest, src, sizeof(struct ArrowSchema));
  ArrowSchemaMarkReleased(src);
}
58
+
59
/// Release the C schema, if necessary, by calling its release callback
///
/// Safe to call on an already-released schema (then a no-op). Aborts if the
/// release callback does not mark the schema released, since leaving a stale
/// callback behind would lead to a double release later.
inline void ArrowSchemaRelease(struct ArrowSchema* schema) {
  if (!ArrowSchemaIsReleased(schema)) {
    schema->release(schema);
    ARROW_C_ASSERT(ArrowSchemaIsReleased(schema),
                   "ArrowSchemaRelease did not cleanup release callback");
  }
}
67
+
68
+ /// Query whether the C array is released
69
+ inline int ArrowArrayIsReleased(const struct ArrowArray* array) {
70
+ return array->release == NULL;
71
+ }
72
+
73
/// Mark the C array released (for use in release callbacks)
///
/// Only clears the release callback; freeing the array's buffers/children is
/// the release callback's own responsibility.
inline void ArrowArrayMarkReleased(struct ArrowArray* array) { array->release = NULL; }
75
+
76
/// Move the C array from `src` to `dest`
///
/// Note `dest` must *not* point to a valid array already, otherwise there
/// will be a memory leak (its release callback would never be invoked).
///
/// After the move, `src` is marked released, so releasing `src` becomes a
/// harmless no-op; ownership of the array contents transfers to `dest`.
inline void ArrowArrayMove(struct ArrowArray* src, struct ArrowArray* dest) {
  assert(dest != src);
  assert(!ArrowArrayIsReleased(src));
  // Bitwise copy transfers buffers, children, release callback and private_data.
  memcpy(dest, src, sizeof(struct ArrowArray));
  ArrowArrayMarkReleased(src);
}
86
+
87
/// Release the C array, if necessary, by calling its release callback
///
/// Safe to call on an already-released array (then a no-op). Aborts if the
/// release callback does not mark the array released, since leaving a stale
/// callback behind would lead to a double release later.
inline void ArrowArrayRelease(struct ArrowArray* array) {
  if (!ArrowArrayIsReleased(array)) {
    array->release(array);
    ARROW_C_ASSERT(ArrowArrayIsReleased(array),
                   "ArrowArrayRelease did not cleanup release callback");
  }
}
95
+
96
+ /// Query whether the C array stream is released
97
+ inline int ArrowArrayStreamIsReleased(const struct ArrowArrayStream* stream) {
98
+ return stream->release == NULL;
99
+ }
100
+
101
/// Mark the C array stream released (for use in release callbacks)
///
/// Only clears the release callback; freeing stream resources is the release
/// callback's own responsibility.
inline void ArrowArrayStreamMarkReleased(struct ArrowArrayStream* stream) {
  stream->release = NULL;
}
105
+
106
/// Move the C array stream from `src` to `dest`
///
/// Note `dest` must *not* point to a valid stream already, otherwise there
/// will be a memory leak (its release callback would never be invoked).
///
/// After the move, `src` is marked released, so releasing `src` becomes a
/// harmless no-op; ownership of the stream transfers to `dest`.
inline void ArrowArrayStreamMove(struct ArrowArrayStream* src,
                                 struct ArrowArrayStream* dest) {
  assert(dest != src);
  assert(!ArrowArrayStreamIsReleased(src));
  // Bitwise copy transfers the callbacks and private_data pointer.
  memcpy(dest, src, sizeof(struct ArrowArrayStream));
  ArrowArrayStreamMarkReleased(src);
}
117
+
118
/// Release the C array stream, if necessary, by calling its release callback
///
/// Safe to call on an already-released stream (then a no-op). Aborts if the
/// release callback does not mark the stream released, since leaving a stale
/// callback behind would lead to a double release later.
inline void ArrowArrayStreamRelease(struct ArrowArrayStream* stream) {
  if (!ArrowArrayStreamIsReleased(stream)) {
    stream->release(stream);
    ARROW_C_ASSERT(ArrowArrayStreamIsReleased(stream),
                   "ArrowArrayStreamRelease did not cleanup release callback");
  }
}
126
+
127
+ #ifdef __cplusplus
128
+ }
129
+ #endif
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/api.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/flight/client.h"
21
+ #include "arrow/flight/client_auth.h"
22
+ #include "arrow/flight/client_middleware.h"
23
+ #include "arrow/flight/client_tracing_middleware.h"
24
+ #include "arrow/flight/middleware.h"
25
+ #include "arrow/flight/server.h"
26
+ #include "arrow/flight/server_auth.h"
27
+ #include "arrow/flight/server_middleware.h"
28
+ #include "arrow/flight/server_tracing_middleware.h"
29
+ #include "arrow/flight/types.h"
30
+ #include "arrow/flight/types_async.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client.h ADDED
@@ -0,0 +1,415 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ /// \brief Implementation of Flight RPC client. API should be
19
+ /// considered experimental for now
20
+
21
+ #pragma once
22
+
23
+ #include <chrono>
24
+ #include <memory>
25
+ #include <string>
26
+ #include <utility>
27
+ #include <variant>
28
+ #include <vector>
29
+
30
+ #include "arrow/ipc/options.h"
31
+ #include "arrow/ipc/reader.h"
32
+ #include "arrow/ipc/writer.h"
33
+ #include "arrow/result.h"
34
+ #include "arrow/status.h"
35
+ #include "arrow/util/cancel.h"
36
+
37
+ #include "arrow/flight/type_fwd.h"
38
+ #include "arrow/flight/types.h" // IWYU pragma: keep
39
+ #include "arrow/flight/visibility.h"
40
+
41
+ namespace arrow {
42
+
43
+ class RecordBatch;
44
+ class Schema;
45
+
46
+ namespace flight {
47
+
48
+ /// \brief A duration type for Flight call timeouts.
49
+ typedef std::chrono::duration<double, std::chrono::seconds::period> TimeoutDuration;
50
+
51
+ /// \brief Hints to the underlying RPC layer for Arrow Flight calls.
52
+ class ARROW_FLIGHT_EXPORT FlightCallOptions {
53
+ public:
54
+ /// Create a default set of call options.
55
+ FlightCallOptions();
56
+
57
+ /// \brief An optional timeout for this call. Negative durations
58
+ /// mean an implementation-defined default behavior will be used
59
+ /// instead. This is the default value.
60
+ TimeoutDuration timeout;
61
+
62
+ /// \brief IPC reader options, if applicable for the call.
63
+ ipc::IpcReadOptions read_options;
64
+
65
+ /// \brief IPC writer options, if applicable for the call.
66
+ ipc::IpcWriteOptions write_options;
67
+
68
+ /// \brief Headers for client to add to context.
69
+ std::vector<std::pair<std::string, std::string>> headers;
70
+
71
+ /// \brief A token to enable interactive user cancellation of long-running requests.
72
+ StopToken stop_token;
73
+
74
+ /// \brief An optional memory manager to control where to allocate incoming data.
75
+ std::shared_ptr<MemoryManager> memory_manager;
76
+ };
77
+
78
+ /// \brief Indicate that the client attempted to write a message
79
+ /// larger than the soft limit set via write_size_limit_bytes.
80
+ class ARROW_FLIGHT_EXPORT FlightWriteSizeStatusDetail : public arrow::StatusDetail {
81
+ public:
82
+ explicit FlightWriteSizeStatusDetail(int64_t limit, int64_t actual)
83
+ : limit_(limit), actual_(actual) {}
84
+ const char* type_id() const override;
85
+ std::string ToString() const override;
86
+ int64_t limit() const { return limit_; }
87
+ int64_t actual() const { return actual_; }
88
+
89
+ /// \brief Extract this status detail from a status, or return
90
+ /// nullptr if the status doesn't contain this status detail.
91
+ static std::shared_ptr<FlightWriteSizeStatusDetail> UnwrapStatus(
92
+ const arrow::Status& status);
93
+
94
+ private:
95
+ int64_t limit_;
96
+ int64_t actual_;
97
+ };
98
+
99
+ struct ARROW_FLIGHT_EXPORT FlightClientOptions {
100
+ /// \brief Root certificates to use for validating server
101
+ /// certificates.
102
+ std::string tls_root_certs;
103
+ /// \brief Override the hostname checked by TLS. Use with caution.
104
+ std::string override_hostname;
105
+ /// \brief The client certificate to use if using Mutual TLS
106
+ std::string cert_chain;
107
+ /// \brief The private key associated with the client certificate for Mutual TLS
108
+ std::string private_key;
109
+ /// \brief A list of client middleware to apply.
110
+ std::vector<std::shared_ptr<ClientMiddlewareFactory>> middleware;
111
+ /// \brief A soft limit on the number of bytes to write in a single
112
+ /// batch when sending Arrow data to a server.
113
+ ///
114
+ /// Used to help limit server memory consumption. Only enabled if
115
+ /// positive. When enabled, FlightStreamWriter.Write* may yield a
116
+ /// IOError with error detail FlightWriteSizeStatusDetail.
117
+ int64_t write_size_limit_bytes = 0;
118
+
119
+ /// \brief Generic connection options, passed to the underlying
120
+ /// transport; interpretation is implementation-dependent.
121
+ std::vector<std::pair<std::string, std::variant<int, std::string>>> generic_options;
122
+
123
+ /// \brief Use TLS without validating the server certificate. Use with caution.
124
+ bool disable_server_verification = false;
125
+
126
+ /// \brief Get default options.
127
+ static FlightClientOptions Defaults();
128
+ };
129
+
130
+ /// \brief A RecordBatchReader exposing Flight metadata and cancel
131
+ /// operations.
132
+ class ARROW_FLIGHT_EXPORT FlightStreamReader : public MetadataRecordBatchReader {
133
+ public:
134
+ /// \brief Try to cancel the call.
135
+ virtual void Cancel() = 0;
136
+
137
+ using MetadataRecordBatchReader::ToRecordBatches;
138
+ /// \brief Consume entire stream as a vector of record batches
139
+ virtual arrow::Result<std::vector<std::shared_ptr<RecordBatch>>> ToRecordBatches(
140
+ const StopToken& stop_token) = 0;
141
+
142
+ using MetadataRecordBatchReader::ToTable;
143
+ /// \brief Consume entire stream as a Table
144
+ arrow::Result<std::shared_ptr<Table>> ToTable(const StopToken& stop_token);
145
+ };
146
+
147
+ // Silence warning
148
+ // "non dll-interface class RecordBatchReader used as base for dll-interface class"
149
+ #ifdef _MSC_VER
150
+ #pragma warning(push)
151
+ #pragma warning(disable : 4275)
152
+ #endif
153
+
154
+ /// \brief A RecordBatchWriter that also allows sending
155
+ /// application-defined metadata via the Flight protocol.
156
+ class ARROW_FLIGHT_EXPORT FlightStreamWriter : public MetadataRecordBatchWriter {
157
+ public:
158
+ /// \brief Indicate that the application is done writing to this stream.
159
+ ///
160
+ /// The application may not write to this stream after calling
161
+ /// this. This differs from closing the stream because this writer
162
+ /// may represent only one half of a readable and writable stream.
163
+ virtual Status DoneWriting() = 0;
164
+ };
165
+
166
+ #ifdef _MSC_VER
167
+ #pragma warning(pop)
168
+ #endif
169
+
170
+ /// \brief A reader for application-specific metadata sent back to the
171
+ /// client during an upload.
172
+ class ARROW_FLIGHT_EXPORT FlightMetadataReader {
173
+ public:
174
+ virtual ~FlightMetadataReader();
175
+ /// \brief Read a message from the server.
176
+ virtual Status ReadMetadata(std::shared_ptr<Buffer>* out) = 0;
177
+ };
178
+
179
+ /// \brief Client class for Arrow Flight RPC services.
180
+ /// API experimental for now
181
+ class ARROW_FLIGHT_EXPORT FlightClient {
182
+ public:
183
+ ~FlightClient();
184
+
185
+ /// \brief Connect to an unauthenticated flight service
186
+ /// \param[in] location the URI
187
+ /// \return Arrow result with the created FlightClient, OK status may not indicate that
188
+ /// the connection was successful
189
+ static arrow::Result<std::unique_ptr<FlightClient>> Connect(const Location& location);
190
+
191
+ /// \brief Connect to an unauthenticated flight service
192
+ /// \param[in] location the URI
193
+ /// \param[in] options Other options for setting up the client
194
+ /// \return Arrow result with the created FlightClient, OK status may not indicate that
195
+ /// the connection was successful
196
+ static arrow::Result<std::unique_ptr<FlightClient>> Connect(
197
+ const Location& location, const FlightClientOptions& options);
198
+
199
+ /// \brief Authenticate to the server using the given handler.
200
+ /// \param[in] options Per-RPC options
201
+ /// \param[in] auth_handler The authentication mechanism to use
202
+ /// \return Status OK if the client authenticated successfully
203
+ Status Authenticate(const FlightCallOptions& options,
204
+ std::unique_ptr<ClientAuthHandler> auth_handler);
205
+
206
+ /// \brief Authenticate to the server using basic HTTP style authentication.
207
+ /// \param[in] options Per-RPC options
208
+ /// \param[in] username Username to use
209
+ /// \param[in] password Password to use
210
+ /// \return Arrow result with bearer token and status OK if client authenticated
211
+ /// successfully
212
+ arrow::Result<std::pair<std::string, std::string>> AuthenticateBasicToken(
213
+ const FlightCallOptions& options, const std::string& username,
214
+ const std::string& password);
215
+
216
+ /// \brief Perform the indicated action, returning an iterator to the stream
217
+ /// of results, if any
218
+ /// \param[in] options Per-RPC options
219
+ /// \param[in] action the action to be performed
220
+ /// \return Arrow result with an iterator object for reading the returned results
221
+ arrow::Result<std::unique_ptr<ResultStream>> DoAction(const FlightCallOptions& options,
222
+ const Action& action);
223
+ arrow::Result<std::unique_ptr<ResultStream>> DoAction(const Action& action) {
224
+ return DoAction({}, action);
225
+ }
226
+
227
+ /// \brief Perform the CancelFlightInfo action, returning a
228
+ /// CancelFlightInfoResult
229
+ ///
230
+ /// \param[in] options Per-RPC options
231
+ /// \param[in] request The CancelFlightInfoRequest
232
+ /// \return Arrow result with a CancelFlightInfoResult
233
+ arrow::Result<CancelFlightInfoResult> CancelFlightInfo(
234
+ const FlightCallOptions& options, const CancelFlightInfoRequest& request);
235
+ arrow::Result<CancelFlightInfoResult> CancelFlightInfo(
236
+ const CancelFlightInfoRequest& request) {
237
+ return CancelFlightInfo({}, request);
238
+ }
239
+
240
+ /// \brief Perform the RenewFlightEndpoint action, returning a renewed
241
+ /// FlightEndpoint
242
+ ///
243
+ /// \param[in] options Per-RPC options
244
+ /// \param[in] request The RenewFlightEndpointRequest
245
+ /// \return Arrow result with a renewed FlightEndpoint
246
+ arrow::Result<FlightEndpoint> RenewFlightEndpoint(
247
+ const FlightCallOptions& options, const RenewFlightEndpointRequest& request);
248
+ arrow::Result<FlightEndpoint> RenewFlightEndpoint(
249
+ const RenewFlightEndpointRequest& request) {
250
+ return RenewFlightEndpoint({}, request);
251
+ }
252
+
253
+ /// \brief Retrieve a list of available Action types
254
+ /// \param[in] options Per-RPC options
255
+ /// \return Arrow result with the available actions
256
+ arrow::Result<std::vector<ActionType>> ListActions(const FlightCallOptions& options);
257
+ arrow::Result<std::vector<ActionType>> ListActions() {
258
+ return ListActions(FlightCallOptions());
259
+ }
260
+
261
+ /// \brief Request access plan for a single flight, which may be an existing
262
+ /// dataset or a command to be executed
263
+ /// \param[in] options Per-RPC options
264
+ /// \param[in] descriptor the dataset request, whether a named dataset or
265
+ /// command
266
+ /// \return Arrow result with the FlightInfo describing where to access the dataset
267
+ arrow::Result<std::unique_ptr<FlightInfo>> GetFlightInfo(
268
+ const FlightCallOptions& options, const FlightDescriptor& descriptor);
269
+ arrow::Result<std::unique_ptr<FlightInfo>> GetFlightInfo(
270
+ const FlightDescriptor& descriptor) {
271
+ return GetFlightInfo({}, descriptor);
272
+ }
273
+
274
+ /// \brief Asynchronous GetFlightInfo.
275
+ /// \param[in] options Per-RPC options
276
+ /// \param[in] descriptor the dataset request
277
+ /// \param[in] listener Callbacks for response and RPC completion
278
+ ///
279
+ /// This API is EXPERIMENTAL.
280
+ void GetFlightInfoAsync(const FlightCallOptions& options,
281
+ const FlightDescriptor& descriptor,
282
+ std::shared_ptr<AsyncListener<FlightInfo>> listener);
283
+ void GetFlightInfoAsync(const FlightDescriptor& descriptor,
284
+ std::shared_ptr<AsyncListener<FlightInfo>> listener) {
285
+ return GetFlightInfoAsync({}, descriptor, std::move(listener));
286
+ }
287
+
288
+ /// \brief Asynchronous GetFlightInfo returning a Future.
289
+ /// \param[in] options Per-RPC options
290
+ /// \param[in] descriptor the dataset request
291
+ ///
292
+ /// This API is EXPERIMENTAL.
293
+ arrow::Future<FlightInfo> GetFlightInfoAsync(const FlightCallOptions& options,
294
+ const FlightDescriptor& descriptor);
295
+ arrow::Future<FlightInfo> GetFlightInfoAsync(const FlightDescriptor& descriptor) {
296
+ return GetFlightInfoAsync({}, descriptor);
297
+ }
298
+
299
+ /// \brief Request and poll a long running query
300
+ /// \param[in] options Per-RPC options
301
+ /// \param[in] descriptor the dataset request or a descriptor returned by a
302
+ /// prior PollFlightInfo call
303
+ /// \return Arrow result with the PollInfo describing the status of
304
+ /// the requested query
305
+ arrow::Result<std::unique_ptr<PollInfo>> PollFlightInfo(
306
+ const FlightCallOptions& options, const FlightDescriptor& descriptor);
307
+ arrow::Result<std::unique_ptr<PollInfo>> PollFlightInfo(
308
+ const FlightDescriptor& descriptor) {
309
+ return PollFlightInfo({}, descriptor);
310
+ }
311
+
312
+ /// \brief Request schema for a single flight, which may be an existing
313
+ /// dataset or a command to be executed
314
+ /// \param[in] options Per-RPC options
315
+ /// \param[in] descriptor the dataset request, whether a named dataset or
316
+ /// command
317
+ /// \return Arrow result with the SchemaResult describing the dataset schema
318
+ arrow::Result<std::unique_ptr<SchemaResult>> GetSchema(
319
+ const FlightCallOptions& options, const FlightDescriptor& descriptor);
320
+
321
+ arrow::Result<std::unique_ptr<SchemaResult>> GetSchema(
322
+ const FlightDescriptor& descriptor) {
323
+ return GetSchema({}, descriptor);
324
+ }
325
+
326
+ /// \brief List all available flights known to the server
327
+ /// \return Arrow result with an iterator that returns a FlightInfo for each flight
328
+ arrow::Result<std::unique_ptr<FlightListing>> ListFlights();
329
+
330
+ /// \brief List available flights given indicated filter criteria
331
+ /// \param[in] options Per-RPC options
332
+ /// \param[in] criteria the filter criteria (opaque)
333
+ /// \return Arrow result with an iterator that returns a FlightInfo for each flight
334
+ arrow::Result<std::unique_ptr<FlightListing>> ListFlights(
335
+ const FlightCallOptions& options, const Criteria& criteria);
336
+
337
+ /// \brief Given a flight ticket and schema, request to be sent the
338
+ /// stream. Returns record batch stream reader
339
+ /// \param[in] options Per-RPC options
340
+ /// \param[in] ticket The flight ticket to use
341
+ /// \return Arrow result with the returned RecordBatchReader
342
+ arrow::Result<std::unique_ptr<FlightStreamReader>> DoGet(
343
+ const FlightCallOptions& options, const Ticket& ticket);
344
+ arrow::Result<std::unique_ptr<FlightStreamReader>> DoGet(const Ticket& ticket) {
345
+ return DoGet({}, ticket);
346
+ }
347
+
348
+ /// \brief DoPut return value
349
+ struct DoPutResult {
350
+ /// \brief a writer to write record batches to
351
+ std::unique_ptr<FlightStreamWriter> writer;
352
+ /// \brief a reader for application metadata from the server
353
+ std::unique_ptr<FlightMetadataReader> reader;
354
+ };
355
+ /// \brief Upload data to a Flight described by the given
356
+ /// descriptor. The caller must call Close() on the returned stream
357
+ /// once they are done writing.
358
+ ///
359
+ /// The reader and writer are linked; closing the writer will also
360
+ /// close the reader. Use \a DoneWriting to only close the write
361
+ /// side of the channel.
362
+ ///
363
+ /// \param[in] options Per-RPC options
364
+ /// \param[in] descriptor the descriptor of the stream
365
+ /// \param[in] schema the schema for the data to upload
366
+ /// \return Arrow result with a DoPutResult struct holding a reader and a writer
367
+ arrow::Result<DoPutResult> DoPut(const FlightCallOptions& options,
368
+ const FlightDescriptor& descriptor,
369
+ const std::shared_ptr<Schema>& schema);
370
+
371
+ arrow::Result<DoPutResult> DoPut(const FlightDescriptor& descriptor,
372
+ const std::shared_ptr<Schema>& schema) {
373
+ return DoPut({}, descriptor, schema);
374
+ }
375
+
376
+ struct DoExchangeResult {
377
+ std::unique_ptr<FlightStreamWriter> writer;
378
+ std::unique_ptr<FlightStreamReader> reader;
379
+ };
380
+ arrow::Result<DoExchangeResult> DoExchange(const FlightCallOptions& options,
381
+ const FlightDescriptor& descriptor);
382
+ arrow::Result<DoExchangeResult> DoExchange(const FlightDescriptor& descriptor) {
383
+ return DoExchange({}, descriptor);
384
+ }
385
+
386
+ /// \brief Explicitly shut down and clean up the client.
387
+ ///
388
+ /// For backwards compatibility, this will be implicitly called by
389
+ /// the destructor if not already called, but this gives the
390
+ /// application no chance to handle errors, so it is recommended to
391
+ /// explicitly close the client.
392
+ ///
393
+ /// \since 8.0.0
394
+ Status Close();
395
+
396
+ /// \brief Whether this client supports asynchronous methods.
397
+ bool supports_async() const;
398
+
399
+ /// \brief Check whether this client supports asynchronous methods.
400
+ ///
401
+ /// This is like supports_async(), except that a detailed error message
402
+ /// is returned if async support is not available. If async support is
403
+ /// available, this function returns successfully.
404
+ Status CheckAsyncSupport() const;
405
+
406
+ private:
407
+ FlightClient();
408
+ Status CheckOpen() const;
409
+ std::unique_ptr<internal::ClientTransport> transport_;
410
+ bool closed_;
411
+ int64_t write_size_limit_bytes_;
412
+ };
413
+
414
+ } // namespace flight
415
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_auth.h ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+
22
+ #include "arrow/flight/visibility.h"
23
+ #include "arrow/status.h"
24
+
25
+ namespace arrow {
26
+
27
+ namespace flight {
28
+
29
/// \brief A reader for messages from the server during an
/// authentication handshake.
class ARROW_FLIGHT_EXPORT ClientAuthReader {
 public:
  virtual ~ClientAuthReader() = default;
  /// \brief Read the next handshake message from the server.
  /// \param[out] response the message payload received
  virtual Status Read(std::string* response) = 0;
};
36
+
37
/// \brief A writer for messages to the server during an
/// authentication handshake.
class ARROW_FLIGHT_EXPORT ClientAuthSender {
 public:
  virtual ~ClientAuthSender() = default;
  /// \brief Send a handshake message to the server.
  /// \param[in] token the message payload to send
  virtual Status Write(const std::string& token) = 0;
};
44
+
45
/// \brief An authentication implementation for a Flight service.
/// Authentication includes both an initial negotiation and a per-call
/// token validation. Implementations may choose to use either or both
/// mechanisms.
class ARROW_FLIGHT_EXPORT ClientAuthHandler {
 public:
  virtual ~ClientAuthHandler() = default;
  /// \brief Authenticate the client on initial connection. The client
  /// can send messages to/read responses from the server at any time.
  /// \param[in] outgoing writer for handshake messages to the server
  /// \param[in] incoming reader for handshake messages from the server
  /// \return Status OK if authenticated successfully
  virtual Status Authenticate(ClientAuthSender* outgoing, ClientAuthReader* incoming) = 0;
  /// \brief Get a per-call token.
  /// \param[out] token The token to send to the server.
  virtual Status GetToken(std::string* token) = 0;
};
60
+
61
+ } // namespace flight
62
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_cookie_middleware.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Middleware implementation for sending and receiving HTTP cookies.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+
24
+ #include "arrow/flight/client_middleware.h"
25
+
26
+ namespace arrow {
27
+ namespace flight {
28
+
29
+ /// \brief Returns a ClientMiddlewareFactory that handles sending and receiving cookies.
30
+ ARROW_FLIGHT_EXPORT std::shared_ptr<ClientMiddlewareFactory> GetCookieFactory();
31
+
32
+ } // namespace flight
33
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_middleware.h ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Interfaces for defining middleware for Flight clients. Currently
19
+ // experimental.
20
+
21
+ #pragma once
22
+
23
+ #include <memory>
24
+
25
+ #include "arrow/flight/middleware.h"
26
+ #include "arrow/flight/visibility.h" // IWYU pragma: keep
27
+ #include "arrow/status.h"
28
+
29
+ namespace arrow {
30
+ namespace flight {
31
+
32
+ /// \brief Client-side middleware for a call, instantiated per RPC.
33
+ ///
34
+ /// Middleware should be fast and must be infallible: there is no way
35
+ /// to reject the call or report errors from the middleware instance.
36
+ class ARROW_FLIGHT_EXPORT ClientMiddleware {
37
+ public:
38
+ virtual ~ClientMiddleware() = default;
39
+
40
+ /// \brief A callback before headers are sent. Extra headers can be
41
+ /// added, but existing ones cannot be read.
42
+ virtual void SendingHeaders(AddCallHeaders* outgoing_headers) = 0;
43
+
44
+ /// \brief A callback when headers are received from the server.
45
+ ///
46
+ /// This may be called more than once, since servers send both
47
+ /// headers and trailers. Some implementations (e.g. gRPC-Java, and
48
+ /// hence Arrow Flight in Java) may consolidate headers into
49
+ /// trailers if the RPC errored.
50
+ virtual void ReceivedHeaders(const CallHeaders& incoming_headers) = 0;
51
+
52
+ /// \brief A callback after the call has completed.
53
+ virtual void CallCompleted(const Status& status) = 0;
54
+ };
55
+
56
+ /// \brief A factory for new middleware instances.
57
+ ///
58
+ /// If added to a client, this will be called for each RPC (including
59
+ /// Handshake) to give the opportunity to intercept the call.
60
+ ///
61
+ /// It is guaranteed that all client middleware methods are called
62
+ /// from the same thread that calls the RPC method implementation.
63
+ class ARROW_FLIGHT_EXPORT ClientMiddlewareFactory {
64
+ public:
65
+ virtual ~ClientMiddlewareFactory() = default;
66
+
67
+ /// \brief A callback for the start of a new call.
68
+ ///
69
+ /// \param info Information about the call.
70
+ /// \param[out] middleware The middleware instance for this call. If
71
+ /// unset, will not add middleware to this call instance from
72
+ /// this factory.
73
+ virtual void StartCall(const CallInfo& info,
74
+ std::unique_ptr<ClientMiddleware>* middleware) = 0;
75
+ };
76
+
77
+ } // namespace flight
78
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_tracing_middleware.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Middleware implementation for propagating OpenTelemetry spans.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+
24
+ #include "arrow/flight/client_middleware.h"
25
+
26
+ namespace arrow {
27
+ namespace flight {
28
+
29
+ /// \brief Returns a ClientMiddlewareFactory that handles sending OpenTelemetry spans.
30
+ ARROW_FLIGHT_EXPORT std::shared_ptr<ClientMiddlewareFactory>
31
+ MakeTracingClientMiddlewareFactory();
32
+
33
+ } // namespace flight
34
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/middleware.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Interfaces for defining middleware for Flight clients and
19
+ // servers. Currently experimental.
20
+
21
+ #pragma once
22
+
23
+ #include <memory>
24
+ #include <string>
25
+ #include <string_view>
26
+ #include <utility>
27
+
28
+ #include "arrow/flight/types.h"
29
+ #include "arrow/status.h"
30
+
31
+ namespace arrow {
32
+ namespace flight {
33
+
34
+ /// \brief A write-only wrapper around headers for an RPC call.
35
+ class ARROW_FLIGHT_EXPORT AddCallHeaders {
36
+ public:
37
+ virtual ~AddCallHeaders() = default;
38
+
39
+ /// \brief Add a header to be sent to the client.
40
+ ///
41
+ /// \param[in] key The header name. Must be lowercase ASCII; some
42
+ /// transports may reject invalid header names.
43
+ /// \param[in] value The header value. Some transports may only
44
+ /// accept binary header values if the header name ends in "-bin".
45
+ virtual void AddHeader(const std::string& key, const std::string& value) = 0;
46
+ };
47
+
48
+ /// \brief An enumeration of the RPC methods Flight implements.
49
+ enum class FlightMethod : char {
50
+ Invalid = 0,
51
+ Handshake = 1,
52
+ ListFlights = 2,
53
+ GetFlightInfo = 3,
54
+ GetSchema = 4,
55
+ DoGet = 5,
56
+ DoPut = 6,
57
+ DoAction = 7,
58
+ ListActions = 8,
59
+ DoExchange = 9,
60
+ PollFlightInfo = 10,
61
+ };
62
+
63
+ /// \brief Get a human-readable name for a Flight method.
64
+ ARROW_FLIGHT_EXPORT
65
+ std::string ToString(FlightMethod method);
66
+
67
+ /// \brief Information about an instance of a Flight RPC.
68
+ struct ARROW_FLIGHT_EXPORT CallInfo {
69
+ public:
70
+ /// \brief The RPC method of this call.
71
+ FlightMethod method;
72
+ };
73
+
74
+ } // namespace flight
75
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/pch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Often-used headers, for precompiling.
19
+ // If updating this header, please make sure you check compilation speed
20
+ // before checking in. Adding headers which are not used extremely often
21
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
22
+
23
+ #include "arrow/flight/client.h"
24
+ #include "arrow/flight/server.h"
25
+ #include "arrow/flight/types.h"
26
+ #include "arrow/pch.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/platform.h ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Internal header. Platform-specific definitions for Flight.
19
+
20
+ #pragma once
21
+
22
+ #ifdef _MSC_VER
23
+
24
+ // The protobuf documentation says that C4251 warnings when using the
25
+ // library are spurious and suppressed when the build the library and
26
+ // compiler, but must be also suppressed in downstream projects
27
+ #pragma warning(disable : 4251)
28
+
29
+ #endif // _MSC_VER
30
+
31
+ #include "arrow/util/config.h" // IWYU pragma: keep
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_auth.h ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ /// \brief Server-side APIs to implement authentication for Flight.
19
+
20
+ #pragma once
21
+
22
+ #include <string>
23
+
24
+ #include "arrow/flight/type_fwd.h"
25
+ #include "arrow/flight/visibility.h"
26
+ #include "arrow/status.h"
27
+
28
+ namespace arrow {
29
+
30
+ namespace flight {
31
+
32
+ /// \brief A reader for messages from the client during an
33
+ /// authentication handshake.
34
+ class ARROW_FLIGHT_EXPORT ServerAuthReader {
35
+ public:
36
+ virtual ~ServerAuthReader() = default;
37
+ virtual Status Read(std::string* token) = 0;
38
+ };
39
+
40
+ /// \brief A writer for messages to the client during an
41
+ /// authentication handshake.
42
+ class ARROW_FLIGHT_EXPORT ServerAuthSender {
43
+ public:
44
+ virtual ~ServerAuthSender() = default;
45
+ virtual Status Write(const std::string& message) = 0;
46
+ };
47
+
48
+ /// \brief An authentication implementation for a Flight service.
49
+ /// Authentication includes both an initial negotiation and a per-call
50
+ /// token validation. Implementations may choose to use either or both
51
+ /// mechanisms.
52
+ /// An implementation may need to track some state, e.g. a mapping of
53
+ /// client tokens to authenticated identities.
54
+ class ARROW_FLIGHT_EXPORT ServerAuthHandler {
55
+ public:
56
+ virtual ~ServerAuthHandler();
57
+ /// \brief Authenticate the client on initial connection. The server
58
+ /// can send and read responses from the client at any time.
59
+ /// \param[in] context The call context.
60
+ /// \param[in] outgoing The writer for messages to the client.
61
+ /// \param[in] incoming The reader for messages from the client.
62
+ /// \return Status OK if this authentication is succeeded.
63
+ virtual Status Authenticate(const ServerCallContext& context,
64
+ ServerAuthSender* outgoing, ServerAuthReader* incoming) {
65
+ // TODO: We can make this pure virtual function when we remove
66
+ // the deprecated version.
67
+ ARROW_SUPPRESS_DEPRECATION_WARNING
68
+ return Authenticate(outgoing, incoming);
69
+ ARROW_UNSUPPRESS_DEPRECATION_WARNING
70
+ }
71
+ /// \brief Authenticate the client on initial connection. The server
72
+ /// can send and read responses from the client at any time.
73
+ /// \param[in] outgoing The writer for messages to the client.
74
+ /// \param[in] incoming The reader for messages from the client.
75
+ /// \return Status OK if this authentication is succeeded.
76
+ /// \deprecated Deprecated in 13.0.0. Implement the Authentication()
77
+ /// with ServerCallContext version instead.
78
+ ARROW_DEPRECATED("Deprecated in 13.0.0. Use ServerCallContext overload instead.")
79
+ virtual Status Authenticate(ServerAuthSender* outgoing, ServerAuthReader* incoming) {
80
+ return Status::NotImplemented(typeid(this).name(),
81
+ "::Authenticate() isn't implemented");
82
+ }
83
+ /// \brief Validate a per-call client token.
84
+ /// \param[in] context The call context.
85
+ /// \param[in] token The client token. May be the empty string if
86
+ /// the client does not provide a token.
87
+ /// \param[out] peer_identity The identity of the peer, if this
88
+ /// authentication method supports it.
89
+ /// \return Status OK if the token is valid, any other status if
90
+ /// validation failed
91
+ virtual Status IsValid(const ServerCallContext& context, const std::string& token,
92
+ std::string* peer_identity) {
93
+ // TODO: We can make this pure virtual function when we remove
94
+ // the deprecated version.
95
+ ARROW_SUPPRESS_DEPRECATION_WARNING
96
+ return IsValid(token, peer_identity);
97
+ ARROW_UNSUPPRESS_DEPRECATION_WARNING
98
+ }
99
+ /// \brief Validate a per-call client token.
100
+ /// \param[in] token The client token. May be the empty string if
101
+ /// the client does not provide a token.
102
+ /// \param[out] peer_identity The identity of the peer, if this
103
+ /// authentication method supports it.
104
+ /// \return Status OK if the token is valid, any other status if
105
+ /// validation failed
106
+ /// \deprecated Deprecated in 13.0.0. Implement the IsValid()
107
+ /// with ServerCallContext version instead.
108
+ ARROW_DEPRECATED("Deprecated in 13.0.0. Use ServerCallContext overload instead.")
109
+ virtual Status IsValid(const std::string& token, std::string* peer_identity) {
110
+ return Status::NotImplemented(typeid(this).name(), "::IsValid() isn't implemented");
111
+ }
112
+ };
113
+
114
+ /// \brief An authentication mechanism that does nothing.
115
+ class ARROW_FLIGHT_EXPORT NoOpAuthHandler : public ServerAuthHandler {
116
+ public:
117
+ ~NoOpAuthHandler() override;
118
+ Status Authenticate(const ServerCallContext& context, ServerAuthSender* outgoing,
119
+ ServerAuthReader* incoming) override;
120
+ Status IsValid(const ServerCallContext& context, const std::string& token,
121
+ std::string* peer_identity) override;
122
+ };
123
+
124
+ } // namespace flight
125
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_middleware.h ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Interfaces for defining middleware for Flight servers. Currently
19
+ // experimental.
20
+
21
+ #pragma once
22
+
23
+ #include <memory>
24
+ #include <string>
25
+
26
+ #include "arrow/flight/middleware.h"
27
+ #include "arrow/flight/type_fwd.h"
28
+ #include "arrow/flight/visibility.h" // IWYU pragma: keep
29
+ #include "arrow/status.h"
30
+
31
+ namespace arrow {
32
+ namespace flight {
33
+
34
+ /// \brief Server-side middleware for a call, instantiated per RPC.
35
+ ///
36
+ /// Middleware should be fast and must be infallible: there is no way
37
+ /// to reject the call or report errors from the middleware instance.
38
+ class ARROW_FLIGHT_EXPORT ServerMiddleware {
39
+ public:
40
+ virtual ~ServerMiddleware() = default;
41
+
42
+ /// \brief Unique name of middleware, used as alternative to RTTI
43
+ /// \return the string name of the middleware
44
+ virtual std::string name() const = 0;
45
+
46
+ /// \brief A callback before headers are sent. Extra headers can be
47
+ /// added, but existing ones cannot be read.
48
+ virtual void SendingHeaders(AddCallHeaders* outgoing_headers) = 0;
49
+
50
+ /// \brief A callback after the call has completed.
51
+ virtual void CallCompleted(const Status& status) = 0;
52
+ };
53
+
54
+ /// \brief A factory for new middleware instances.
55
+ ///
56
+ /// If added to a server, this will be called for each RPC (including
57
+ /// Handshake) to give the opportunity to intercept the call.
58
+ ///
59
+ /// It is guaranteed that all server middleware methods are called
60
+ /// from the same thread that calls the RPC method implementation.
61
+ class ARROW_FLIGHT_EXPORT ServerMiddlewareFactory {
62
+ public:
63
+ virtual ~ServerMiddlewareFactory() = default;
64
+
65
+ /// \brief A callback for the start of a new call.
66
+ ///
67
+ /// Return a non-OK status to reject the call with the given status.
68
+ ///
69
+ /// \param[in] info Information about the call.
70
+ /// \param[in] context The call context.
71
+ /// \param[out] middleware The middleware instance for this call. If
72
+ /// null, no middleware will be added to this call instance from
73
+ /// this factory.
74
+ /// \return Status A non-OK status will reject the call with the
75
+ /// given status. Middleware previously in the chain will have
76
+ /// their CallCompleted callback called. Other middleware
77
+ /// factories will not be called.
78
+ virtual Status StartCall(const CallInfo& info, const ServerCallContext& context,
79
+ std::shared_ptr<ServerMiddleware>* middleware);
80
+
81
+ /// \brief A callback for the start of a new call.
82
+ ///
83
+ /// Return a non-OK status to reject the call with the given status.
84
+ ///
85
+ /// \param info Information about the call.
86
+ /// \param incoming_headers Headers sent by the client for this call.
87
+ /// Do not retain a reference to this object.
88
+ /// \param[out] middleware The middleware instance for this call. If
89
+ /// null, no middleware will be added to this call instance from
90
+ /// this factory.
91
+ /// \return Status A non-OK status will reject the call with the
92
+ /// given status. Middleware previously in the chain will have
93
+ /// their CallCompleted callback called. Other middleware
94
+ /// factories will not be called.
95
+ /// \deprecated Deprecated in 13.0.0. Implement the StartCall()
96
+ /// with ServerCallContext version instead.
97
+ ARROW_DEPRECATED("Deprecated in 13.0.0. Use ServerCallContext overload instead.")
98
+ virtual Status StartCall(const CallInfo& info, const CallHeaders& incoming_headers,
99
+ std::shared_ptr<ServerMiddleware>* middleware) {
100
+ return Status::NotImplemented(typeid(this).name(), "::StartCall() isn't implemented");
101
+ }
102
+ };
103
+
104
+ } // namespace flight
105
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server_tracing_middleware.h ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Middleware implementation for propagating OpenTelemetry spans.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <string>
24
+ #include <vector>
25
+
26
+ #include "arrow/flight/server_middleware.h"
27
+ #include "arrow/flight/visibility.h"
28
+ #include "arrow/status.h"
29
+
30
+ namespace arrow {
31
+ namespace flight {
32
+
33
+ /// \brief Returns a ServerMiddlewareFactory that handles receiving OpenTelemetry spans.
34
+ ARROW_FLIGHT_EXPORT std::shared_ptr<ServerMiddlewareFactory>
35
+ MakeTracingServerMiddlewareFactory();
36
+
37
+ /// \brief A server middleware that provides access to the
38
+ /// OpenTelemetry context, if present.
39
+ ///
40
+ /// Used to make the OpenTelemetry span available in Python.
41
+ class ARROW_FLIGHT_EXPORT TracingServerMiddleware : public ServerMiddleware {
42
+ public:
43
+ ~TracingServerMiddleware();
44
+
45
+ static constexpr char const kMiddlewareName[] =
46
+ "arrow::flight::TracingServerMiddleware";
47
+
48
+ std::string name() const override { return kMiddlewareName; }
49
+ void SendingHeaders(AddCallHeaders*) override;
50
+ void CallCompleted(const Status&) override;
51
+
52
+ struct TraceKey {
53
+ std::string key;
54
+ std::string value;
55
+ };
56
+ /// \brief Get the trace context.
57
+ std::vector<TraceKey> GetTraceContext() const;
58
+
59
+ private:
60
+ class Impl;
61
+ friend class TracingServerMiddlewareFactory;
62
+
63
+ explicit TracingServerMiddleware(std::unique_ptr<Impl> impl);
64
+ std::unique_ptr<Impl> impl_;
65
+ };
66
+
67
+ } // namespace flight
68
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_definitions.h ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Common test definitions for Flight. Individual transport
19
+ // implementations can instantiate these tests.
20
+ //
21
+ // While Googletest's value-parameterized tests would be a more
22
+ // natural way to do this, they cause runtime issues on MinGW/MSVC
23
+ // (Googletest thinks the test suite has been defined twice).
24
+
25
+ #pragma once
26
+
27
+ #include <functional>
28
+ #include <memory>
29
+ #include <string>
30
+ #include <type_traits>
31
+ #include <vector>
32
+
33
+ #include "arrow/flight/server.h"
34
+ #include "arrow/flight/types.h"
35
+ #include "arrow/util/macros.h"
36
+
37
+ namespace arrow {
38
+ namespace flight {
39
+
40
+ class ARROW_FLIGHT_EXPORT FlightTest {
41
+ protected:
42
+ virtual std::string transport() const = 0;
43
+ virtual bool supports_async() const { return false; }
44
+ virtual void SetUpTest() {}
45
+ virtual void TearDownTest() {}
46
+ };
47
+
48
+ /// Common tests of startup/shutdown
49
+ class ARROW_FLIGHT_EXPORT ConnectivityTest : public FlightTest {
50
+ public:
51
+ // Test methods
52
+ void TestGetPort();
53
+ void TestBuilderHook();
54
+ void TestShutdown();
55
+ void TestShutdownWithDeadline();
56
+ void TestBrokenConnection();
57
+ };
58
+
59
+ #define ARROW_FLIGHT_TEST_CONNECTIVITY(FIXTURE) \
60
+ static_assert(std::is_base_of<ConnectivityTest, FIXTURE>::value, \
61
+ ARROW_STRINGIFY(FIXTURE) " must inherit from ConnectivityTest"); \
62
+ TEST_F(FIXTURE, GetPort) { TestGetPort(); } \
63
+ TEST_F(FIXTURE, BuilderHook) { TestBuilderHook(); } \
64
+ TEST_F(FIXTURE, Shutdown) { TestShutdown(); } \
65
+ TEST_F(FIXTURE, ShutdownWithDeadline) { TestShutdownWithDeadline(); } \
66
+ TEST_F(FIXTURE, BrokenConnection) { TestBrokenConnection(); }
67
+
68
+ /// Common tests of data plane methods
69
+ class ARROW_FLIGHT_EXPORT DataTest : public FlightTest {
70
+ public:
71
+ void SetUpTest() override;
72
+ void TearDownTest() override;
73
+ Status ConnectClient();
74
+
75
+ // Test methods
76
+ void TestDoGetInts();
77
+ void TestDoGetFloats();
78
+ void TestDoGetDicts();
79
+ void TestDoGetLargeBatch();
80
+ void TestFlightDataStreamError();
81
+ void TestOverflowServerBatch();
82
+ void TestOverflowClientBatch();
83
+ void TestDoExchange();
84
+ void TestDoExchangeNoData();
85
+ void TestDoExchangeWriteOnlySchema();
86
+ void TestDoExchangeGet();
87
+ void TestDoExchangePut();
88
+ void TestDoExchangeEcho();
89
+ void TestDoExchangeTotal();
90
+ void TestDoExchangeError();
91
+ void TestDoExchangeConcurrency();
92
+ void TestDoExchangeUndrained();
93
+ void TestIssue5095();
94
+
95
+ private:
96
+ void CheckDoGet(
97
+ const FlightDescriptor& descr, const RecordBatchVector& expected_batches,
98
+ std::function<void(const std::vector<FlightEndpoint>&)> check_endpoints);
99
+ void CheckDoGet(const Ticket& ticket, const RecordBatchVector& expected_batches);
100
+
101
+ std::unique_ptr<FlightClient> client_;
102
+ std::unique_ptr<FlightServerBase> server_;
103
+ };
104
+
105
+ #define ARROW_FLIGHT_TEST_DATA(FIXTURE) \
106
+ static_assert(std::is_base_of<DataTest, FIXTURE>::value, \
107
+ ARROW_STRINGIFY(FIXTURE) " must inherit from DataTest"); \
108
+ TEST_F(FIXTURE, TestDoGetInts) { TestDoGetInts(); } \
109
+ TEST_F(FIXTURE, TestDoGetFloats) { TestDoGetFloats(); } \
110
+ TEST_F(FIXTURE, TestDoGetDicts) { TestDoGetDicts(); } \
111
+ TEST_F(FIXTURE, TestDoGetLargeBatch) { TestDoGetLargeBatch(); } \
112
+ TEST_F(FIXTURE, TestFlightDataStreamError) { TestFlightDataStreamError(); } \
113
+ TEST_F(FIXTURE, TestOverflowServerBatch) { TestOverflowServerBatch(); } \
114
+ TEST_F(FIXTURE, TestOverflowClientBatch) { TestOverflowClientBatch(); } \
115
+ TEST_F(FIXTURE, TestDoExchange) { TestDoExchange(); } \
116
+ TEST_F(FIXTURE, TestDoExchangeNoData) { TestDoExchangeNoData(); } \
117
+ TEST_F(FIXTURE, TestDoExchangeWriteOnlySchema) { TestDoExchangeWriteOnlySchema(); } \
118
+ TEST_F(FIXTURE, TestDoExchangeGet) { TestDoExchangeGet(); } \
119
+ TEST_F(FIXTURE, TestDoExchangePut) { TestDoExchangePut(); } \
120
+ TEST_F(FIXTURE, TestDoExchangeEcho) { TestDoExchangeEcho(); } \
121
+ TEST_F(FIXTURE, TestDoExchangeTotal) { TestDoExchangeTotal(); } \
122
+ TEST_F(FIXTURE, TestDoExchangeError) { TestDoExchangeError(); } \
123
+ TEST_F(FIXTURE, TestDoExchangeConcurrency) { TestDoExchangeConcurrency(); } \
124
+ TEST_F(FIXTURE, TestDoExchangeUndrained) { TestDoExchangeUndrained(); } \
125
+ TEST_F(FIXTURE, TestIssue5095) { TestIssue5095(); }
126
+
127
+ /// \brief Specific tests of DoPut.
128
+ class ARROW_FLIGHT_EXPORT DoPutTest : public FlightTest {
129
+ public:
130
+ void SetUpTest() override;
131
+ void TearDownTest() override;
132
+ void CheckBatches(const FlightDescriptor& expected_descriptor,
133
+ const RecordBatchVector& expected_batches);
134
+ void CheckDoPut(const FlightDescriptor& descr, const std::shared_ptr<Schema>& schema,
135
+ const RecordBatchVector& batches);
136
+
137
+ // Test methods
138
+ void TestInts();
139
+ void TestFloats();
140
+ void TestEmptyBatch();
141
+ void TestDicts();
142
+ void TestLargeBatch();
143
+ void TestSizeLimit();
144
+ void TestUndrained();
145
+
146
+ private:
147
+ std::unique_ptr<FlightClient> client_;
148
+ std::unique_ptr<FlightServerBase> server_;
149
+ };
150
+
151
+ #define ARROW_FLIGHT_TEST_DO_PUT(FIXTURE) \
152
+ static_assert(std::is_base_of<DoPutTest, FIXTURE>::value, \
153
+ ARROW_STRINGIFY(FIXTURE) " must inherit from DoPutTest"); \
154
+ TEST_F(FIXTURE, TestInts) { TestInts(); } \
155
+ TEST_F(FIXTURE, TestFloats) { TestFloats(); } \
156
+ TEST_F(FIXTURE, TestEmptyBatch) { TestEmptyBatch(); } \
157
+ TEST_F(FIXTURE, TestDicts) { TestDicts(); } \
158
+ TEST_F(FIXTURE, TestLargeBatch) { TestLargeBatch(); } \
159
+ TEST_F(FIXTURE, TestSizeLimit) { TestSizeLimit(); } \
160
+ TEST_F(FIXTURE, TestUndrained) { TestUndrained(); }
161
+
162
+ class ARROW_FLIGHT_EXPORT AppMetadataTestServer : public FlightServerBase {
163
+ public:
164
+ virtual ~AppMetadataTestServer() = default;
165
+
166
+ Status DoGet(const ServerCallContext& context, const Ticket& request,
167
+ std::unique_ptr<FlightDataStream>* data_stream) override;
168
+
169
+ Status DoPut(const ServerCallContext& context,
170
+ std::unique_ptr<FlightMessageReader> reader,
171
+ std::unique_ptr<FlightMetadataWriter> writer) override;
172
+ };
173
+
174
+ /// \brief Tests of app_metadata in data plane methods.
175
+ class ARROW_FLIGHT_EXPORT AppMetadataTest : public FlightTest {
176
+ public:
177
+ void SetUpTest() override;
178
+ void TearDownTest() override;
179
+
180
+ // Test methods
181
+ void TestDoGet();
182
+ void TestDoGetDictionaries();
183
+ void TestDoPut();
184
+ void TestDoPutDictionaries();
185
+ void TestDoPutReadMetadata();
186
+
187
+ private:
188
+ std::unique_ptr<FlightClient> client_;
189
+ std::unique_ptr<FlightServerBase> server_;
190
+ };
191
+
192
+ #define ARROW_FLIGHT_TEST_APP_METADATA(FIXTURE) \
193
+ static_assert(std::is_base_of<AppMetadataTest, FIXTURE>::value, \
194
+ ARROW_STRINGIFY(FIXTURE) " must inherit from AppMetadataTest"); \
195
+ TEST_F(FIXTURE, TestDoGet) { TestDoGet(); } \
196
+ TEST_F(FIXTURE, TestDoGetDictionaries) { TestDoGetDictionaries(); } \
197
+ TEST_F(FIXTURE, TestDoPut) { TestDoPut(); } \
198
+ TEST_F(FIXTURE, TestDoPutDictionaries) { TestDoPutDictionaries(); } \
199
+ TEST_F(FIXTURE, TestDoPutReadMetadata) { TestDoPutReadMetadata(); }
200
+
201
+ /// \brief Tests of IPC options in data plane methods.
202
+ class ARROW_FLIGHT_EXPORT IpcOptionsTest : public FlightTest {
203
+ public:
204
+ void SetUpTest() override;
205
+ void TearDownTest() override;
206
+
207
+ // Test methods
208
+ void TestDoGetReadOptions();
209
+ void TestDoPutWriteOptions();
210
+ void TestDoExchangeClientWriteOptions();
211
+ void TestDoExchangeClientWriteOptionsBegin();
212
+ void TestDoExchangeServerWriteOptions();
213
+
214
+ private:
215
+ std::unique_ptr<FlightClient> client_;
216
+ std::unique_ptr<FlightServerBase> server_;
217
+ };
218
+
219
+ #define ARROW_FLIGHT_TEST_IPC_OPTIONS(FIXTURE) \
220
+ static_assert(std::is_base_of<IpcOptionsTest, FIXTURE>::value, \
221
+ ARROW_STRINGIFY(FIXTURE) " must inherit from IpcOptionsTest"); \
222
+ TEST_F(FIXTURE, TestDoGetReadOptions) { TestDoGetReadOptions(); } \
223
+ TEST_F(FIXTURE, TestDoPutWriteOptions) { TestDoPutWriteOptions(); } \
224
+ TEST_F(FIXTURE, TestDoExchangeClientWriteOptions) { \
225
+ TestDoExchangeClientWriteOptions(); \
226
+ } \
227
+ TEST_F(FIXTURE, TestDoExchangeClientWriteOptionsBegin) { \
228
+ TestDoExchangeClientWriteOptionsBegin(); \
229
+ } \
230
+ TEST_F(FIXTURE, TestDoExchangeServerWriteOptions) { \
231
+ TestDoExchangeServerWriteOptions(); \
232
+ }
233
+
234
+ /// \brief Tests of data plane methods with CUDA memory.
235
+ ///
236
+ /// If not built with ARROW_CUDA, tests are no-ops.
237
+ class ARROW_FLIGHT_EXPORT CudaDataTest : public FlightTest {
238
+ public:
239
+ void SetUpTest() override;
240
+ void TearDownTest() override;
241
+
242
+ // Test methods
243
+ void TestDoGet();
244
+ void TestDoPut();
245
+ void TestDoExchange();
246
+
247
+ private:
248
+ class Impl;
249
+ std::unique_ptr<FlightClient> client_;
250
+ std::unique_ptr<FlightServerBase> server_;
251
+ std::shared_ptr<Impl> impl_;
252
+ };
253
+
254
+ #define ARROW_FLIGHT_TEST_CUDA_DATA(FIXTURE) \
255
+ static_assert(std::is_base_of<CudaDataTest, FIXTURE>::value, \
256
+ ARROW_STRINGIFY(FIXTURE) " must inherit from CudaDataTest"); \
257
+ TEST_F(FIXTURE, TestDoGet) { TestDoGet(); } \
258
+ TEST_F(FIXTURE, TestDoPut) { TestDoPut(); } \
259
+ TEST_F(FIXTURE, TestDoExchange) { TestDoExchange(); }
260
+
261
+ /// \brief Tests of error handling.
262
+ class ARROW_FLIGHT_EXPORT ErrorHandlingTest : public FlightTest {
263
+ public:
264
+ void SetUpTest() override;
265
+ void TearDownTest() override;
266
+
267
+ // Test methods
268
+ void TestGetFlightInfo();
269
+ void TestGetFlightInfoMetadata();
270
+ void TestAsyncGetFlightInfo();
271
+ void TestDoPut();
272
+ void TestDoExchange();
273
+
274
+ protected:
275
+ struct Impl;
276
+
277
+ std::vector<std::pair<std::string, std::string>> GetHeaders();
278
+
279
+ std::shared_ptr<Impl> impl_;
280
+ std::unique_ptr<FlightClient> client_;
281
+ std::unique_ptr<FlightServerBase> server_;
282
+ };
283
+
284
+ #define ARROW_FLIGHT_TEST_ERROR_HANDLING(FIXTURE) \
285
+ static_assert(std::is_base_of<ErrorHandlingTest, FIXTURE>::value, \
286
+ ARROW_STRINGIFY(FIXTURE) " must inherit from ErrorHandlingTest"); \
287
+ TEST_F(FIXTURE, TestAsyncGetFlightInfo) { TestAsyncGetFlightInfo(); } \
288
+ TEST_F(FIXTURE, TestGetFlightInfo) { TestGetFlightInfo(); } \
289
+ TEST_F(FIXTURE, TestGetFlightInfoMetadata) { TestGetFlightInfoMetadata(); } \
290
+ TEST_F(FIXTURE, TestDoPut) { TestDoPut(); } \
291
+ TEST_F(FIXTURE, TestDoExchange) { TestDoExchange(); }
292
+
293
+ /// \brief Tests of the async client.
294
+ class ARROW_FLIGHT_EXPORT AsyncClientTest : public FlightTest {
295
+ public:
296
+ void SetUpTest() override;
297
+ void TearDownTest() override;
298
+
299
+ // Test methods
300
+ void TestGetFlightInfo();
301
+ void TestGetFlightInfoFuture();
302
+ void TestListenerLifetime();
303
+
304
+ private:
305
+ std::unique_ptr<FlightClient> client_;
306
+ std::unique_ptr<FlightServerBase> server_;
307
+ };
308
+
309
+ #define ARROW_FLIGHT_TEST_ASYNC_CLIENT(FIXTURE) \
310
+ static_assert(std::is_base_of<AsyncClientTest, FIXTURE>::value, \
311
+ ARROW_STRINGIFY(FIXTURE) " must inherit from AsyncClientTest"); \
312
+ TEST_F(FIXTURE, TestGetFlightInfo) { TestGetFlightInfo(); } \
313
+ TEST_F(FIXTURE, TestGetFlightInfoFuture) { TestGetFlightInfoFuture(); } \
314
+ TEST_F(FIXTURE, TestListenerLifetime) { TestListenerLifetime(); }
315
+
316
+ } // namespace flight
317
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport_server.h ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <chrono>
21
+ #include <memory>
22
+
23
+ #include "arrow/flight/transport.h"
24
+ #include "arrow/flight/type_fwd.h"
25
+ #include "arrow/flight/visibility.h"
26
+ #include "arrow/type_fwd.h"
27
+
28
+ namespace arrow {
29
+ namespace ipc {
30
+ class Message;
31
+ }
32
+ namespace flight {
33
+ namespace internal {
34
+
35
/// \brief A transport-specific interface for reading/writing Arrow
/// data for a server.
class ARROW_FLIGHT_EXPORT ServerDataStream : public TransportDataStream {
 public:
  /// \brief Attempt to write a non-data message.
  ///
  /// Only implemented for DoPut; mutually exclusive with
  /// WriteData(const FlightPayload&).
  ///
  /// NOTE(review): the default implementation is not visible in this
  /// header; presumably it returns an error for methods/transports that
  /// do not support metadata-only messages — confirm in the source file.
  virtual Status WritePutMetadata(const Buffer& payload);
};
45
+
46
/// \brief An implementation of a Flight server for a particular
/// transport.
///
/// This class (the transport implementation) implements the underlying
/// server and handles connections/incoming RPC calls. It should forward RPC
/// calls to the RPC handlers defined on this class, which work in terms of
/// the generic interfaces above. The RPC handlers here then forward calls
/// to the underlying FlightServerBase instance that contains the actual
/// application RPC method handlers.
///
/// Used by FlightServerBase to manage the server lifecycle.
class ARROW_FLIGHT_EXPORT ServerTransport {
 public:
  /// \brief Construct a transport wrapping an application server.
  ///
  /// \param base The application server; stored as a raw pointer and never
  ///     deleted here, so it must outlive this transport.
  /// \param memory_manager MemoryManager held for the transport's use
  ///     (usage is not visible in this header).
  ServerTransport(FlightServerBase* base, std::shared_ptr<MemoryManager> memory_manager)
      : base_(base), memory_manager_(std::move(memory_manager)) {}
  virtual ~ServerTransport() = default;

  /// \name Server Lifecycle Methods
  /// Transports implement these methods to start/shutdown the underlying
  /// server.
  /// @{
  /// \brief Initialize the server.
  ///
  /// This method should launch the server in a background thread, i.e. it
  /// should not block. Once this returns, the server should be active.
  virtual Status Init(const FlightServerOptions& options,
                      const arrow::internal::Uri& uri) = 0;
  /// \brief Shutdown the server.
  ///
  /// This should wait for active RPCs to finish. Once this returns, the
  /// server is no longer listening.
  virtual Status Shutdown() = 0;
  /// \brief Shutdown the server with a deadline.
  ///
  /// This should wait for active RPCs to finish, or for the deadline to
  /// expire. Once this returns, the server is no longer listening.
  virtual Status Shutdown(const std::chrono::system_clock::time_point& deadline) = 0;
  /// \brief Wait for the server to shutdown (but do not shut down the server).
  ///
  /// Once this returns, the server is no longer listening.
  virtual Status Wait() = 0;
  /// \brief Get the address the server is listening on, else an empty Location.
  virtual Location location() const = 0;
  ///@}

  /// \name RPC Handlers
  /// Implementations of RPC handlers for Flight methods using the common
  /// interfaces here. Transports should call these methods from their
  /// server implementation to handle the actual RPC calls.
  ///@{
  /// \brief Get the FlightServerBase.
  ///
  /// Intended as an escape hatch for now since not all methods have been
  /// factored into a transport-agnostic interface.
  FlightServerBase* base() const { return base_; }
  /// \brief Implement DoGet in terms of a transport-level stream.
  ///
  /// \param[in] context The server context.
  /// \param[in] request The request payload.
  /// \param[in] stream The transport-specific data stream
  ///   implementation. Must implement WriteData(const
  ///   FlightPayload&).
  Status DoGet(const ServerCallContext& context, const Ticket& request,
               ServerDataStream* stream);
  /// \brief Implement DoPut in terms of a transport-level stream.
  ///
  /// \param[in] context The server context.
  /// \param[in] stream The transport-specific data stream
  ///   implementation. Must implement ReadData(FlightData*)
  ///   and WritePutMetadata(const Buffer&).
  Status DoPut(const ServerCallContext& context, ServerDataStream* stream);
  /// \brief Implement DoExchange in terms of a transport-level stream.
  ///
  /// \param[in] context The server context.
  /// \param[in] stream The transport-specific data stream
  ///   implementation. Must implement ReadData(FlightData*)
  ///   and WriteData(const FlightPayload&).
  Status DoExchange(const ServerCallContext& context, ServerDataStream* stream);
  ///@}

 protected:
  // Non-owning; set in the constructor and never freed by this class.
  FlightServerBase* base_;
  std::shared_ptr<MemoryManager> memory_manager_;
};
130
+
131
+ } // namespace internal
132
+ } // namespace flight
133
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/type_fwd.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ namespace arrow {
21
+ namespace internal {
22
+ class Uri;
23
+ }
24
+ namespace flight {
25
+ struct Action;
26
+ struct ActionType;
27
+ template <typename T>
28
+ class AsyncListener;
29
+ class AsyncListenerBase;
30
+ class AsyncRpc;
31
+ struct BasicAuth;
32
+ class ClientAuthHandler;
33
+ class ClientMiddleware;
34
+ class ClientMiddlewareFactory;
35
+ struct Criteria;
36
+ class FlightCallOptions;
37
+ struct FlightClientOptions;
38
+ struct FlightDescriptor;
39
+ struct FlightEndpoint;
40
+ class FlightInfo;
41
+ class PollInfo;
42
+ class FlightListing;
43
+ class FlightMetadataReader;
44
+ class FlightMetadataWriter;
45
+ struct FlightPayload;
46
+ class FlightServerBase;
47
+ class FlightServerOptions;
48
+ class FlightStreamReader;
49
+ class FlightStreamWriter;
50
+ struct Location;
51
+ struct Result;
52
+ class ResultStream;
53
+ struct SchemaResult;
54
+ class ServerCallContext;
55
+ class ServerMiddleware;
56
+ class ServerMiddlewareFactory;
57
+ struct Ticket;
58
+ namespace internal {
59
+ class AsyncRpc;
60
+ class ClientTransport;
61
+ struct FlightData;
62
+ class ServerTransport;
63
+ } // namespace internal
64
+ } // namespace flight
65
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types.h ADDED
@@ -0,0 +1,942 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Data structure for Flight RPC. API should be considered experimental for now
19
+
20
+ #pragma once
21
+
22
+ #include <chrono>
23
+ #include <cstddef>
24
+ #include <cstdint>
25
+ #include <map>
26
+ #include <memory>
27
+ #include <optional>
28
+ #include <string>
29
+ #include <string_view>
30
+ #include <utility>
31
+ #include <vector>
32
+
33
+ #include "arrow/flight/type_fwd.h"
34
+ #include "arrow/flight/visibility.h"
35
+ #include "arrow/ipc/options.h"
36
+ #include "arrow/ipc/writer.h"
37
+ #include "arrow/result.h"
38
+ #include "arrow/status.h"
39
+
40
+ namespace arrow {
41
+
42
+ class Buffer;
43
+ class RecordBatch;
44
+ class Schema;
45
+ class Status;
46
+ class Table;
47
+
48
+ namespace ipc {
49
+
50
+ class DictionaryMemo;
51
+
52
+ } // namespace ipc
53
+
54
+ namespace internal {
55
+
56
+ class Uri;
57
+
58
+ } // namespace internal
59
+
60
+ namespace flight {
61
+
62
+ /// \brief A timestamp compatible with Protocol Buffer's
63
+ /// google.protobuf.Timestamp:
64
+ ///
65
+ /// https://protobuf.dev/reference/protobuf/google.protobuf/#timestamp
66
+ ///
67
+ /// > A Timestamp represents a point in time independent of any time
68
+ /// > zone or calendar, represented as seconds and fractions of
69
+ /// > seconds at nanosecond resolution in UTC Epoch time. It is
70
+ /// > encoded using the Proleptic Gregorian Calendar which extends the
71
+ /// > Gregorian calendar backwards to year one. It is encoded assuming
72
+ /// > all minutes are 60 seconds long, i.e. leap seconds are "smeared"
73
+ /// > so that no leap second table is needed for interpretation. Range
74
+ /// > is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
75
+ using Timestamp = std::chrono::system_clock::time_point;
76
+
77
/// \brief A Flight-specific status code. Used to encode some
/// additional status codes into an Arrow Status.
enum class FlightStatusCode : int8_t {
  /// An implementation error has occurred.
  Internal,
  /// A request timed out.
  TimedOut,
  /// A request was cancelled.
  Cancelled,
  /// We are not authenticated to the remote service.
  Unauthenticated,
  /// We do not have permission to make this request.
  Unauthorized,
  /// The remote service cannot handle this request at the moment.
  Unavailable,
  /// A request failed for some other reason (catch-all).
  Failed
};
95
+
96
+ // Silence warning
97
+ // "non dll-interface class RecordBatchReader used as base for dll-interface class"
98
+ #ifdef _MSC_VER
99
+ #pragma warning(push)
100
+ #pragma warning(disable : 4275)
101
+ #endif
102
+
103
/// \brief Flight-specific error information in a Status.
///
/// Attached as a StatusDetail so that a plain arrow::Status can carry a
/// FlightStatusCode plus opaque extra info (e.g. serialized protobuf).
class ARROW_FLIGHT_EXPORT FlightStatusDetail : public arrow::StatusDetail {
 public:
  /// \brief Construct with a status code and no extra info.
  explicit FlightStatusDetail(FlightStatusCode code) : code_{code} {}
  /// \brief Construct with a status code and opaque extra info.
  explicit FlightStatusDetail(FlightStatusCode code, std::string extra_info)
      : code_{code}, extra_info_(std::move(extra_info)) {}
  const char* type_id() const override;
  std::string ToString() const override;

  /// \brief Get the Flight status code.
  FlightStatusCode code() const;
  /// \brief Get the extra error info
  std::string extra_info() const;
  /// \brief Get the human-readable name of the status code.
  std::string CodeAsString() const;
  /// \brief Set the extra error info
  void set_extra_info(std::string extra_info);

  /// \brief Try to extract a \a FlightStatusDetail from any Arrow
  /// status.
  ///
  /// \return a \a FlightStatusDetail if it could be unwrapped, \a
  /// nullptr otherwise
  static std::shared_ptr<FlightStatusDetail> UnwrapStatus(const arrow::Status& status);

 private:
  FlightStatusCode code_;
  // Opaque, transport-defined payload (e.g. serialized protobuf).
  std::string extra_info_;
};
132
+
133
+ #ifdef _MSC_VER
134
+ #pragma warning(pop)
135
+ #endif
136
+
137
+ /// \brief Make an appropriate Arrow status for the given
138
+ /// Flight-specific status.
139
+ ///
140
+ /// \param code The Flight status code.
141
+ /// \param message The message for the error.
142
+ /// \param extra_info Optional extra binary info for the error (eg protobuf)
143
+ ARROW_FLIGHT_EXPORT
144
+ Status MakeFlightError(FlightStatusCode code, std::string message,
145
+ std::string extra_info = {});
146
+
147
+ /// \brief Headers sent from the client or server.
148
+ ///
149
+ /// Header values are ordered.
150
+ using CallHeaders = std::multimap<std::string_view, std::string_view>;
151
+
152
/// \brief A TLS certificate plus key.
///
/// NOTE(review): pem_key is sensitive material; avoid logging instances
/// of this struct.
struct ARROW_FLIGHT_EXPORT CertKeyPair {
  /// \brief The certificate in PEM format.
  std::string pem_cert;

  /// \brief The key in PEM format.
  std::string pem_key;
};
160
+
161
/// \brief A type of action that can be performed with the DoAction RPC.
struct ARROW_FLIGHT_EXPORT ActionType {
  /// \brief The name of the action.
  std::string type;

  /// \brief A human-readable description of the action.
  std::string description;

  /// \brief Get a human-readable form of this action type.
  std::string ToString() const;
  /// \brief Compare two action types for equality.
  bool Equals(const ActionType& other) const;

  friend bool operator==(const ActionType& left, const ActionType& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const ActionType& left, const ActionType& right) {
    return !(left == right);
  }

  /// \brief Serialize this message to its wire-format representation.
  arrow::Result<std::string> SerializeToString() const;

  /// \brief Deserialize this message from its wire-format representation.
  static arrow::Result<ActionType> Deserialize(std::string_view serialized);

  /// \brief Well-known action type for the CancelFlightInfo action.
  static const ActionType kCancelFlightInfo;
  /// \brief Well-known action type for the RenewFlightEndpoint action.
  static const ActionType kRenewFlightEndpoint;
};
188
+
189
/// \brief Opaque selection criteria for ListFlights RPC
struct ARROW_FLIGHT_EXPORT Criteria {
  /// Opaque criteria expression, dependent on server implementation
  std::string expression;

  /// \brief Get a human-readable form of these criteria.
  std::string ToString() const;
  /// \brief Compare two criteria for equality.
  bool Equals(const Criteria& other) const;

  friend bool operator==(const Criteria& left, const Criteria& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const Criteria& left, const Criteria& right) {
    return !(left == right);
  }

  /// \brief Serialize this message to its wire-format representation.
  arrow::Result<std::string> SerializeToString() const;

  /// \brief Deserialize this message from its wire-format representation.
  static arrow::Result<Criteria> Deserialize(std::string_view serialized);
};
210
+
211
/// \brief An action to perform with the DoAction RPC
struct ARROW_FLIGHT_EXPORT Action {
  /// The action type (should match an ActionType::type advertised
  /// by the server)
  std::string type;

  /// The action content as a Buffer
  std::shared_ptr<Buffer> body;

  /// \brief Get a human-readable form of this action.
  std::string ToString() const;
  /// \brief Compare two actions for equality.
  bool Equals(const Action& other) const;

  friend bool operator==(const Action& left, const Action& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const Action& left, const Action& right) {
    return !(left == right);
  }

  /// \brief Serialize this message to its wire-format representation.
  arrow::Result<std::string> SerializeToString() const;

  /// \brief Deserialize this message from its wire-format representation.
  static arrow::Result<Action> Deserialize(std::string_view serialized);
};
235
+
236
/// \brief Opaque result returned after executing an action
struct ARROW_FLIGHT_EXPORT Result {
  /// The result content; interpretation is action-specific
  std::shared_ptr<Buffer> body;

  /// \brief Get a human-readable form of this result.
  std::string ToString() const;
  /// \brief Compare two results for equality.
  bool Equals(const Result& other) const;

  friend bool operator==(const Result& left, const Result& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const Result& left, const Result& right) {
    return !(left == right);
  }

  /// \brief Serialize this message to its wire-format representation.
  arrow::Result<std::string> SerializeToString() const;

  /// \brief Deserialize this message from its wire-format representation.
  static arrow::Result<Result> Deserialize(std::string_view serialized);
};
256
+
257
/// \brief The result of attempting to cancel a FlightInfo via the
/// CancelFlightInfo action.
enum class CancelStatus {
  /// The cancellation status is unknown. Servers should avoid using
  /// this value (send a kNotCancellable if the requested FlightInfo
  /// is not known). Clients can retry the request.
  kUnspecified = 0,
  /// The cancellation request is complete. Subsequent requests with
  /// the same payload may return kCancelled or a kNotCancellable error.
  kCancelled = 1,
  /// The cancellation request is in progress. The client may retry
  /// the cancellation request.
  kCancelling = 2,
  /// The FlightInfo is not cancellable. The client should not retry the
  /// cancellation request.
  kNotCancellable = 3,
};
272
+
273
+ /// \brief The result of the CancelFlightInfo action.
274
+ struct ARROW_FLIGHT_EXPORT CancelFlightInfoResult {
275
+ CancelStatus status;
276
+
277
+ std::string ToString() const;
278
+ bool Equals(const CancelFlightInfoResult& other) const;
279
+
280
+ friend bool operator==(const CancelFlightInfoResult& left,
281
+ const CancelFlightInfoResult& right) {
282
+ return left.Equals(right);
283
+ }
284
+ friend bool operator!=(const CancelFlightInfoResult& left,
285
+ const CancelFlightInfoResult& right) {
286
+ return !(left == right);
287
+ }
288
+
289
+ /// \brief Serialize this message to its wire-format representation.
290
+ arrow::Result<std::string> SerializeToString() const;
291
+
292
+ /// \brief Deserialize this message from its wire-format representation.
293
+ static arrow::Result<CancelFlightInfoResult> Deserialize(std::string_view serialized);
294
+ };
295
+
296
+ ARROW_FLIGHT_EXPORT
297
+ std::ostream& operator<<(std::ostream& os, CancelStatus status);
298
+
299
/// \brief Message for simple (username/password) auth.
///
/// NOTE(review): password is sensitive material; avoid logging instances
/// of this struct.
struct ARROW_FLIGHT_EXPORT BasicAuth {
  std::string username;
  std::string password;

  /// \brief Get a human-readable form of this message.
  std::string ToString() const;
  /// \brief Compare two messages for equality.
  bool Equals(const BasicAuth& other) const;

  friend bool operator==(const BasicAuth& left, const BasicAuth& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const BasicAuth& left, const BasicAuth& right) {
    return !(left == right);
  }

  /// \brief Deserialize this message from its wire-format representation.
  static arrow::Result<BasicAuth> Deserialize(std::string_view serialized);
  /// \brief Serialize this message to its wire-format representation.
  arrow::Result<std::string> SerializeToString() const;
};
319
+
320
+ /// \brief A request to retrieve or generate a dataset
321
+ struct ARROW_FLIGHT_EXPORT FlightDescriptor {
322
+ enum DescriptorType {
323
+ UNKNOWN = 0, /// Unused
324
+ PATH = 1, /// Named path identifying a dataset
325
+ CMD = 2 /// Opaque command to generate a dataset
326
+ };
327
+
328
+ /// The descriptor type
329
+ DescriptorType type;
330
+
331
+ /// Opaque value used to express a command. Should only be defined when type
332
+ /// is CMD
333
+ std::string cmd;
334
+
335
+ /// List of strings identifying a particular dataset. Should only be defined
336
+ /// when type is PATH
337
+ std::vector<std::string> path;
338
+
339
+ bool Equals(const FlightDescriptor& other) const;
340
+
341
+ /// \brief Get a human-readable form of this descriptor.
342
+ std::string ToString() const;
343
+
344
+ /// \brief Get the wire-format representation of this type.
345
+ ///
346
+ /// Useful when interoperating with non-Flight systems (e.g. REST
347
+ /// services) that may want to return Flight types.
348
+ arrow::Result<std::string> SerializeToString() const;
349
+
350
+ /// \brief Parse the wire-format representation of this type.
351
+ ///
352
+ /// Useful when interoperating with non-Flight systems (e.g. REST
353
+ /// services) that may want to return Flight types.
354
+ static arrow::Result<FlightDescriptor> Deserialize(std::string_view serialized);
355
+
356
+ // Convenience factory functions
357
+
358
+ static FlightDescriptor Command(const std::string& c) {
359
+ return FlightDescriptor{CMD, c, {}};
360
+ }
361
+
362
+ static FlightDescriptor Path(const std::vector<std::string>& p) {
363
+ return FlightDescriptor{PATH, "", p};
364
+ }
365
+
366
+ friend bool operator==(const FlightDescriptor& left, const FlightDescriptor& right) {
367
+ return left.Equals(right);
368
+ }
369
+ friend bool operator!=(const FlightDescriptor& left, const FlightDescriptor& right) {
370
+ return !(left == right);
371
+ }
372
+ };
373
+
374
/// \brief Data structure providing an opaque identifier or credential to use
/// when requesting a data stream with the DoGet RPC
struct ARROW_FLIGHT_EXPORT Ticket {
  /// Opaque ticket bytes; produced by the server, echoed back by clients
  std::string ticket;

  /// \brief Get a human-readable form of this ticket.
  std::string ToString() const;
  /// \brief Compare two tickets for equality.
  bool Equals(const Ticket& other) const;

  friend bool operator==(const Ticket& left, const Ticket& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const Ticket& left, const Ticket& right) {
    return !(left == right);
  }

  /// \brief Get the wire-format representation of this type.
  ///
  /// Useful when interoperating with non-Flight systems (e.g. REST
  /// services) that may want to return Flight types.
  arrow::Result<std::string> SerializeToString() const;

  /// \brief Parse the wire-format representation of this type.
  ///
  /// Useful when interoperating with non-Flight systems (e.g. REST
  /// services) that may want to return Flight types.
  static arrow::Result<Ticket> Deserialize(std::string_view serialized);
};
401
+
402
+ class FlightClient;
403
+ class FlightServerBase;
404
+
405
+ ARROW_FLIGHT_EXPORT
406
+ extern const char* kSchemeGrpc;
407
+ ARROW_FLIGHT_EXPORT
408
+ extern const char* kSchemeGrpcTcp;
409
+ ARROW_FLIGHT_EXPORT
410
+ extern const char* kSchemeGrpcUnix;
411
+ ARROW_FLIGHT_EXPORT
412
+ extern const char* kSchemeGrpcTls;
413
+
414
/// \brief A host location (a URI)
struct ARROW_FLIGHT_EXPORT Location {
 public:
  /// \brief Initialize a blank location.
  Location();

  /// \brief Initialize a location by parsing a URI string
  static arrow::Result<Location> Parse(const std::string& uri_string);

  /// \brief Initialize a location for a non-TLS, gRPC-based Flight
  /// service from a host and port
  /// \param[in] host The hostname to connect to
  /// \param[in] port The port
  /// \return Arrow result with the resulting location
  static arrow::Result<Location> ForGrpcTcp(const std::string& host, const int port);

  /// \brief Initialize a location for a TLS-enabled, gRPC-based Flight
  /// service from a host and port
  /// \param[in] host The hostname to connect to
  /// \param[in] port The port
  /// \return Arrow result with the resulting location
  static arrow::Result<Location> ForGrpcTls(const std::string& host, const int port);

  /// \brief Initialize a location for a domain socket-based Flight
  /// service
  /// \param[in] path The path to the domain socket
  /// \return Arrow result with the resulting location
  static arrow::Result<Location> ForGrpcUnix(const std::string& path);

  /// \brief Initialize a location based on a URI scheme
  static arrow::Result<Location> ForScheme(const std::string& scheme,
                                           const std::string& host, const int port);

  /// \brief Get a representation of this URI as a string.
  std::string ToString() const;

  /// \brief Get the scheme of this URI.
  std::string scheme() const;

  /// \brief Compare two locations for equality.
  bool Equals(const Location& other) const;

  friend bool operator==(const Location& left, const Location& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const Location& left, const Location& right) {
    return !(left == right);
  }

 private:
  friend class FlightClient;
  friend class FlightServerBase;
  // Parsed URI; shared_ptr makes Location cheap to copy.
  std::shared_ptr<arrow::internal::Uri> uri_;
};
467
+
468
/// \brief A flight ticket and list of locations where the ticket can be
/// redeemed
struct ARROW_FLIGHT_EXPORT FlightEndpoint {
  /// Opaque ticket identify; use with DoGet RPC
  Ticket ticket;

  /// List of locations where ticket can be redeemed. If the list is empty, the
  /// ticket can only be redeemed on the current service where the ticket was
  /// generated
  std::vector<Location> locations;

  /// Expiration time of this stream. If present, clients may assume
  /// they can retry DoGet requests. Otherwise, clients should avoid
  /// retrying DoGet requests.
  std::optional<Timestamp> expiration_time;

  /// Opaque Application-defined metadata
  std::string app_metadata;

  /// \brief Get a human-readable form of this endpoint.
  std::string ToString() const;
  /// \brief Compare two endpoints for equality.
  bool Equals(const FlightEndpoint& other) const;

  friend bool operator==(const FlightEndpoint& left, const FlightEndpoint& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const FlightEndpoint& left, const FlightEndpoint& right) {
    return !(left == right);
  }

  /// \brief Serialize this message to its wire-format representation.
  arrow::Result<std::string> SerializeToString() const;

  /// \brief Deserialize this message from its wire-format representation.
  static arrow::Result<FlightEndpoint> Deserialize(std::string_view serialized);
};
503
+
504
/// \brief The request of the RenewFlightEndpoint action.
struct ARROW_FLIGHT_EXPORT RenewFlightEndpointRequest {
  /// The endpoint whose ticket/expiration should be renewed.
  FlightEndpoint endpoint;

  /// \brief Get a human-readable form of this request.
  std::string ToString() const;
  /// \brief Compare two requests for equality.
  bool Equals(const RenewFlightEndpointRequest& other) const;

  friend bool operator==(const RenewFlightEndpointRequest& left,
                         const RenewFlightEndpointRequest& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const RenewFlightEndpointRequest& left,
                         const RenewFlightEndpointRequest& right) {
    return !(left == right);
  }

  /// \brief Serialize this message to its wire-format representation.
  arrow::Result<std::string> SerializeToString() const;

  /// \brief Deserialize this message from its wire-format representation.
  static arrow::Result<RenewFlightEndpointRequest> Deserialize(
      std::string_view serialized);
};
527
+
528
/// \brief Staging data structure for messages about to be put on the wire
///
/// This structure corresponds to FlightData in the protocol.
struct ARROW_FLIGHT_EXPORT FlightPayload {
  // Presumably the serialized FlightDescriptor bytes (FlightData.flight_descriptor)
  // — confirm against the transport implementations.
  std::shared_ptr<Buffer> descriptor;
  // Application-defined metadata accompanying the message; may be null.
  std::shared_ptr<Buffer> app_metadata;
  // The Arrow IPC message (header + body buffers) to transmit.
  ipc::IpcPayload ipc_message;

  /// \brief Check that the payload can be written to the wire.
  Status Validate() const;
};
539
+
540
/// \brief Schema result returned after a schema request RPC
struct ARROW_FLIGHT_EXPORT SchemaResult {
 public:
  SchemaResult() = default;
  /// \brief Wrap an already-serialized schema (IPC wire format).
  explicit SchemaResult(std::string schema) : raw_schema_(std::move(schema)) {}

  /// \brief Factory method to construct a SchemaResult.
  static arrow::Result<std::unique_ptr<SchemaResult>> Make(const Schema& schema);

  /// \brief return schema
  /// \param[in,out] dictionary_memo for dictionary bookkeeping, will
  /// be modified
  /// \return Arrow result with the reconstructed Schema
  arrow::Result<std::shared_ptr<Schema>> GetSchema(
      ipc::DictionaryMemo* dictionary_memo) const;

  /// \brief Access the raw serialized schema bytes without deserializing.
  const std::string& serialized_schema() const { return raw_schema_; }

  /// \brief Get a human-readable form of this result.
  std::string ToString() const;
  /// \brief Compare two results for equality.
  bool Equals(const SchemaResult& other) const;

  friend bool operator==(const SchemaResult& left, const SchemaResult& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const SchemaResult& left, const SchemaResult& right) {
    return !(left == right);
  }

  /// \brief Serialize this message to its wire-format representation.
  arrow::Result<std::string> SerializeToString() const;

  /// \brief Deserialize this message from its wire-format representation.
  static arrow::Result<SchemaResult> Deserialize(std::string_view serialized);

 private:
  // Schema in serialized (IPC) form; deserialized lazily via GetSchema().
  std::string raw_schema_;
};
577
+
578
/// \brief The access coordinates for retrieval of a dataset, returned by
/// GetFlightInfo
class ARROW_FLIGHT_EXPORT FlightInfo {
 public:
  /// \brief Plain-data aggregate holding all fields of a FlightInfo.
  struct Data {
    // Serialized schema; see serialized_schema()/GetSchema().
    std::string schema;
    FlightDescriptor descriptor;
    std::vector<FlightEndpoint> endpoints;
    int64_t total_records = -1;  // -1 means unknown
    int64_t total_bytes = -1;    // -1 means unknown
    bool ordered = false;
    std::string app_metadata;
  };

  explicit FlightInfo(Data data) : data_(std::move(data)), reconstructed_schema_(false) {}

  /// \brief Factory method to construct a FlightInfo.
  static arrow::Result<FlightInfo> Make(const Schema& schema,
                                        const FlightDescriptor& descriptor,
                                        const std::vector<FlightEndpoint>& endpoints,
                                        int64_t total_records, int64_t total_bytes,
                                        bool ordered = false,
                                        std::string app_metadata = "");

  /// \brief Deserialize the Arrow schema of the dataset. Populate any
  /// dictionary encoded fields into a DictionaryMemo for
  /// bookkeeping
  /// \param[in,out] dictionary_memo for dictionary bookkeeping, will
  /// be modified
  /// \return Arrow result with the reconstructed Schema
  arrow::Result<std::shared_ptr<Schema>> GetSchema(
      ipc::DictionaryMemo* dictionary_memo) const;

  /// \brief Access the raw serialized schema bytes without deserializing.
  const std::string& serialized_schema() const { return data_.schema; }

  /// The descriptor associated with this flight, may not be set
  const FlightDescriptor& descriptor() const { return data_.descriptor; }

  /// A list of endpoints associated with the flight (dataset). To consume the
  /// whole flight, all endpoints must be consumed
  const std::vector<FlightEndpoint>& endpoints() const { return data_.endpoints; }

  /// The total number of records (rows) in the dataset. If unknown, set to -1
  int64_t total_records() const { return data_.total_records; }

  /// The total number of bytes in the dataset. If unknown, set to -1
  int64_t total_bytes() const { return data_.total_bytes; }

  /// Whether endpoints are in the same order as the data.
  bool ordered() const { return data_.ordered; }

  /// Application-defined opaque metadata
  const std::string& app_metadata() const { return data_.app_metadata; }

  /// \brief Get the wire-format representation of this type.
  ///
  /// Useful when interoperating with non-Flight systems (e.g. REST
  /// services) that may want to return Flight types.
  arrow::Result<std::string> SerializeToString() const;

  /// \brief Parse the wire-format representation of this type.
  ///
  /// Useful when interoperating with non-Flight systems (e.g. REST
  /// services) that may want to return Flight types.
  static arrow::Result<std::unique_ptr<FlightInfo>> Deserialize(
      std::string_view serialized);

  std::string ToString() const;

  /// Compare two FlightInfo for equality. This will compare the
  /// serialized schema representations, NOT the logical equality of
  /// the schemas.
  bool Equals(const FlightInfo& other) const;

  friend bool operator==(const FlightInfo& left, const FlightInfo& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const FlightInfo& left, const FlightInfo& right) {
    return !(left == right);
  }

 private:
  Data data_;
  // Mutable members: presumably a lazily-built cache of the deserialized
  // schema populated by const GetSchema() — TODO confirm against types.cc.
  mutable std::shared_ptr<Schema> schema_;
  mutable bool reconstructed_schema_;
};
664
+
665
/// \brief The information to process a long-running query.
class ARROW_FLIGHT_EXPORT PollInfo {
 public:
  /// The currently available results so far.
  std::unique_ptr<FlightInfo> info = NULLPTR;
  /// The descriptor the client should use on the next try. If unset,
  /// the query is complete.
  std::optional<FlightDescriptor> descriptor = std::nullopt;
  /// Query progress. Must be in [0.0, 1.0] but need not be
  /// monotonic or nondecreasing. If unknown, do not set.
  std::optional<double> progress = std::nullopt;
  /// Expiration time for this request. After this passes, the server
  /// might not accept the poll descriptor anymore (and the query may
  /// be cancelled). This may be updated on a call to PollFlightInfo.
  std::optional<Timestamp> expiration_time = std::nullopt;

  PollInfo()
      : info(NULLPTR),
        descriptor(std::nullopt),
        progress(std::nullopt),
        expiration_time(std::nullopt) {}

  explicit PollInfo(std::unique_ptr<FlightInfo> info,
                    std::optional<FlightDescriptor> descriptor,
                    std::optional<double> progress,
                    std::optional<Timestamp> expiration_time)
      : info(std::move(info)),
        descriptor(std::move(descriptor)),
        progress(progress),
        expiration_time(expiration_time) {}

  // Copying deep-copies `info`, since unique_ptr is not copyable.
  // Must not be explicit; to declare one we must declare all ("rule of five")
  PollInfo(const PollInfo& other)  // NOLINT(runtime/explicit)
      : info(other.info ? std::make_unique<FlightInfo>(*other.info) : NULLPTR),
        descriptor(other.descriptor),
        progress(other.progress),
        expiration_time(other.expiration_time) {}
  PollInfo(PollInfo&& other) noexcept = default;  // NOLINT(runtime/explicit)
  ~PollInfo() = default;
  // Deep-copies `info` (safe on self-assignment: the copy is built before
  // the member is overwritten).
  PollInfo& operator=(const PollInfo& other) {
    info = other.info ? std::make_unique<FlightInfo>(*other.info) : NULLPTR;
    descriptor = other.descriptor;
    progress = other.progress;
    expiration_time = other.expiration_time;
    return *this;
  }
  PollInfo& operator=(PollInfo&& other) = default;

  /// \brief Get the wire-format representation of this type.
  ///
  /// Useful when interoperating with non-Flight systems (e.g. REST
  /// services) that may want to return Flight types.
  arrow::Result<std::string> SerializeToString() const;

  /// \brief Parse the wire-format representation of this type.
  ///
  /// Useful when interoperating with non-Flight systems (e.g. REST
  /// services) that may want to return Flight types.
  static arrow::Result<std::unique_ptr<PollInfo>> Deserialize(
      std::string_view serialized);

  std::string ToString() const;

  /// Compare two PollInfo for equality. This will compare the
  /// serialized schema representations, NOT the logical equality of
  /// the schemas.
  bool Equals(const PollInfo& other) const;

  friend bool operator==(const PollInfo& left, const PollInfo& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const PollInfo& left, const PollInfo& right) {
    return !(left == right);
  }
};
740
+
741
/// \brief The request of the CancelFlightInfoRequest action.
struct ARROW_FLIGHT_EXPORT CancelFlightInfoRequest {
  // The FlightInfo identifying the query to cancel.
  std::unique_ptr<FlightInfo> info;

  std::string ToString() const;
  /// \brief Compare for equality.
  bool Equals(const CancelFlightInfoRequest& other) const;

  friend bool operator==(const CancelFlightInfoRequest& left,
                         const CancelFlightInfoRequest& right) {
    return left.Equals(right);
  }
  friend bool operator!=(const CancelFlightInfoRequest& left,
                         const CancelFlightInfoRequest& right) {
    return !(left == right);
  }

  /// \brief Serialize this message to its wire-format representation.
  arrow::Result<std::string> SerializeToString() const;

  /// \brief Deserialize this message from its wire-format representation.
  static arrow::Result<CancelFlightInfoRequest> Deserialize(std::string_view serialized);
};
763
+
764
/// \brief An iterator to FlightInfo instances returned by ListFlights.
class ARROW_FLIGHT_EXPORT FlightListing {
 public:
  virtual ~FlightListing() = default;

  /// \brief Retrieve the next FlightInfo from the iterator.
  /// \return Arrow result with a single FlightInfo. Set to \a nullptr if there
  /// are none left.
  virtual arrow::Result<std::unique_ptr<FlightInfo>> Next() = 0;
};
774
+
775
/// \brief An iterator to Result instances returned by DoAction.
class ARROW_FLIGHT_EXPORT ResultStream {
 public:
  virtual ~ResultStream() = default;

  /// \brief Retrieve the next Result from the iterator.
  /// \return Arrow result with a single Result. Set to \a nullptr if there are none left.
  virtual arrow::Result<std::unique_ptr<Result>> Next() = 0;

  /// \brief Read and drop the remaining messages to get the error (if any) from a server.
  /// \return Status OK if there is no error from the server, any other status if the
  /// server returns an error.
  Status Drain();
};
789
+
790
/// \brief A holder for a RecordBatch with associated Flight metadata.
struct ARROW_FLIGHT_EXPORT FlightStreamChunk {
 public:
  // The record batch for this chunk; nullptr signals end of stream.
  std::shared_ptr<RecordBatch> data;
  // Application-defined opaque metadata accompanying the batch (may be null).
  std::shared_ptr<Buffer> app_metadata;
};
796
+
797
/// \brief An interface to read Flight data with metadata.
class ARROW_FLIGHT_EXPORT MetadataRecordBatchReader {
 public:
  virtual ~MetadataRecordBatchReader() = default;

  /// \brief Get the schema for this stream.
  virtual arrow::Result<std::shared_ptr<Schema>> GetSchema() = 0;

  /// \brief Get the next message from Flight. If the stream is
  /// finished, then the members of \a FlightStreamChunk will be
  /// nullptr.
  virtual arrow::Result<FlightStreamChunk> Next() = 0;

  /// \brief Consume entire stream as a vector of record batches
  virtual arrow::Result<std::vector<std::shared_ptr<RecordBatch>>> ToRecordBatches();

  /// \brief Consume entire stream as a Table
  virtual arrow::Result<std::shared_ptr<Table>> ToTable();
};
816
+
817
/// \brief Convert a MetadataRecordBatchReader to a regular RecordBatchReader.
/// Any per-batch application metadata is not surfaced by the resulting reader.
ARROW_FLIGHT_EXPORT
arrow::Result<std::shared_ptr<RecordBatchReader>> MakeRecordBatchReader(
    std::shared_ptr<MetadataRecordBatchReader> reader);
821
+
822
/// \brief An interface to write IPC payloads with metadata.
class ARROW_FLIGHT_EXPORT MetadataRecordBatchWriter : public ipc::RecordBatchWriter {
 public:
  virtual ~MetadataRecordBatchWriter() = default;
  /// \brief Begin writing data with the given schema. Only used with \a DoExchange.
  virtual Status Begin(const std::shared_ptr<Schema>& schema,
                       const ipc::IpcWriteOptions& options) = 0;
  /// \brief Begin writing data with the given schema and default IPC options.
  virtual Status Begin(const std::shared_ptr<Schema>& schema);
  /// \brief Write a metadata-only message (no record batch payload).
  virtual Status WriteMetadata(std::shared_ptr<Buffer> app_metadata) = 0;
  /// \brief Write a record batch together with application metadata.
  virtual Status WriteWithMetadata(const RecordBatch& batch,
                                   std::shared_ptr<Buffer> app_metadata) = 0;
};
834
+
835
/// \brief A FlightListing implementation based on a vector of
/// FlightInfo objects.
///
/// This can be iterated once, then it is consumed.
class ARROW_FLIGHT_EXPORT SimpleFlightListing : public FlightListing {
 public:
  explicit SimpleFlightListing(const std::vector<FlightInfo>& flights);
  explicit SimpleFlightListing(std::vector<FlightInfo>&& flights);

  arrow::Result<std::unique_ptr<FlightInfo>> Next() override;

 private:
  // Cursor into flights_; advanced by Next().
  // NOTE(review): `int` here vs `size_t` in SimpleResultStream — consider
  // unifying the cursor types in a future ABI-breaking release.
  int position_;
  std::vector<FlightInfo> flights_;
};
850
+
851
/// \brief A ResultStream implementation based on a vector of
/// Result objects.
///
/// This can be iterated once, then it is consumed.
class ARROW_FLIGHT_EXPORT SimpleResultStream : public ResultStream {
 public:
  explicit SimpleResultStream(std::vector<Result>&& results);
  arrow::Result<std::unique_ptr<Result>> Next() override;

 private:
  std::vector<Result> results_;
  // Cursor into results_; advanced by Next().
  size_t position_;
};
864
+
865
+ /// \defgroup flight-error Error Handling
866
+ /// Types for handling errors from RPCs. Flight uses a set of status
867
+ /// codes standardized across Flight implementations, so these types
868
+ /// let applications work directly with those codes instead of having
869
+ /// to translate to and from Arrow Status.
870
+ /// @{
871
+
872
/// \brief Abstract status code for an RPC as per the Flight
/// specification.
enum class TransportStatusCode {
  /// \brief No error.
  kOk = 0,
  /// \brief An unknown error occurred.
  kUnknown = 1,
  /// \brief An error occurred in the transport implementation, or an
  /// error internal to the service implementation occurred.
  kInternal = 2,
  /// \brief An argument is invalid.
  kInvalidArgument = 3,
  /// \brief The request timed out.
  kTimedOut = 4,
  /// \brief An argument is not necessarily invalid, but references
  /// some resource that does not exist. Prefer over
  /// kInvalidArgument where applicable.
  kNotFound = 5,
  /// \brief The request attempted to create some resource that
  /// already exists.
  kAlreadyExists = 6,
  /// \brief The request was explicitly cancelled.
  kCancelled = 7,
  /// \brief The client is not authenticated.
  kUnauthenticated = 8,
  /// \brief The client is not authorized to perform this request.
  kUnauthorized = 9,
  /// \brief The request is not implemented
  kUnimplemented = 10,
  /// \brief There is a network connectivity error, or some resource
  /// is otherwise unavailable. Most likely a temporary condition.
  kUnavailable = 11,
};

/// \brief Convert a code to a string.
std::string ToString(TransportStatusCode code);
908
+
909
/// \brief An error from an RPC call, using Flight error codes directly
/// instead of trying to translate to Arrow Status.
///
/// Currently, only attached to the Status passed to AsyncListener::OnFinish.
///
/// This API is EXPERIMENTAL.
class ARROW_FLIGHT_EXPORT TransportStatusDetail : public StatusDetail {
 public:
  // Identifier used by StatusDetail to recognize this detail type.
  constexpr static const char* kTypeId = "flight::TransportStatusDetail";
  explicit TransportStatusDetail(TransportStatusCode code, std::string message,
                                 std::vector<std::pair<std::string, std::string>> details)
      : code_(code), message_(std::move(message)), details_(std::move(details)) {}
  const char* type_id() const override { return kTypeId; }
  std::string ToString() const override;

  /// \brief Extract this detail from a Status, if attached.
  /// \return the detail, or std::nullopt if the Status carries none.
  static std::optional<std::reference_wrapper<const TransportStatusDetail>> Unwrap(
      const Status& status);

  /// \brief The transport-level status code.
  TransportStatusCode code() const { return code_; }
  /// \brief The human-readable error message.
  std::string_view message() const { return message_; }
  /// \brief Additional key-value error details supplied by the transport.
  const std::vector<std::pair<std::string, std::string>>& details() const {
    return details_;
  }

 private:
  TransportStatusCode code_;
  std::string message_;
  std::vector<std::pair<std::string, std::string>> details_;
};
938
+
939
+ /// @}
940
+
941
+ } // namespace flight
942
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types_async.h ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+
22
+ #include "arrow/flight/type_fwd.h"
23
+ #include "arrow/flight/types.h"
24
+ #include "arrow/ipc/options.h"
25
+ #include "arrow/type_fwd.h"
26
+
27
+ namespace arrow::flight {
28
+
29
+ /// \defgroup flight-async Async Flight Types
30
+ /// Common types used for asynchronous Flight APIs.
31
+ /// @{
32
+
33
/// \brief Non-templated state for an async RPC.
///
/// This API is EXPERIMENTAL.
class ARROW_FLIGHT_EXPORT AsyncListenerBase {
 public:
  AsyncListenerBase();
  virtual ~AsyncListenerBase();

  /// \brief Request cancellation of the RPC.
  ///
  /// The RPC is not cancelled until AsyncListener::OnFinish is called.
  void TryCancel();

 private:
  // ClientTransport needs direct access to rpc_state_ via
  // SetAsyncRpc/GetAsyncRpc.
  friend class arrow::flight::internal::ClientTransport;

  /// Transport-specific state for this RPC. Transport
  /// implementations may store and retrieve state here via
  /// ClientTransport::SetAsyncRpc and ClientTransport::GetAsyncRpc.
  std::unique_ptr<internal::AsyncRpc> rpc_state_;
};
54
+
55
/// \brief Callbacks for results from async RPCs.
///
/// A single listener may not be used for multiple concurrent RPC
/// calls. The application MUST hold the listener alive until
/// OnFinish() is called and has finished.
///
/// \tparam T the type of each server result delivered to OnNext
///
/// This API is EXPERIMENTAL.
template <typename T>
class ARROW_FLIGHT_EXPORT AsyncListener : public AsyncListenerBase {
 public:
  /// \brief Get the next server result.
  ///
  /// This will never be called concurrently with itself or OnFinish.
  virtual void OnNext(T message) = 0;
  /// \brief Get the final status.
  ///
  /// This will never be called concurrently with itself or OnNext. If the
  /// error comes from the remote server, then a TransportStatusDetail will be
  /// attached. Otherwise, the error is generated by the client-side
  /// transport and will not have a TransportStatusDetail.
  virtual void OnFinish(Status status) = 0;
};
77
+
78
+ /// @}
79
+
80
+ } // namespace arrow::flight
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/flight/visibility.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
#if defined(_WIN32) || defined(__CYGWIN__)
#if defined(_MSC_VER)
// Silence C4251 ("needs to have dll-interface") for the duration of this
// header; the matching pop is at the bottom of the file.
#pragma warning(push)
#pragma warning(disable : 4251)
#else
#pragma GCC diagnostic ignored "-Wattributes"
#endif

// Select the proper storage-class attribute depending on whether we are
// building the Flight DLL, consuming it, or linking statically.
#ifdef ARROW_FLIGHT_STATIC
#define ARROW_FLIGHT_EXPORT
#elif defined(ARROW_FLIGHT_EXPORTING)
#define ARROW_FLIGHT_EXPORT __declspec(dllexport)
#else
#define ARROW_FLIGHT_EXPORT __declspec(dllimport)
#endif

#define ARROW_FLIGHT_NO_EXPORT
#else  // Not Windows
#ifndef ARROW_FLIGHT_EXPORT
#define ARROW_FLIGHT_EXPORT __attribute__((visibility("default")))
#endif
#ifndef ARROW_FLIGHT_NO_EXPORT
#define ARROW_FLIGHT_NO_EXPORT __attribute__((visibility("hidden")))
#endif
#endif  // Non-Windows

#if defined(_MSC_VER)
// Balances the warning(push) above.
#pragma warning(pop)
#endif
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/io/api.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/io/buffered.h"
21
+ #include "arrow/io/compressed.h"
22
+ #include "arrow/io/file.h"
23
+ #include "arrow/io/hdfs.h"
24
+ #include "arrow/io/interfaces.h"
25
+ #include "arrow/io/memory.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/io/buffered.h ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Buffered stream implementations
19
+
20
+ #pragma once
21
+
22
+ #include <cstdint>
23
+ #include <memory>
24
+ #include <string_view>
25
+
26
+ #include "arrow/io/concurrency.h"
27
+ #include "arrow/io/interfaces.h"
28
+ #include "arrow/util/visibility.h"
29
+
30
+ namespace arrow {
31
+
32
+ class Buffer;
33
+ class MemoryPool;
34
+ class Status;
35
+
36
+ namespace io {
37
+
38
/// \class BufferedOutputStream
/// \brief An OutputStream that buffers writes before forwarding them to an
/// underlying raw OutputStream, reducing the cost of many small writes.
class ARROW_EXPORT BufferedOutputStream : public OutputStream {
 public:
  ~BufferedOutputStream() override;

  /// \brief Create a buffered output stream wrapping the given output stream.
  /// \param[in] buffer_size the size of the temporary write buffer
  /// \param[in] pool a MemoryPool to use for allocations
  /// \param[in] raw another OutputStream
  /// \return the created BufferedOutputStream
  static Result<std::shared_ptr<BufferedOutputStream>> Create(
      int64_t buffer_size, MemoryPool* pool, std::shared_ptr<OutputStream> raw);

  /// \brief Resize internal buffer
  /// \param[in] new_buffer_size the new buffer size
  /// \return Status
  Status SetBufferSize(int64_t new_buffer_size);

  /// \brief Return the current size of the internal buffer
  int64_t buffer_size() const;

  /// \brief Return the number of remaining bytes that have not been flushed to
  /// the raw OutputStream
  int64_t bytes_buffered() const;

  /// \brief Flush any buffered writes and release the raw
  /// OutputStream. Further operations on this object are invalid
  /// \return the underlying OutputStream
  Result<std::shared_ptr<OutputStream>> Detach();

  // OutputStream interface

  /// \brief Close the buffered output stream. This implicitly closes the
  /// underlying raw output stream.
  Status Close() override;
  Status Abort() override;
  bool closed() const override;

  Result<int64_t> Tell() const override;
  // Write bytes to the stream. Thread-safe
  Status Write(const void* data, int64_t nbytes) override;
  Status Write(const std::shared_ptr<Buffer>& data) override;

  /// \brief Flush buffered bytes to the underlying raw stream.
  Status Flush() override;

  /// \brief Return the underlying raw output stream.
  std::shared_ptr<OutputStream> raw() const;

 private:
  // Construction only via Create() so buffer allocation can report errors.
  explicit BufferedOutputStream(std::shared_ptr<OutputStream> raw, MemoryPool* pool);

  // Pimpl: keeps buffer management details out of the public header/ABI.
  class ARROW_NO_EXPORT Impl;
  std::unique_ptr<Impl> impl_;
};
91
+
92
/// \class BufferedInputStream
/// \brief An InputStream that performs buffered reads from an unbuffered
/// InputStream, which can mitigate the overhead of many small reads in some
/// cases
class ARROW_EXPORT BufferedInputStream
    : public internal::InputStreamConcurrencyWrapper<BufferedInputStream> {
 public:
  ~BufferedInputStream() override;

  /// \brief Create a BufferedInputStream from a raw InputStream
  /// \param[in] buffer_size the size of the temporary read buffer
  /// \param[in] pool a MemoryPool to use for allocations
  /// \param[in] raw a raw InputStream
  /// \param[in] raw_read_bound a bound on the maximum number of bytes
  /// to read from the raw input stream. The default -1 indicates that
  /// it is unbounded
  /// \return the created BufferedInputStream
  static Result<std::shared_ptr<BufferedInputStream>> Create(
      int64_t buffer_size, MemoryPool* pool, std::shared_ptr<InputStream> raw,
      int64_t raw_read_bound = -1);

  /// \brief Resize internal read buffer; calls to Read(...) will read at least
  /// this many bytes from the raw stream when refilling, presumably — TODO
  /// confirm exact semantics against buffered.cc
  /// \param[in] new_buffer_size the new read buffer size
  /// \return Status
  Status SetBufferSize(int64_t new_buffer_size);

  /// \brief Return the number of remaining bytes in the read buffer
  int64_t bytes_buffered() const;

  /// \brief Return the current size of the internal buffer
  int64_t buffer_size() const;

  /// \brief Release the raw InputStream. Any data buffered will be
  /// discarded. Further operations on this object are invalid
  /// \return raw the underlying InputStream
  std::shared_ptr<InputStream> Detach();

  /// \brief Return the unbuffered InputStream
  std::shared_ptr<InputStream> raw() const;

  // InputStream APIs

  bool closed() const override;
  Result<std::shared_ptr<const KeyValueMetadata>> ReadMetadata() override;
  Future<std::shared_ptr<const KeyValueMetadata>> ReadMetadataAsync(
      const IOContext& io_context) override;

 private:
  // The concurrency wrapper forwards the public Read/Tell/... calls to the
  // private Do* methods below.
  friend InputStreamConcurrencyWrapper<BufferedInputStream>;

  // Construction only via Create() so buffer allocation can report errors.
  explicit BufferedInputStream(std::shared_ptr<InputStream> raw, MemoryPool* pool,
                               int64_t raw_total_bytes_bound);

  Status DoClose();
  Status DoAbort() override;

  /// \brief Returns the position of the buffered stream, though the position
  /// of the unbuffered stream may be further advanced.
  Result<int64_t> DoTell() const;

  Result<int64_t> DoRead(int64_t nbytes, void* out);

  /// \brief Read into buffer.
  Result<std::shared_ptr<Buffer>> DoRead(int64_t nbytes);

  /// \brief Return a zero-copy string view referencing buffered data,
  /// but do not advance the position of the stream. Buffers data and
  /// expands the buffer size if necessary
  Result<std::string_view> DoPeek(int64_t nbytes) override;

  // Pimpl: keeps buffering details out of the public header/ABI.
  class ARROW_NO_EXPORT Impl;
  std::unique_ptr<Impl> impl_;
};
165
+
166
+ } // namespace io
167
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/io/caching.h ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <utility>
24
+ #include <vector>
25
+
26
+ #include "arrow/io/interfaces.h"
27
+ #include "arrow/util/type_fwd.h"
28
+ #include "arrow/util/visibility.h"
29
+
30
+ namespace arrow {
31
+ namespace io {
32
+
33
/// \brief Tuning options for the read-range coalescing cache.
struct ARROW_EXPORT CacheOptions {
  static constexpr double kDefaultIdealBandwidthUtilizationFrac = 0.9;
  static constexpr int64_t kDefaultMaxIdealRequestSizeMib = 64;

  /// \brief The maximum distance in bytes between two consecutive
  /// ranges; beyond this value, ranges are not combined
  int64_t hole_size_limit;
  /// \brief The maximum size in bytes of a combined range; if
  /// combining two consecutive ranges would produce a range of a
  /// size greater than this, they are not combined
  int64_t range_size_limit;
  /// \brief A lazy cache does not perform any I/O until requested.
  /// lazy = false: request all byte ranges when PreBuffer or WillNeed is called.
  /// lazy = True, prefetch_limit = 0: request merged byte ranges only after the reader
  /// needs them.
  /// lazy = True, prefetch_limit = k: prefetch up to k merged byte ranges ahead of the
  /// range that is currently being read.
  bool lazy;
  /// \brief The maximum number of ranges to be prefetched. This is only used
  /// for lazy cache to asynchronously read some ranges after reading the target range.
  int64_t prefetch_limit = 0;

  /// \brief Compare all tuning fields for equality.
  bool operator==(const CacheOptions& other) const {
    return hole_size_limit == other.hole_size_limit &&
           range_size_limit == other.range_size_limit && lazy == other.lazy &&
           prefetch_limit == other.prefetch_limit;
  }

  /// \brief Construct CacheOptions from network storage metrics (e.g. S3).
  ///
  /// \param[in] time_to_first_byte_millis Seek-time or Time-To-First-Byte (TTFB) in
  ///   milliseconds, also called call setup latency of a new read request.
  ///   The value is a positive integer.
  /// \param[in] transfer_bandwidth_mib_per_sec Data transfer Bandwidth (BW) in MiB/sec
  ///   (per connection).
  ///   The value is a positive integer.
  /// \param[in] ideal_bandwidth_utilization_frac Transfer bandwidth utilization fraction
  ///   (per connection) to maximize the net data load.
  ///   The value is a positive double precision number less than 1.
  /// \param[in] max_ideal_request_size_mib The maximum single data request size (in MiB)
  ///   to maximize the net data load.
  ///   The value is a positive integer.
  /// \return A new instance of CacheOptions.
  static CacheOptions MakeFromNetworkMetrics(
      int64_t time_to_first_byte_millis, int64_t transfer_bandwidth_mib_per_sec,
      double ideal_bandwidth_utilization_frac = kDefaultIdealBandwidthUtilizationFrac,
      int64_t max_ideal_request_size_mib = kDefaultMaxIdealRequestSizeMib);

  /// \brief Eager defaults: issue coalesced reads up front.
  static CacheOptions Defaults();
  /// \brief Lazy defaults: defer I/O until ranges are actually requested.
  static CacheOptions LazyDefaults();
};
84
+
85
+ namespace internal {
86
+
87
/// \brief A read cache designed to hide IO latencies when reading.
///
/// This class takes multiple byte ranges that an application expects to read, and
/// coalesces them into fewer, larger read requests, which benefits performance on some
/// filesystems, particularly remote ones like Amazon S3. By default, it also issues
/// these read requests in parallel up front.
///
/// To use:
/// 1. Cache() the ranges you expect to read in the future. Ideally, these ranges have
///    the exact offset and length that will later be read. The cache will combine those
///    ranges according to parameters (see constructor).
///
///    By default, the cache will also start fetching the combined ranges in parallel in
///    the background, unless CacheOptions.lazy is set.
///
/// 2. Call WaitFor() to be notified when the given ranges have been read. If
///    CacheOptions.lazy is set, I/O will be triggered in the background here instead.
///    This can be done in parallel (e.g. if parsing a file, call WaitFor() for each
///    chunk of the file that can be parsed in parallel).
///
/// 3. Call Read() to retrieve the actual data for the given ranges.
///    A synchronous application may skip WaitFor() and just call Read() - it will still
///    benefit from coalescing and parallel fetching.
class ARROW_EXPORT ReadRangeCache {
 public:
  /// Default maximum distance in bytes between two ranges that may still be
  /// coalesced into one read (see CacheOptions::hole_size_limit).
  static constexpr int64_t kDefaultHoleSizeLimit = 8192;
  /// Default maximum size in bytes of a coalesced read
  /// (see CacheOptions::range_size_limit).
  static constexpr int64_t kDefaultRangeSizeLimit = 32 * 1024 * 1024;

  /// Construct a read cache with default options.
  ///
  /// NOTE: `file` is deliberately copied (not moved) into the delegated
  /// constructor, since `file.get()` is evaluated in the same argument list
  /// and argument evaluation order is unspecified.
  explicit ReadRangeCache(std::shared_ptr<RandomAccessFile> file, IOContext ctx)
      : ReadRangeCache(file, file.get(), std::move(ctx), CacheOptions::Defaults()) {}

  /// Construct a read cache with given options.
  explicit ReadRangeCache(std::shared_ptr<RandomAccessFile> file, IOContext ctx,
                          CacheOptions options)
      : ReadRangeCache(file, file.get(), std::move(ctx), options) {}

  /// Construct a read cache with an unowned file.
  ///
  /// The caller is responsible for keeping `file` alive as long as the cache
  /// is in use, since no shared_ptr is retained here.
  ReadRangeCache(RandomAccessFile* file, IOContext ctx, CacheOptions options)
      : ReadRangeCache(NULLPTR, file, std::move(ctx), options) {}

  ~ReadRangeCache();

  /// \brief Cache the given ranges in the background.
  ///
  /// The caller must ensure that the ranges do not overlap with each other,
  /// nor with previously cached ranges. Otherwise, behaviour will be undefined.
  Status Cache(std::vector<ReadRange> ranges);

  /// \brief Read a range previously given to Cache().
  Result<std::shared_ptr<Buffer>> Read(ReadRange range);

  /// \brief Wait until all ranges added so far have been cached.
  Future<> Wait();

  /// \brief Wait until all given ranges have been cached.
  Future<> WaitFor(std::vector<ReadRange> ranges);

 protected:
  // Pimpl: implementation state lives in the .cc file.
  struct Impl;
  // Separate implementation state; presumably used when CacheOptions.lazy is
  // set -- confirm against the .cc file.
  struct LazyImpl;

  // Delegated-to constructor shared by all public constructors.
  // `owned_file` may be null when the cache does not own the file.
  ReadRangeCache(std::shared_ptr<RandomAccessFile> owned_file, RandomAccessFile* file,
                 IOContext ctx, CacheOptions options);

  std::unique_ptr<Impl> impl_;
};
154
+
155
+ } // namespace internal
156
+ } // namespace io
157
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/io/compressed.h ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Compressed stream implementations
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <string>
24
+
25
+ #include "arrow/io/concurrency.h"
26
+ #include "arrow/io/interfaces.h"
27
+ #include "arrow/util/visibility.h"
28
+
29
+ namespace arrow {
30
+
31
+ class MemoryPool;
32
+ class Status;
33
+
34
+ namespace util {
35
+
36
+ class Codec;
37
+
38
+ } // namespace util
39
+
40
+ namespace io {
41
+
42
/// \brief An output stream that compresses the data written to it before
/// forwarding it to a wrapped raw OutputStream.
class ARROW_EXPORT CompressedOutputStream : public OutputStream {
 public:
  ~CompressedOutputStream() override;

  /// \brief Create a compressed output stream wrapping the given output stream.
  ///
  /// \param[in] codec the compression codec to use. Taken as a raw pointer --
  ///   presumably borrowed, i.e. it must outlive the stream; confirm against
  ///   the implementation.
  /// \param[in] raw the sink that receives the compressed bytes
  /// \param[in] pool the memory pool used for internal buffers
  static Result<std::shared_ptr<CompressedOutputStream>> Make(
      util::Codec* codec, const std::shared_ptr<OutputStream>& raw,
      MemoryPool* pool = default_memory_pool());

  // OutputStream interface

  /// \brief Close the compressed output stream. This implicitly closes the
  /// underlying raw output stream.
  Status Close() override;
  Status Abort() override;
  bool closed() const override;

  /// \brief Return the current position in the stream.
  Result<int64_t> Tell() const override;

  Status Write(const void* data, int64_t nbytes) override;
  /// \cond FALSE
  using Writable::Write;
  /// \endcond
  Status Flush() override;

  /// \brief Return the underlying raw output stream.
  std::shared_ptr<OutputStream> raw() const;

 private:
  ARROW_DISALLOW_COPY_AND_ASSIGN(CompressedOutputStream);

  // Instances must be created through Make().
  CompressedOutputStream() = default;

  // Pimpl: hides codec state and buffering from the header.
  class ARROW_NO_EXPORT Impl;
  std::unique_ptr<Impl> impl_;
};
78
+
79
/// \brief An input stream that decompresses data read from a wrapped raw
/// InputStream.
///
/// The InputStreamConcurrencyWrapper base presumably serializes concurrent
/// calls and routes them to the private Do*() methods below -- see
/// arrow/io/concurrency.h.
class ARROW_EXPORT CompressedInputStream
    : public internal::InputStreamConcurrencyWrapper<CompressedInputStream> {
 public:
  ~CompressedInputStream() override;

  /// \brief Create a compressed input stream wrapping the given input stream.
  ///
  /// \param[in] codec the decompression codec to use. Taken as a raw pointer --
  ///   presumably borrowed, i.e. it must outlive the stream; confirm against
  ///   the implementation.
  /// \param[in] raw the source of the compressed bytes
  /// \param[in] pool the memory pool used for internal buffers
  static Result<std::shared_ptr<CompressedInputStream>> Make(
      util::Codec* codec, const std::shared_ptr<InputStream>& raw,
      MemoryPool* pool = default_memory_pool());

  // InputStream interface

  bool closed() const override;
  Result<std::shared_ptr<const KeyValueMetadata>> ReadMetadata() override;
  Future<std::shared_ptr<const KeyValueMetadata>> ReadMetadataAsync(
      const IOContext& io_context) override;

  /// \brief Return the underlying raw input stream.
  std::shared_ptr<InputStream> raw() const;

 private:
  // The wrapper needs access to the unsynchronized Do*() implementations.
  friend InputStreamConcurrencyWrapper<CompressedInputStream>;
  ARROW_DISALLOW_COPY_AND_ASSIGN(CompressedInputStream);

  // Instances must be created through Make().
  CompressedInputStream() = default;

  /// \brief Close the compressed input stream. This implicitly closes the
  /// underlying raw input stream.
  Status DoClose();
  Status DoAbort() override;
  Result<int64_t> DoTell() const;
  Result<int64_t> DoRead(int64_t nbytes, void* out);
  Result<std::shared_ptr<Buffer>> DoRead(int64_t nbytes);

  // Pimpl: hides codec state and buffering from the header.
  class ARROW_NO_EXPORT Impl;
  std::unique_ptr<Impl> impl_;
};
116
+
117
+ } // namespace io
118
+ } // namespace arrow