applied-ai-018 commited on
Commit
7a1cef4
·
verified ·
1 Parent(s): 0b5e147

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step80/zero/21.attention.dense.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step80/zero/21.attention.dense.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step80/zero/23.attention.query_key_value.weight/exp_avg.pt +3 -0
  7. ckpts/universal/global_step80/zero/23.attention.query_key_value.weight/fp32.pt +3 -0
  8. ckpts/universal/global_step80/zero/4.attention.dense.weight/exp_avg_sq.pt +3 -0
  9. ckpts/universal/global_step80/zero/4.attention.dense.weight/fp32.pt +3 -0
  10. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h +160 -0
  11. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h +57 -0
  12. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/api.h +32 -0
  13. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/asof_join_node.h +41 -0
  14. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/backpressure_handler.h +74 -0
  15. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h +48 -0
  16. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/bloom_filter.h +326 -0
  17. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h +819 -0
  18. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join.h +75 -0
  19. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_dict.h +318 -0
  20. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h +103 -0
  21. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/map_node.h +81 -0
  22. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/options.h +866 -0
  23. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/order_by_impl.h +56 -0
  24. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/partition_util.h +184 -0
  25. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h +23 -0
  26. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/query_context.h +157 -0
  27. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h +226 -0
  28. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/task_util.h +102 -0
  29. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/test_nodes.h +86 -0
  30. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h +31 -0
  31. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/tpch_node.h +65 -0
  32. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/type_fwd.h +36 -0
  33. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/unmaterialized_table.h +271 -0
  34. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/util.h +184 -0
  35. venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h +50 -0
  36. venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h +233 -0
  37. venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/bridge.h +348 -0
  38. venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h +51 -0
  39. venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h +321 -0
  40. venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/helpers.h +129 -0
  41. venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h +22 -0
  42. venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h +36 -0
  43. venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h +78 -0
  44. venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h +64 -0
  45. venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h +82 -0
  46. venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h +55 -0
  47. venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h +220 -0
  48. venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h +228 -0
  49. venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h +112 -0
  50. venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h +55 -0
ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:21b20d8bf53ebaf291d4f162e3c52ec5cd294aca8d3f408d48275e53960045bd
3
+ size 50332828
ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3dbb1526ef7931306b92c6108d54aa6221c9e2706bb5648c867ae7d7d5bf8bb7
3
+ size 50332843
ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35e8e8be6d54a538a55f1231106a2dfbba51e4374c102bde521e92d66b5ea14b
3
+ size 50332749
ckpts/universal/global_step80/zero/21.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f71e4ab1cf852003e8e853eefb156a01c313e4ecba6cf1fe82388fc77450ac4
3
+ size 16778396
ckpts/universal/global_step80/zero/21.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:578071aa58364efdf2afdccaa11c160da4c4c7d9e089b7f13c264849682a622f
3
+ size 16778411
ckpts/universal/global_step80/zero/23.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74b45ef4e3b64f7bb6bb44718ab18ad0c1eb27f74fa04abab87633638b239eaf
3
+ size 50332828
ckpts/universal/global_step80/zero/23.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5030ba0dcc4d2e4a49125bd775f6f8c3126d3f5536469f64f26261fce4e19202
3
+ size 50332749
ckpts/universal/global_step80/zero/4.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7c9be51e695e2cd997713b5178064e2041e67a1db9421cea053286da6eb61c6
3
+ size 16778411
ckpts/universal/global_step80/zero/4.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c44b5e1a01068840c14ac9fd476260493f642101b03cbeba41de06f48303939f
3
+ size 16778317
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <functional>
22
+ #include <optional>
23
+ #include <vector>
24
+
25
+ #include "arrow/compute/exec.h"
26
+ #include "arrow/result.h"
27
+
28
+ namespace arrow {
29
+ namespace acero {
30
+ namespace util {
31
+
32
+ using arrow::compute::ExecBatch;
33
+
34
+ /// \brief A container that accumulates batches until they are ready to
35
+ /// be processed.
36
+ class AccumulationQueue {
37
+ public:
38
+ AccumulationQueue() : row_count_(0) {}
39
+ ~AccumulationQueue() = default;
40
+
41
+ // We should never be copying ExecBatch around
42
+ AccumulationQueue(const AccumulationQueue&) = delete;
43
+ AccumulationQueue& operator=(const AccumulationQueue&) = delete;
44
+
45
+ AccumulationQueue(AccumulationQueue&& that);
46
+ AccumulationQueue& operator=(AccumulationQueue&& that);
47
+
48
+ void Concatenate(AccumulationQueue&& that);
49
+ void InsertBatch(ExecBatch batch);
50
+ int64_t row_count() { return row_count_; }
51
+ size_t batch_count() { return batches_.size(); }
52
+ bool empty() const { return batches_.empty(); }
53
+ void Clear();
54
+ ExecBatch& operator[](size_t i);
55
+
56
+ private:
57
+ int64_t row_count_;
58
+ std::vector<ExecBatch> batches_;
59
+ };
60
+
61
+ /// A queue that sequences incoming batches
62
+ ///
63
+ /// This can be used when a node needs to do some kind of ordered processing on
64
+ /// the stream.
65
+ ///
66
+ /// Batches can be inserted in any order. The process_callback will be called on
67
+ /// the batches, in order, without reentrant calls. For this reason the callback
68
+ /// should be quick.
69
+ ///
70
+ /// For example, in a top-n node, the process callback should determine how many
71
+ /// rows need to be delivered for the given batch, and then return a task to actually
72
+ /// deliver those rows.
73
+ class SequencingQueue {
74
+ public:
75
+ using Task = std::function<Status()>;
76
+
77
+ /// Strategy that describes how to handle items
78
+ class Processor {
79
+ public:
80
+ /// Process the batch, potentially generating a task
81
+ ///
82
+ /// This method will be called on each batch in order. Calls to this method
83
+ /// will be serialized and it will not be called reentrantly. This makes it
84
+ /// safe to do things that rely on order but minimal time should be spent here
85
+ /// to avoid becoming a bottleneck.
86
+ ///
87
+ /// \return a follow-up task that will be scheduled. The follow-up task(s) are
88
+ /// is not guaranteed to run in any particular order. If nullopt is
89
+ /// returned then nothing will be scheduled.
90
+ virtual Result<std::optional<Task>> Process(ExecBatch batch) = 0;
91
+ /// Schedule a task
92
+ virtual void Schedule(Task task) = 0;
93
+ };
94
+
95
+ virtual ~SequencingQueue() = default;
96
+
97
+ /// Insert a batch into the queue
98
+ ///
99
+ /// This will insert the batch into the queue. If this batch was the next batch
100
+ /// to deliver then this will trigger 1+ calls to the process callback to generate
101
+ /// 1+ tasks.
102
+ ///
103
+ /// The task generated by this call will be executed immediately. The remaining
104
+ /// tasks will be scheduled using the schedule callback.
105
+ ///
106
+ /// From a data pipeline perspective the sequencing queue is a "sometimes" breaker. If
107
+ /// a task arrives in order then this call will usually execute the downstream pipeline.
108
+ /// If this task arrives early then this call will only queue the data.
109
+ virtual Status InsertBatch(ExecBatch batch) = 0;
110
+
111
+ /// Create a queue
112
+ /// \param processor describes how to process the batches, must outlive the queue
113
+ static std::unique_ptr<SequencingQueue> Make(Processor* processor);
114
+ };
115
+
116
+ /// A queue that sequences incoming batches
117
+ ///
118
+ /// Unlike SequencingQueue the Process method is not expected to schedule new tasks.
119
+ ///
120
+ /// If a batch arrives and another thread is currently processing then the batch
121
+ /// will be queued and control will return. In other words, delivery of batches will
122
+ /// not block on the Process method.
123
+ ///
124
+ /// It can be helpful to think of this as if a dedicated thread is running Process as
125
+ /// batches arrive
126
+ class SerialSequencingQueue {
127
+ public:
128
+ /// Strategy that describes how to handle items
129
+ class Processor {
130
+ public:
131
+ /// Process the batch
132
+ ///
133
+ /// This method will be called on each batch in order. Calls to this method
134
+ /// will be serialized and it will not be called reentrantly. This makes it
135
+ /// safe to do things that rely on order.
136
+ ///
137
+ /// If this falls behind then data may accumulate
138
+ ///
139
+ /// TODO: Could add backpressure if needed but right now all uses of this should
140
+ /// be pretty fast and so are unlikely to block.
141
+ virtual Status Process(ExecBatch batch) = 0;
142
+ };
143
+
144
+ virtual ~SerialSequencingQueue() = default;
145
+
146
+ /// Insert a batch into the queue
147
+ ///
148
+ /// This will insert the batch into the queue. If this batch was the next batch
149
+ /// to deliver then this may trigger calls to the processor which will be run
150
+ /// as part of this call.
151
+ virtual Status InsertBatch(ExecBatch batch) = 0;
152
+
153
+ /// Create a queue
154
+ /// \param processor describes how to process the batches, must outlive the queue
155
+ static std::unique_ptr<SerialSequencingQueue> Make(Processor* processor);
156
+ };
157
+
158
+ } // namespace util
159
+ } // namespace acero
160
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <vector>
24
+
25
+ #include "arrow/acero/visibility.h"
26
+ #include "arrow/compute/api_aggregate.h"
27
+ #include "arrow/compute/type_fwd.h"
28
+ #include "arrow/result.h"
29
+ #include "arrow/type_fwd.h"
30
+
31
+ namespace arrow {
32
+ namespace acero {
33
+ namespace aggregate {
34
+
35
+ using compute::Aggregate;
36
+ using compute::default_exec_context;
37
+ using compute::ExecContext;
38
+
39
+ /// \brief Make the output schema of an aggregate node
40
+ ///
41
+ /// The output schema is determined by the aggregation kernels, which may depend on the
42
+ /// ExecContext argument. To guarantee correct results, the same ExecContext argument
43
+ /// should be used in execution.
44
+ ///
45
+ /// \param[in] input_schema the schema of the input to the node
46
+ /// \param[in] keys the grouping keys for the aggregation
47
+ /// \param[in] segment_keys the segmenting keys for the aggregation
48
+ /// \param[in] aggregates the aggregates for the aggregation
49
+ /// \param[in] exec_ctx the execution context for the aggregation
50
+ ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
51
+ const std::shared_ptr<Schema>& input_schema, const std::vector<FieldRef>& keys,
52
+ const std::vector<FieldRef>& segment_keys, const std::vector<Aggregate>& aggregates,
53
+ ExecContext* exec_ctx = default_exec_context());
54
+
55
+ } // namespace aggregate
56
+ } // namespace acero
57
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/api.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // NOTE: API is EXPERIMENTAL and will change without going through a
19
+ // deprecation cycle
20
+
21
+ #pragma once
22
+
23
+ /// \defgroup acero-api Utilities for creating and executing execution plans
24
+ /// @{
25
+ /// @}
26
+
27
+ /// \defgroup acero-nodes Options classes for the various exec nodes
28
+ /// @{
29
+ /// @}
30
+
31
+ #include "arrow/acero/exec_plan.h"
32
+ #include "arrow/acero/options.h"
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/asof_join_node.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include <vector>
19
+
20
+ #include "arrow/acero/options.h"
21
+ #include "arrow/acero/visibility.h"
22
+ #include "arrow/compute/exec.h"
23
+ #include "arrow/type.h"
24
+
25
+ namespace arrow {
26
+ namespace acero {
27
+ namespace asofjoin {
28
+
29
+ using AsofJoinKeys = AsofJoinNodeOptions::Keys;
30
+
31
+ /// \brief Make the output schema of an as-of-join node
32
+ ///
33
+ /// \param[in] input_schema the schema of each input to the node
34
+ /// \param[in] input_keys the key of each input to the node
35
+ ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
36
+ const std::vector<std::shared_ptr<Schema>>& input_schema,
37
+ const std::vector<AsofJoinKeys>& input_keys);
38
+
39
+ } // namespace asofjoin
40
+ } // namespace acero
41
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/backpressure_handler.h ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+ #include "arrow/acero/exec_plan.h"
20
+ #include "arrow/acero/options.h"
21
+
22
+ #include <memory>
23
+
24
+ namespace arrow::acero {
25
+
26
+ class BackpressureHandler {
27
+ private:
28
+ BackpressureHandler(ExecNode* input, size_t low_threshold, size_t high_threshold,
29
+ std::unique_ptr<BackpressureControl> backpressure_control)
30
+ : input_(input),
31
+ low_threshold_(low_threshold),
32
+ high_threshold_(high_threshold),
33
+ backpressure_control_(std::move(backpressure_control)) {}
34
+
35
+ public:
36
+ static Result<BackpressureHandler> Make(
37
+ ExecNode* input, size_t low_threshold, size_t high_threshold,
38
+ std::unique_ptr<BackpressureControl> backpressure_control) {
39
+ if (low_threshold >= high_threshold) {
40
+ return Status::Invalid("low threshold (", low_threshold,
41
+ ") must be less than high threshold (", high_threshold, ")");
42
+ }
43
+ if (backpressure_control == NULLPTR) {
44
+ return Status::Invalid("null backpressure control parameter");
45
+ }
46
+ BackpressureHandler backpressure_handler(input, low_threshold, high_threshold,
47
+ std::move(backpressure_control));
48
+ return std::move(backpressure_handler);
49
+ }
50
+
51
+ void Handle(size_t start_level, size_t end_level) {
52
+ if (start_level < high_threshold_ && end_level >= high_threshold_) {
53
+ backpressure_control_->Pause();
54
+ } else if (start_level > low_threshold_ && end_level <= low_threshold_) {
55
+ backpressure_control_->Resume();
56
+ }
57
+ }
58
+
59
+ Status ForceShutdown() {
60
+ // It may be unintuitive to call Resume() here, but this is to avoid a deadlock.
61
+ // Since acero's executor won't terminate if any one node is paused, we need to
62
+ // force resume the node before stopping production.
63
+ backpressure_control_->Resume();
64
+ return input_->StopProducing();
65
+ }
66
+
67
+ private:
68
+ ExecNode* input_;
69
+ size_t low_threshold_;
70
+ size_t high_threshold_;
71
+ std::unique_ptr<BackpressureControl> backpressure_control_;
72
+ };
73
+
74
+ } // namespace arrow::acero
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <string>
22
+ #include <vector>
23
+
24
+ #include "benchmark/benchmark.h"
25
+
26
+ #include "arrow/acero/exec_plan.h"
27
+ #include "arrow/acero/test_util_internal.h"
28
+ #include "arrow/compute/exec.h"
29
+
30
+ namespace arrow {
31
+
32
+ namespace acero {
33
+
34
+ Status BenchmarkNodeOverhead(benchmark::State& state, int32_t num_batches,
35
+ int32_t batch_size, arrow::acero::BatchesWithSchema data,
36
+ std::vector<arrow::acero::Declaration>& node_declarations,
37
+ arrow::MemoryPool* pool = default_memory_pool());
38
+
39
+ Status BenchmarkIsolatedNodeOverhead(benchmark::State& state,
40
+ arrow::compute::Expression expr, int32_t num_batches,
41
+ int32_t batch_size,
42
+ arrow::acero::BatchesWithSchema data,
43
+ std::string factory_name,
44
+ arrow::acero::ExecNodeOptions& options,
45
+ arrow::MemoryPool* pool = default_memory_pool());
46
+
47
+ } // namespace acero
48
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/bloom_filter.h ADDED
@@ -0,0 +1,326 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #if defined(ARROW_HAVE_RUNTIME_AVX2)
21
+ #include <immintrin.h>
22
+ #endif
23
+
24
+ #include <atomic>
25
+ #include <cstdint>
26
+ #include <memory>
27
+
28
+ #include "arrow/acero/partition_util.h"
29
+ #include "arrow/acero/util.h"
30
+ #include "arrow/memory_pool.h"
31
+ #include "arrow/result.h"
32
+ #include "arrow/status.h"
33
+
34
+ namespace arrow {
35
+ namespace acero {
36
+
37
+ // A set of pre-generated bit masks from a 64-bit word.
38
+ //
39
+ // It is used to map selected bits of hash to a bit mask that will be used in
40
+ // a Bloom filter.
41
+ //
42
+ // These bit masks need to look random and need to have a similar fractions of
43
+ // bits set in order for a Bloom filter to have a low false positives rate.
44
+ //
45
+ struct ARROW_ACERO_EXPORT BloomFilterMasks {
46
+ // Generate all masks as a single bit vector. Each bit offset in this bit
47
+ // vector corresponds to a single mask.
48
+ // In each consecutive kBitsPerMask bits, there must be between
49
+ // kMinBitsSet and kMaxBitsSet bits set.
50
+ //
51
+ BloomFilterMasks();
52
+
53
+ inline uint64_t mask(int bit_offset) {
54
+ #if ARROW_LITTLE_ENDIAN
55
+ return (arrow::util::SafeLoadAs<uint64_t>(masks_ + bit_offset / 8) >>
56
+ (bit_offset % 8)) &
57
+ kFullMask;
58
+ #else
59
+ return (BYTESWAP(arrow::util::SafeLoadAs<uint64_t>(masks_ + bit_offset / 8)) >>
60
+ (bit_offset % 8)) &
61
+ kFullMask;
62
+ #endif
63
+ }
64
+
65
+ // Masks are 57 bits long because then they can be accessed at an
66
+ // arbitrary bit offset using a single unaligned 64-bit load instruction.
67
+ //
68
+ static constexpr int kBitsPerMask = 57;
69
+ static constexpr uint64_t kFullMask = (1ULL << kBitsPerMask) - 1;
70
+
71
+ // Minimum and maximum number of bits set in each mask.
72
+ // This constraint is enforced when generating the bit masks.
73
+ // Values should be close to each other and chosen as to minimize a Bloom
74
+ // filter false positives rate.
75
+ //
76
+ static constexpr int kMinBitsSet = 4;
77
+ static constexpr int kMaxBitsSet = 5;
78
+
79
+ // Number of generated masks.
80
+ // Having more masks to choose will improve false positives rate of Bloom
81
+ // filter but will also use more memory, which may lead to more CPU cache
82
+ // misses.
83
+ // The chosen value results in using only a few cache-lines for mask lookups,
84
+ // while providing a good variety of available bit masks.
85
+ //
86
+ static constexpr int kLogNumMasks = 10;
87
+ static constexpr int kNumMasks = 1 << kLogNumMasks;
88
+
89
+ // Data of masks. Masks are stored in a single bit vector. Nth mask is
90
+ // kBitsPerMask bits starting at bit offset N.
91
+ //
92
+ static constexpr int kTotalBytes = (kNumMasks + 64) / 8;
93
+ uint8_t masks_[kTotalBytes];
94
+ };
95
+
96
+ // A variant of a blocked Bloom filter implementation.
97
+ // A Bloom filter is a data structure that provides approximate membership test
98
+ // functionality based only on the hash of the key. Membership test may return
99
+ // false positives but not false negatives. Approximation of the result allows
100
+ // in general case (for arbitrary data types of keys) to save on both memory and
101
+ // lookup cost compared to the accurate membership test.
102
+ // The accurate test may sometimes still be cheaper for a specific data types
103
+ // and inputs, e.g. integers from a small range.
104
+ //
105
+ // This blocked Bloom filter is optimized for use in hash joins, to achieve a
106
+ // good balance between the size of the filter, the cost of its building and
107
+ // querying and the rate of false positives.
108
+ //
109
class ARROW_ACERO_EXPORT BlockedBloomFilter {
  // Builders populate the filter through the private Insert() members below.
  friend class BloomFilterBuilder_SingleThreaded;
  friend class BloomFilterBuilder_Parallel;

 public:
  BlockedBloomFilter() : log_num_blocks_(0), num_blocks_(0), blocks_(NULLPTR) {}

  // Query a single hash.
  // Returns true if the hash may have been inserted (false positives are
  // possible) and false if it definitely has not been inserted.
  inline bool Find(uint64_t hash) const {
    uint64_t m = mask(hash);
    uint64_t b = blocks_[block_id(hash)];
    return (b & m) == m;
  }

  // Batch query: writes one result bit per input hash into result_bit_vector.
  // Uses SIMD if available for smaller Bloom filters.
  // Uses memory prefetching for larger Bloom filters.
  //
  void Find(int64_t hardware_flags, int64_t num_rows, const uint32_t* hashes,
            uint8_t* result_bit_vector, bool enable_prefetch = true) const;
  void Find(int64_t hardware_flags, int64_t num_rows, const uint64_t* hashes,
            uint8_t* result_bit_vector, bool enable_prefetch = true) const;

  int log_num_blocks() const { return log_num_blocks_; }

  // Number of hash bits consumed when selecting a mask, its rotation and a
  // block for one value.
  int NumHashBitsUsed() const;

  bool IsSameAs(const BlockedBloomFilter* other) const;

  int64_t NumBitsSet() const;

  // Folding of a block Bloom filter after the initial version
  // has been built.
  //
  // One of the parameters for creation of Bloom filter is the number
  // of bits allocated for it. The more bits allocated, the lower the
  // probability of false positives. A good heuristic is to aim for
  // half of the bits set in the constructed Bloom filter. This should
  // result in a good trade off between size (and following cost of
  // memory accesses) and false positives rate.
  //
  // There might have been many duplicate keys in the input provided
  // to Bloom filter builder. In that case the resulting bit vector
  // would be more sparse than originally intended. It is possible to
  // easily correct that and cut in half the size of Bloom filter
  // after it has already been constructed. The process to do that is
  // approximately equal to OR-ing bits from upper and lower half (the
  // way we address these bits when inserting or querying a hash makes
  // such folding in half possible).
  //
  // We will keep folding as long as the fraction of bits set is less
  // than 1/4. The resulting bit vector density should be in the [1/4,
  // 1/2) range.
  //
  void Fold();

 private:
  // Allocate and zero a bit vector sized for the expected number of inserts.
  Status CreateEmpty(int64_t num_rows_to_insert, MemoryPool* pool);

  // Insert a single hash by OR-ing its mask into the selected 64-bit block.
  inline void Insert(uint64_t hash) {
    uint64_t m = mask(hash);
    uint64_t& b = blocks_[block_id(hash)];
    b |= m;
  }

  void Insert(int64_t hardware_flags, int64_t num_rows, const uint32_t* hashes);
  void Insert(int64_t hardware_flags, int64_t num_rows, const uint64_t* hashes);

  // Compute the 64-bit mask (set of bits within one block) for a hash.
  inline uint64_t mask(uint64_t hash) const {
    // The lowest bits of hash are used to pick mask index.
    //
    int mask_id = static_cast<int>(hash & (BloomFilterMasks::kNumMasks - 1));
    uint64_t result = masks_.mask(mask_id);

    // The next set of hash bits is used to pick the amount of bit
    // rotation of the mask.
    //
    int rotation = (hash >> BloomFilterMasks::kLogNumMasks) & 63;
    result = ROTL64(result, rotation);

    return result;
  }

  inline int64_t block_id(uint64_t hash) const {
    // The next set of hash bits following the bits used to select a
    // mask is used to pick block id (index of 64-bit word in a bit
    // vector).
    //
    return (hash >> (BloomFilterMasks::kLogNumMasks + 6)) & (num_blocks_ - 1);
  }

  template <typename T>
  inline void InsertImp(int64_t num_rows, const T* hashes);

  template <typename T>
  inline void FindImp(int64_t num_rows, const T* hashes, uint8_t* result_bit_vector,
                      bool enable_prefetch) const;

  // Fold the bit vector in half `num_folds` times (see Fold() above).
  void SingleFold(int num_folds);

#if defined(ARROW_HAVE_RUNTIME_AVX2)
  // AVX2 batch variants; each returns the number of rows processed so the
  // scalar path can finish any remainder (assumption based on the int64_t
  // return type — confirm against the .cc implementation).
  inline __m256i mask_avx2(__m256i hash) const;
  inline __m256i block_id_avx2(__m256i hash) const;
  int64_t Insert_avx2(int64_t num_rows, const uint32_t* hashes);
  int64_t Insert_avx2(int64_t num_rows, const uint64_t* hashes);
  template <typename T>
  int64_t InsertImp_avx2(int64_t num_rows, const T* hashes);
  int64_t Find_avx2(int64_t num_rows, const uint32_t* hashes,
                    uint8_t* result_bit_vector) const;
  int64_t Find_avx2(int64_t num_rows, const uint64_t* hashes,
                    uint8_t* result_bit_vector) const;
  template <typename T>
  int64_t FindImp_avx2(int64_t num_rows, const T* hashes,
                       uint8_t* result_bit_vector) const;
#endif

  // Prefetching only pays off once the filter exceeds kPrefetchLimitBytes.
  bool UsePrefetch() const {
    return num_blocks_ * sizeof(uint64_t) > kPrefetchLimitBytes;
  }

  static constexpr int64_t kPrefetchLimitBytes = 256 * 1024;

  static BloomFilterMasks masks_;

  // Total number of bits used by block Bloom filter must be a power
  // of 2.
  //
  int log_num_blocks_;
  int64_t num_blocks_;

  // Buffer allocated to store an array of power of 2 64-bit blocks.
  //
  std::shared_ptr<Buffer> buf_;
  // Pointer to mutable data owned by Buffer
  //
  uint64_t* blocks_;
};
244
+
245
// We have two separate implementations of building a Bloom filter, multi-threaded and
// single-threaded.
//
// Single threaded version is useful in two ways:
// a) It allows to verify parallel implementation in tests (the single threaded one is
// simpler and can be used as the source of truth).
// b) It is preferred for small and medium size Bloom filters, because it skips extra
// synchronization related steps from parallel variant (partitioning and taking locks).
//
// Selects which BloomFilterBuilder implementation BloomFilterBuilder::Make returns.
enum class BloomFilterBuildStrategy {
  SINGLE_THREADED = 0,
  PARALLEL = 1,
};
258
+
259
// Abstract interface for populating a BlockedBloomFilter from batches of
// pre-computed hashes.
class ARROW_ACERO_EXPORT BloomFilterBuilder {
 public:
  virtual ~BloomFilterBuilder() = default;
  // Prepare `build_target` for `num_rows` expected inserts arriving in
  // `num_batches` batches, pushed from up to `num_threads` threads.
  virtual Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
                       int64_t num_rows, int64_t num_batches,
                       BlockedBloomFilter* build_target) = 0;
  virtual int64_t num_tasks() const { return 0; }
  // Insert one batch of hashes (32-bit or 64-bit variant).
  virtual Status PushNextBatch(size_t thread_index, int64_t num_rows,
                               const uint32_t* hashes) = 0;
  virtual Status PushNextBatch(size_t thread_index, int64_t num_rows,
                               const uint64_t* hashes) = 0;
  // Release any per-build scratch state; default is a no-op.
  virtual void CleanUp() {}
  // Factory selecting the single-threaded or parallel implementation.
  static std::unique_ptr<BloomFilterBuilder> Make(BloomFilterBuildStrategy strategy);
};
273
+
274
// Builder that inserts hashes directly into the target filter without any
// synchronization (the thread_index argument is ignored); intended for use
// from a single thread.
class ARROW_ACERO_EXPORT BloomFilterBuilder_SingleThreaded : public BloomFilterBuilder {
 public:
  Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
               int64_t num_rows, int64_t num_batches,
               BlockedBloomFilter* build_target) override;

  Status PushNextBatch(size_t /*thread_index*/, int64_t num_rows,
                       const uint32_t* hashes) override;

  Status PushNextBatch(size_t /*thread_index*/, int64_t num_rows,
                       const uint64_t* hashes) override;

 private:
  // Shared implementation for the 32-bit and 64-bit hash overloads.
  template <typename T>
  void PushNextBatchImp(int64_t num_rows, const T* hashes);

  int64_t hardware_flags_;
  // Filter being built; set by Begin(), not owned.
  BlockedBloomFilter* build_target_;
};
293
+
294
// Builder that supports concurrent PushNextBatch calls: each thread
// partitions its hashes locally, then applies partitions under
// PartitionLocks to avoid conflicting writes to the filter.
class ARROW_ACERO_EXPORT BloomFilterBuilder_Parallel : public BloomFilterBuilder {
 public:
  Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
               int64_t num_rows, int64_t num_batches,
               BlockedBloomFilter* build_target) override;

  Status PushNextBatch(size_t thread_id, int64_t num_rows,
                       const uint32_t* hashes) override;

  Status PushNextBatch(size_t thread_id, int64_t num_rows,
                       const uint64_t* hashes) override;

  void CleanUp() override;

 private:
  // Shared implementation for the 32-bit and 64-bit hash overloads.
  template <typename T>
  void PushNextBatchImp(size_t thread_id, int64_t num_rows, const T* hashes);

  int64_t hardware_flags_;
  // Filter being built; set by Begin(), not owned.
  BlockedBloomFilter* build_target_;
  int log_num_prtns_;
  // Scratch space owned per thread so partitioning happens without contention.
  struct ThreadLocalState {
    std::vector<uint32_t> partitioned_hashes_32;
    std::vector<uint64_t> partitioned_hashes_64;
    std::vector<uint16_t> partition_ranges;
    std::vector<int> unprocessed_partition_ids;
  };
  std::vector<ThreadLocalState> thread_local_states_;
  PartitionLocks prtn_locks_;
};
324
+
325
+ } // namespace acero
326
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h ADDED
@@ -0,0 +1,819 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstddef>
21
+ #include <cstdint>
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <optional>
25
+ #include <string>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/acero/type_fwd.h"
30
+ #include "arrow/acero/visibility.h"
31
+ #include "arrow/compute/api_vector.h"
32
+ #include "arrow/compute/exec.h"
33
+ #include "arrow/compute/ordering.h"
34
+ #include "arrow/type_fwd.h"
35
+ #include "arrow/util/future.h"
36
+ #include "arrow/util/macros.h"
37
+ #include "arrow/util/tracing.h"
38
+ #include "arrow/util/type_fwd.h"
39
+
40
+ namespace arrow {
41
+
42
+ using compute::ExecBatch;
43
+ using compute::ExecContext;
44
+ using compute::FunctionRegistry;
45
+ using compute::GetFunctionRegistry;
46
+ using compute::Ordering;
47
+ using compute::threaded_exec_context;
48
+
49
+ namespace acero {
50
+
51
+ /// \addtogroup acero-internals
52
+ /// @{
53
+
54
/// \brief A directed graph of ExecNodes that can be started, stopped and
/// awaited as a unit. Nodes are owned by the plan (see AddNode/EmplaceNode).
class ARROW_ACERO_EXPORT ExecPlan : public std::enable_shared_from_this<ExecPlan> {
 public:
  // This allows operators to rely on signed 16-bit indices
  static const uint32_t kMaxBatchSize = 1 << 15;
  using NodeVector = std::vector<ExecNode*>;

  virtual ~ExecPlan() = default;

  QueryContext* query_context();

  /// \brief retrieve the nodes in the plan
  const NodeVector& nodes() const;

  /// Make an empty exec plan
  static Result<std::shared_ptr<ExecPlan>> Make(
      QueryOptions options, ExecContext exec_context = *threaded_exec_context(),
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  static Result<std::shared_ptr<ExecPlan>> Make(
      ExecContext exec_context = *threaded_exec_context(),
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  static Result<std::shared_ptr<ExecPlan>> Make(
      QueryOptions options, ExecContext* exec_context,
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  static Result<std::shared_ptr<ExecPlan>> Make(
      ExecContext* exec_context,
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  /// \brief transfer ownership of a node to the plan, returning a raw pointer
  ExecNode* AddNode(std::unique_ptr<ExecNode> node);

  /// \brief construct a node in place and add it to the plan
  template <typename Node, typename... Args>
  Node* EmplaceNode(Args&&... args) {
    std::unique_ptr<Node> node{new Node{std::forward<Args>(args)...}};
    auto out = node.get();
    AddNode(std::move(node));
    return out;
  }

  Status Validate();

  /// \brief Start producing on all nodes
  ///
  /// Nodes are started in reverse topological order, such that any node
  /// is started before all of its inputs.
  void StartProducing();

  /// \brief Stop producing on all nodes
  ///
  /// Triggers all sources to stop producing new data. In order to cleanly stop the plan
  /// will continue to run any tasks that are already in progress. The caller should
  /// still wait for `finished` to complete before destroying the plan.
  void StopProducing();

  /// \brief A future which will be marked finished when all tasks have finished.
  Future<> finished();

  /// \brief Return whether the plan has non-empty metadata
  bool HasMetadata() const;

  /// \brief Return the plan's attached metadata
  std::shared_ptr<const KeyValueMetadata> metadata() const;

  std::string ToString() const;
};
120
+
121
// Acero can be extended by providing custom implementations of ExecNode. The methods
// below are documented in detail and provide careful instruction on how to fulfill the
// ExecNode contract. It's suggested you familiarize yourself with the Acero
// documentation in the C++ user guide.
class ARROW_ACERO_EXPORT ExecNode {
 public:
  using NodeVector = std::vector<ExecNode*>;

  virtual ~ExecNode() = default;

  /// Short human-readable name for this node kind (e.g. for ToString()).
  virtual const char* kind_name() const = 0;

  // The number of inputs expected by this node
  int num_inputs() const { return static_cast<int>(inputs_.size()); }

  /// This node's predecessors in the exec plan
  const NodeVector& inputs() const { return inputs_; }

  /// True if the plan has no output schema (is a sink)
  bool is_sink() const { return !output_schema_; }

  /// \brief Labels identifying the function of each input.
  const std::vector<std::string>& input_labels() const { return input_labels_; }

  /// This node's successor in the exec plan
  const ExecNode* output() const { return output_; }

  /// The datatypes for batches produced by this node
  const std::shared_ptr<Schema>& output_schema() const { return output_schema_; }

  /// This node's exec plan
  ExecPlan* plan() { return plan_; }

  /// \brief An optional label, for display and debugging
  ///
  /// There is no guarantee that this value is non-empty or unique.
  const std::string& label() const { return label_; }
  void SetLabel(std::string label) { label_ = std::move(label); }

  virtual Status Validate() const;

  /// \brief the ordering of the output batches
  ///
  /// This does not guarantee the batches will be emitted by this node
  /// in order. Instead it guarantees that the batches will have their
  /// ExecBatch::index property set in a way that respects this ordering.
  ///
  /// In other words, given the ordering {{"x", SortOrder::Ascending}} we
  /// know that all values of x in a batch with index N will be less than
  /// or equal to all values of x in a batch with index N+k (assuming k > 0).
  /// Furthermore, we also know that values will be sorted within a batch.
  /// Any row N will have a value of x that is less than the value for
  /// any row N+k.
  ///
  /// Note that an ordering can be both Ordering::Unordered and Ordering::Implicit.
  /// A node's output should be marked Ordering::Unordered if the order is
  /// non-deterministic. For example, a hash-join has no predictable output order.
  ///
  /// If the ordering is Ordering::Implicit then there is a meaningful order but that
  /// ordering is not represented by any column in the data. The most common case for
  /// this is when reading data from an in-memory table. The data has an implicit "row
  /// order" which is not necessarily represented in the data set.
  ///
  /// A filter or project node will not modify the ordering. Nothing needs to be done
  /// other than ensure the index assigned to output batches is the same as the
  /// input batch that was mapped.
  ///
  /// Other nodes may introduce order. For example, an order-by node will emit
  /// a brand new ordering independent of the input ordering.
  ///
  /// Finally, as described above, a node such as a hash-join or aggregation may
  /// destroy ordering (although these nodes could also choose to establish a
  /// new ordering based on the hash keys).
  ///
  /// Some nodes will require an ordering. For example, a fetch node or an
  /// asof join node will only function if the input data is ordered (for fetch
  /// it is enough to be implicitly ordered. For an asof join the ordering must
  /// be explicit and compatible with the on key.)
  ///
  /// Nodes that maintain ordering should be careful to avoid introducing gaps
  /// in the batch index. This may require emitting empty batches in order to
  /// maintain continuity.
  virtual const Ordering& ordering() const;

  /// Upstream API:
  /// These functions are called by input nodes that want to inform this node
  /// about an updated condition (a new input batch or an impending
  /// end of stream).
  ///
  /// Implementation rules:
  /// - these may be called anytime after StartProducing() has succeeded
  ///   (and even during or after StopProducing())
  /// - these may be called concurrently
  /// - these are allowed to call back into PauseProducing(), ResumeProducing()
  ///   and StopProducing()

  /// Transfer input batch to ExecNode
  ///
  /// A node will typically perform some kind of operation on the batch
  /// and then call InputReceived on its outputs with the result.
  ///
  /// Other nodes may need to accumulate some number of inputs before any
  /// output can be produced. These nodes will add the batch to some kind
  /// of in-memory accumulation queue and return.
  virtual Status InputReceived(ExecNode* input, ExecBatch batch) = 0;

  /// Mark the inputs finished after the given number of batches.
  ///
  /// This may be called before all inputs are received. This simply fixes
  /// the total number of incoming batches for an input, so that the ExecNode
  /// knows when it has received all input, regardless of order.
  virtual Status InputFinished(ExecNode* input, int total_batches) = 0;

  /// \brief Perform any needed initialization
  ///
  /// This hook performs any actions in between creation of ExecPlan and the call to
  /// StartProducing. An example could be Bloom filter pushdown. The order of ExecNodes
  /// that executes this method is undefined, but the calls are made synchronously.
  ///
  /// At this point a node can rely on all inputs & outputs (and the input schemas)
  /// being well defined.
  virtual Status Init();

  /// Lifecycle API:
  /// - start / stop to initiate and terminate production
  /// - pause / resume to apply backpressure
  ///
  /// Implementation rules:
  /// - StartProducing() should not recurse into the inputs, as it is
  ///   handled by ExecPlan::StartProducing()
  /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
  ///   concurrently, potentially even before the call to StartProducing
  ///   has finished.
  /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
  ///   by the downstream nodes' InputReceived(), InputFinished() methods
  ///
  /// StopProducing may be called due to an error, by the user (e.g. cancel), or
  /// because a node has all the data it needs (e.g. limit, top-k on sorted data).
  /// This means the method may be called multiple times and we have the following
  /// additional rules
  /// - StopProducing() must be idempotent
  /// - StopProducing() must be forwarded to inputs (this is needed for the limit/top-k
  ///   case because we may not be stopping the entire plan)

  // Right now, since synchronous calls happen in both directions (input to
  // output and then output to input), a node must be careful to be reentrant
  // against synchronous calls from its output, *and* also concurrent calls from
  // other threads. The most reliable solution is to update the internal state
  // first, and notify outputs only at the end.
  //
  // Concurrent calls to PauseProducing and ResumeProducing can be hard to sequence
  // as they may travel at different speeds through the plan.
  //
  // For example, consider a resume that comes quickly after a pause. If the source
  // receives the resume before the pause the source may think the destination is full
  // and halt production which would lead to deadlock.
  //
  // To resolve this a counter is sent for all calls to pause/resume. Only the call with
  // the highest counter value is valid. So if a call to PauseProducing(5) comes after
  // a call to ResumeProducing(6) then the source should continue producing.

  /// \brief Start producing
  ///
  /// This must only be called once.
  ///
  /// This is typically called automatically by ExecPlan::StartProducing().
  virtual Status StartProducing() = 0;

  /// \brief Pause producing temporarily
  ///
  /// \param output Pointer to the output that is full
  /// \param counter Counter used to sequence calls to pause/resume
  ///
  /// This call is a hint that an output node is currently not willing
  /// to receive data.
  ///
  /// This may be called any number of times.
  /// However, the node is still free to produce data (which may be difficult
  /// to prevent anyway if data is produced using multiple threads).
  virtual void PauseProducing(ExecNode* output, int32_t counter) = 0;

  /// \brief Resume producing after a temporary pause
  ///
  /// \param output Pointer to the output that is now free
  /// \param counter Counter used to sequence calls to pause/resume
  ///
  /// This call is a hint that an output node is willing to receive data again.
  ///
  /// This may be called any number of times.
  virtual void ResumeProducing(ExecNode* output, int32_t counter) = 0;

  /// \brief Stop producing new data
  ///
  /// If this node is a source then the source should stop generating data
  /// as quickly as possible. If this node is not a source then there is typically
  /// nothing that needs to be done although a node may choose to start ignoring incoming
  /// data.
  ///
  /// This method will be called when an error occurs in the plan
  /// This method may also be called by the user if they wish to end a plan early
  /// Finally, this method may be called if a node determines it no longer needs any more
  /// input (for example, a limit node).
  ///
  /// This method may be called multiple times.
  ///
  /// This is not a pause. There will be no way to start the source again after this has
  /// been called.
  virtual Status StopProducing();

  std::string ToString(int indent = 0) const;

 protected:
  ExecNode(ExecPlan* plan, NodeVector inputs, std::vector<std::string> input_labels,
           std::shared_ptr<Schema> output_schema);

  /// Subclass hook invoked by StopProducing (the public method handles
  /// the idempotence bookkeeping via stopped_).
  virtual Status StopProducingImpl() = 0;

  /// Provide extra info to include in the string representation.
  virtual std::string ToStringExtra(int indent = 0) const;

  // Set once the node has been stopped, making StopProducing idempotent.
  // NOTE(review): std::atomic is used but <atomic> is not directly included
  // in the visible include list — presumably pulled in transitively; confirm.
  std::atomic<bool> stopped_;
  ExecPlan* plan_;
  std::string label_;

  NodeVector inputs_;
  std::vector<std::string> input_labels_;

  // Null for sink nodes (see is_sink()).
  std::shared_ptr<Schema> output_schema_;
  ExecNode* output_ = NULLPTR;
};
351
+
352
/// \brief An extensible registry for factories of ExecNodes
class ARROW_ACERO_EXPORT ExecFactoryRegistry {
 public:
  /// A factory receives the owning plan, the already-constructed input nodes
  /// and the node options, and returns the newly created node.
  using Factory = std::function<Result<ExecNode*>(ExecPlan*, std::vector<ExecNode*>,
                                                  const ExecNodeOptions&)>;

  virtual ~ExecFactoryRegistry() = default;

  /// \brief Get the named factory from this registry
  ///
  /// will raise if factory_name is not found
  virtual Result<Factory> GetFactory(const std::string& factory_name) = 0;

  /// \brief Add a factory to this registry with the provided name
  ///
  /// will raise if factory_name is already in the registry
  virtual Status AddFactory(std::string factory_name, Factory factory) = 0;
};
370
+
371
/// The default registry, which includes built-in factories.
///
/// NOTE(review): callers appear to borrow the returned pointer (it is used
/// as a default argument elsewhere in this header) — do not delete it.
ARROW_ACERO_EXPORT
ExecFactoryRegistry* default_exec_factory_registry();
374
+
375
+ /// \brief Construct an ExecNode using the named factory
376
+ inline Result<ExecNode*> MakeExecNode(
377
+ const std::string& factory_name, ExecPlan* plan, std::vector<ExecNode*> inputs,
378
+ const ExecNodeOptions& options,
379
+ ExecFactoryRegistry* registry = default_exec_factory_registry()) {
380
+ ARROW_ASSIGN_OR_RAISE(auto factory, registry->GetFactory(factory_name));
381
+ return factory(plan, std::move(inputs), options);
382
+ }
383
+
384
+ /// @}
385
+
386
+ /// \addtogroup acero-api
387
+ /// @{
388
+
389
+ /// \brief Helper class for declaring execution nodes
390
+ ///
391
+ /// A Declaration represents an unconstructed ExecNode (and potentially an entire graph
392
+ /// since its inputs may also be Declarations)
393
+ ///
394
+ /// A Declaration can be converted to a plan and executed using one of the
395
+ /// DeclarationToXyz methods.
396
+ ///
397
+ /// For more direct control, a Declaration can be added to an existing execution
398
+ /// plan with Declaration::AddToPlan, which will recursively construct any inputs as
399
+ /// necessary.
400
struct ARROW_ACERO_EXPORT Declaration {
  /// An input is either an already-built node or another (nested) Declaration.
  using Input = std::variant<ExecNode*, Declaration>;

  Declaration() {}

  /// \brief construct a declaration
  /// \param factory_name the name of the exec node to construct. The node must have
  ///        been added to the exec node registry with this name.
  /// \param inputs the inputs to the node, these should be other declarations
  /// \param options options that control the behavior of the node. You must use
  ///        the appropriate subclass. For example, if `factory_name` is
  ///        "project" then `options` should be ProjectNodeOptions.
  /// \param label a label to give the node. Can be used to distinguish it from other
  ///        nodes of the same type in the plan.
  Declaration(std::string factory_name, std::vector<Input> inputs,
              std::shared_ptr<ExecNodeOptions> options, std::string label)
      : factory_name{std::move(factory_name)},
        inputs{std::move(inputs)},
        options{std::move(options)},
        label{std::move(label)} {}

  /// \brief convenience overload wrapping concrete options in a shared_ptr
  template <typename Options>
  Declaration(std::string factory_name, std::vector<Input> inputs, Options options,
              std::string label)
      : Declaration{std::move(factory_name), std::move(inputs),
                    std::shared_ptr<ExecNodeOptions>(
                        std::make_shared<Options>(std::move(options))),
                    std::move(label)} {}

  /// \brief convenience overload for an unlabeled declaration with inputs
  template <typename Options>
  Declaration(std::string factory_name, std::vector<Input> inputs, Options options)
      : Declaration{std::move(factory_name), std::move(inputs), std::move(options),
                    /*label=*/""} {}

  /// \brief convenience overload for an unlabeled declaration with no inputs
  template <typename Options>
  Declaration(std::string factory_name, Options options)
      : Declaration{std::move(factory_name), {}, std::move(options), /*label=*/""} {}

  /// \brief convenience overload for a labeled declaration with no inputs
  template <typename Options>
  Declaration(std::string factory_name, Options options, std::string label)
      : Declaration{std::move(factory_name), {}, std::move(options), std::move(label)} {}

  /// \brief Convenience factory for the common case of a simple sequence of nodes.
  ///
  /// Each of decls will be appended to the inputs of the subsequent declaration,
  /// and the final modified declaration will be returned.
  ///
  /// Without this convenience factory, constructing a sequence would require explicit,
  /// difficult-to-read nesting:
  ///
  ///     Declaration{"n3",
  ///                 {
  ///                     Declaration{"n2",
  ///                                 {
  ///                                     Declaration{"n1",
  ///                                                 {
  ///                                                     Declaration{"n0", N0Opts{}},
  ///                                                 },
  ///                                                 N1Opts{}},
  ///                                 },
  ///                                 N2Opts{}},
  ///                 },
  ///                 N3Opts{}};
  ///
  /// An equivalent Declaration can be constructed more tersely using Sequence:
  ///
  ///     Declaration::Sequence({
  ///         {"n0", N0Opts{}},
  ///         {"n1", N1Opts{}},
  ///         {"n2", N2Opts{}},
  ///         {"n3", N3Opts{}},
  ///     });
  static Declaration Sequence(std::vector<Declaration> decls);

  /// \brief add the declaration to an already created execution plan
  /// \param plan the plan to add the node to
  /// \param registry the registry to use to lookup the node factory
  ///
  /// This method will recursively call AddToPlan on all of the declaration's inputs.
  /// This method is only for advanced use when the DeclarationToXyz methods are not
  /// sufficient.
  ///
  /// \return the instantiated execution node
  Result<ExecNode*> AddToPlan(ExecPlan* plan, ExecFactoryRegistry* registry =
                                                  default_exec_factory_registry()) const;

  // Validate a declaration
  bool IsValid(ExecFactoryRegistry* registry = default_exec_factory_registry()) const;

  /// \brief the name of the factory to use when creating a node
  std::string factory_name;
  /// \brief the declarations's inputs
  std::vector<Input> inputs;
  /// \brief options to control the behavior of the node
  std::shared_ptr<ExecNodeOptions> options;
  /// \brief a label to give the node in the plan
  std::string label;
};
498
+
499
/// \brief How to handle unaligned buffers
///
/// kWarn: proceed but emit a warning; kIgnore: proceed silently;
/// kReallocate: copy into an aligned buffer; kError: fail the plan.
/// (Semantics inferred from the enumerator names and the note below —
/// confirm against the implementation.)
enum class UnalignedBufferHandling { kWarn, kIgnore, kReallocate, kError };

/// \brief get the default behavior of unaligned buffer handling
///
/// This is configurable via the ACERO_ALIGNMENT_HANDLING environment variable which
/// can be set to "warn", "ignore", "reallocate", or "error". If the environment
/// variable is not set, or is set to an invalid value, this will return kWarn
UnalignedBufferHandling GetDefaultUnalignedBufferHandling();
508
+
509
/// \brief plan-wide options that can be specified when executing an execution plan
struct ARROW_ACERO_EXPORT QueryOptions {
  /// \brief Should the plan use a legacy batching strategy
  ///
  /// This is currently in place only to support the Scanner::ToTable
  /// method. This method relies on batch indices from the scanner
  /// remaining consistent. This is impractical in the ExecPlan which
  /// might slice batches as needed (e.g. for a join)
  ///
  /// However, it still works for simple plans and this is the only way
  /// we have at the moment for maintaining implicit order.
  bool use_legacy_batching = false;

  /// If the output has a meaningful order then sequence the output of the plan
  ///
  /// The default behavior (std::nullopt) will sequence output batches if there
  /// is a meaningful ordering in the final node and will emit batches immediately
  /// otherwise.
  ///
  /// If explicitly set to true then plan execution will fail if there is no
  /// meaningful ordering. This can be useful to validate a query that should
  /// be emitting ordered results.
  ///
  /// If explicitly set to false then batches will be emitted immediately even if
  /// there is a meaningful ordering. This could cause batches to be emitted out of
  /// order but may offer a small decrease to latency.
  std::optional<bool> sequence_output = std::nullopt;

  /// \brief should the plan use multiple background threads for CPU-intensive work
  ///
  /// If this is false then all CPU work will be done on the calling thread. I/O tasks
  /// will still happen on the I/O executor and may be multi-threaded (but should not use
  /// significant CPU resources).
  ///
  /// Will be ignored if custom_cpu_executor is set
  bool use_threads = true;

  /// \brief custom executor to use for CPU-intensive work
  ///
  /// Must be null or remain valid for the duration of the plan. If this is null then
  /// a default thread pool will be chosen whose behavior will be controlled by
  /// the `use_threads` option.
  ::arrow::internal::Executor* custom_cpu_executor = NULLPTR;

  /// \brief custom executor to use for IO work
  ///
  /// Must be null or remain valid for the duration of the plan. If this is null then
  /// the global io thread pool will be chosen whose behavior will be controlled by
  /// the "ARROW_IO_THREADS" environment variable.
  ::arrow::internal::Executor* custom_io_executor = NULLPTR;

  /// \brief a memory pool to use for allocations
  ///
  /// Must remain valid for the duration of the plan.
  MemoryPool* memory_pool = default_memory_pool();

  /// \brief a function registry to use for the plan
  ///
  /// Must remain valid for the duration of the plan.
  FunctionRegistry* function_registry = GetFunctionRegistry();

  /// \brief the names of the output columns
  ///
  /// If this is empty then names will be generated based on the input columns
  ///
  /// If set then the number of names must equal the number of output columns
  std::vector<std::string> field_names;

  /// \brief Policy for unaligned buffers in source data
  ///
  /// Various compute functions and acero internals will type pun array
  /// buffers from uint8_t* to some kind of value type (e.g. we might
  /// cast to int32_t* to add two int32 arrays)
  ///
  /// If the buffer is poorly aligned (e.g. an int32 array is not aligned
  /// on a 4-byte boundary) then this is technically undefined behavior in C++.
  /// However, most modern compilers and CPUs are fairly tolerant of this
  /// behavior and nothing bad (beyond a small hit to performance) is likely
  /// to happen.
  ///
  /// Note that this only applies to source buffers. All buffers allocated internally
  /// by Acero will be suitably aligned.
  ///
  /// If this field is set to kWarn then Acero will check if any buffers are unaligned
  /// and, if they are, will emit a warning.
  ///
  /// If this field is set to kReallocate then Acero will allocate a new, suitably
  /// aligned buffer and copy the contents from the old buffer into this new buffer.
  ///
  /// If this field is set to kError then Acero will gracefully abort the plan instead.
  ///
  /// If this field is set to kIgnore then Acero will not even check if the buffers are
  /// unaligned.
  ///
  /// If this field is not set then it will be treated as kWarn unless overridden
  /// by the ACERO_ALIGNMENT_HANDLING environment variable
  std::optional<UnalignedBufferHandling> unaligned_buffer_handling;
};
606
+
607
/// \brief Calculate the output schema of a declaration
///
/// This does not actually execute the plan. This operation may fail if the
/// declaration represents an invalid plan (e.g. a project node with multiple inputs)
///
/// \param declaration A declaration describing an execution plan
/// \param function_registry The function registry to use for function execution. If null
///        then the default function registry will be used.
///
/// \return the schema that batches would have after going through the execution plan
ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> DeclarationToSchema(
    const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR);

/// \brief Create a string representation of a plan
///
/// This representation is for debug purposes only.
///
/// Conversion to a string may fail if the declaration represents an
/// invalid plan.
///
/// Use Substrait for complete serialization of plans
///
/// \param declaration A declaration describing an execution plan
/// \param function_registry The function registry to use for function execution. If null
///        then the default function registry will be used.
///
/// \return a string representation of the plan suitable for debugging output
ARROW_ACERO_EXPORT Result<std::string> DeclarationToString(
    const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR);

/// \brief Utility method to run a declaration and collect the results into a table
///
/// \param declaration A declaration describing the plan to run
/// \param use_threads If `use_threads` is false then all CPU work will be done on the
///        calling thread. I/O tasks will still happen on the I/O executor
///        and may be multi-threaded (but should not use significant CPU
///        resources).
/// \param memory_pool The memory pool to use for allocations made while running the plan.
/// \param function_registry The function registry to use for function execution. If null
///        then the default function registry will be used.
///
/// This method will add a sink node to the declaration to collect results into a
/// table. It will then create an ExecPlan from the declaration, start the exec plan,
/// block until the plan has finished, and return the created table.
ARROW_ACERO_EXPORT Result<std::shared_ptr<Table>> DeclarationToTable(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToTable accepting a QueryOptions struct
ARROW_ACERO_EXPORT Result<std::shared_ptr<Table>> DeclarationToTable(
    Declaration declaration, QueryOptions query_options);

/// \brief Asynchronous version of \see DeclarationToTable
///
/// \param declaration A declaration describing the plan to run
/// \param use_threads The behavior of use_threads is slightly different than the
///        synchronous version since we cannot run synchronously on the
///        calling thread. Instead, if use_threads=false then a new thread
///        pool will be created with a single thread and this will be used for
///        all compute work.
/// \param memory_pool The memory pool to use for allocations made while running the plan.
/// \param function_registry The function registry to use for function execution. If null
///        then the default function registry will be used.
ARROW_ACERO_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToTableAsync accepting a custom exec context
///
/// The executor must be specified (cannot be null) and must be kept alive until the
/// returned future finishes.
ARROW_ACERO_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
    Declaration declaration, ExecContext custom_exec_context);
681
+
682
/// \brief a collection of exec batches with a common schema
struct BatchesWithCommonSchema {
  /// the batches, all of which conform to `schema`
  std::vector<ExecBatch> batches;
  /// the schema shared by every batch in `batches`
  std::shared_ptr<Schema> schema;
};

/// \brief Utility method to run a declaration and collect the results into ExecBatch
/// vector
///
/// \see DeclarationToTable for details on threading & execution
ARROW_ACERO_EXPORT Result<BatchesWithCommonSchema> DeclarationToExecBatches(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToExecBatches accepting a QueryOptions struct
ARROW_ACERO_EXPORT Result<BatchesWithCommonSchema> DeclarationToExecBatches(
    Declaration declaration, QueryOptions query_options);

/// \brief Asynchronous version of \see DeclarationToExecBatches
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToExecBatchesAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
    Declaration declaration, ExecContext custom_exec_context);

/// \brief Utility method to run a declaration and collect the results into a vector
///
/// \see DeclarationToTable for details on threading & execution
ARROW_ACERO_EXPORT Result<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatches(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToBatches accepting a QueryOptions struct
ARROW_ACERO_EXPORT Result<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatches(
    Declaration declaration, QueryOptions query_options);

/// \brief Asynchronous version of \see DeclarationToBatches
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>>
DeclarationToBatchesAsync(Declaration declaration, bool use_threads = true,
                          MemoryPool* memory_pool = default_memory_pool(),
                          FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToBatchesAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>>
DeclarationToBatchesAsync(Declaration declaration, ExecContext exec_context);
738
+
739
/// \brief Utility method to run a declaration and return results as a RecordBatchReader
///
/// If an exec context is not provided then a default exec context will be used based
/// on the value of `use_threads`. If `use_threads` is false then the CPU executor will
/// be a serial executor and all CPU work will be done on the calling thread. I/O tasks
/// will still happen on the I/O executor and may be multi-threaded.
///
/// If `use_threads` is false then all CPU work will happen during the calls to
/// RecordBatchReader::Next and no CPU work will happen in the background. If
/// `use_threads` is true then CPU work will happen on the CPU thread pool and tasks may
/// run in between calls to RecordBatchReader::Next. If the returned reader is not
/// consumed quickly enough then the plan will eventually pause as the backpressure queue
/// fills up.
///
/// If a custom exec context is provided then the value of `use_threads` will be ignored.
///
/// The returned RecordBatchReader can be closed early to cancel the computation of record
/// batches. In this case, only errors encountered by the computation may be reported. In
/// particular, no cancellation error may be reported.
ARROW_ACERO_EXPORT Result<std::unique_ptr<RecordBatchReader>> DeclarationToReader(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToReader accepting a QueryOptions struct
ARROW_ACERO_EXPORT Result<std::unique_ptr<RecordBatchReader>> DeclarationToReader(
    Declaration declaration, QueryOptions query_options);

/// \brief Utility method to run a declaration and ignore results
///
/// This can be useful when the data are consumed as part of the plan itself, for
/// example, when the plan ends with a write node.
///
/// \see DeclarationToTable for details on threading & execution
ARROW_ACERO_EXPORT Status
DeclarationToStatus(Declaration declaration, bool use_threads = true,
                    MemoryPool* memory_pool = default_memory_pool(),
                    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToStatus accepting a QueryOptions struct
ARROW_ACERO_EXPORT Status DeclarationToStatus(Declaration declaration,
                                              QueryOptions query_options);

/// \brief Asynchronous version of \see DeclarationToStatus
///
/// This can be useful when the data are consumed as part of the plan itself, for
/// example, when the plan ends with a write node.
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToStatusAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync(Declaration declaration,
                                                     ExecContext exec_context);

/// @}

/// \brief Wrap an ExecBatch generator in a RecordBatchReader.
///
/// The RecordBatchReader does not impose any ordering on emitted batches.
ARROW_ACERO_EXPORT
std::shared_ptr<RecordBatchReader> MakeGeneratorReader(
    std::shared_ptr<Schema>, std::function<Future<std::optional<ExecBatch>>()>,
    MemoryPool*);

// Default backpressure bounds for the background queue used by MakeReaderGenerator.
// NOTE(review): names suggest reading pauses once max_q batches are queued and
// resumes when the queue drains to q_restart -- confirm in the implementation.
constexpr int kDefaultBackgroundMaxQ = 32;
constexpr int kDefaultBackgroundQRestart = 16;

/// \brief Make a generator of RecordBatchReaders
///
/// Useful as a source node for an Exec plan
ARROW_ACERO_EXPORT
Result<std::function<Future<std::optional<ExecBatch>>()>> MakeReaderGenerator(
    std::shared_ptr<RecordBatchReader> reader, arrow::internal::Executor* io_executor,
    int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart);
817
+
818
+ } // namespace acero
819
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/acero/accumulation_queue.h"
25
+ #include "arrow/acero/bloom_filter.h"
26
+ #include "arrow/acero/options.h"
27
+ #include "arrow/acero/query_context.h"
28
+ #include "arrow/acero/schema_util.h"
29
+ #include "arrow/acero/task_util.h"
30
+ #include "arrow/result.h"
31
+ #include "arrow/status.h"
32
+ #include "arrow/type.h"
33
+ #include "arrow/util/tracing.h"
34
+
35
+ namespace arrow {
36
+ namespace acero {
37
+
38
+ using util::AccumulationQueue;
39
+
40
/// \brief Abstract interface for a hash join implementation
///
/// The hash join exec node drives an implementation through the callbacks below.
/// Two concrete implementations are available via MakeBasic and MakeSwiss.
class HashJoinImpl {
 public:
  // Emits one output batch of the join.
  // NOTE(review): the int64_t argument's semantics (presumably a batch index/tag)
  // are not visible in this header -- confirm against the implementations.
  using OutputBatchCallback = std::function<Status(int64_t, ExecBatch)>;
  // Invoked when the build phase completes; receives a thread index (size_t).
  using BuildFinishedCallback = std::function<Status(size_t)>;
  // Invoked when the join has finished producing output.
  // NOTE(review): int64_t argument semantics not visible here -- confirm.
  using FinishedCallback = std::function<Status(int64_t)>;
  // Registers a task group with the caller's scheduler: takes the per-task
  // function and the continuation run after all tasks finish, returns a group id.
  using RegisterTaskGroupCallback = std::function<int(
      std::function<Status(size_t, int64_t)>, std::function<Status(size_t)>)>;
  // Starts a previously registered task group (group id, int64_t parameter).
  using StartTaskGroupCallback = std::function<Status(int, int64_t)>;
  // Continuation invoked once an abort has completed.
  using AbortContinuationImpl = std::function<void()>;

  virtual ~HashJoinImpl() = default;

  // One-time setup; must be called before any build/probe activity.
  virtual Status Init(QueryContext* ctx, JoinType join_type, size_t num_threads,
                      const HashJoinProjectionMaps* proj_map_left,
                      const HashJoinProjectionMaps* proj_map_right,
                      std::vector<JoinKeyCmp> key_cmp, Expression filter,
                      RegisterTaskGroupCallback register_task_group_callback,
                      StartTaskGroupCallback start_task_group_callback,
                      OutputBatchCallback output_batch_callback,
                      FinishedCallback finished_callback) = 0;

  // Build the hash table from the accumulated build-side batches; on_finished
  // is invoked when the build completes.
  virtual Status BuildHashTable(size_t thread_index, AccumulationQueue batches,
                                BuildFinishedCallback on_finished) = 0;
  // Probe the hash table with a single probe-side batch.
  virtual Status ProbeSingleBatch(size_t thread_index, ExecBatch batch) = 0;
  // Signal that no more probe-side batches will arrive.
  virtual Status ProbingFinished(size_t thread_index) = 0;
  // Abort the join; pos_abort_callback runs after the abort completes.
  virtual void Abort(TaskScheduler::AbortContinuationImpl pos_abort_callback) = 0;
  // Human-readable description, for debugging.
  virtual std::string ToString() const = 0;

  // Factory for the basic hash join implementation.
  static Result<std::unique_ptr<HashJoinImpl>> MakeBasic();
  // Factory for the Swiss(-table) hash join implementation.
  static Result<std::unique_ptr<HashJoinImpl>> MakeSwiss();

 protected:
  // Tracing span covering this join instance.
  arrow::util::tracing::Span span_;
};
73
+
74
+ } // namespace acero
75
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_dict.h ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <unordered_map>
22
+
23
+ #include "arrow/acero/schema_util.h"
24
+ #include "arrow/compute/exec.h"
25
+ #include "arrow/compute/kernels/row_encoder_internal.h"
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type.h"
29
+
30
+ // This file contains hash join logic related to handling of dictionary encoded key
31
+ // columns.
32
+ //
33
+ // A key column from probe side of the join can be matched against a key column from build
34
+ // side of the join, as long as the underlying value types are equal. That means that:
35
+ // - both scalars and arrays can be used and even mixed in the same column
36
+ // - dictionary column can be matched against non-dictionary column if underlying value
37
+ // types are equal
38
+ // - dictionary column can be matched against dictionary column with a different index
39
+ // type, and potentially using a different dictionary, if underlying value types are equal
40
+ //
41
+ // We currently require in hash join that for all dictionary encoded columns, the same
42
+ // dictionary is used in all input exec batches.
43
+ //
44
+ // In order to allow matching columns with different dictionaries, different dictionary
45
+ // index types, and dictionary key against non-dictionary key, internally comparisons will
46
+ // be evaluated after remapping values on both sides of the join to a common
47
+ // representation (which will be called "unified representation"). This common
48
+ // representation is a column of int32() type (not a dictionary column). It represents an
49
+ // index in the unified dictionary computed for the (only) dictionary present on build
50
+ // side (an empty dictionary is still created for an empty build side). Null value is
51
+ // always represented in this common representation as null int32 value, unified
52
+ // dictionary will never contain a null value (so there is no ambiguity of representing
53
+ // nulls as either index to a null entry in the dictionary or null index).
54
+ //
55
+ // Unified dictionary represents values present on build side. There may be values on
56
+ // probe side that are not present in it. All such values, that are not null, are mapped
57
+ // in the common representation to a special constant kMissingValueId.
58
+ //
59
+
60
+ namespace arrow {
61
+
62
+ using compute::ExecBatch;
63
+ using compute::ExecContext;
64
+ using compute::internal::RowEncoder;
65
+
66
+ namespace acero {
67
+
68
+ /// Helper class with operations that are stateless and common to processing of dictionary
69
+ /// keys on both build and probe side.
70
+ class HashJoinDictUtil {
71
+ public:
72
+ // Null values in unified representation are always represented as null that has
73
+ // corresponding integer set to this constant
74
+ static constexpr int32_t kNullId = 0;
75
+ // Constant representing a value, that is not null, missing on the build side, in
76
+ // unified representation.
77
+ static constexpr int32_t kMissingValueId = -1;
78
+
79
+ // Check if data types of corresponding pair of key column on build and probe side are
80
+ // compatible
81
+ static bool KeyDataTypesValid(const std::shared_ptr<DataType>& probe_data_type,
82
+ const std::shared_ptr<DataType>& build_data_type);
83
+
84
+ // Input must be dictionary array or dictionary scalar.
85
+ // A precomputed and provided here lookup table in the form of int32() array will be
86
+ // used to remap input indices to unified representation.
87
+ //
88
+ static Result<std::shared_ptr<ArrayData>> IndexRemapUsingLUT(
89
+ ExecContext* ctx, const Datum& indices, int64_t batch_length,
90
+ const std::shared_ptr<ArrayData>& map_array,
91
+ const std::shared_ptr<DataType>& data_type);
92
+
93
+ // Return int32() array that contains indices of input dictionary array or scalar after
94
+ // type casting.
95
+ static Result<std::shared_ptr<ArrayData>> ConvertToInt32(
96
+ const std::shared_ptr<DataType>& from_type, const Datum& input,
97
+ int64_t batch_length, ExecContext* ctx);
98
+
99
+ // Return an array that contains elements of input int32() array after casting to a
100
+ // given integer type. This is used for mapping unified representation stored in the
101
+ // hash table on build side back to original input data type of hash join, when
102
+ // outputting hash join results to parent exec node.
103
+ //
104
+ static Result<std::shared_ptr<ArrayData>> ConvertFromInt32(
105
+ const std::shared_ptr<DataType>& to_type, const Datum& input, int64_t batch_length,
106
+ ExecContext* ctx);
107
+
108
+ // Return dictionary referenced in either dictionary array or dictionary scalar
109
+ static std::shared_ptr<Array> ExtractDictionary(const Datum& data);
110
+ };
111
+
112
+ /// Implements processing of dictionary arrays/scalars in key columns on the build side of
113
+ /// a hash join.
114
+ /// Each instance of this class corresponds to a single column and stores and
115
+ /// processes only the information related to that column.
116
+ /// Const methods are thread-safe, non-const methods are not (the caller must make sure
117
+ /// that only one thread at any time will access them).
118
+ ///
119
+ class HashJoinDictBuild {
120
+ public:
121
+ // Returns true if the key column (described in input by its data type) requires any
122
+ // pre- or post-processing related to handling dictionaries.
123
+ //
124
+ static bool KeyNeedsProcessing(const std::shared_ptr<DataType>& build_data_type) {
125
+ return (build_data_type->id() == Type::DICTIONARY);
126
+ }
127
+
128
+ // Data type of unified representation
129
+ static std::shared_ptr<DataType> DataTypeAfterRemapping() { return int32(); }
130
+
131
+ // Should be called only once in hash join, before processing any build or probe
132
+ // batches.
133
+ //
134
+ // Takes a pointer to the dictionary for a corresponding key column on the build side as
135
+ // an input. If the build side is empty, it still needs to be called, but with
136
+ // dictionary pointer set to null.
137
+ //
138
+ // Currently it is required that all input batches on build side share the same
139
+ // dictionary. For each input batch during its pre-processing, dictionary will be
140
+ // checked and error will be returned if it is different then the one provided in the
141
+ // call to this method.
142
+ //
143
+ // Unifies the dictionary. The order of the values is still preserved.
144
+ // Null and duplicate entries are removed. If the dictionary is already unified, its
145
+ // copy will be produced and stored within this class.
146
+ //
147
+ // Prepares the mapping from ids within original dictionary to the ids in the resulting
148
+ // dictionary. This is used later on to pre-process (map to unified representation) key
149
+ // column on build side.
150
+ //
151
+ // Prepares the reverse mapping (in the form of hash table) from values to the ids in
152
+ // the resulting dictionary. This will be used later on to pre-process (map to unified
153
+ // representation) key column on probe side. Values on probe side that are not present
154
+ // in the original dictionary will be mapped to a special constant kMissingValueId. The
155
+ // exception is made for nulls, which get always mapped to nulls (both when null is
156
+ // represented as a dictionary id pointing to a null and a null dictionary id).
157
+ //
158
+ Status Init(ExecContext* ctx, std::shared_ptr<Array> dictionary,
159
+ std::shared_ptr<DataType> index_type, std::shared_ptr<DataType> value_type);
160
+
161
+ // Remap array or scalar values into unified representation (array of int32()).
162
+ // Outputs kMissingValueId if input value is not found in the unified dictionary.
163
+ // Outputs null for null input value (with corresponding data set to kNullId).
164
+ //
165
+ Result<std::shared_ptr<ArrayData>> RemapInputValues(ExecContext* ctx,
166
+ const Datum& values,
167
+ int64_t batch_length) const;
168
+
169
+ // Remap dictionary array or dictionary scalar on build side to unified representation.
170
+ // Dictionary referenced in the input must match the dictionary that was
171
+ // given during initialization.
172
+ // The output is a dictionary array that references unified dictionary.
173
+ //
174
+ Result<std::shared_ptr<ArrayData>> RemapInput(
175
+ ExecContext* ctx, const Datum& indices, int64_t batch_length,
176
+ const std::shared_ptr<DataType>& data_type) const;
177
+
178
+ // Outputs dictionary array referencing unified dictionary, given an array with 32-bit
179
+ // ids.
180
+ // Used to post-process values looked up in a hash table on build side of the hash join
181
+ // before outputting to the parent exec node.
182
+ //
183
+ Result<std::shared_ptr<ArrayData>> RemapOutput(const ArrayData& indices32Bit,
184
+ ExecContext* ctx) const;
185
+
186
+ // Release shared pointers and memory
187
+ void CleanUp();
188
+
189
+ private:
190
+ // Data type of dictionary ids for the input dictionary on build side
191
+ std::shared_ptr<DataType> index_type_;
192
+ // Data type of values for the input dictionary on build side
193
+ std::shared_ptr<DataType> value_type_;
194
+ // Mapping from (encoded as string) values to the ids in unified dictionary
195
+ std::unordered_map<std::string, int32_t> hash_table_;
196
+ // Mapping from input dictionary ids to unified dictionary ids
197
+ std::shared_ptr<ArrayData> remapped_ids_;
198
+ // Input dictionary
199
+ std::shared_ptr<Array> dictionary_;
200
+ // Unified dictionary
201
+ std::shared_ptr<ArrayData> unified_dictionary_;
202
+ };
203
+
204
/// Implements processing of dictionary arrays/scalars in key columns on the probe side of
/// a hash join.
/// Each instance of this class corresponds to a single column and stores and
/// processes only the information related to that column.
/// It is not thread-safe - every participating thread should use its own instance of
/// this class.
///
class HashJoinDictProbe {
 public:
  // Returns true if this pair of key columns requires dictionary-related
  // pre-processing on the probe side.
  static bool KeyNeedsProcessing(const std::shared_ptr<DataType>& probe_data_type,
                                 const std::shared_ptr<DataType>& build_data_type);

  // Data type of the result of remapping input key column.
  //
  // The result of remapping is what is used in hash join for matching keys on build and
  // probe side. The exact data types may be different, as described below, and therefore
  // a common representation is needed for simplifying comparisons of pairs of keys on
  // both sides.
  //
  // We support matching key that is of non-dictionary type with key that is of dictionary
  // type, as long as the underlying value types are equal. We support matching when both
  // keys are of dictionary type, regardless whether underlying dictionary index types are
  // the same or not.
  //
  static std::shared_ptr<DataType> DataTypeAfterRemapping(
      const std::shared_ptr<DataType>& build_data_type);

  // Should only be called if KeyNeedsProcessing method returns true for a pair of
  // corresponding key columns from build and probe side.
  // Converts values in order to match the common representation for
  // both build and probe side used in hash table comparison.
  // Supports arrays and scalars as input.
  // Argument opt_build_side should be null if dictionary key on probe side is matched
  // with non-dictionary key on build side.
  //
  Result<std::shared_ptr<ArrayData>> RemapInput(
      const HashJoinDictBuild* opt_build_side, const Datum& data, int64_t batch_length,
      const std::shared_ptr<DataType>& probe_data_type,
      const std::shared_ptr<DataType>& build_data_type, ExecContext* ctx);

  // Release cached state (dictionary, mappings, encoder).
  void CleanUp();

 private:
  // May be null if probe side key is non-dictionary. Otherwise it is used to verify that
  // only a single dictionary is referenced in exec batch on probe side of hash join.
  std::shared_ptr<Array> dictionary_;
  // Mapping from dictionary on probe side of hash join (if it is used) to unified
  // representation.
  std::shared_ptr<ArrayData> remapped_ids_;
  // Encoder of key columns that uses unified representation instead of original data type
  // for key columns that need to use it (have dictionaries on either side of the join).
  RowEncoder encoder_;
};
257
+
258
// Encapsulates dictionary handling logic for build side of hash join.
// Holds one HashJoinDictBuild per key column that needs dictionary remapping.
//
class HashJoinDictBuildMulti {
 public:
  // Initialize per-column dictionary handling for the build side.
  // NOTE(review): opt_non_empty_batch presumably supplies the dictionaries and may be
  // null when the build side is empty -- confirm against the implementation.
  Status Init(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
              const ExecBatch* opt_non_empty_batch, ExecContext* ctx);
  // Configure a row encoder for the build-side key columns.
  static void InitEncoder(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
                          RowEncoder* encoder, ExecContext* ctx);
  // Encode one build-side batch, remapping dictionary key columns as needed.
  Status EncodeBatch(size_t thread_index,
                     const SchemaProjectionMaps<HashJoinProjection>& proj_map,
                     const ExecBatch& batch, RowEncoder* encoder, ExecContext* ctx) const;
  // Post-process a decoded key batch (map unified representation back to the
  // original key column types).
  Status PostDecode(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
                    ExecBatch* decoded_key_batch, ExecContext* ctx);
  // Access the per-column build-side dictionary state.
  const HashJoinDictBuild& get_dict_build(int icol) const { return remap_imp_[icol]; }

 private:
  // Whether each key column needs dictionary remapping
  std::vector<bool> needs_remap_;
  // Per-column dictionary handling state
  std::vector<HashJoinDictBuild> remap_imp_;
};
277
+
278
// Encapsulates dictionary handling logic for probe side of hash join.
// Keeps per-thread state so that probing threads do not share mutable data.
//
class HashJoinDictProbeMulti {
 public:
  // Size the per-thread local state vector.
  void Init(size_t num_threads);
  // Returns true if any key column of this batch needs dictionary remapping
  // before hash table lookups.
  bool BatchRemapNeeded(size_t thread_index,
                        const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
                        const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
                        ExecContext* ctx);
  // Encode one probe-side batch; outputs the encoder used (and optionally the
  // remapped key batch) for hash table lookups.
  Status EncodeBatch(size_t thread_index,
                     const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
                     const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
                     const HashJoinDictBuildMulti& dict_build, const ExecBatch& batch,
                     RowEncoder** out_encoder, ExecBatch* opt_out_key_batch,
                     ExecContext* ctx);

 private:
  // Lazily initialize the calling thread's local state.
  void InitLocalStateIfNeeded(
      size_t thread_index, const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
      const SchemaProjectionMaps<HashJoinProjection>& proj_map_build, ExecContext* ctx);
  // Configure a row encoder for the probe-side key columns.
  static void InitEncoder(const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
                          const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
                          RowEncoder* encoder, ExecContext* ctx);
  struct ThreadLocalState {
    // True once InitLocalStateIfNeeded has populated this entry
    bool is_initialized;
    // Whether any key column needs remapping (because of dictionaries used) before doing
    // join hash table lookups
    bool any_needs_remap;
    // Whether each key column needs remapping before doing join hash table lookups
    std::vector<bool> needs_remap;
    // Per-column probe-side dictionary handling state
    std::vector<HashJoinDictProbe> remap_imp;
    // Encoder of key columns that uses unified representation instead of original data
    // type for key columns that need to use it (have dictionaries on either side of the
    // join).
    RowEncoder post_remap_encoder;
  };
  std::vector<ThreadLocalState> local_states_;
};
316
+
317
+ } // namespace acero
318
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cassert>
21
+ #include <vector>
22
+
23
+ #include "arrow/acero/options.h"
24
+ #include "arrow/acero/schema_util.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/status.h"
27
+
28
+ namespace arrow {
29
+
30
+ using compute::ExecContext;
31
+
32
+ namespace acero {
33
+
34
+ class ARROW_ACERO_EXPORT HashJoinSchema {
35
+ public:
36
+ Status Init(JoinType join_type, const Schema& left_schema,
37
+ const std::vector<FieldRef>& left_keys, const Schema& right_schema,
38
+ const std::vector<FieldRef>& right_keys, const Expression& filter,
39
+ const std::string& left_field_name_prefix,
40
+ const std::string& right_field_name_prefix);
41
+
42
+ Status Init(JoinType join_type, const Schema& left_schema,
43
+ const std::vector<FieldRef>& left_keys,
44
+ const std::vector<FieldRef>& left_output, const Schema& right_schema,
45
+ const std::vector<FieldRef>& right_keys,
46
+ const std::vector<FieldRef>& right_output, const Expression& filter,
47
+ const std::string& left_field_name_prefix,
48
+ const std::string& right_field_name_prefix);
49
+
50
+ static Status ValidateSchemas(JoinType join_type, const Schema& left_schema,
51
+ const std::vector<FieldRef>& left_keys,
52
+ const std::vector<FieldRef>& left_output,
53
+ const Schema& right_schema,
54
+ const std::vector<FieldRef>& right_keys,
55
+ const std::vector<FieldRef>& right_output,
56
+ const std::string& left_field_name_prefix,
57
+ const std::string& right_field_name_prefix);
58
+
59
+ bool HasDictionaries() const;
60
+
61
+ bool HasLargeBinary() const;
62
+
63
+ Result<Expression> BindFilter(Expression filter, const Schema& left_schema,
64
+ const Schema& right_schema, ExecContext* exec_context);
65
+ std::shared_ptr<Schema> MakeOutputSchema(const std::string& left_field_name_suffix,
66
+ const std::string& right_field_name_suffix);
67
+
68
+ bool LeftPayloadIsEmpty() { return PayloadIsEmpty(0); }
69
+
70
+ bool RightPayloadIsEmpty() { return PayloadIsEmpty(1); }
71
+
72
+ static int kMissingField() {
73
+ return SchemaProjectionMaps<HashJoinProjection>::kMissingField;
74
+ }
75
+
76
+ SchemaProjectionMaps<HashJoinProjection> proj_maps[2];
77
+
78
+ private:
79
+ static bool IsTypeSupported(const DataType& type);
80
+
81
+ Status CollectFilterColumns(std::vector<FieldRef>& left_filter,
82
+ std::vector<FieldRef>& right_filter,
83
+ const Expression& filter, const Schema& left_schema,
84
+ const Schema& right_schema);
85
+
86
+ Expression RewriteFilterToUseFilterSchema(int right_filter_offset,
87
+ const SchemaProjectionMap& left_to_filter,
88
+ const SchemaProjectionMap& right_to_filter,
89
+ const Expression& filter);
90
+
91
+ bool PayloadIsEmpty(int side) {
92
+ assert(side == 0 || side == 1);
93
+ return proj_maps[side].num_cols(HashJoinProjection::PAYLOAD) == 0;
94
+ }
95
+
96
+ static Result<std::vector<FieldRef>> ComputePayload(const Schema& schema,
97
+ const std::vector<FieldRef>& output,
98
+ const std::vector<FieldRef>& filter,
99
+ const std::vector<FieldRef>& key);
100
+ };
101
+
102
+ } // namespace acero
103
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/map_node.h ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <vector>
24
+
25
+ #include "arrow/acero/exec_plan.h"
26
+ #include "arrow/acero/util.h"
27
+ #include "arrow/acero/visibility.h"
28
+ #include "arrow/compute/type_fwd.h"
29
+ #include "arrow/status.h"
30
+ #include "arrow/type_fwd.h"
31
+ #include "arrow/util/cancel.h"
32
+ #include "arrow/util/type_fwd.h"
33
+
34
+ namespace arrow {
35
+ namespace acero {
36
+
37
+ /// A utility base class for simple exec nodes with one input
38
+ ///
39
+ /// Pause/Resume Producing are forwarded appropriately
40
+ /// There is nothing to do in StopProducingImpl
41
+ ///
42
+ /// An AtomicCounter is used to keep track of when all data has arrived. When it
43
+ /// has the Finish() method will be invoked
44
+ class ARROW_ACERO_EXPORT MapNode : public ExecNode, public TracedNode {
45
+ public:
46
+ MapNode(ExecPlan* plan, std::vector<ExecNode*> inputs,
47
+ std::shared_ptr<Schema> output_schema);
48
+
49
+ Status InputFinished(ExecNode* input, int total_batches) override;
50
+
51
+ Status StartProducing() override;
52
+
53
+ void PauseProducing(ExecNode* output, int32_t counter) override;
54
+
55
+ void ResumeProducing(ExecNode* output, int32_t counter) override;
56
+
57
+ Status InputReceived(ExecNode* input, ExecBatch batch) override;
58
+
59
+ const Ordering& ordering() const override;
60
+
61
+ protected:
62
+ Status StopProducingImpl() override;
63
+
64
+ /// Transform a batch
65
+ ///
66
+ /// The output batch will have the same guarantee as the input batch
67
+ /// If this was the last batch this call may trigger Finish()
68
+ virtual Result<ExecBatch> ProcessBatch(ExecBatch batch) = 0;
69
+
70
+ /// Function called after all data has been received
71
+ ///
72
+ /// By default this does nothing. Override this to provide a custom implementation.
73
+ virtual void Finish();
74
+
75
+ protected:
76
+ // Counter for the number of batches received
77
+ AtomicCounter input_counter_;
78
+ };
79
+
80
+ } // namespace acero
81
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/options.h ADDED
@@ -0,0 +1,866 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <optional>
23
+ #include <string>
24
+ #include <vector>
25
+
26
+ #include "arrow/acero/type_fwd.h"
27
+ #include "arrow/acero/visibility.h"
28
+ #include "arrow/compute/api_aggregate.h"
29
+ #include "arrow/compute/api_vector.h"
30
+ #include "arrow/compute/exec.h"
31
+ #include "arrow/compute/expression.h"
32
+ #include "arrow/record_batch.h"
33
+ #include "arrow/result.h"
34
+ #include "arrow/util/async_generator.h"
35
+ #include "arrow/util/async_util.h"
36
+
37
+ namespace arrow {
38
+
39
+ using compute::Aggregate;
40
+ using compute::ExecBatch;
41
+ using compute::Expression;
42
+ using compute::literal;
43
+ using compute::Ordering;
44
+ using compute::SelectKOptions;
45
+ using compute::SortOptions;
46
+
47
+ namespace internal {
48
+
49
+ class Executor;
50
+
51
+ } // namespace internal
52
+
53
+ namespace acero {
54
+
55
+ /// \brief This must not be used in release-mode
56
+ struct DebugOptions;
57
+
58
+ using AsyncExecBatchGenerator = AsyncGenerator<std::optional<ExecBatch>>;
59
+
60
+ /// \addtogroup acero-nodes
61
+ /// @{
62
+
63
+ /// \brief A base class for all options objects
64
+ ///
65
+ /// The only time this is used directly is when a node has no configuration
66
+ class ARROW_ACERO_EXPORT ExecNodeOptions {
67
+ public:
68
+ virtual ~ExecNodeOptions() = default;
69
+
70
+ /// \brief This must not be used in release-mode
71
+ std::shared_ptr<DebugOptions> debug_opts;
72
+ };
73
+
74
+ /// \brief A node representing a generic source of data for Acero
75
+ ///
76
+ /// The source node will start calling `generator` during StartProducing. An initial
77
+ /// task will be created that will call `generator`. It will not call `generator`
78
+ /// reentrantly. If the source can be read in parallel then those details should be
79
+ /// encapsulated within `generator`.
80
+ ///
81
+ /// For each batch received a new task will be created to push that batch downstream.
82
+ /// This task will slice smaller units of size `ExecPlan::kMaxBatchSize` from the
83
+ /// parent batch and call InputReceived. Thus, if the `generator` yields a large
84
+ /// batch it may result in several calls to InputReceived.
85
+ ///
86
+ /// The SourceNode will, by default, assign an implicit ordering to outgoing batches.
87
+ /// This is valid as long as the generator generates batches in a deterministic fashion.
88
+ /// Currently, the only way to override this is to subclass the SourceNode.
89
+ ///
90
+ /// This node is not generally used directly but can serve as the basis for various
91
+ /// specialized nodes.
92
+ class ARROW_ACERO_EXPORT SourceNodeOptions : public ExecNodeOptions {
93
+ public:
94
+ /// Create an instance from values
95
+ SourceNodeOptions(std::shared_ptr<Schema> output_schema,
96
+ std::function<Future<std::optional<ExecBatch>>()> generator)
97
+ : output_schema(std::move(output_schema)), generator(std::move(generator)) {}
98
+
99
+ /// \brief the schema for batches that will be generated by this source
100
+ std::shared_ptr<Schema> output_schema;
101
+ /// \brief an asynchronous stream of batches ending with std::nullopt
102
+ std::function<Future<std::optional<ExecBatch>>()> generator;
103
+ };
104
+
105
+ /// \brief a node that generates data from a table already loaded in memory
106
+ ///
107
+ /// The table source node will slice off chunks, defined by `max_batch_size`
108
+ /// for parallel processing. The table source node extends source node and so these
109
+ /// chunks will be iteratively processed in small batches. \see SourceNodeOptions
110
+ /// for details.
111
+ class ARROW_ACERO_EXPORT TableSourceNodeOptions : public ExecNodeOptions {
112
+ public:
113
+ static constexpr int64_t kDefaultMaxBatchSize = 1 << 20;
114
+
115
+ /// Create an instance from values
116
+ TableSourceNodeOptions(std::shared_ptr<Table> table,
117
+ int64_t max_batch_size = kDefaultMaxBatchSize)
118
+ : table(std::move(table)), max_batch_size(max_batch_size) {}
119
+
120
+ /// \brief a table which acts as the data source
121
+ std::shared_ptr<Table> table;
122
+ /// \brief size of batches to emit from this node
123
+ /// If the table is larger the node will emit multiple batches from the
124
+ /// the table to be processed in parallel.
125
+ int64_t max_batch_size;
126
+ };
127
+
128
+ /// \brief define a lazily resolved Arrow table.
129
+ ///
130
+ /// The table uniquely identified by the names can typically be resolved at the time when
131
+ /// the plan is to be consumed.
132
+ ///
133
+ /// This node is for serialization purposes only and can never be executed.
134
+ class ARROW_ACERO_EXPORT NamedTableNodeOptions : public ExecNodeOptions {
135
+ public:
136
+ /// Create an instance from values
137
+ NamedTableNodeOptions(std::vector<std::string> names, std::shared_ptr<Schema> schema)
138
+ : names(std::move(names)), schema(std::move(schema)) {}
139
+
140
+ /// \brief the names to put in the serialized plan
141
+ std::vector<std::string> names;
142
+ /// \brief the output schema of the table
143
+ std::shared_ptr<Schema> schema;
144
+ };
145
+
146
+ /// \brief a source node which feeds data from a synchronous iterator of batches
147
+ ///
148
+ /// ItMaker is a maker of an iterator of tabular data.
149
+ ///
150
+ /// The node can be configured to use an I/O executor. If set then each time the
151
+ /// iterator is polled a new I/O thread task will be created to do the polling. This
152
+ /// allows a blocking iterator to stay off the CPU thread pool.
153
+ template <typename ItMaker>
154
+ class ARROW_ACERO_EXPORT SchemaSourceNodeOptions : public ExecNodeOptions {
155
+ public:
156
+ /// Create an instance that will create a new task on io_executor for each iteration
157
+ SchemaSourceNodeOptions(std::shared_ptr<Schema> schema, ItMaker it_maker,
158
+ arrow::internal::Executor* io_executor)
159
+ : schema(std::move(schema)),
160
+ it_maker(std::move(it_maker)),
161
+ io_executor(io_executor),
162
+ requires_io(true) {}
163
+
164
+ /// Create an instance that will either iterate synchronously or use the default I/O
165
+ /// executor
166
+ SchemaSourceNodeOptions(std::shared_ptr<Schema> schema, ItMaker it_maker,
167
+ bool requires_io = false)
168
+ : schema(std::move(schema)),
169
+ it_maker(std::move(it_maker)),
170
+ io_executor(NULLPTR),
171
+ requires_io(requires_io) {}
172
+
173
+ /// \brief The schema of the record batches from the iterator
174
+ std::shared_ptr<Schema> schema;
175
+
176
+ /// \brief A maker of an iterator which acts as the data source
177
+ ItMaker it_maker;
178
+
179
+ /// \brief The executor to use for scanning the iterator
180
+ ///
181
+ /// Defaults to the default I/O executor. Only used if requires_io is true.
182
+ /// If requires_io is false then this MUST be nullptr.
183
+ arrow::internal::Executor* io_executor;
184
+
185
+ /// \brief If true then items will be fetched from the iterator on a dedicated I/O
186
+ /// thread to keep I/O off the CPU thread
187
+ bool requires_io;
188
+ };
189
+
190
+ /// a source node that reads from a RecordBatchReader
191
+ ///
192
+ /// Each iteration of the RecordBatchReader will be run on a new thread task created
193
+ /// on the I/O thread pool.
194
+ class ARROW_ACERO_EXPORT RecordBatchReaderSourceNodeOptions : public ExecNodeOptions {
195
+ public:
196
+ /// Create an instance from values
197
+ RecordBatchReaderSourceNodeOptions(std::shared_ptr<RecordBatchReader> reader,
198
+ arrow::internal::Executor* io_executor = NULLPTR)
199
+ : reader(std::move(reader)), io_executor(io_executor) {}
200
+
201
+ /// \brief The RecordBatchReader which acts as the data source
202
+ std::shared_ptr<RecordBatchReader> reader;
203
+
204
+ /// \brief The executor to use for the reader
205
+ ///
206
+ /// Defaults to the default I/O executor.
207
+ arrow::internal::Executor* io_executor;
208
+ };
209
+
210
+ /// a source node that reads from an iterator of array vectors
211
+ using ArrayVectorIteratorMaker = std::function<Iterator<std::shared_ptr<ArrayVector>>()>;
212
+ /// \brief An extended Source node which accepts a schema and array-vectors
213
+ class ARROW_ACERO_EXPORT ArrayVectorSourceNodeOptions
214
+ : public SchemaSourceNodeOptions<ArrayVectorIteratorMaker> {
215
+ using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
216
+ };
217
+
218
+ /// a source node that reads from an iterator of ExecBatch
219
+ using ExecBatchIteratorMaker = std::function<Iterator<std::shared_ptr<ExecBatch>>()>;
220
+ /// \brief An extended Source node which accepts a schema and exec-batches
221
+ class ARROW_ACERO_EXPORT ExecBatchSourceNodeOptions
222
+ : public SchemaSourceNodeOptions<ExecBatchIteratorMaker> {
223
+ public:
224
+ using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
225
+ ExecBatchSourceNodeOptions(std::shared_ptr<Schema> schema,
226
+ std::vector<ExecBatch> batches,
227
+ ::arrow::internal::Executor* io_executor);
228
+ ExecBatchSourceNodeOptions(std::shared_ptr<Schema> schema,
229
+ std::vector<ExecBatch> batches, bool requires_io = false);
230
+ };
231
+
232
+ using RecordBatchIteratorMaker = std::function<Iterator<std::shared_ptr<RecordBatch>>()>;
233
+ /// a source node that reads from an iterator of RecordBatch
234
+ class ARROW_ACERO_EXPORT RecordBatchSourceNodeOptions
235
+ : public SchemaSourceNodeOptions<RecordBatchIteratorMaker> {
236
+ using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
237
+ };
238
+
239
+ /// \brief a node which excludes some rows from batches passed through it
240
+ ///
241
+ /// filter_expression will be evaluated against each batch which is pushed to
242
+ /// this node. Any rows for which filter_expression does not evaluate to `true` will be
243
+ /// excluded in the batch emitted by this node.
244
+ ///
245
+ /// This node will emit empty batches if all rows are excluded. This is done
246
+ /// to avoid gaps in the ordering.
247
+ class ARROW_ACERO_EXPORT FilterNodeOptions : public ExecNodeOptions {
248
+ public:
249
+ /// \brief create an instance from values
250
+ explicit FilterNodeOptions(Expression filter_expression)
251
+ : filter_expression(std::move(filter_expression)) {}
252
+
253
+ /// \brief the expression to filter batches
254
+ ///
255
+ /// The return type of this expression must be boolean
256
+ Expression filter_expression;
257
+ };
258
+
259
+ /// \brief a node which selects a specified subset from the input
260
+ class ARROW_ACERO_EXPORT FetchNodeOptions : public ExecNodeOptions {
261
+ public:
262
+ static constexpr std::string_view kName = "fetch";
263
+ /// \brief create an instance from values
264
+ FetchNodeOptions(int64_t offset, int64_t count) : offset(offset), count(count) {}
265
+ /// \brief the number of rows to skip
266
+ int64_t offset;
267
+ /// \brief the number of rows to keep (not counting skipped rows)
268
+ int64_t count;
269
+ };
270
+
271
+ /// \brief a node which executes expressions on input batches, producing batches
272
+ /// of the same length with new columns.
273
+ ///
274
+ /// Each expression will be evaluated against each batch which is pushed to
275
+ /// this node to produce a corresponding output column.
276
+ ///
277
+ /// If names are not provided, the string representations of exprs will be used.
278
+ class ARROW_ACERO_EXPORT ProjectNodeOptions : public ExecNodeOptions {
279
+ public:
280
+ /// \brief create an instance from values
281
+ explicit ProjectNodeOptions(std::vector<Expression> expressions,
282
+ std::vector<std::string> names = {})
283
+ : expressions(std::move(expressions)), names(std::move(names)) {}
284
+
285
+ /// \brief the expressions to run on the batches
286
+ ///
287
+ /// The output will have one column for each expression. If you wish to keep any of
288
+ /// the columns from the input then you should create a simple field_ref expression
289
+ /// for that column.
290
+ std::vector<Expression> expressions;
291
+ /// \brief the names of the output columns
292
+ ///
293
+ /// If this is not specified then the result of calling ToString on the expression will
294
+ /// be used instead
295
+ ///
296
+ /// This list should either be empty or have the same length as `expressions`
297
+ std::vector<std::string> names;
298
+ };
299
+
300
+ /// \brief a node which aggregates input batches and calculates summary statistics
301
+ ///
302
+ /// The node can summarize the entire input or it can group the input with grouping keys
303
+ /// and segment keys.
304
+ ///
305
+ /// By default, the aggregate node is a pipeline breaker. It must accumulate all input
306
+ /// before any output is produced. Segment keys are a performance optimization. If
307
+ /// you know your input is already partitioned by one or more columns then you can
308
+ /// specify these as segment keys. At each change in the segment keys the node will
309
+ /// emit values for all data seen so far.
310
+ ///
311
+ /// Segment keys are currently limited to single-threaded mode.
312
+ ///
313
+ /// Both keys and segment-keys determine the group. However segment-keys are also used
314
+ /// for determining grouping segments, which should be large, and allow streaming a
315
+ /// partial aggregation result after processing each segment. One common use-case for
316
+ /// segment-keys is ordered aggregation, in which the segment-key attribute specifies a
317
+ /// column with non-decreasing values or a lexicographically-ordered set of such columns.
318
+ ///
319
+ /// If the keys attribute is a non-empty vector, then each aggregate in `aggregates` is
320
+ /// expected to be a HashAggregate function. If the keys attribute is an empty vector,
321
+ /// then each aggregate is assumed to be a ScalarAggregate function.
322
+ ///
323
+ /// If the segment_keys attribute is a non-empty vector, then segmented aggregation, as
324
+ /// described above, applies.
325
+ ///
326
+ /// The keys and segment_keys vectors must be disjoint.
327
+ ///
328
+ /// If no measures are provided then you will simply get the list of unique keys.
329
+ ///
330
+ /// This node outputs segment keys first, followed by regular keys, followed by one
331
+ /// column for each aggregate.
332
+ class ARROW_ACERO_EXPORT AggregateNodeOptions : public ExecNodeOptions {
333
+ public:
334
+ /// \brief create an instance from values
335
+ explicit AggregateNodeOptions(std::vector<Aggregate> aggregates,
336
+ std::vector<FieldRef> keys = {},
337
+ std::vector<FieldRef> segment_keys = {})
338
+ : aggregates(std::move(aggregates)),
339
+ keys(std::move(keys)),
340
+ segment_keys(std::move(segment_keys)) {}
341
+
342
+ // aggregations which will be applied to the targeted fields
343
+ std::vector<Aggregate> aggregates;
344
+ // keys by which aggregations will be grouped (optional)
345
+ std::vector<FieldRef> keys;
346
+ // keys by which aggregations will be segmented (optional)
347
+ std::vector<FieldRef> segment_keys;
348
+ };
349
+
350
+ /// \brief a default value at which backpressure will be applied
351
+ constexpr int32_t kDefaultBackpressureHighBytes = 1 << 30; // 1GiB
352
+ /// \brief a default value at which backpressure will be removed
353
+ constexpr int32_t kDefaultBackpressureLowBytes = 1 << 28; // 256MiB
354
+
355
+ /// \brief an interface that can be queried for backpressure statistics
356
+ class ARROW_ACERO_EXPORT BackpressureMonitor {
357
+ public:
358
+ virtual ~BackpressureMonitor() = default;
359
+ /// \brief fetches the number of bytes currently queued up
360
+ virtual uint64_t bytes_in_use() = 0;
361
+ /// \brief checks to see if backpressure is currently applied
362
+ virtual bool is_paused() = 0;
363
+ };
364
+
365
+ /// \brief Options to control backpressure behavior
366
+ struct ARROW_ACERO_EXPORT BackpressureOptions {
367
+ /// \brief Create default options that perform no backpressure
368
+ BackpressureOptions() : resume_if_below(0), pause_if_above(0) {}
369
+ /// \brief Create options that will perform backpressure
370
+ ///
371
+ /// \param resume_if_below The producer should resume producing if the backpressure
372
+ /// queue has fewer than resume_if_below items.
373
+ /// \param pause_if_above The producer should pause producing if the backpressure
374
+ /// queue has more than pause_if_above items
375
+ BackpressureOptions(uint64_t resume_if_below, uint64_t pause_if_above)
376
+ : resume_if_below(resume_if_below), pause_if_above(pause_if_above) {}
377
+
378
+ /// \brief create an instance using default values for backpressure limits
379
+ static BackpressureOptions DefaultBackpressure() {
380
+ return BackpressureOptions(kDefaultBackpressureLowBytes,
381
+ kDefaultBackpressureHighBytes);
382
+ }
383
+
384
+ /// \brief helper method to determine if backpressure is disabled
385
+ /// \return true if pause_if_above is greater than zero, false otherwise
386
+ bool should_apply_backpressure() const { return pause_if_above > 0; }
387
+
388
+ /// \brief the number of bytes at which the producer should resume producing
389
+ uint64_t resume_if_below;
390
+ /// \brief the number of bytes at which the producer should pause producing
391
+ ///
392
+ /// If this is <= 0 then backpressure will be disabled
393
+ uint64_t pause_if_above;
394
+ };
395
+
396
+ /// \brief a sink node which collects results in a queue
397
+ ///
398
+ /// Emitted batches will only be ordered if there is a meaningful ordering
399
+ /// and sequence_output is not set to false.
400
+ class ARROW_ACERO_EXPORT SinkNodeOptions : public ExecNodeOptions {
401
+ public:
402
+ explicit SinkNodeOptions(std::function<Future<std::optional<ExecBatch>>()>* generator,
403
+ std::shared_ptr<Schema>* schema,
404
+ BackpressureOptions backpressure = {},
405
+ BackpressureMonitor** backpressure_monitor = NULLPTR,
406
+ std::optional<bool> sequence_output = std::nullopt)
407
+ : generator(generator),
408
+ schema(schema),
409
+ backpressure(backpressure),
410
+ backpressure_monitor(backpressure_monitor),
411
+ sequence_output(sequence_output) {}
412
+
413
+ explicit SinkNodeOptions(std::function<Future<std::optional<ExecBatch>>()>* generator,
414
+ BackpressureOptions backpressure = {},
415
+ BackpressureMonitor** backpressure_monitor = NULLPTR,
416
+ std::optional<bool> sequence_output = std::nullopt)
417
+ : generator(generator),
418
+ schema(NULLPTR),
419
+ backpressure(std::move(backpressure)),
420
+ backpressure_monitor(backpressure_monitor),
421
+ sequence_output(sequence_output) {}
422
+
423
+ /// \brief A pointer to a generator of batches.
424
+ ///
425
+ /// This will be set when the node is added to the plan and should be used to consume
426
+ /// data from the plan. If this function is not called frequently enough then the sink
427
+ /// node will start to accumulate data and may apply backpressure.
428
+ std::function<Future<std::optional<ExecBatch>>()>* generator;
429
+ /// \brief A pointer which will be set to the schema of the generated batches
430
+ ///
431
+ /// This is optional, if nullptr is passed in then it will be ignored.
432
+ /// This will be set when the node is added to the plan, before StartProducing is called
433
+ std::shared_ptr<Schema>* schema;
434
+ /// \brief Options to control when to apply backpressure
435
+ ///
436
+ /// This is optional, the default is to never apply backpressure. If the plan is not
437
+ /// consumed quickly enough the system may eventually run out of memory.
438
+ BackpressureOptions backpressure;
439
+ /// \brief A pointer to a backpressure monitor
440
+ ///
441
+ /// This will be set when the node is added to the plan. This can be used to inspect
442
+ /// the amount of data currently queued in the sink node. This is an optional utility
443
+ /// and backpressure can be applied even if this is not used.
444
+ BackpressureMonitor** backpressure_monitor;
445
+ /// \brief Controls whether batches should be emitted immediately or sequenced in order
446
+ ///
447
+ /// \see QueryOptions for more details
448
+ std::optional<bool> sequence_output;
449
+ };
450
+
451
+ /// \brief Control used by a SinkNodeConsumer to pause & resume
452
+ ///
453
+ /// Callers should ensure that they do not call Pause and Resume simultaneously and they
454
+ /// should sequence things so that a call to Pause() is always followed by an eventual
455
+ /// call to Resume()
456
+ class ARROW_ACERO_EXPORT BackpressureControl {
457
+ public:
458
+ virtual ~BackpressureControl() = default;
459
+ /// \brief Ask the input to pause
460
+ ///
461
+ /// This is best effort, batches may continue to arrive
462
+ /// Must eventually be followed by a call to Resume() or deadlock will occur
463
+ virtual void Pause() = 0;
464
+ /// \brief Ask the input to resume
465
+ virtual void Resume() = 0;
466
+ };
467
+
468
+ /// \brief a sink node that consumes the data as part of the plan using callbacks
469
+ class ARROW_ACERO_EXPORT SinkNodeConsumer {
470
+ public:
471
+ virtual ~SinkNodeConsumer() = default;
472
+ /// \brief Prepare any consumer state
473
+ ///
474
+ /// This will be run once the schema is finalized as the plan is starting and
475
+ /// before any calls to Consume. A common use is to save off the schema so that
476
+ /// batches can be interpreted.
477
+ virtual Status Init(const std::shared_ptr<Schema>& schema,
478
+ BackpressureControl* backpressure_control, ExecPlan* plan) = 0;
479
+ /// \brief Consume a batch of data
480
+ virtual Status Consume(ExecBatch batch) = 0;
481
+ /// \brief Signal to the consumer that the last batch has been delivered
482
+ ///
483
+ /// The returned future should only finish when all outstanding tasks have completed
484
+ ///
485
+ /// If the plan is ended early or aborts due to an error then this will not be
486
+ /// called.
487
+ virtual Future<> Finish() = 0;
488
+ };
489
+
490
+ /// \brief Add a sink node which consumes data within the exec plan run
491
+ class ARROW_ACERO_EXPORT ConsumingSinkNodeOptions : public ExecNodeOptions {
492
+ public:
493
+ explicit ConsumingSinkNodeOptions(std::shared_ptr<SinkNodeConsumer> consumer,
494
+ std::vector<std::string> names = {},
495
+ std::optional<bool> sequence_output = std::nullopt)
496
+ : consumer(std::move(consumer)),
497
+ names(std::move(names)),
498
+ sequence_output(sequence_output) {}
499
+
500
+ std::shared_ptr<SinkNodeConsumer> consumer;
501
+ /// \brief Names to rename the sink's schema fields to
502
+ ///
503
+ /// If specified then names must be provided for all fields. Currently, only a flat
504
+ /// schema is supported (see GH-31875).
505
+ ///
506
+ /// If not specified then names will be generated based on the source data.
507
+ std::vector<std::string> names;
508
+ /// \brief Controls whether batches should be emitted immediately or sequenced in order
509
+ ///
510
+ /// \see QueryOptions for more details
511
+ std::optional<bool> sequence_output;
512
+ };
513
+
514
+ /// \brief Make a node which sorts rows passed through it
515
+ ///
516
+ /// All batches pushed to this node will be accumulated, then sorted, by the given
517
+ /// fields. Then sorted batches will be forwarded to the generator in sorted order.
518
+ class ARROW_ACERO_EXPORT OrderBySinkNodeOptions : public SinkNodeOptions {
519
+ public:
520
+ /// \brief create an instance from values
521
+ explicit OrderBySinkNodeOptions(
522
+ SortOptions sort_options,
523
+ std::function<Future<std::optional<ExecBatch>>()>* generator)
524
+ : SinkNodeOptions(generator), sort_options(std::move(sort_options)) {}
525
+
526
+ /// \brief options describing which columns and direction to sort
527
+ SortOptions sort_options;
528
+ };
529
+
530
+ /// \brief Apply a new ordering to data
531
+ ///
532
+ /// Currently this node works by accumulating all data, sorting, and then emitting
533
+ /// the new data with an updated batch index.
534
+ ///
535
+ /// Larger-than-memory sort is not currently supported.
536
+ class ARROW_ACERO_EXPORT OrderByNodeOptions : public ExecNodeOptions {
537
+ public:
538
+ static constexpr std::string_view kName = "order_by";
539
+ explicit OrderByNodeOptions(Ordering ordering) : ordering(std::move(ordering)) {}
540
+
541
+ /// \brief The new ordering to apply to outgoing data
542
+ Ordering ordering;
543
+ };
544
+
545
+ enum class JoinType {
546
+ LEFT_SEMI,
547
+ RIGHT_SEMI,
548
+ LEFT_ANTI,
549
+ RIGHT_ANTI,
550
+ INNER,
551
+ LEFT_OUTER,
552
+ RIGHT_OUTER,
553
+ FULL_OUTER
554
+ };
555
+
556
+ std::string ToString(JoinType t);
557
+
558
+ enum class JoinKeyCmp { EQ, IS };
559
+
560
+ /// \brief a node which implements a join operation using a hash table
561
+ class ARROW_ACERO_EXPORT HashJoinNodeOptions : public ExecNodeOptions {
562
+ public:
563
+ static constexpr const char* default_output_suffix_for_left = "";
564
+ static constexpr const char* default_output_suffix_for_right = "";
565
+ /// \brief create an instance from values that outputs all columns
566
+ HashJoinNodeOptions(
567
+ JoinType in_join_type, std::vector<FieldRef> in_left_keys,
568
+ std::vector<FieldRef> in_right_keys, Expression filter = literal(true),
569
+ std::string output_suffix_for_left = default_output_suffix_for_left,
570
+ std::string output_suffix_for_right = default_output_suffix_for_right,
571
+ bool disable_bloom_filter = false)
572
+ : join_type(in_join_type),
573
+ left_keys(std::move(in_left_keys)),
574
+ right_keys(std::move(in_right_keys)),
575
+ output_all(true),
576
+ output_suffix_for_left(std::move(output_suffix_for_left)),
577
+ output_suffix_for_right(std::move(output_suffix_for_right)),
578
+ filter(std::move(filter)),
579
+ disable_bloom_filter(disable_bloom_filter) {
580
+ this->key_cmp.resize(this->left_keys.size());
581
+ for (size_t i = 0; i < this->left_keys.size(); ++i) {
582
+ this->key_cmp[i] = JoinKeyCmp::EQ;
583
+ }
584
+ }
585
+ /// \brief create an instance from keys
586
+ ///
587
+ /// This will create an inner join that outputs all columns and has no post join filter
588
+ ///
589
+ /// `in_left_keys` should have the same length and types as `in_right_keys`
590
+ /// @param in_left_keys the keys in the left input
591
+ /// @param in_right_keys the keys in the right input
592
+ HashJoinNodeOptions(std::vector<FieldRef> in_left_keys,
593
+ std::vector<FieldRef> in_right_keys)
594
+ : left_keys(std::move(in_left_keys)), right_keys(std::move(in_right_keys)) {
595
+ this->join_type = JoinType::INNER;
596
+ this->output_all = true;
597
+ this->output_suffix_for_left = default_output_suffix_for_left;
598
+ this->output_suffix_for_right = default_output_suffix_for_right;
599
+ this->key_cmp.resize(this->left_keys.size());
600
+ for (size_t i = 0; i < this->left_keys.size(); ++i) {
601
+ this->key_cmp[i] = JoinKeyCmp::EQ;
602
+ }
603
+ this->filter = literal(true);
604
+ }
605
+ /// \brief create an instance from values using JoinKeyCmp::EQ for all comparisons
606
+ HashJoinNodeOptions(
607
+ JoinType join_type, std::vector<FieldRef> left_keys,
608
+ std::vector<FieldRef> right_keys, std::vector<FieldRef> left_output,
609
+ std::vector<FieldRef> right_output, Expression filter = literal(true),
610
+ std::string output_suffix_for_left = default_output_suffix_for_left,
611
+ std::string output_suffix_for_right = default_output_suffix_for_right,
612
+ bool disable_bloom_filter = false)
613
+ : join_type(join_type),
614
+ left_keys(std::move(left_keys)),
615
+ right_keys(std::move(right_keys)),
616
+ output_all(false),
617
+ left_output(std::move(left_output)),
618
+ right_output(std::move(right_output)),
619
+ output_suffix_for_left(std::move(output_suffix_for_left)),
620
+ output_suffix_for_right(std::move(output_suffix_for_right)),
621
+ filter(std::move(filter)),
622
+ disable_bloom_filter(disable_bloom_filter) {
623
+ this->key_cmp.resize(this->left_keys.size());
624
+ for (size_t i = 0; i < this->left_keys.size(); ++i) {
625
+ this->key_cmp[i] = JoinKeyCmp::EQ;
626
+ }
627
+ }
628
+ /// \brief create an instance from values
629
+ HashJoinNodeOptions(
630
+ JoinType join_type, std::vector<FieldRef> left_keys,
631
+ std::vector<FieldRef> right_keys, std::vector<FieldRef> left_output,
632
+ std::vector<FieldRef> right_output, std::vector<JoinKeyCmp> key_cmp,
633
+ Expression filter = literal(true),
634
+ std::string output_suffix_for_left = default_output_suffix_for_left,
635
+ std::string output_suffix_for_right = default_output_suffix_for_right,
636
+ bool disable_bloom_filter = false)
637
+ : join_type(join_type),
638
+ left_keys(std::move(left_keys)),
639
+ right_keys(std::move(right_keys)),
640
+ output_all(false),
641
+ left_output(std::move(left_output)),
642
+ right_output(std::move(right_output)),
643
+ key_cmp(std::move(key_cmp)),
644
+ output_suffix_for_left(std::move(output_suffix_for_left)),
645
+ output_suffix_for_right(std::move(output_suffix_for_right)),
646
+ filter(std::move(filter)),
647
+ disable_bloom_filter(disable_bloom_filter) {}
648
+
649
+ HashJoinNodeOptions() = default;
650
+
651
+ // type of join (inner, left, semi...)
652
+ JoinType join_type = JoinType::INNER;
653
+ // key fields from left input
654
+ std::vector<FieldRef> left_keys;
655
+ // key fields from right input
656
+ std::vector<FieldRef> right_keys;
657
+ // if set all valid fields from both left and right input will be output
658
+ // (and field ref vectors for output fields will be ignored)
659
+ bool output_all = false;
660
+ // output fields passed from left input
661
+ std::vector<FieldRef> left_output;
662
+ // output fields passed from right input
663
+ std::vector<FieldRef> right_output;
664
+ // key comparison function (determines whether a null key is equal another null
665
+ // key or not)
666
+ std::vector<JoinKeyCmp> key_cmp;
667
+ // suffix added to names of output fields coming from left input (used to distinguish,
668
+ // if necessary, between fields of the same name in left and right input and can be left
669
+ // empty if there are no name collisions)
670
+ std::string output_suffix_for_left;
671
+ // suffix added to names of output fields coming from right input
672
+ std::string output_suffix_for_right;
673
+ // residual filter which is applied to matching rows. Rows that do not match
674
+ // the filter are not included. The filter is applied against the
675
+ // concatenated input schema (left fields then right fields) and can reference
676
+ // fields that are not included in the output.
677
+ Expression filter = literal(true);
678
+ // whether or not to disable Bloom filters in this join
679
+ bool disable_bloom_filter = false;
680
+ };
681
+
682
+ /// \brief a node which implements the asof join operation
683
+ ///
684
+ /// Note, this API is experimental and will change in the future
685
+ ///
686
+ /// This node takes one left table and any number of right tables, and asof joins them
687
+ /// together. Batches produced by each input must be ordered by the "on" key.
688
+ /// This node will output one row for each row in the left table.
689
+ class ARROW_ACERO_EXPORT AsofJoinNodeOptions : public ExecNodeOptions {
690
+ public:
691
+ /// \brief Keys for one input table of the AsofJoin operation
692
+ ///
693
+ /// The keys must be consistent across the input tables:
694
+ /// Each "on" key must refer to a field of the same type and units across the tables.
695
+ /// Each "by" key must refer to a list of fields of the same types across the tables.
696
+ struct Keys {
697
+ /// \brief "on" key for the join.
698
+ ///
699
+ /// The input table must be sorted by the "on" key. Must be a single field of a common
700
+ /// type. Inexact match is used on the "on" key. i.e., a row is considered a match iff
701
+ /// left_on - tolerance <= right_on <= left_on.
702
+ /// Currently, the "on" key must be of an integer, date, or timestamp type.
703
+ FieldRef on_key;
704
+ /// \brief "by" key for the join.
705
+ ///
706
+ /// Each input table must have each field of the "by" key. Exact equality is used for
707
+ /// each field of the "by" key.
708
+ /// Currently, each field of the "by" key must be of an integer, date, timestamp, or
709
+ /// base-binary type.
710
+ std::vector<FieldRef> by_key;
711
+ };
712
+
713
+ AsofJoinNodeOptions(std::vector<Keys> input_keys, int64_t tolerance)
714
+ : input_keys(std::move(input_keys)), tolerance(tolerance) {}
715
+
716
+ /// \brief AsofJoin keys per input table. At least two keys must be given. The first key
717
+ /// corresponds to a left table and all other keys correspond to right tables for the
718
+ /// as-of-join.
719
+ ///
720
+ /// \see `Keys` for details.
721
+ std::vector<Keys> input_keys;
722
+ /// \brief Tolerance for inexact "on" key matching. A right row is considered a match
723
+ /// with the left row if `right.on - left.on <= tolerance`. The `tolerance` may be:
724
+ /// - negative, in which case a past-as-of-join occurs;
725
+ /// - or positive, in which case a future-as-of-join occurs;
726
+ /// - or zero, in which case an exact-as-of-join occurs.
727
+ ///
728
+ /// The tolerance is interpreted in the same units as the "on" key.
729
+ int64_t tolerance;
730
+ };
731
+
732
+ /// \brief a node which select top_k/bottom_k rows passed through it
733
+ ///
734
+ /// All batches pushed to this node will be accumulated, then selected, by the given
735
+ /// fields. Then sorted batches will be forwarded to the generator in sorted order.
736
+ class ARROW_ACERO_EXPORT SelectKSinkNodeOptions : public SinkNodeOptions {
737
+ public:
738
+ explicit SelectKSinkNodeOptions(
739
+ SelectKOptions select_k_options,
740
+ std::function<Future<std::optional<ExecBatch>>()>* generator)
741
+ : SinkNodeOptions(generator), select_k_options(std::move(select_k_options)) {}
742
+
743
+ /// SelectK options
744
+ SelectKOptions select_k_options;
745
+ };
746
+
747
+ /// \brief a sink node which accumulates all output into a table
748
+ class ARROW_ACERO_EXPORT TableSinkNodeOptions : public ExecNodeOptions {
749
+ public:
750
+ /// \brief create an instance from values
751
+ explicit TableSinkNodeOptions(std::shared_ptr<Table>* output_table,
752
+ std::optional<bool> sequence_output = std::nullopt)
753
+ : output_table(output_table), sequence_output(sequence_output) {}
754
+
755
+ /// \brief an "out parameter" specifying the table that will be created
756
+ ///
757
+ /// Must not be null and remain valid for the entirety of the plan execution. After the
758
+ /// plan has completed this will be set to point to the result table
759
+ std::shared_ptr<Table>* output_table;
760
+ /// \brief Controls whether batches should be emitted immediately or sequenced in order
761
+ ///
762
+ /// \see QueryOptions for more details
763
+ std::optional<bool> sequence_output;
764
+ /// \brief Custom names to use for the columns.
765
+ ///
766
+ /// If specified then names must be provided for all fields. Currently, only a flat
767
+ /// schema is supported (see GH-31875).
768
+ ///
769
+ /// If not specified then names will be generated based on the source data.
770
+ std::vector<std::string> names;
771
+ };
772
+
773
+ /// \brief a row template that describes one row that will be generated for each input row
774
+ struct ARROW_ACERO_EXPORT PivotLongerRowTemplate {
775
+ PivotLongerRowTemplate(std::vector<std::string> feature_values,
776
+ std::vector<std::optional<FieldRef>> measurement_values)
777
+ : feature_values(std::move(feature_values)),
778
+ measurement_values(std::move(measurement_values)) {}
779
+ /// A (typically unique) set of feature values for the template, usually derived from a
780
+ /// column name
781
+ ///
782
+ /// These will be used to populate the feature columns
783
+ std::vector<std::string> feature_values;
784
+ /// The fields containing the measurements to use for this row
785
+ ///
786
+ /// These will be used to populate the measurement columns. If nullopt then nulls
787
+ /// will be inserted for the given value.
788
+ std::vector<std::optional<FieldRef>> measurement_values;
789
+ };
790
+
791
+ /// \brief Reshape a table by turning some columns into additional rows
792
+ ///
793
+ /// This operation is sometimes also referred to as UNPIVOT
794
+ ///
795
+ /// This is typically done when there are multiple observations in each row in order to
796
+ /// transform to a table containing a single observation per row.
797
+ ///
798
+ /// For example:
799
+ ///
800
+ /// | time | left_temp | right_temp |
801
+ /// | ---- | --------- | ---------- |
802
+ /// | 1 | 10 | 20 |
803
+ /// | 2 | 15 | 18 |
804
+ ///
805
+ /// The above table contains two observations per row. There is an implicit feature
806
+ /// "location" (left vs right) and a measurement "temp". What we really want is:
807
+ ///
808
+ /// | time | location | temp |
809
+ /// | --- | --- | --- |
810
+ /// | 1 | left | 10 |
811
+ /// | 1 | right | 20 |
812
+ /// | 2 | left | 15 |
813
+ /// | 2 | right | 18 |
814
+ ///
815
+ /// For a more complex example consider:
816
+ ///
817
+ /// | time | ax1 | ay1 | bx1 | ay2 |
818
+ /// | ---- | --- | --- | --- | --- |
819
+ /// | 0 | 1 | 2 | 3 | 4 |
820
+ ///
821
+ /// We can pretend a vs b and x vs y are features while 1 and 2 are two different
822
+ /// kinds of measurements. We thus want to pivot to
823
+ ///
824
+ /// | time | a/b | x/y | f1 | f2 |
825
+ /// | ---- | --- | --- | ---- | ---- |
826
+ /// | 0 | a | x | 1 | null |
827
+ /// | 0 | a | y | 2 | 4 |
828
+ /// | 0 | b | x | 3 | null |
829
+ ///
830
+ /// To do this we create a row template for each combination of features. One should
831
+ /// be able to do this purely by looking at the column names. For example, given the
832
+ /// above columns "ax1", "ay1", "bx1", and "ay2" we know we have three feature
833
+ /// combinations (a, x), (a, y), and (b, x). Similarly, we know we have two possible
834
+ /// measurements, "1" and "2".
835
+ ///
836
+ /// For each combination of features we create a row template. In each row template we
837
+ /// describe the combination and then list which columns to use for the measurements.
838
+ /// If a measurement doesn't exist for a given combination then we use nullopt.
839
+ ///
840
+ /// So, for our above example, we have:
841
+ ///
842
+ /// (a, x): names={"a", "x"}, values={"ax1", nullopt}
843
+ /// (a, y): names={"a", "y"}, values={"ay1", "ay2"}
844
+ /// (b, x): names={"b", "x"}, values={"bx1", nullopt}
845
+ ///
846
+ /// Finishing it off we name our new columns:
847
+ /// feature_field_names={"a/b","x/y"}
848
+ /// measurement_field_names={"f1", "f2"}
849
+ class ARROW_ACERO_EXPORT PivotLongerNodeOptions : public ExecNodeOptions {
850
+ public:
851
+ static constexpr std::string_view kName = "pivot_longer";
852
+ /// One or more row templates to create new output rows
853
+ ///
854
+ /// Normally there are at least two row templates. The output # of rows
855
+ /// will be the input # of rows * the number of row templates
856
+ std::vector<PivotLongerRowTemplate> row_templates;
857
+ /// The names of the columns which describe the new features
858
+ std::vector<std::string> feature_field_names;
859
+ /// The names of the columns which represent the measurements
860
+ std::vector<std::string> measurement_field_names;
861
+ };
862
+
863
+ /// @}
864
+
865
+ } // namespace acero
866
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/order_by_impl.h ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/acero/options.h"
25
+ #include "arrow/record_batch.h"
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type.h"
29
+
30
+ namespace arrow {
31
+
32
+ using compute::ExecContext;
33
+
34
+ namespace acero {
35
+
36
+ class OrderByImpl {
37
+ public:
38
+ virtual ~OrderByImpl() = default;
39
+
40
+ virtual void InputReceived(const std::shared_ptr<RecordBatch>& batch) = 0;
41
+
42
+ virtual Result<Datum> DoFinish() = 0;
43
+
44
+ virtual std::string ToString() const = 0;
45
+
46
+ static Result<std::unique_ptr<OrderByImpl>> MakeSort(
47
+ ExecContext* ctx, const std::shared_ptr<Schema>& output_schema,
48
+ const SortOptions& options);
49
+
50
+ static Result<std::unique_ptr<OrderByImpl>> MakeSelectK(
51
+ ExecContext* ctx, const std::shared_ptr<Schema>& output_schema,
52
+ const SelectKOptions& options);
53
+ };
54
+
55
+ } // namespace acero
56
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/partition_util.h ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cassert>
22
+ #include <cstdint>
23
+ #include <functional>
24
+ #include <random>
25
+ #include "arrow/acero/util.h"
26
+ #include "arrow/buffer.h"
27
+ #include "arrow/util/pcg_random.h"
28
+
29
+ namespace arrow {
30
+ namespace acero {
31
+
32
+ class PartitionSort {
33
+ public:
34
+ /// \brief Bucket sort rows on partition ids in O(num_rows) time.
35
+ ///
36
+ /// Include in the output exclusive cumulative sum of bucket sizes.
37
+ /// This corresponds to ranges in the sorted array containing all row ids for
38
+ /// each of the partitions.
39
+ ///
40
+ /// prtn_ranges must be initialized and have at least prtn_ranges + 1 elements
41
+ /// when this method returns prtn_ranges[i] will contains the total number of
42
+ /// elements in partitions 0 through i. prtn_ranges[0] will be 0.
43
+ ///
44
+ /// prtn_id_impl must be a function that takes in a row id (int) and returns
45
+ /// a partition id (int). The returned partition id must be between 0 and
46
+ /// num_prtns (exclusive).
47
+ ///
48
+ /// output_pos_impl is a function that takes in a row id (int) and a position (int)
49
+ /// in the bucket sorted output. The function should insert the row in the
50
+ /// output.
51
+ ///
52
+ /// For example:
53
+ ///
54
+ /// in_arr: [5, 7, 2, 3, 5, 4]
55
+ /// num_prtns: 3
56
+ /// prtn_id_impl: [&in_arr] (int row_id) { return in_arr[row_id] / 3; }
57
+ /// output_pos_impl: [&out_arr] (int row_id, int pos) { out_arr[pos] = row_id; }
58
+ ///
59
+ /// After Execution
60
+ /// out_arr: [2, 5, 3, 5, 4, 7]
61
+ /// prtn_ranges: [0, 1, 5, 6]
62
+ template <class INPUT_PRTN_ID_FN, class OUTPUT_POS_FN>
63
+ static void Eval(int64_t num_rows, int num_prtns, uint16_t* prtn_ranges,
64
+ INPUT_PRTN_ID_FN prtn_id_impl, OUTPUT_POS_FN output_pos_impl) {
65
+ ARROW_DCHECK(num_rows > 0 && num_rows <= (1 << 15));
66
+ ARROW_DCHECK(num_prtns >= 1 && num_prtns <= (1 << 15));
67
+
68
+ memset(prtn_ranges, 0, (num_prtns + 1) * sizeof(uint16_t));
69
+
70
+ for (int64_t i = 0; i < num_rows; ++i) {
71
+ int prtn_id = static_cast<int>(prtn_id_impl(i));
72
+ ++prtn_ranges[prtn_id + 1];
73
+ }
74
+
75
+ uint16_t sum = 0;
76
+ for (int i = 0; i < num_prtns; ++i) {
77
+ uint16_t sum_next = sum + prtn_ranges[i + 1];
78
+ prtn_ranges[i + 1] = sum;
79
+ sum = sum_next;
80
+ }
81
+
82
+ for (int64_t i = 0; i < num_rows; ++i) {
83
+ int prtn_id = static_cast<int>(prtn_id_impl(i));
84
+ int pos = prtn_ranges[prtn_id + 1]++;
85
+ output_pos_impl(i, pos);
86
+ }
87
+ }
88
+ };
89
+
90
+ /// \brief A control for synchronizing threads on a partitionable workload
91
+ class PartitionLocks {
92
+ public:
93
+ PartitionLocks();
94
+ ~PartitionLocks();
95
+ /// \brief Initializes the control, must be called before use
96
+ ///
97
+ /// \param num_threads Maximum number of threads that will access the partitions
98
+ /// \param num_prtns Number of partitions to synchronize
99
+ void Init(size_t num_threads, int num_prtns);
100
+ /// \brief Cleans up the control, it should not be used after this call
101
+ void CleanUp();
102
+ /// \brief Acquire a partition to work on one
103
+ ///
104
+ /// \param thread_id The index of the thread trying to acquire the partition lock
105
+ /// \param num_prtns Length of prtns_to_try, must be <= num_prtns used in Init
106
+ /// \param prtns_to_try An array of partitions that still have remaining work
107
+ /// \param limit_retries If false, this method will spinwait forever until success
108
+ /// \param max_retries Max times to attempt checking out work before returning false
109
+ /// \param[out] locked_prtn_id The id of the partition locked
110
+ /// \param[out] locked_prtn_id_pos The index of the partition locked in prtns_to_try
111
+ /// \return True if a partition was locked, false if max_retries was attempted
112
+ /// without successfully acquiring a lock
113
+ ///
114
+ /// This method is thread safe
115
+ bool AcquirePartitionLock(size_t thread_id, int num_prtns, const int* prtns_to_try,
116
+ bool limit_retries, int max_retries, int* locked_prtn_id,
117
+ int* locked_prtn_id_pos);
118
+ /// \brief Release a partition so that other threads can work on it
119
+ void ReleasePartitionLock(int prtn_id);
120
+
121
+ // Executes (synchronously and using current thread) the same operation on a set of
122
+ // multiple partitions. Tries to minimize partition locking overhead by randomizing and
123
+ // adjusting order in which partitions are processed.
124
+ //
125
+ // PROCESS_PRTN_FN is a callback which will be executed for each partition after
126
+ // acquiring the lock for that partition. It gets partition id as an argument.
127
+ // IS_PRTN_EMPTY_FN is a callback which filters out (when returning true) partitions
128
+ // with specific ids from processing.
129
+ //
130
+ template <typename IS_PRTN_EMPTY_FN, typename PROCESS_PRTN_FN>
131
+ Status ForEachPartition(size_t thread_id,
132
+ /*scratch space buffer with space for one element per partition;
133
+ dirty in and dirty out*/
134
+ int* temp_unprocessed_prtns, IS_PRTN_EMPTY_FN is_prtn_empty_fn,
135
+ PROCESS_PRTN_FN process_prtn_fn) {
136
+ int num_unprocessed_partitions = 0;
137
+ for (int i = 0; i < num_prtns_; ++i) {
138
+ bool is_prtn_empty = is_prtn_empty_fn(i);
139
+ if (!is_prtn_empty) {
140
+ temp_unprocessed_prtns[num_unprocessed_partitions++] = i;
141
+ }
142
+ }
143
+ while (num_unprocessed_partitions > 0) {
144
+ int locked_prtn_id;
145
+ int locked_prtn_id_pos;
146
+ AcquirePartitionLock(thread_id, num_unprocessed_partitions, temp_unprocessed_prtns,
147
+ /*limit_retries=*/false, /*max_retries=*/-1, &locked_prtn_id,
148
+ &locked_prtn_id_pos);
149
+ {
150
+ class AutoReleaseLock {
151
+ public:
152
+ AutoReleaseLock(PartitionLocks* locks, int prtn_id)
153
+ : locks(locks), prtn_id(prtn_id) {}
154
+ ~AutoReleaseLock() { locks->ReleasePartitionLock(prtn_id); }
155
+ PartitionLocks* locks;
156
+ int prtn_id;
157
+ } auto_release_lock(this, locked_prtn_id);
158
+ ARROW_RETURN_NOT_OK(process_prtn_fn(locked_prtn_id));
159
+ }
160
+ if (locked_prtn_id_pos < num_unprocessed_partitions - 1) {
161
+ temp_unprocessed_prtns[locked_prtn_id_pos] =
162
+ temp_unprocessed_prtns[num_unprocessed_partitions - 1];
163
+ }
164
+ --num_unprocessed_partitions;
165
+ }
166
+ return Status::OK();
167
+ }
168
+
169
+ private:
170
+ std::atomic<bool>* lock_ptr(int prtn_id);
171
+ int random_int(size_t thread_id, int num_values);
172
+
173
+ struct PartitionLock {
174
+ static constexpr int kCacheLineBytes = 64;
175
+ std::atomic<bool> lock;
176
+ uint8_t padding[kCacheLineBytes];
177
+ };
178
+ int num_prtns_;
179
+ std::unique_ptr<PartitionLock[]> locks_;
180
+ std::unique_ptr<arrow::random::pcg32_fast[]> rngs_;
181
+ };
182
+
183
+ } // namespace acero
184
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Often-used headers, for precompiling.
19
+ // If updating this header, please make sure you check compilation speed
20
+ // before checking in. Adding headers which are not used extremely often
21
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
22
+
23
+ #include "arrow/pch.h"
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/query_context.h ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+ #pragma once
18
+
19
+ #include <string_view>
20
+
21
+ #include "arrow/acero/exec_plan.h"
22
+ #include "arrow/acero/task_util.h"
23
+ #include "arrow/acero/util.h"
24
+ #include "arrow/compute/exec.h"
25
+ #include "arrow/io/interfaces.h"
26
+ #include "arrow/util/async_util.h"
27
+ #include "arrow/util/type_fwd.h"
28
+
29
+ namespace arrow {
30
+
31
+ using compute::default_exec_context;
32
+ using io::IOContext;
33
+
34
+ namespace acero {
35
+
36
/// \brief Shared per-query state for all nodes in a single ExecPlan
///
/// Bundles the query options, the compute ExecContext, an IOContext and the
/// task-scheduling facilities used by Acero nodes.  The Schedule* helpers
/// add work to the query's task group; the plain accessors are trivial
/// forwarders to the wrapped contexts.
class ARROW_ACERO_EXPORT QueryContext {
 public:
  QueryContext(QueryOptions opts = {},
               ExecContext exec_context = *default_exec_context());

  /// \brief Prepare the context for execution
  ///
  /// \param max_num_threads Upper bound on the number of worker threads that
  ///        will call into this context (used to size per-thread state)
  /// \param scheduler Scheduler for async tasks; not owned, must outlive this
  ///        context
  Status Init(size_t max_num_threads, arrow::util::AsyncTaskScheduler* scheduler);

  const ::arrow::internal::CpuInfo* cpu_info() const;
  int64_t hardware_flags() const;
  const QueryOptions& options() const { return options_; }
  MemoryPool* memory_pool() const { return exec_context_.memory_pool(); }
  ::arrow::internal::Executor* executor() const { return exec_context_.executor(); }
  ExecContext* exec_context() { return &exec_context_; }
  IOContext* io_context() { return &io_context_; }
  TaskScheduler* scheduler() { return task_scheduler_.get(); }
  arrow::util::AsyncTaskScheduler* async_scheduler() { return async_scheduler_; }

  /// \brief Index of the calling thread (assumes the thread was registered via
  ///        the internal ThreadIndexer — TODO confirm registration path)
  size_t GetThreadIndex();
  /// \brief Maximum number of threads that may call into this context
  size_t max_concurrency() const;
  /// \brief Per-thread scratch stack for temporary vector allocations
  Result<arrow::util::TempVectorStack*> GetTempStack(size_t thread_index);

  /// \brief Start an external task
  ///
  /// This should be avoided if possible. It is kept in for now for legacy
  /// purposes. This should be called before the external task is started. If
  /// a valid future is returned then it should be marked complete when the
  /// external task has finished.
  ///
  /// \param name A name to give the task for traceability and debugging
  ///
  /// \return an invalid future if the plan has already ended, otherwise this
  ///         returns a future that must be completed when the external task
  ///         finishes.
  Result<Future<>> BeginExternalTask(std::string_view name);

  /// \brief Add a single function as a task to the query's task group
  ///        on the compute threadpool.
  ///
  /// \param fn The task to run. Takes no arguments and returns a Status.
  /// \param name A name to give the task for traceability and debugging
  void ScheduleTask(std::function<Status()> fn, std::string_view name);
  /// \brief Add a single function as a task to the query's task group
  ///        on the compute threadpool.
  ///
  /// \param fn The task to run. Takes the thread index and returns a Status.
  /// \param name A name to give the task for traceability and debugging
  void ScheduleTask(std::function<Status(size_t)> fn, std::string_view name);
  /// \brief Add a single function as a task to the query's task group on
  ///        the IO thread pool
  ///
  /// \param fn The task to run. Returns a status.
  /// \param name A name to give the task for traceability and debugging
  void ScheduleIOTask(std::function<Status()> fn, std::string_view name);

  // Register/Start TaskGroup is a way of performing a "Parallel For" pattern:
  // - The task function takes the thread index and the index of the task
  // - The on_finished function takes the thread index
  // Returns an integer ID that will be used to reference the task group in
  // StartTaskGroup. At runtime, call StartTaskGroup with the ID and the number of times
  // you'd like the task to be executed. The need to register a task group before use will
  // be removed after we rewrite the scheduler.
  /// \brief Register a "parallel for" task group with the scheduler
  ///
  /// \param task The function implementing the task. Takes the thread_index and
  ///        the task index.
  /// \param on_finished The function that gets run once all tasks have been completed.
  ///        Takes the thread_index.
  ///
  /// Must be called inside of ExecNode::Init.
  int RegisterTaskGroup(std::function<Status(size_t, int64_t)> task,
                        std::function<Status(size_t)> on_finished);

  /// \brief Start the task group with the specified ID. This can only
  ///        be called once per task_group_id.
  ///
  /// \param task_group_id The ID of the task group to run
  /// \param num_tasks The number of times to run the task
  Status StartTaskGroup(int task_group_id, int64_t num_tasks);

  // This is an RAII class for keeping track of in-flight file IO. Useful for getting
  // an estimate of memory use, and how much memory we expect to be freed soon.
  // Returned by ReportTempFileIO.
  struct [[nodiscard]] TempFileIOMark {
    QueryContext* ctx_;  // not owned; must outlive the mark
    size_t bytes_;       // number of bytes this mark accounts for

    TempFileIOMark(QueryContext* ctx, size_t bytes) : ctx_(ctx), bytes_(bytes) {
      // NOTE(review): acquire on the add and release on the sub is an unusual
      // pairing for what looks like a pure statistics counter; relaxed ordering
      // would presumably suffice — confirm against all readers of
      // in_flight_bytes_to_disk_ before changing.
      ctx_->in_flight_bytes_to_disk_.fetch_add(bytes_, std::memory_order_acquire);
    }

    ARROW_DISALLOW_COPY_AND_ASSIGN(TempFileIOMark);

    ~TempFileIOMark() {
      ctx_->in_flight_bytes_to_disk_.fetch_sub(bytes_, std::memory_order_release);
    }
  };

  /// \brief Record `bytes` of in-flight temp-file IO for the lifetime of the
  ///        returned RAII mark
  TempFileIOMark ReportTempFileIO(size_t bytes) { return {this, bytes}; }

  /// \brief Current total of in-flight temp-file IO bytes
  size_t GetCurrentTempFileIO() { return in_flight_bytes_to_disk_.load(); }

 private:
  QueryOptions options_;
  // To be replaced with Acero-specific context once scheduler is done and
  // we don't need ExecContext for kernels
  ExecContext exec_context_;
  IOContext io_context_;

  // Not owned; supplied via Init().
  arrow::util::AsyncTaskScheduler* async_scheduler_ = NULLPTR;
  std::unique_ptr<TaskScheduler> task_scheduler_ = TaskScheduler::Make();

  ThreadIndexer thread_indexer_;
  // Lazily-initialized per-thread scratch state (see GetTempStack).
  struct ThreadLocalData {
    bool is_init = false;
    arrow::util::TempVectorStack stack;
  };
  std::vector<ThreadLocalData> tld_;

  std::atomic<size_t> in_flight_bytes_to_disk_{0};
};
156
+ } // namespace acero
157
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cassert>
21
+ #include <cstdint>
22
+ #include <memory>
23
+ #include <string>
24
+ #include <vector>
25
+
26
+ #include "arrow/type.h" // for DataType, FieldRef, Field and Schema
27
+
28
+ namespace arrow {
29
+
30
+ using internal::checked_cast;
31
+
32
+ namespace acero {
33
+
34
+ // Identifiers for all different row schemas that are used in a join
35
+ //
36
// Identifiers for all different row schemas that are used in a join.
// Used as the projection handle for SchemaProjectionMaps below.
enum class HashJoinProjection : int {
  INPUT = 0,    // full input schema of one side of the join
  KEY = 1,      // key columns compared for equality
  PAYLOAD = 2,  // non-key columns carried through the join
  FILTER = 3,   // columns referenced by the residual filter
  OUTPUT = 4    // columns emitted by the join
};
43
+
44
/// Non-owning view that maps a column index in a source projection to the
/// corresponding column index in a target projection, going through the
/// "base" (full) schema.  The two arrays are owned by SchemaProjectionMaps.
struct SchemaProjectionMap {
  static constexpr int kMissingField = -1;
  int num_cols;               // number of columns in the source projection
  const int* source_to_base;  // source column -> base (full schema) column
  const int* base_to_target;  // base column -> target projection column
  // Map source column i to the target projection (asserts the field exists).
  inline int get(int i) const {
    assert(i >= 0 && i < num_cols);
    assert(source_to_base[i] != kMissingField);
    return base_to_target[source_to_base[i]];
  }
};
55
+
56
+ /// Helper class for managing different projections of the same row schema.
57
+ /// Used to efficiently map any field in one projection to a corresponding field in
58
+ /// another projection.
59
+ /// Materialized mappings are generated lazily at the time of the first access.
60
+ /// Thread-safe apart from initialization.
61
/// Helper class for managing different projections of the same row schema.
/// Used to efficiently map any field in one projection to a corresponding field in
/// another projection.
/// Materialized mappings are generated lazily at the time of the first access.
/// Thread-safe apart from initialization.
template <typename ProjectionIdEnum>
class SchemaProjectionMaps {
 public:
  static constexpr int kMissingField = -1;

  /// Register the full schema plus one entry per projection, then build all
  /// projection <-> base mappings.  Must be called exactly once before any
  /// other method.
  Status Init(ProjectionIdEnum full_schema_handle, const Schema& schema,
              const std::vector<ProjectionIdEnum>& projection_handles,
              const std::vector<const std::vector<FieldRef>*>& projections) {
    assert(projection_handles.size() == projections.size());
    ARROW_RETURN_NOT_OK(RegisterSchema(full_schema_handle, schema));
    for (size_t i = 0; i < projections.size(); ++i) {
      ARROW_RETURN_NOT_OK(
          RegisterProjectedSchema(projection_handles[i], *(projections[i]), schema));
    }
    RegisterEnd();
    return Status::OK();
  }

  /// Number of columns in the given projection.
  int num_cols(ProjectionIdEnum schema_handle) const {
    int id = schema_id(schema_handle);
    return static_cast<int>(schemas_[id].second.data_types.size());
  }

  bool is_empty(ProjectionIdEnum schema_handle) const {
    return num_cols(schema_handle) == 0;
  }

  const std::string& field_name(ProjectionIdEnum schema_handle, int field_id) const {
    int id = schema_id(schema_handle);
    return schemas_[id].second.field_names[field_id];
  }

  const std::shared_ptr<DataType>& data_type(ProjectionIdEnum schema_handle,
                                             int field_id) const {
    int id = schema_id(schema_handle);
    return schemas_[id].second.data_types[field_id];
  }

  const std::vector<std::shared_ptr<DataType>>& data_types(
      ProjectionIdEnum schema_handle) const {
    int id = schema_id(schema_handle);
    return schemas_[id].second.data_types;
  }

  /// Build a non-owning view that maps columns of projection `from` to
  /// columns of projection `to`.  The returned pointers reference arrays
  /// owned by this object and stay valid for its lifetime.
  SchemaProjectionMap map(ProjectionIdEnum from, ProjectionIdEnum to) const {
    int id_from = schema_id(from);
    int id_to = schema_id(to);
    SchemaProjectionMap result;
    result.num_cols = num_cols(from);
    result.source_to_base = mappings_[id_from].data();
    result.base_to_target = inverse_mappings_[id_to].data();
    return result;
  }

 protected:
  // Per-projection column metadata, parallel vectors indexed by column.
  struct FieldInfos {
    std::vector<int> field_paths;  // index of the column in the full schema
    std::vector<std::string> field_names;
    std::vector<std::shared_ptr<DataType>> data_types;
  };

  // Register the full (base) schema: identity field paths.
  Status RegisterSchema(ProjectionIdEnum handle, const Schema& schema) {
    FieldInfos out_fields;
    const FieldVector& in_fields = schema.fields();
    out_fields.field_paths.resize(in_fields.size());
    out_fields.field_names.resize(in_fields.size());
    out_fields.data_types.resize(in_fields.size());
    for (size_t i = 0; i < in_fields.size(); ++i) {
      const std::string& name = in_fields[i]->name();
      const std::shared_ptr<DataType>& type = in_fields[i]->type();
      out_fields.field_paths[i] = static_cast<int>(i);
      out_fields.field_names[i] = name;
      out_fields.data_types[i] = type;
    }
    schemas_.push_back(std::make_pair(handle, out_fields));
    return Status::OK();
  }

  // Register a projection by resolving each FieldRef against the full schema.
  Status RegisterProjectedSchema(ProjectionIdEnum handle,
                                 const std::vector<FieldRef>& selected_fields,
                                 const Schema& full_schema) {
    FieldInfos out_fields;
    const FieldVector& in_fields = full_schema.fields();
    out_fields.field_paths.resize(selected_fields.size());
    out_fields.field_names.resize(selected_fields.size());
    out_fields.data_types.resize(selected_fields.size());
    for (size_t i = 0; i < selected_fields.size(); ++i) {
      // All fields must be found in schema without ambiguity
      ARROW_ASSIGN_OR_RAISE(auto match, selected_fields[i].FindOne(full_schema));
      const std::string& name = in_fields[match[0]]->name();
      const std::shared_ptr<DataType>& type = in_fields[match[0]]->type();
      out_fields.field_paths[i] = match[0];
      out_fields.field_names[i] = name;
      out_fields.data_types[i] = type;
    }
    schemas_.push_back(std::make_pair(handle, out_fields));
    return Status::OK();
  }

  // Materialize all mappings relative to the base schema (assumed to be the
  // first registered schema, id 0).
  void RegisterEnd() {
    size_t size = schemas_.size();
    mappings_.resize(size);
    inverse_mappings_.resize(size);
    int id_base = 0;
    for (size_t i = 0; i < size; ++i) {
      GenerateMapForProjection(static_cast<int>(i), id_base);
    }
  }

  // Linear search; the number of projections is small (see HashJoinProjection).
  int schema_id(ProjectionIdEnum schema_handle) const {
    for (size_t i = 0; i < schemas_.size(); ++i) {
      if (schemas_[i].first == schema_handle) {
        return static_cast<int>(i);
      }
    }
    // We should never get here
    assert(false);
    return -1;
  }

  // Build mapping (projection -> base) and inverse (base -> projection) by
  // matching field paths; columns of the base absent from the projection are
  // marked kMissingField in the inverse mapping.
  void GenerateMapForProjection(int id_proj, int id_base) {
    int num_cols_proj = static_cast<int>(schemas_[id_proj].second.data_types.size());
    int num_cols_base = static_cast<int>(schemas_[id_base].second.data_types.size());

    std::vector<int>& mapping = mappings_[id_proj];
    std::vector<int>& inverse_mapping = inverse_mappings_[id_proj];
    mapping.resize(num_cols_proj);
    inverse_mapping.resize(num_cols_base);

    if (id_proj == id_base) {
      for (int i = 0; i < num_cols_base; ++i) {
        mapping[i] = inverse_mapping[i] = i;
      }
    } else {
      const FieldInfos& fields_proj = schemas_[id_proj].second;
      const FieldInfos& fields_base = schemas_[id_base].second;
      for (int i = 0; i < num_cols_base; ++i) {
        inverse_mapping[i] = SchemaProjectionMap::kMissingField;
      }
      for (int i = 0; i < num_cols_proj; ++i) {
        int field_id = SchemaProjectionMap::kMissingField;
        for (int j = 0; j < num_cols_base; ++j) {
          if (fields_proj.field_paths[i] == fields_base.field_paths[j]) {
            field_id = j;
            // If there are multiple matches for the same input field,
            // it will be mapped to the first match.
            break;
          }
        }
        assert(field_id != SchemaProjectionMap::kMissingField);
        mapping[i] = field_id;
        inverse_mapping[field_id] = i;
      }
    }
  }

  // vector used as a mapping from ProjectionIdEnum to fields
  std::vector<std::pair<ProjectionIdEnum, FieldInfos>> schemas_;
  std::vector<std::vector<int>> mappings_;          // projection -> base
  std::vector<std::vector<int>> inverse_mappings_;  // base -> projection
};

using HashJoinProjectionMaps = SchemaProjectionMaps<HashJoinProjection>;
224
+
225
+ } // namespace acero
226
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/task_util.h ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cstdint>
22
+ #include <functional>
23
+ #include <vector>
24
+
25
+ #include "arrow/acero/visibility.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/util/config.h"
28
+ #include "arrow/util/logging.h"
29
+
30
+ namespace arrow {
31
+ namespace acero {
32
+
33
+ // Atomic value surrounded by padding bytes to avoid cache line invalidation
34
+ // whenever it is modified by a concurrent thread on a different CPU core.
35
+ //
36
// Atomic value surrounded by padding bytes to avoid cache line invalidation
// whenever it is modified by a concurrent thread on a different CPU core.
//
// NOTE(review): the kCacheLineSize = 64 assumption matches common x86/ARM
// cache lines; std::hardware_destructive_interference_size would express the
// intent directly but has uneven toolchain support.
template <typename T>
class AtomicWithPadding {
 private:
  static constexpr int kCacheLineSize = 64;
  uint8_t padding_before[kCacheLineSize];  // unused by design: spacing only

 public:
  std::atomic<T> value;

 private:
  uint8_t padding_after[kCacheLineSize];  // unused by design: spacing only
};
48
+
49
+ // Used for asynchronous execution of operations that can be broken into
50
+ // a fixed number of symmetric tasks that can be executed concurrently.
51
+ //
52
+ // Implements priorities between multiple such operations, called task groups.
53
+ //
54
+ // Allows to specify the maximum number of in-flight tasks at any moment.
55
+ //
56
+ // Also allows for executing next pending tasks immediately using a caller thread.
57
+ //
58
// Used for asynchronous execution of operations that can be broken into
// a fixed number of symmetric tasks that can be executed concurrently.
//
// Implements priorities between multiple such operations, called task groups.
//
// Allows to specify the maximum number of in-flight tasks at any moment.
//
// Also allows for executing next pending tasks immediately using a caller thread.
//
// Abstract interface; obtain a concrete instance via TaskScheduler::Make().
class ARROW_ACERO_EXPORT TaskScheduler {
 public:
  // Task body: (thread_id, task_index) -> Status.
  using TaskImpl = std::function<Status(size_t, int64_t)>;
  // Continuation run after a task group completes: (thread_id) -> Status.
  using TaskGroupContinuationImpl = std::function<Status(size_t)>;
  // Callback used by the scheduler to hand a continuation to a thread pool.
  using ScheduleImpl = std::function<Status(TaskGroupContinuationImpl)>;
  using AbortContinuationImpl = std::function<void()>;

  virtual ~TaskScheduler() = default;

  // Order in which task groups are registered represents priorities of their tasks
  // (the first group has the highest priority).
  //
  // Returns task group identifier that is used to request operations on the task group.
  virtual int RegisterTaskGroup(TaskImpl task_impl,
                                TaskGroupContinuationImpl cont_impl) = 0;

  // Must be called once after all task groups have been registered and before
  // any group is started.
  virtual void RegisterEnd() = 0;

  // total_num_tasks may be zero, in which case task group continuation will be executed
  // immediately
  virtual Status StartTaskGroup(size_t thread_id, int group_id,
                                int64_t total_num_tasks) = 0;

  // Execute given number of tasks immediately using caller thread
  virtual Status ExecuteMore(size_t thread_id, int num_tasks_to_execute,
                             bool execute_all) = 0;

  // Begin scheduling tasks using provided callback and
  // the limit on the number of in-flight tasks at any moment.
  //
  // Scheduling will continue as long as there are waiting tasks.
  //
  // It will automatically resume whenever new task group gets started.
  virtual Status StartScheduling(size_t thread_id, ScheduleImpl schedule_impl,
                                 int num_concurrent_tasks, bool use_sync_execution) = 0;

  // Abort scheduling and execution.
  // Used in case of being notified about unrecoverable error for the entire query.
  virtual void Abort(AbortContinuationImpl impl) = 0;

  // Factory for the default implementation.
  static std::unique_ptr<TaskScheduler> Make();
};
100
+
101
+ } // namespace acero
102
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/test_nodes.h ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+
22
+ #include "arrow/acero/options.h"
23
+ #include "arrow/acero/test_util_internal.h"
24
+ #include "arrow/testing/random.h"
25
+
26
+ namespace arrow {
27
+ namespace acero {
28
+
29
// \brief Make a delaying source that is optionally noisy (prints when it emits)
//
// `label` identifies the source in the noisy output; `delay_sec` is the delay
// applied per emitted batch.
AsyncGenerator<std::optional<ExecBatch>> MakeDelayedGen(
    Iterator<std::optional<ExecBatch>> src, std::string label, double delay_sec,
    bool noisy = false);

// \brief Make a delaying source that is optionally noisy (prints when it emits)
AsyncGenerator<std::optional<ExecBatch>> MakeDelayedGen(
    AsyncGenerator<std::optional<ExecBatch>> src, std::string label, double delay_sec,
    bool noisy = false);

// \brief Make a delaying source that is optionally noisy (prints when it emits)
AsyncGenerator<std::optional<ExecBatch>> MakeDelayedGen(BatchesWithSchema src,
                                                        std::string label,
                                                        double delay_sec,
                                                        bool noisy = false);
44
+
45
/// A node that slightly resequences the input at random
struct JitterNodeOptions : public ExecNodeOptions {
  /// Seed for the random jitter, allowing reproducible runs.
  random::SeedType seed;
  /// The max amount to add to a node's "cost".
  int max_jitter_modifier;

  explicit JitterNodeOptions(random::SeedType seed, int max_jitter_modifier = 5)
      : seed(seed), max_jitter_modifier(max_jitter_modifier) {}
  /// Factory name under which this node is registered.
  static constexpr std::string_view kName = "jitter";
};
55
+
56
class GateImpl;

/// Synchronization helper used by the "gated" test node: batches are held
/// until the test releases them one at a time or all at once.
class Gate {
 public:
  static std::shared_ptr<Gate> Make();

  Gate();
  virtual ~Gate();

  /// Release every held batch (opens the gate completely).
  void ReleaseAllBatches();
  /// Release exactly one held batch.
  void ReleaseOneBatch();
  /// Future that completes when the next batch is released.
  Future<> WaitForNextReleasedBatch();

 private:
  ARROW_DISALLOW_COPY_AND_ASSIGN(Gate);

  // NOTE(review): raw pimpl pointer; presumably owned and deleted in the
  // destructor — confirm in the implementation file.
  GateImpl* impl_;
};
74
+
75
// A node that holds all input batches until a given gate is released
struct GatedNodeOptions : public ExecNodeOptions {
  explicit GatedNodeOptions(Gate* gate) : gate(gate) {}
  // Not owned; must outlive the node.
  Gate* gate;

  // Factory name under which this node is registered.
  static constexpr std::string_view kName = "gated";
};
82
+
83
// Register the test-only ExecNode factories (e.g. "jitter", "gated") so they
// can be used in test plans.
void RegisterTestNodes();
84
+
85
+ } // namespace acero
86
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/record_batch.h"
21
+ #include "arrow/type_traits.h"
22
+
23
+ namespace arrow::acero {
24
+
25
// normalize the value to unsigned 64-bits while preserving ordering of values
template <typename T, enable_if_t<std::is_integral<T>::value, bool> = true>
uint64_t NormalizeTime(T t);

// Read the value at (col, row) of `batch`, interpreted per `time_type`, and
// return it normalized to uint64 (presumably via NormalizeTime — confirm in
// the implementation).
uint64_t GetTime(const RecordBatch* batch, Type::type time_type, int col, uint64_t row);
30
+
31
+ } // namespace arrow::acero
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/tpch_node.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <optional>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "arrow/acero/type_fwd.h"
26
+ #include "arrow/acero/visibility.h"
27
+ #include "arrow/result.h"
28
+ #include "arrow/status.h"
29
+
30
+ namespace arrow {
31
+ namespace acero {
32
+ namespace internal {
33
+
34
/// Factory for ExecNodes that generate TPC-H benchmark data.
class ARROW_ACERO_EXPORT TpchGen {
 public:
  virtual ~TpchGen() = default;

  /*
   * \brief Create a factory for nodes that generate TPC-H data
   *
   * Note: Individual tables will reference each other. It is important that you only
   * create a single TpchGen instance for each plan and then you can create nodes for each
   * table from that single TpchGen instance. Note: Every batch will be scheduled as a new
   * task using the ExecPlan's scheduler.
   */
  static Result<std::unique_ptr<TpchGen>> Make(
      ExecPlan* plan, double scale_factor = 1.0, int64_t batch_size = 4096,
      std::optional<int64_t> seed = std::nullopt);

  // The below methods will create and add an ExecNode to the plan that generates
  // data for the desired table. If columns is empty, all columns will be generated.
  // The methods return the added ExecNode, which should be used for inputs.
  virtual Result<ExecNode*> Supplier(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Part(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> PartSupp(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Customer(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Orders(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Lineitem(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Nation(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Region(std::vector<std::string> columns = {}) = 0;
};
62
+
63
+ } // namespace internal
64
+ } // namespace acero
65
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/type_fwd.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/compute/type_fwd.h"
21
+
22
+ namespace arrow {
23
+
24
+ namespace acero {
25
+
26
// Forward declarations of the core Acero types, so headers can reference
// them without pulling in the full definitions.
class ExecNode;
class ExecPlan;
class ExecNodeOptions;
class ExecFactoryRegistry;
class QueryContext;
struct QueryOptions;
struct Declaration;
class SinkNodeConsumer;
34
+
35
+ } // namespace acero
36
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/unmaterialized_table.h ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <optional>
21
+ #include <vector>
22
+ #include "arrow/array/builder_base.h"
23
+ #include "arrow/array/builder_binary.h"
24
+ #include "arrow/array/builder_primitive.h"
25
+ #include "arrow/memory_pool.h"
26
+ #include "arrow/record_batch.h"
27
+ #include "arrow/type_traits.h"
28
+ #include "arrow/util/logging.h"
29
+
30
+ namespace arrow::acero {
31
+
32
/// Lightweight representation of a cell of an unmaterialized table.
///
struct CompositeEntry {
  // Not owned; lifetime is kept alive by the enclosing table's shared_ptr map.
  RecordBatch* batch;
  // Row range within `batch`; presumably half-open [start, end) given that
  // slice size is computed as end - start — confirm against the builder.
  uint64_t start;
  uint64_t end;
};
39
+
40
+ // Forward declare the builder
41
+ template <size_t MAX_COMPOSITE_TABLES>
42
+ class UnmaterializedSliceBuilder;
43
+
44
+ /// A table of composite reference rows. Rows maintain pointers to the
45
+ /// constituent record batches, but the overall table retains shared_ptr
46
+ /// references to ensure memory remains resident while the table is live.
47
+ ///
48
+ /// The main reason for this is that, especially for wide tables, some operations
49
+ /// such as sorted_merge or asof_join are effectively row-oriented, rather than
50
+ /// column-oriented. Separating the join part from the columnar materialization
51
+ /// part simplifies the logic around data types and increases efficiency.
52
+ ///
53
+ /// We don't put the shared_ptr's into the rows for efficiency reasons. Use
54
+ /// UnmaterializedSliceBuilder to add ranges of record batches to this table
55
+ template <size_t MAX_COMPOSITE_TABLES>
56
+ class UnmaterializedCompositeTable {
57
+ public:
58
+ UnmaterializedCompositeTable(
59
+ const std::shared_ptr<arrow::Schema>& output_schema, size_t num_composite_tables,
60
+ std::unordered_map<int, std::pair<int, int>> output_col_to_src_,
61
+ arrow::MemoryPool* pool_ = arrow::default_memory_pool())
62
+ : schema(output_schema),
63
+ num_composite_tables(num_composite_tables),
64
+ output_col_to_src(std::move(output_col_to_src_)),
65
+ pool{pool_} {}
66
+
67
+ // Shallow wrappers around std::vector for performance
68
+ inline size_t capacity() { return slices.capacity(); }
69
+ inline void reserve(size_t num_slices) { slices.reserve(num_slices); }
70
+
71
+ inline size_t Size() const { return num_rows; }
72
+ inline size_t Empty() const { return num_rows == 0; }
73
+
74
+ Result<std::optional<std::shared_ptr<RecordBatch>>> Materialize() {
75
+ // Don't build empty batches
76
+ if (Empty()) {
77
+ return std::nullopt;
78
+ }
79
+ DCHECK_LE(Size(), (uint64_t)std::numeric_limits<int64_t>::max());
80
+ std::vector<std::shared_ptr<arrow::Array>> arrays(schema->num_fields());
81
+
82
+ #define MATERIALIZE_CASE(id) \
83
+ case arrow::Type::id: { \
84
+ using T = typename arrow::TypeIdTraits<arrow::Type::id>::Type; \
85
+ ARROW_ASSIGN_OR_RAISE(arrays.at(i_col), materializeColumn<T>(field_type, i_col)); \
86
+ break; \
87
+ }
88
+
89
+ // Build the arrays column-by-column from the rows
90
+ for (int i_col = 0; i_col < schema->num_fields(); ++i_col) {
91
+ const std::shared_ptr<arrow::Field>& field = schema->field(i_col);
92
+ const auto& field_type = field->type();
93
+
94
+ switch (field_type->id()) {
95
+ MATERIALIZE_CASE(BOOL)
96
+ MATERIALIZE_CASE(INT8)
97
+ MATERIALIZE_CASE(INT16)
98
+ MATERIALIZE_CASE(INT32)
99
+ MATERIALIZE_CASE(INT64)
100
+ MATERIALIZE_CASE(UINT8)
101
+ MATERIALIZE_CASE(UINT16)
102
+ MATERIALIZE_CASE(UINT32)
103
+ MATERIALIZE_CASE(UINT64)
104
+ MATERIALIZE_CASE(FLOAT)
105
+ MATERIALIZE_CASE(DOUBLE)
106
+ MATERIALIZE_CASE(DATE32)
107
+ MATERIALIZE_CASE(DATE64)
108
+ MATERIALIZE_CASE(TIME32)
109
+ MATERIALIZE_CASE(TIME64)
110
+ MATERIALIZE_CASE(TIMESTAMP)
111
+ MATERIALIZE_CASE(STRING)
112
+ MATERIALIZE_CASE(LARGE_STRING)
113
+ MATERIALIZE_CASE(BINARY)
114
+ MATERIALIZE_CASE(LARGE_BINARY)
115
+ default:
116
+ return arrow::Status::Invalid("Unsupported data type ",
117
+ field->type()->ToString(), " for field ",
118
+ field->name());
119
+ }
120
+ }
121
+
122
+ #undef MATERIALIZE_CASE
123
+
124
+ std::shared_ptr<arrow::RecordBatch> r =
125
+ arrow::RecordBatch::Make(schema, (int64_t)num_rows, arrays);
126
+ return r;
127
+ }
128
+
129
+ private:
130
+ struct UnmaterializedSlice {
131
+ CompositeEntry components[MAX_COMPOSITE_TABLES];
132
+ size_t num_components;
133
+
134
+ inline int64_t Size() const {
135
+ if (num_components == 0) {
136
+ return 0;
137
+ }
138
+ return components[0].end - components[0].start;
139
+ }
140
+ };
141
+
142
+ // Mapping from an output column ID to a source table ID and column ID
143
+ std::shared_ptr<arrow::Schema> schema;
144
+ size_t num_composite_tables;
145
+ std::unordered_map<int, std::pair<int, int>> output_col_to_src;
146
+
147
+ arrow::MemoryPool* pool;
148
+
149
+ /// A map from address of a record batch to the record batch. Used to
150
+ /// maintain the lifetime of the record batch in case it goes out of scope
151
+ /// by the main exec node thread
152
+ std::unordered_map<uintptr_t, std::shared_ptr<arrow::RecordBatch>> ptr2Ref = {};
153
+ std::vector<UnmaterializedSlice> slices;
154
+
155
+ size_t num_rows = 0;
156
+
157
+ // for AddRecordBatchRef/AddSlice and access to UnmaterializedSlice
158
+ friend class UnmaterializedSliceBuilder<MAX_COMPOSITE_TABLES>;
159
+
160
+ void AddRecordBatchRef(const std::shared_ptr<arrow::RecordBatch>& ref) {
161
+ ptr2Ref[(uintptr_t)ref.get()] = ref;
162
+ }
163
+ void AddSlice(const UnmaterializedSlice& slice) {
164
+ slices.push_back(slice);
165
+ num_rows += slice.Size();
166
+ }
167
+
168
+ template <class Type, class Builder = typename TypeTraits<Type>::BuilderType>
169
+ enable_if_boolean<Type, Status> static BuilderAppend(
170
+ Builder& builder, const std::shared_ptr<ArrayData>& source, uint64_t row) {
171
+ if (source->IsNull(row)) {
172
+ builder.UnsafeAppendNull();
173
+ return Status::OK();
174
+ }
175
+ builder.UnsafeAppend(bit_util::GetBit(source->template GetValues<uint8_t>(1), row));
176
+ return Status::OK();
177
+ }
178
+
179
+ template <class Type, class Builder = typename TypeTraits<Type>::BuilderType>
180
+ enable_if_t<is_fixed_width_type<Type>::value && !is_boolean_type<Type>::value,
181
+ Status> static BuilderAppend(Builder& builder,
182
+ const std::shared_ptr<ArrayData>& source,
183
+ uint64_t row) {
184
+ if (source->IsNull(row)) {
185
+ builder.UnsafeAppendNull();
186
+ return Status::OK();
187
+ }
188
+ using CType = typename TypeTraits<Type>::CType;
189
+ builder.UnsafeAppend(source->template GetValues<CType>(1)[row]);
190
+ return Status::OK();
191
+ }
192
+
193
+ template <class Type, class Builder = typename TypeTraits<Type>::BuilderType>
194
+ enable_if_base_binary<Type, Status> static BuilderAppend(
195
+ Builder& builder, const std::shared_ptr<ArrayData>& source, uint64_t row) {
196
+ if (source->IsNull(row)) {
197
+ return builder.AppendNull();
198
+ }
199
+ using offset_type = typename Type::offset_type;
200
+ const uint8_t* data = source->buffers[2]->data();
201
+ const offset_type* offsets = source->GetValues<offset_type>(1);
202
+ const offset_type offset0 = offsets[row];
203
+ const offset_type offset1 = offsets[row + 1];
204
+ return builder.Append(data + offset0, offset1 - offset0);
205
+ }
206
+
207
+ template <class Type, class Builder = typename arrow::TypeTraits<Type>::BuilderType>
208
+ arrow::Result<std::shared_ptr<arrow::Array>> materializeColumn(
209
+ const std::shared_ptr<arrow::DataType>& type, int i_col) {
210
+ ARROW_ASSIGN_OR_RAISE(auto builderPtr, arrow::MakeBuilder(type, pool));
211
+ Builder& builder = *arrow::internal::checked_cast<Builder*>(builderPtr.get());
212
+ ARROW_RETURN_NOT_OK(builder.Reserve(num_rows));
213
+
214
+ const auto& [table_index, column_index] = output_col_to_src[i_col];
215
+
216
+ for (const auto& unmaterialized_slice : slices) {
217
+ const auto& [batch, start, end] = unmaterialized_slice.components[table_index];
218
+ if (batch) {
219
+ for (uint64_t rowNum = start; rowNum < end; ++rowNum) {
220
+ arrow::Status st = BuilderAppend<Type, Builder>(
221
+ builder, batch->column_data(column_index), rowNum);
222
+ ARROW_RETURN_NOT_OK(st);
223
+ }
224
+ } else {
225
+ for (uint64_t rowNum = start; rowNum < end; ++rowNum) {
226
+ ARROW_RETURN_NOT_OK(builder.AppendNull());
227
+ }
228
+ }
229
+ }
230
+ std::shared_ptr<arrow::Array> result;
231
+ ARROW_RETURN_NOT_OK(builder.Finish(&result));
232
+ return Result{std::move(result)};
233
+ }
234
+ };
235
+
236
+ /// A builder class that can append blocks of data to a row. A "slice"
237
+ /// is built by horizontally concatenating record batches.
238
+ template <size_t MAX_COMPOSITE_TABLES>
239
+ class UnmaterializedSliceBuilder {
240
+ public:
241
+ explicit UnmaterializedSliceBuilder(
242
+ UnmaterializedCompositeTable<MAX_COMPOSITE_TABLES>* table_)
243
+ : table(table_) {}
244
+
245
+ void AddEntry(std::shared_ptr<RecordBatch> rb, uint64_t start, uint64_t end) {
246
+ if (rb) {
247
+ table->AddRecordBatchRef(rb);
248
+ }
249
+ if (slice.num_components) {
250
+ size_t last_index = slice.num_components - 1;
251
+ DCHECK_EQ(slice.components[last_index].end - slice.components[last_index].start,
252
+ end - start)
253
+ << "Slices should be the same length. ";
254
+ }
255
+ slice.components[slice.num_components++] = CompositeEntry{rb.get(), start, end};
256
+ }
257
+
258
+ void Finalize() { table->AddSlice(slice); }
259
+ int64_t Size() { return slice.Size(); }
260
+
261
+ private:
262
+ using TUnmaterializedCompositeTable =
263
+ UnmaterializedCompositeTable<MAX_COMPOSITE_TABLES>;
264
+ using TUnmaterializedSlice =
265
+ typename TUnmaterializedCompositeTable::UnmaterializedSlice;
266
+
267
+ TUnmaterializedCompositeTable* table;
268
+ TUnmaterializedSlice slice{};
269
+ };
270
+
271
+ } // namespace arrow::acero
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/util.h ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cstdint>
22
+ #include <optional>
23
+ #include <thread>
24
+ #include <unordered_map>
25
+ #include <vector>
26
+
27
+ #include "arrow/acero/options.h"
28
+ #include "arrow/acero/type_fwd.h"
29
+ #include "arrow/buffer.h"
30
+ #include "arrow/compute/expression.h"
31
+ #include "arrow/compute/util.h"
32
+ #include "arrow/memory_pool.h"
33
+ #include "arrow/result.h"
34
+ #include "arrow/status.h"
35
+ #include "arrow/util/bit_util.h"
36
+ #include "arrow/util/cpu_info.h"
37
+ #include "arrow/util/logging.h"
38
+ #include "arrow/util/mutex.h"
39
+ #include "arrow/util/thread_pool.h"
40
+ #include "arrow/util/type_fwd.h"
41
+
42
+ namespace arrow {
43
+
44
+ namespace acero {
45
+
46
+ ARROW_ACERO_EXPORT
47
+ Status ValidateExecNodeInputs(ExecPlan* plan, const std::vector<ExecNode*>& inputs,
48
+ int expected_num_inputs, const char* kind_name);
49
+
50
+ ARROW_ACERO_EXPORT
51
+ Result<std::shared_ptr<Table>> TableFromExecBatches(
52
+ const std::shared_ptr<Schema>& schema, const std::vector<ExecBatch>& exec_batches);
53
+
54
+ class ARROW_ACERO_EXPORT AtomicCounter {
55
+ public:
56
+ AtomicCounter() = default;
57
+
58
+ int count() const { return count_.load(); }
59
+
60
+ std::optional<int> total() const {
61
+ int total = total_.load();
62
+ if (total == -1) return {};
63
+ return total;
64
+ }
65
+
66
+ // return true if the counter is complete
67
+ bool Increment() {
68
+ DCHECK_NE(count_.load(), total_.load());
69
+ int count = count_.fetch_add(1) + 1;
70
+ if (count != total_.load()) return false;
71
+ return DoneOnce();
72
+ }
73
+
74
+ // return true if the counter is complete
75
+ bool SetTotal(int total) {
76
+ total_.store(total);
77
+ if (count_.load() != total) return false;
78
+ return DoneOnce();
79
+ }
80
+
81
+ // return true if the counter has not already been completed
82
+ bool Cancel() { return DoneOnce(); }
83
+
84
+ // return true if the counter has finished or been cancelled
85
+ bool Completed() { return complete_.load(); }
86
+
87
+ private:
88
+ // ensure there is only one true return from Increment(), SetTotal(), or Cancel()
89
+ bool DoneOnce() {
90
+ bool expected = false;
91
+ return complete_.compare_exchange_strong(expected, true);
92
+ }
93
+
94
+ std::atomic<int> count_{0}, total_{-1};
95
+ std::atomic<bool> complete_{false};
96
+ };
97
+
98
+ class ARROW_ACERO_EXPORT ThreadIndexer {
99
+ public:
100
+ size_t operator()();
101
+
102
+ static size_t Capacity();
103
+
104
+ private:
105
+ static size_t Check(size_t thread_index);
106
+
107
+ arrow::util::Mutex mutex_;
108
+ std::unordered_map<std::thread::id, size_t> id_to_index_;
109
+ };
110
+
111
+ /// \brief A consumer that collects results into an in-memory table
112
+ struct ARROW_ACERO_EXPORT TableSinkNodeConsumer : public SinkNodeConsumer {
113
+ public:
114
+ TableSinkNodeConsumer(std::shared_ptr<Table>* out, MemoryPool* pool)
115
+ : out_(out), pool_(pool) {}
116
+ Status Init(const std::shared_ptr<Schema>& schema,
117
+ BackpressureControl* backpressure_control, ExecPlan* plan) override;
118
+ Status Consume(ExecBatch batch) override;
119
+ Future<> Finish() override;
120
+
121
+ private:
122
+ std::shared_ptr<Table>* out_;
123
+ MemoryPool* pool_;
124
+ std::shared_ptr<Schema> schema_;
125
+ std::vector<std::shared_ptr<RecordBatch>> batches_;
126
+ arrow::util::Mutex consume_mutex_;
127
+ };
128
+
129
+ class ARROW_ACERO_EXPORT NullSinkNodeConsumer : public SinkNodeConsumer {
130
+ public:
131
+ Status Init(const std::shared_ptr<Schema>&, BackpressureControl*,
132
+ ExecPlan* plan) override {
133
+ return Status::OK();
134
+ }
135
+ Status Consume(ExecBatch exec_batch) override { return Status::OK(); }
136
+ Future<> Finish() override { return Status::OK(); }
137
+
138
+ public:
139
+ static std::shared_ptr<NullSinkNodeConsumer> Make() {
140
+ return std::make_shared<NullSinkNodeConsumer>();
141
+ }
142
+ };
143
+
144
+ /// CRTP helper for tracing helper functions
145
+
146
+ class ARROW_ACERO_EXPORT TracedNode {
147
+ public:
148
+ // All nodes should call TraceStartProducing or NoteStartProducing exactly once
149
+ // Most nodes will be fine with a call to NoteStartProducing since the StartProducing
150
+ // call is usually fairly cheap and simply schedules tasks to fetch the actual data.
151
+
152
+ explicit TracedNode(ExecNode* node) : node_(node) {}
153
+
154
+ // Create a span to record the StartProducing work
155
+ [[nodiscard]] ::arrow::internal::tracing::Scope TraceStartProducing(
156
+ std::string extra_details) const;
157
+
158
+ // Record a call to StartProducing without creating with a span
159
+ void NoteStartProducing(std::string extra_details) const;
160
+
161
+ // All nodes should call TraceInputReceived for each batch they receive. This call
162
+ // should track the time spent processing the batch. NoteInputReceived is available
163
+ // but usually won't be used unless a node is simply adding batches to a trivial queue.
164
+
165
+ // Create a span to record the InputReceived work
166
+ [[nodiscard]] ::arrow::internal::tracing::Scope TraceInputReceived(
167
+ const ExecBatch& batch) const;
168
+
169
+ // Record a call to InputReceived without creating with a span
170
+ void NoteInputReceived(const ExecBatch& batch) const;
171
+
172
+ // Create a span to record any "finish" work. This should NOT be called as part of
173
+ // InputFinished and many nodes may not need to call this at all. This should be used
174
+ // when a node has some extra work that has to be done once it has received all of its
175
+ // data. For example, an aggregation node calculating aggregations. This will
176
+ // typically be called as a result of InputFinished OR InputReceived.
177
+ [[nodiscard]] ::arrow::internal::tracing::Scope TraceFinish() const;
178
+
179
+ private:
180
+ ExecNode* node_;
181
+ };
182
+
183
+ } // namespace acero
184
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #if defined(_WIN32) || defined(__CYGWIN__)
23
+ #if defined(_MSC_VER)
24
+ #pragma warning(push)
25
+ #pragma warning(disable : 4251)
26
+ #else
27
+ #pragma GCC diagnostic ignored "-Wattributes"
28
+ #endif
29
+
30
+ #ifdef ARROW_ACERO_STATIC
31
+ #define ARROW_ACERO_EXPORT
32
+ #elif defined(ARROW_ACERO_EXPORTING)
33
+ #define ARROW_ACERO_EXPORT __declspec(dllexport)
34
+ #else
35
+ #define ARROW_ACERO_EXPORT __declspec(dllimport)
36
+ #endif
37
+
38
+ #define ARROW_ACERO_NO_EXPORT
39
+ #else // Not Windows
40
+ #ifndef ARROW_ACERO_EXPORT
41
+ #define ARROW_ACERO_EXPORT __attribute__((visibility("default")))
42
+ #endif
43
+ #ifndef ARROW_ACERO_NO_EXPORT
44
+ #define ARROW_ACERO_NO_EXPORT __attribute__((visibility("hidden")))
45
+ #endif
46
+ #endif // Not-Windows
47
+
48
+ #if defined(_MSC_VER)
49
+ #pragma warning(pop)
50
+ #endif
venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ /// \file abi.h Arrow C Data Interface
19
+ ///
20
+ /// The Arrow C Data interface defines a very small, stable set
21
+ /// of C definitions which can be easily copied into any project's
22
+ /// source code and vendored to be used for columnar data interchange
23
+ /// in the Arrow format. For non-C/C++ languages and runtimes,
24
+ /// it should be almost as easy to translate the C definitions into
25
+ /// the corresponding C FFI declarations.
26
+ ///
27
+ /// Applications and libraries can therefore work with Arrow memory
28
+ /// without necessarily using the Arrow libraries or reinventing
29
+ /// the wheel. Developers can choose between tight integration
30
+ /// with the Arrow software project or minimal integration with
31
+ /// the Arrow format only.
32
+
33
+ #pragma once
34
+
35
+ #include <stdint.h>
36
+
37
+ // Spec and documentation: https://arrow.apache.org/docs/format/CDataInterface.html
38
+
39
+ #ifdef __cplusplus
40
+ extern "C" {
41
+ #endif
42
+
43
+ #ifndef ARROW_C_DATA_INTERFACE
44
+ #define ARROW_C_DATA_INTERFACE
45
+
46
+ #define ARROW_FLAG_DICTIONARY_ORDERED 1
47
+ #define ARROW_FLAG_NULLABLE 2
48
+ #define ARROW_FLAG_MAP_KEYS_SORTED 4
49
+
50
+ struct ArrowSchema {
51
+ // Array type description
52
+ const char* format;
53
+ const char* name;
54
+ const char* metadata;
55
+ int64_t flags;
56
+ int64_t n_children;
57
+ struct ArrowSchema** children;
58
+ struct ArrowSchema* dictionary;
59
+
60
+ // Release callback
61
+ void (*release)(struct ArrowSchema*);
62
+ // Opaque producer-specific data
63
+ void* private_data;
64
+ };
65
+
66
+ struct ArrowArray {
67
+ // Array data description
68
+ int64_t length;
69
+ int64_t null_count;
70
+ int64_t offset;
71
+ int64_t n_buffers;
72
+ int64_t n_children;
73
+ const void** buffers;
74
+ struct ArrowArray** children;
75
+ struct ArrowArray* dictionary;
76
+
77
+ // Release callback
78
+ void (*release)(struct ArrowArray*);
79
+ // Opaque producer-specific data
80
+ void* private_data;
81
+ };
82
+
83
+ #endif // ARROW_C_DATA_INTERFACE
84
+
85
+ #ifndef ARROW_C_DEVICE_DATA_INTERFACE
86
+ #define ARROW_C_DEVICE_DATA_INTERFACE
87
+
88
+ // Spec and Documentation: https://arrow.apache.org/docs/format/CDeviceDataInterface.html
89
+
90
+ // DeviceType for the allocated memory
91
+ typedef int32_t ArrowDeviceType;
92
+
93
+ // CPU device, same as using ArrowArray directly
94
+ #define ARROW_DEVICE_CPU 1
95
+ // CUDA GPU Device
96
+ #define ARROW_DEVICE_CUDA 2
97
+ // Pinned CUDA CPU memory by cudaMallocHost
98
+ #define ARROW_DEVICE_CUDA_HOST 3
99
+ // OpenCL Device
100
+ #define ARROW_DEVICE_OPENCL 4
101
+ // Vulkan buffer for next-gen graphics
102
+ #define ARROW_DEVICE_VULKAN 7
103
+ // Metal for Apple GPU
104
+ #define ARROW_DEVICE_METAL 8
105
+ // Verilog simulator buffer
106
+ #define ARROW_DEVICE_VPI 9
107
+ // ROCm GPUs for AMD GPUs
108
+ #define ARROW_DEVICE_ROCM 10
109
+ // Pinned ROCm CPU memory allocated by hipMallocHost
110
+ #define ARROW_DEVICE_ROCM_HOST 11
111
+ // Reserved for extension
112
+ #define ARROW_DEVICE_EXT_DEV 12
113
+ // CUDA managed/unified memory allocated by cudaMallocManaged
114
+ #define ARROW_DEVICE_CUDA_MANAGED 13
115
+ // unified shared memory allocated on a oneAPI non-partitioned device.
116
+ #define ARROW_DEVICE_ONEAPI 14
117
+ // GPU support for next-gen WebGPU standard
118
+ #define ARROW_DEVICE_WEBGPU 15
119
+ // Qualcomm Hexagon DSP
120
+ #define ARROW_DEVICE_HEXAGON 16
121
+
122
+ struct ArrowDeviceArray {
123
+ // the Allocated Array
124
+ //
125
+ // the buffers in the array (along with the buffers of any
126
+ // children) are what is allocated on the device.
127
+ struct ArrowArray array;
128
+ // The device id to identify a specific device
129
+ int64_t device_id;
130
+ // The type of device which can access this memory.
131
+ ArrowDeviceType device_type;
132
+ // An event-like object to synchronize on if needed.
133
+ void* sync_event;
134
+ // Reserved bytes for future expansion.
135
+ int64_t reserved[3];
136
+ };
137
+
138
+ #endif // ARROW_C_DEVICE_DATA_INTERFACE
139
+
140
+ #ifndef ARROW_C_STREAM_INTERFACE
141
+ #define ARROW_C_STREAM_INTERFACE
142
+
143
+ struct ArrowArrayStream {
144
+ // Callback to get the stream type
145
+ // (will be the same for all arrays in the stream).
146
+ //
147
+ // Return value: 0 if successful, an `errno`-compatible error code otherwise.
148
+ //
149
+ // If successful, the ArrowSchema must be released independently from the stream.
150
+ int (*get_schema)(struct ArrowArrayStream*, struct ArrowSchema* out);
151
+
152
+ // Callback to get the next array
153
+ // (if no error and the array is released, the stream has ended)
154
+ //
155
+ // Return value: 0 if successful, an `errno`-compatible error code otherwise.
156
+ //
157
+ // If successful, the ArrowArray must be released independently from the stream.
158
+ int (*get_next)(struct ArrowArrayStream*, struct ArrowArray* out);
159
+
160
+ // Callback to get optional detailed error information.
161
+ // This must only be called if the last stream operation failed
162
+ // with a non-0 return code.
163
+ //
164
+ // Return value: pointer to a null-terminated character array describing
165
+ // the last error, or NULL if no description is available.
166
+ //
167
+ // The returned pointer is only valid until the next operation on this stream
168
+ // (including release).
169
+ const char* (*get_last_error)(struct ArrowArrayStream*);
170
+
171
+ // Release callback: release the stream's own resources.
172
+ // Note that arrays returned by `get_next` must be individually released.
173
+ void (*release)(struct ArrowArrayStream*);
174
+
175
+ // Opaque producer-specific data
176
+ void* private_data;
177
+ };
178
+
179
+ #endif // ARROW_C_STREAM_INTERFACE
180
+
181
+ #ifndef ARROW_C_DEVICE_STREAM_INTERFACE
182
+ #define ARROW_C_DEVICE_STREAM_INTERFACE
183
+
184
+ // Equivalent to ArrowArrayStream, but for ArrowDeviceArrays.
185
+ //
186
+ // This stream is intended to provide a stream of data on a single
187
+ // device, if a producer wants data to be produced on multiple devices
188
+ // then multiple streams should be provided. One per device.
189
+ struct ArrowDeviceArrayStream {
190
+ // The device that this stream produces data on.
191
+ ArrowDeviceType device_type;
192
+
193
+ // Callback to get the stream schema
194
+ // (will be the same for all arrays in the stream).
195
+ //
196
+ // Return value 0 if successful, an `errno`-compatible error code otherwise.
197
+ //
198
+ // If successful, the ArrowSchema must be released independently from the stream.
199
+ // The schema should be accessible via CPU memory.
200
+ int (*get_schema)(struct ArrowDeviceArrayStream* self, struct ArrowSchema* out);
201
+
202
+ // Callback to get the next array
203
+ // (if no error and the array is released, the stream has ended)
204
+ //
205
+ // Return value: 0 if successful, an `errno`-compatible error code otherwise.
206
+ //
207
+ // If successful, the ArrowDeviceArray must be released independently from the stream.
208
+ int (*get_next)(struct ArrowDeviceArrayStream* self, struct ArrowDeviceArray* out);
209
+
210
+ // Callback to get optional detailed error information.
211
+ // This must only be called if the last stream operation failed
212
+ // with a non-0 return code.
213
+ //
214
+ // Return value: pointer to a null-terminated character array describing
215
+ // the last error, or NULL if no description is available.
216
+ //
217
+ // The returned pointer is only valid until the next operation on this stream
218
+ // (including release).
219
+ const char* (*get_last_error)(struct ArrowDeviceArrayStream* self);
220
+
221
+ // Release callback: release the stream's own resources.
222
+ // Note that arrays returned by `get_next` must be individually released.
223
+ void (*release)(struct ArrowDeviceArrayStream* self);
224
+
225
+ // Opaque producer-specific data
226
+ void* private_data;
227
+ };
228
+
229
+ #endif // ARROW_C_DEVICE_STREAM_INTERFACE
230
+
231
+ #ifdef __cplusplus
232
+ }
233
+ #endif
venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/bridge.h ADDED
@@ -0,0 +1,348 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <string>
23
+
24
+ #include "arrow/c/abi.h"
25
+ #include "arrow/device.h"
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type_fwd.h"
29
+ #include "arrow/util/macros.h"
30
+ #include "arrow/util/visibility.h"
31
+
32
+ namespace arrow {
33
+
34
+ /// \defgroup c-data-interface Functions for working with the C data interface.
35
+ ///
36
+ /// @{
37
+
38
+ /// \brief Export C++ DataType using the C data interface format.
39
+ ///
40
+ /// The root type is considered to have empty name and metadata.
41
+ /// If you want the root type to have a name and/or metadata, pass
42
+ /// a Field instead.
43
+ ///
44
+ /// \param[in] type DataType object to export
45
+ /// \param[out] out C struct where to export the datatype
46
+ ARROW_EXPORT
47
+ Status ExportType(const DataType& type, struct ArrowSchema* out);
48
+
49
+ /// \brief Export C++ Field using the C data interface format.
50
+ ///
51
+ /// \param[in] field Field object to export
52
+ /// \param[out] out C struct where to export the field
53
+ ARROW_EXPORT
54
+ Status ExportField(const Field& field, struct ArrowSchema* out);
55
+
56
+ /// \brief Export C++ Schema using the C data interface format.
57
+ ///
58
+ /// \param[in] schema Schema object to export
59
+ /// \param[out] out C struct where to export the field
60
+ ARROW_EXPORT
61
+ Status ExportSchema(const Schema& schema, struct ArrowSchema* out);
62
+
63
+ /// \brief Export C++ Array using the C data interface format.
64
+ ///
65
+ /// The resulting ArrowArray struct keeps the array data and buffers alive
66
+ /// until its release callback is called by the consumer.
67
+ ///
68
+ /// \param[in] array Array object to export
69
+ /// \param[out] out C struct where to export the array
70
+ /// \param[out] out_schema optional C struct where to export the array type
71
+ ARROW_EXPORT
72
+ Status ExportArray(const Array& array, struct ArrowArray* out,
73
+ struct ArrowSchema* out_schema = NULLPTR);
74
+
75
+ /// \brief Export C++ RecordBatch using the C data interface format.
76
+ ///
77
+ /// The record batch is exported as if it were a struct array.
78
+ /// The resulting ArrowArray struct keeps the record batch data and buffers alive
79
+ /// until its release callback is called by the consumer.
80
+ ///
81
+ /// \param[in] batch Record batch to export
82
+ /// \param[out] out C struct where to export the record batch
83
+ /// \param[out] out_schema optional C struct where to export the record batch schema
84
+ ARROW_EXPORT
85
+ Status ExportRecordBatch(const RecordBatch& batch, struct ArrowArray* out,
86
+ struct ArrowSchema* out_schema = NULLPTR);
87
+
88
+ /// \brief Import C++ DataType from the C data interface.
89
+ ///
90
+ /// The given ArrowSchema struct is released (as per the C data interface
91
+ /// specification), even if this function fails.
92
+ ///
93
+ /// \param[in,out] schema C data interface struct representing the data type
94
+ /// \return Imported type object
95
+ ARROW_EXPORT
96
+ Result<std::shared_ptr<DataType>> ImportType(struct ArrowSchema* schema);
97
+
98
+ /// \brief Import C++ Field from the C data interface.
99
+ ///
100
+ /// The given ArrowSchema struct is released (as per the C data interface
101
+ /// specification), even if this function fails.
102
+ ///
103
+ /// \param[in,out] schema C data interface struct representing the field
104
+ /// \return Imported field object
105
+ ARROW_EXPORT
106
+ Result<std::shared_ptr<Field>> ImportField(struct ArrowSchema* schema);
107
+
108
+ /// \brief Import C++ Schema from the C data interface.
109
+ ///
110
+ /// The given ArrowSchema struct is released (as per the C data interface
111
+ /// specification), even if this function fails.
112
+ ///
113
+ /// \param[in,out] schema C data interface struct representing the field
114
+ /// \return Imported field object
115
+ ARROW_EXPORT
116
+ Result<std::shared_ptr<Schema>> ImportSchema(struct ArrowSchema* schema);
117
+
118
+ /// \brief Import C++ array from the C data interface.
119
+ ///
120
+ /// The ArrowArray struct has its contents moved (as per the C data interface
121
+ /// specification) to a private object held alive by the resulting array.
122
+ ///
123
+ /// \param[in,out] array C data interface struct holding the array data
124
+ /// \param[in] type type of the imported array
125
+ /// \return Imported array object
126
+ ARROW_EXPORT
127
+ Result<std::shared_ptr<Array>> ImportArray(struct ArrowArray* array,
128
+ std::shared_ptr<DataType> type);
129
+
130
+ /// \brief Import C++ array and its type from the C data interface.
131
+ ///
132
+ /// The ArrowArray struct has its contents moved (as per the C data interface
133
+ /// specification) to a private object held alive by the resulting array.
134
+ /// The ArrowSchema struct is released, even if this function fails.
135
+ ///
136
+ /// \param[in,out] array C data interface struct holding the array data
137
+ /// \param[in,out] type C data interface struct holding the array type
138
+ /// \return Imported array object
139
+ ARROW_EXPORT
140
+ Result<std::shared_ptr<Array>> ImportArray(struct ArrowArray* array,
141
+ struct ArrowSchema* type);
142
+
143
+ /// \brief Import C++ record batch from the C data interface.
144
+ ///
145
+ /// The ArrowArray struct has its contents moved (as per the C data interface
146
+ /// specification) to a private object held alive by the resulting record batch.
147
+ ///
148
+ /// \param[in,out] array C data interface struct holding the record batch data
149
+ /// \param[in] schema schema of the imported record batch
150
+ /// \return Imported record batch object
151
+ ARROW_EXPORT
152
+ Result<std::shared_ptr<RecordBatch>> ImportRecordBatch(struct ArrowArray* array,
153
+ std::shared_ptr<Schema> schema);
154
+
155
+ /// \brief Import C++ record batch and its schema from the C data interface.
156
+ ///
157
+ /// The type represented by the ArrowSchema struct must be a struct type array.
158
+ /// The ArrowArray struct has its contents moved (as per the C data interface
159
+ /// specification) to a private object held alive by the resulting record batch.
160
+ /// The ArrowSchema struct is released, even if this function fails.
161
+ ///
162
+ /// \param[in,out] array C data interface struct holding the record batch data
163
+ /// \param[in,out] schema C data interface struct holding the record batch schema
164
+ /// \return Imported record batch object
165
+ ARROW_EXPORT
166
+ Result<std::shared_ptr<RecordBatch>> ImportRecordBatch(struct ArrowArray* array,
167
+ struct ArrowSchema* schema);
168
+
169
+ /// @}
170
+
171
+ /// \defgroup c-data-device-interface Functions for working with the C data device
172
+ /// interface.
173
+ ///
174
+ /// @{
175
+
176
+ /// \brief EXPERIMENTAL: Export C++ Array as an ArrowDeviceArray.
177
+ ///
178
+ /// The resulting ArrowDeviceArray struct keeps the array data and buffers alive
179
+ /// until its release callback is called by the consumer. All buffers in
180
+ /// the provided array MUST have the same device_type, otherwise an error
181
+ /// will be returned.
182
+ ///
183
+ /// If sync is non-null, get_event will be called on it in order to
184
+ /// potentially provide an event for consumers to synchronize on.
185
+ ///
186
+ /// \param[in] array Array object to export
187
+ /// \param[in] sync shared_ptr to object derived from Device::SyncEvent or null
188
+ /// \param[out] out C struct to export the array to
189
+ /// \param[out] out_schema optional C struct to export the array type to
190
+ ARROW_EXPORT
191
+ Status ExportDeviceArray(const Array& array, std::shared_ptr<Device::SyncEvent> sync,
192
+ struct ArrowDeviceArray* out,
193
+ struct ArrowSchema* out_schema = NULLPTR);
194
+
195
+ /// \brief EXPERIMENTAL: Export C++ RecordBatch as an ArrowDeviceArray.
196
+ ///
197
+ /// The record batch is exported as if it were a struct array.
198
+ /// The resulting ArrowDeviceArray struct keeps the record batch data and buffers alive
199
+ /// until its release callback is called by the consumer.
200
+ ///
201
+ /// All buffers of all columns in the record batch must have the same device_type
202
+ /// otherwise an error will be returned. If columns are on different devices,
203
+ /// they should be exported using different ArrowDeviceArray instances.
204
+ ///
205
+ /// If sync is non-null, get_event will be called on it in order to
206
+ /// potentially provide an event for consumers to synchronize on.
207
+ ///
208
+ /// \param[in] batch Record batch to export
209
+ /// \param[in] sync shared_ptr to object derived from Device::SyncEvent or null
210
+ /// \param[out] out C struct where to export the record batch
211
+ /// \param[out] out_schema optional C struct where to export the record batch schema
212
+ ARROW_EXPORT
213
+ Status ExportDeviceRecordBatch(const RecordBatch& batch,
214
+ std::shared_ptr<Device::SyncEvent> sync,
215
+ struct ArrowDeviceArray* out,
216
+ struct ArrowSchema* out_schema = NULLPTR);
217
+
218
+ using DeviceMemoryMapper =
219
+ std::function<Result<std::shared_ptr<MemoryManager>>(ArrowDeviceType, int64_t)>;
220
+
221
+ ARROW_EXPORT
222
+ Result<std::shared_ptr<MemoryManager>> DefaultDeviceMemoryMapper(
223
+ ArrowDeviceType device_type, int64_t device_id);
224
+
225
+ /// \brief EXPERIMENTAL: Import C++ device array from the C data interface.
226
+ ///
227
+ /// The ArrowArray struct has its contents moved (as per the C data interface
228
+ /// specification) to a private object held alive by the resulting array. The
229
+ /// buffers of the Array are located on the device indicated by the device_type.
230
+ ///
231
+ /// \param[in,out] array C data interface struct holding the array data
232
+ /// \param[in] type type of the imported array
233
+ /// \param[in] mapper A function to map device + id to memory manager. If not
234
+ /// specified, defaults to map "cpu" to the built-in default memory manager.
235
+ /// \return Imported array object
236
+ ARROW_EXPORT
237
+ Result<std::shared_ptr<Array>> ImportDeviceArray(
238
+ struct ArrowDeviceArray* array, std::shared_ptr<DataType> type,
239
+ const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper);
240
+
241
+ /// \brief EXPERIMENTAL: Import C++ device array and its type from the C data interface.
242
+ ///
243
+ /// The ArrowArray struct has its contents moved (as per the C data interface
244
+ /// specification) to a private object held alive by the resulting array.
245
+ /// The ArrowSchema struct is released, even if this function fails. The
246
+ /// buffers of the Array are located on the device indicated by the device_type.
247
+ ///
248
+ /// \param[in,out] array C data interface struct holding the array data
249
+ /// \param[in,out] type C data interface struct holding the array type
250
+ /// \param[in] mapper A function to map device + id to memory manager. If not
251
+ /// specified, defaults to map "cpu" to the built-in default memory manager.
252
+ /// \return Imported array object
253
+ ARROW_EXPORT
254
+ Result<std::shared_ptr<Array>> ImportDeviceArray(
255
+ struct ArrowDeviceArray* array, struct ArrowSchema* type,
256
+ const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper);
257
+
258
+ /// \brief EXPERIMENTAL: Import C++ record batch with buffers on a device from the C data
259
+ /// interface.
260
+ ///
261
+ /// The ArrowArray struct has its contents moved (as per the C data interface
262
+ /// specification) to a private object held alive by the resulting record batch.
263
+ /// The buffers of all columns of the record batch are located on the device
264
+ /// indicated by the device type.
265
+ ///
266
+ /// \param[in,out] array C data interface struct holding the record batch data
267
+ /// \param[in] schema schema of the imported record batch
268
+ /// \param[in] mapper A function to map device + id to memory manager. If not
269
+ /// specified, defaults to map "cpu" to the built-in default memory manager.
270
+ /// \return Imported record batch object
271
+ ARROW_EXPORT
272
+ Result<std::shared_ptr<RecordBatch>> ImportDeviceRecordBatch(
273
+ struct ArrowDeviceArray* array, std::shared_ptr<Schema> schema,
274
+ const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper);
275
+
276
+ /// \brief EXPERIMENTAL: Import C++ record batch with buffers on a device and its schema
277
+ /// from the C data interface.
278
+ ///
279
+ /// The type represented by the ArrowSchema struct must be a struct type array.
280
+ /// The ArrowArray struct has its contents moved (as per the C data interface
281
+ /// specification) to a private object held alive by the resulting record batch.
282
+ /// The ArrowSchema struct is released, even if this function fails. The buffers
283
+ /// of all columns of the record batch are located on the device indicated by the
284
+ /// device type.
285
+ ///
286
+ /// \param[in,out] array C data interface struct holding the record batch data
287
+ /// \param[in,out] schema C data interface struct holding the record batch schema
288
+ /// \param[in] mapper A function to map device + id to memory manager. If not
289
+ /// specified, defaults to map "cpu" to the built-in default memory manager.
290
+ /// \return Imported record batch object
291
+ ARROW_EXPORT
292
+ Result<std::shared_ptr<RecordBatch>> ImportDeviceRecordBatch(
293
+ struct ArrowDeviceArray* array, struct ArrowSchema* schema,
294
+ const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper);
295
+
296
+ /// @}
297
+
298
+ /// \defgroup c-stream-interface Functions for working with the C data interface.
299
+ ///
300
+ /// @{
301
+
302
+ /// \brief Export C++ RecordBatchReader using the C stream interface.
303
+ ///
304
+ /// The resulting ArrowArrayStream struct keeps the record batch reader alive
305
+ /// until its release callback is called by the consumer.
306
+ ///
307
+ /// \param[in] reader RecordBatchReader object to export
308
+ /// \param[out] out C struct where to export the stream
309
+ ARROW_EXPORT
310
+ Status ExportRecordBatchReader(std::shared_ptr<RecordBatchReader> reader,
311
+ struct ArrowArrayStream* out);
312
+
313
+ /// \brief Export C++ ChunkedArray using the C data interface format.
314
+ ///
315
+ /// The resulting ArrowArrayStream struct keeps the chunked array data and buffers alive
316
+ /// until its release callback is called by the consumer.
317
+ ///
318
+ /// \param[in] chunked_array ChunkedArray object to export
319
+ /// \param[out] out C struct where to export the stream
320
+ ARROW_EXPORT
321
+ Status ExportChunkedArray(std::shared_ptr<ChunkedArray> chunked_array,
322
+ struct ArrowArrayStream* out);
323
+
324
+ /// \brief Import C++ RecordBatchReader from the C stream interface.
325
+ ///
326
+ /// The ArrowArrayStream struct has its contents moved to a private object
327
+ /// held alive by the resulting record batch reader.
328
+ ///
329
+ /// \param[in,out] stream C stream interface struct
330
+ /// \return Imported RecordBatchReader object
331
+ ARROW_EXPORT
332
+ Result<std::shared_ptr<RecordBatchReader>> ImportRecordBatchReader(
333
+ struct ArrowArrayStream* stream);
334
+
335
+ /// \brief Import C++ ChunkedArray from the C stream interface
336
+ ///
337
+ /// The ArrowArrayStream struct has its contents moved to a private object,
338
+ /// is consumed in its entirety, and released before returning all chunks
339
+ /// as a ChunkedArray.
340
+ ///
341
+ /// \param[in,out] stream C stream interface struct
342
+ /// \return Imported ChunkedArray object
343
+ ARROW_EXPORT
344
+ Result<std::shared_ptr<ChunkedArray>> ImportChunkedArray(struct ArrowArrayStream* stream);
345
+
346
+ /// @}
347
+
348
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/array/array_base.h"
21
+ #include "arrow/c/dlpack_abi.h"
22
+
23
+ namespace arrow::dlpack {
24
+
25
+ /// \brief Export Arrow array as DLPack tensor.
26
+ ///
27
+ /// DLManagedTensor is produced as defined by the DLPack protocol,
28
+ /// see https://dmlc.github.io/dlpack/latest/.
29
+ ///
30
+ /// Data types for which the protocol is supported are
31
+ /// integer and floating-point data types.
32
+ ///
33
+ /// DLPack protocol only supports arrays with one contiguous
34
+ /// memory region which means Arrow Arrays with validity buffers
35
+ /// are not supported.
36
+ ///
37
+ /// \param[in] arr Arrow array
38
+ /// \return DLManagedTensor struct
39
+ ARROW_EXPORT
40
+ Result<DLManagedTensor*> ExportArray(const std::shared_ptr<Array>& arr);
41
+
42
+ /// \brief Get DLDevice with enumerator specifying the
43
+ /// type of the device data is stored on and index of the
44
+ /// device which is 0 by default for CPU.
45
+ ///
46
+ /// \param[in] arr Arrow array
47
+ /// \return DLDevice struct
48
+ ARROW_EXPORT
49
+ Result<DLDevice> ExportDevice(const std::shared_ptr<Array>& arr);
50
+
51
+ } // namespace arrow::dlpack
venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Taken from:
2
+ // https://github.com/dmlc/dlpack/blob/ca4d00ad3e2e0f410eeab3264d21b8a39397f362/include/dlpack/dlpack.h
3
+ /*!
4
+ * Copyright (c) 2017 by Contributors
5
+ * \file dlpack.h
6
+ * \brief The common header of DLPack.
7
+ */
8
+ #ifndef DLPACK_DLPACK_H_
9
+ #define DLPACK_DLPACK_H_
10
+
11
+ /**
12
+ * \brief Compatibility with C++
13
+ */
14
+ #ifdef __cplusplus
15
+ #define DLPACK_EXTERN_C extern "C"
16
+ #else
17
+ #define DLPACK_EXTERN_C
18
+ #endif
19
+
20
+ /*! \brief The current major version of dlpack */
21
+ #define DLPACK_MAJOR_VERSION 1
22
+
23
+ /*! \brief The current minor version of dlpack */
24
+ #define DLPACK_MINOR_VERSION 0
25
+
26
+ /*! \brief DLPACK_DLL prefix for windows */
27
+ #ifdef _WIN32
28
+ #ifdef DLPACK_EXPORTS
29
+ #define DLPACK_DLL __declspec(dllexport)
30
+ #else
31
+ #define DLPACK_DLL __declspec(dllimport)
32
+ #endif
33
+ #else
34
+ #define DLPACK_DLL
35
+ #endif
36
+
37
+ #include <stddef.h>
38
+ #include <stdint.h>
39
+
40
+ #ifdef __cplusplus
41
+ extern "C" {
42
+ #endif
43
+
44
+ /*!
45
+ * \brief The DLPack version.
46
+ *
47
+ * A change in major version indicates that we have changed the
48
+ * data layout of the ABI - DLManagedTensorVersioned.
49
+ *
50
+ * A change in minor version indicates that we have added new
51
+ * code, such as a new device type, but the ABI is kept the same.
52
+ *
53
+ * If an obtained DLPack tensor has a major version that disagrees
54
+ * with the version number specified in this header file
55
+ * (i.e. major != DLPACK_MAJOR_VERSION), the consumer must call the deleter
56
+ * (and it is safe to do so). It is not safe to access any other fields
57
+ * as the memory layout will have changed.
58
+ *
59
+ * In the case of a minor version mismatch, the tensor can be safely used as
60
+ * long as the consumer knows how to interpret all fields. Minor version
61
+ * updates indicate the addition of enumeration values.
62
+ */
63
+ typedef struct {
64
+ /*! \brief DLPack major version. */
65
+ uint32_t major;
66
+ /*! \brief DLPack minor version. */
67
+ uint32_t minor;
68
+ } DLPackVersion;
69
+
70
+ /*!
71
+ * \brief The device type in DLDevice.
72
+ */
73
+ #ifdef __cplusplus
74
+ typedef enum : int32_t {
75
+ #else
76
+ typedef enum {
77
+ #endif
78
+ /*! \brief CPU device */
79
+ kDLCPU = 1,
80
+ /*! \brief CUDA GPU device */
81
+ kDLCUDA = 2,
82
+ /*!
83
+ * \brief Pinned CUDA CPU memory by cudaMallocHost
84
+ */
85
+ kDLCUDAHost = 3,
86
+ /*! \brief OpenCL devices. */
87
+ kDLOpenCL = 4,
88
+ /*! \brief Vulkan buffer for next generation graphics. */
89
+ kDLVulkan = 7,
90
+ /*! \brief Metal for Apple GPU. */
91
+ kDLMetal = 8,
92
+ /*! \brief Verilog simulator buffer */
93
+ kDLVPI = 9,
94
+ /*! \brief ROCm GPUs for AMD GPUs */
95
+ kDLROCM = 10,
96
+ /*!
97
+ * \brief Pinned ROCm CPU memory allocated by hipMallocHost
98
+ */
99
+ kDLROCMHost = 11,
100
+ /*!
101
+ * \brief Reserved extension device type,
102
+ * used for quickly test extension device
103
+ * The semantics can differ depending on the implementation.
104
+ */
105
+ kDLExtDev = 12,
106
+ /*!
107
+ * \brief CUDA managed/unified memory allocated by cudaMallocManaged
108
+ */
109
+ kDLCUDAManaged = 13,
110
+ /*!
111
+ * \brief Unified shared memory allocated on a oneAPI non-partititioned
112
+ * device. Call to oneAPI runtime is required to determine the device
113
+ * type, the USM allocation type and the sycl context it is bound to.
114
+ *
115
+ */
116
+ kDLOneAPI = 14,
117
+ /*! \brief GPU support for next generation WebGPU standard. */
118
+ kDLWebGPU = 15,
119
+ /*! \brief Qualcomm Hexagon DSP */
120
+ kDLHexagon = 16,
121
+ } DLDeviceType;
122
+
123
+ /*!
124
+ * \brief A Device for Tensor and operator.
125
+ */
126
+ typedef struct {
127
+ /*! \brief The device type used in the device. */
128
+ DLDeviceType device_type;
129
+ /*!
130
+ * \brief The device index.
131
+ * For vanilla CPU memory, pinned memory, or managed memory, this is set to 0.
132
+ */
133
+ int32_t device_id;
134
+ } DLDevice;
135
+
136
+ /*!
137
+ * \brief The type code options DLDataType.
138
+ */
139
+ typedef enum {
140
+ /*! \brief signed integer */
141
+ kDLInt = 0U,
142
+ /*! \brief unsigned integer */
143
+ kDLUInt = 1U,
144
+ /*! \brief IEEE floating point */
145
+ kDLFloat = 2U,
146
+ /*!
147
+ * \brief Opaque handle type, reserved for testing purposes.
148
+ * Frameworks need to agree on the handle data type for the exchange to be well-defined.
149
+ */
150
+ kDLOpaqueHandle = 3U,
151
+ /*! \brief bfloat16 */
152
+ kDLBfloat = 4U,
153
+ /*!
154
+ * \brief complex number
155
+ * (C/C++/Python layout: compact struct per complex number)
156
+ */
157
+ kDLComplex = 5U,
158
+ /*! \brief boolean */
159
+ kDLBool = 6U,
160
+ } DLDataTypeCode;
161
+
162
+ /*!
163
+ * \brief The data type the tensor can hold. The data type is assumed to follow the
164
+ * native endian-ness. An explicit error message should be raised when attempting to
165
+ * export an array with non-native endianness
166
+ *
167
+ * Examples
168
+ * - float: type_code = 2, bits = 32, lanes = 1
169
+ * - float4(vectorized 4 float): type_code = 2, bits = 32, lanes = 4
170
+ * - int8: type_code = 0, bits = 8, lanes = 1
171
+ * - std::complex<float>: type_code = 5, bits = 64, lanes = 1
172
+ * - bool: type_code = 6, bits = 8, lanes = 1 (as per common array library convention,
173
+ * the underlying storage size of bool is 8 bits)
174
+ */
175
+ typedef struct {
176
+ /*!
177
+ * \brief Type code of base types.
178
+ * We keep it uint8_t instead of DLDataTypeCode for minimal memory
179
+ * footprint, but the value should be one of DLDataTypeCode enum values.
180
+ * */
181
+ uint8_t code;
182
+ /*!
183
+ * \brief Number of bits, common choices are 8, 16, 32.
184
+ */
185
+ uint8_t bits;
186
+ /*! \brief Number of lanes in the type, used for vector types. */
187
+ uint16_t lanes;
188
+ } DLDataType;
189
+
190
+ /*!
191
+ * \brief Plain C Tensor object, does not manage memory.
192
+ */
193
+ typedef struct {
194
+ /*!
195
+ * \brief The data pointer points to the allocated data. This will be CUDA
196
+ * device pointer or cl_mem handle in OpenCL. It may be opaque on some device
197
+ * types. This pointer is always aligned to 256 bytes as in CUDA. The
198
+ * `byte_offset` field should be used to point to the beginning of the data.
199
+ *
200
+ * Note that as of Nov 2021, multiple libraries (CuPy, PyTorch, TensorFlow,
202
+ * TVM, perhaps others) do not adhere to this 256 byte alignment requirement
202
+ * on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed
203
+ * (after which this note will be updated); at the moment it is recommended
204
+ * to not rely on the data pointer being correctly aligned.
205
+ *
206
+ * For given DLTensor, the size of memory required to store the contents of
207
+ * data is calculated as follows:
208
+ *
209
+ * \code{.c}
210
+ * static inline size_t GetDataSize(const DLTensor* t) {
211
+ * size_t size = 1;
212
+ * for (tvm_index_t i = 0; i < t->ndim; ++i) {
213
+ * size *= t->shape[i];
214
+ * }
215
+ * size *= (t->dtype.bits * t->dtype.lanes + 7) / 8;
216
+ * return size;
217
+ * }
218
+ * \endcode
219
+ */
220
+ void* data;
221
+ /*! \brief The device of the tensor */
222
+ DLDevice device;
223
+ /*! \brief Number of dimensions */
224
+ int32_t ndim;
225
+ /*! \brief The data type of the pointer*/
226
+ DLDataType dtype;
227
+ /*! \brief The shape of the tensor */
228
+ int64_t* shape;
229
+ /*!
230
+ * \brief strides of the tensor (in number of elements, not bytes)
231
+ * can be NULL, indicating tensor is compact and row-majored.
232
+ */
233
+ int64_t* strides;
234
+ /*! \brief The offset in bytes to the beginning pointer to data */
235
+ uint64_t byte_offset;
236
+ } DLTensor;
237
+
238
+ /*!
239
+ * \brief C Tensor object, manage memory of DLTensor. This data structure is
240
+ * intended to facilitate the borrowing of DLTensor by another framework. It is
241
+ * not meant to transfer the tensor. When the borrowing framework doesn't need
242
+ * the tensor, it should call the deleter to notify the host that the resource
243
+ * is no longer needed.
244
+ *
245
+ * \note This data structure is used as Legacy DLManagedTensor
246
+ * in DLPack exchange and is deprecated after DLPack v0.8
247
+ * Use DLManagedTensorVersioned instead.
248
+ * This data structure may get renamed or deleted in future versions.
249
+ *
250
+ * \sa DLManagedTensorVersioned
251
+ */
252
+ typedef struct DLManagedTensor {
253
+ /*! \brief DLTensor which is being memory managed */
254
+ DLTensor dl_tensor;
255
+ /*! \brief the context of the original host framework of DLManagedTensor in
256
+ * which DLManagedTensor is used in the framework. It can also be NULL.
257
+ */
258
+ void* manager_ctx;
259
+ /*!
260
+ * \brief Destructor - this should be called
261
+ * to destruct the manager_ctx which backs the DLManagedTensor. It can be
262
+ * NULL if there is no way for the caller to provide a reasonable destructor.
263
+ * The destructors deletes the argument self as well.
264
+ */
265
+ void (*deleter)(struct DLManagedTensor* self);
266
+ } DLManagedTensor;
267
+
268
+ // bit masks used in the DLManagedTensorVersioned
269
+
270
+ /*! \brief bit mask to indicate that the tensor is read only. */
271
+ #define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL)
272
+
273
+ /*!
274
+ * \brief A versioned and managed C Tensor object, manage memory of DLTensor.
275
+ *
276
+ * This data structure is intended to facilitate the borrowing of DLTensor by
277
+ * another framework. It is not meant to transfer the tensor. When the borrowing
278
+ * framework doesn't need the tensor, it should call the deleter to notify the
279
+ * host that the resource is no longer needed.
280
+ *
281
+ * \note This is the current standard DLPack exchange data structure.
282
+ */
283
+ struct DLManagedTensorVersioned {
284
+ /*!
285
+ * \brief The API and ABI version of the current managed Tensor
286
+ */
287
+ DLPackVersion version;
288
+ /*!
289
+ * \brief the context of the original host framework.
290
+ *
291
+ * Stores the context in which DLManagedTensorVersioned is used in the
292
+ * framework. It can also be NULL.
293
+ */
294
+ void* manager_ctx;
295
+ /*!
296
+ * \brief Destructor.
297
+ *
298
+ * This should be called to destruct manager_ctx which holds the
299
+ * DLManagedTensorVersioned. It can be NULL if there is no way for the caller to provide
300
+ * a reasonable destructor. The destructors deletes the argument self as well.
301
+ */
302
+ void (*deleter)(struct DLManagedTensorVersioned* self);
303
+ /*!
304
+ * \brief Additional bitmask flags information about the tensor.
305
+ *
306
+ * By default the flags should be set to 0.
307
+ *
308
+ * \note Future ABI changes should keep everything until this field
309
+ * stable, to ensure that deleter can be correctly called.
310
+ *
311
+ * \sa DLPACK_FLAG_BITMASK_READ_ONLY
312
+ */
313
+ uint64_t flags;
314
+ /*! \brief DLTensor which is being memory managed */
315
+ DLTensor dl_tensor;
316
+ };
317
+
318
+ #ifdef __cplusplus
319
+ } // DLPACK_EXTERN_C
320
+ #endif
321
+ #endif // DLPACK_DLPACK_H_
venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/helpers.h ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
23
+
24
+ #include "arrow/c/abi.h"
25
+
26
/// Internal assertion helper for the C data interface glue code.
/// On failure, prints the source location and message to stderr, then
/// aborts the process. The trailing newline ensures the diagnostic is
/// not glued to any subsequent stderr output.
#define ARROW_C_ASSERT(condition, msg)                            \
  do {                                                            \
    if (!(condition)) {                                           \
      fprintf(stderr, "%s:%d:: %s\n", __FILE__, __LINE__, (msg)); \
      abort();                                                    \
    }                                                             \
  } while (0)
33
+
34
+ #ifdef __cplusplus
35
+ extern "C" {
36
+ #endif
37
+
38
+ /// Query whether the C schema is released
39
+ inline int ArrowSchemaIsReleased(const struct ArrowSchema* schema) {
40
+ return schema->release == NULL;
41
+ }
42
+
43
+ /// Mark the C schema released (for use in release callbacks)
44
+ inline void ArrowSchemaMarkReleased(struct ArrowSchema* schema) {
45
+ schema->release = NULL;
46
+ }
47
+
48
+ /// Move the C schema from `src` to `dest`
49
+ ///
50
+ /// Note `dest` must *not* point to a valid schema already, otherwise there
51
+ /// will be a memory leak.
52
+ inline void ArrowSchemaMove(struct ArrowSchema* src, struct ArrowSchema* dest) {
53
+ assert(dest != src);
54
+ assert(!ArrowSchemaIsReleased(src));
55
+ memcpy(dest, src, sizeof(struct ArrowSchema));
56
+ ArrowSchemaMarkReleased(src);
57
+ }
58
+
59
+ /// Release the C schema, if necessary, by calling its release callback
60
+ inline void ArrowSchemaRelease(struct ArrowSchema* schema) {
61
+ if (!ArrowSchemaIsReleased(schema)) {
62
+ schema->release(schema);
63
+ ARROW_C_ASSERT(ArrowSchemaIsReleased(schema),
64
+ "ArrowSchemaRelease did not cleanup release callback");
65
+ }
66
+ }
67
+
68
+ /// Query whether the C array is released
69
+ inline int ArrowArrayIsReleased(const struct ArrowArray* array) {
70
+ return array->release == NULL;
71
+ }
72
+
73
+ /// Mark the C array released (for use in release callbacks)
74
+ inline void ArrowArrayMarkReleased(struct ArrowArray* array) { array->release = NULL; }
75
+
76
+ /// Move the C array from `src` to `dest`
77
+ ///
78
+ /// Note `dest` must *not* point to a valid array already, otherwise there
79
+ /// will be a memory leak.
80
+ inline void ArrowArrayMove(struct ArrowArray* src, struct ArrowArray* dest) {
81
+ assert(dest != src);
82
+ assert(!ArrowArrayIsReleased(src));
83
+ memcpy(dest, src, sizeof(struct ArrowArray));
84
+ ArrowArrayMarkReleased(src);
85
+ }
86
+
87
+ /// Release the C array, if necessary, by calling its release callback
88
+ inline void ArrowArrayRelease(struct ArrowArray* array) {
89
+ if (!ArrowArrayIsReleased(array)) {
90
+ array->release(array);
91
+ ARROW_C_ASSERT(ArrowArrayIsReleased(array),
92
+ "ArrowArrayRelease did not cleanup release callback");
93
+ }
94
+ }
95
+
96
+ /// Query whether the C array stream is released
97
+ inline int ArrowArrayStreamIsReleased(const struct ArrowArrayStream* stream) {
98
+ return stream->release == NULL;
99
+ }
100
+
101
+ /// Mark the C array stream released (for use in release callbacks)
102
+ inline void ArrowArrayStreamMarkReleased(struct ArrowArrayStream* stream) {
103
+ stream->release = NULL;
104
+ }
105
+
106
+ /// Move the C array stream from `src` to `dest`
107
+ ///
108
+ /// Note `dest` must *not* point to a valid stream already, otherwise there
109
+ /// will be a memory leak.
110
+ inline void ArrowArrayStreamMove(struct ArrowArrayStream* src,
111
+ struct ArrowArrayStream* dest) {
112
+ assert(dest != src);
113
+ assert(!ArrowArrayStreamIsReleased(src));
114
+ memcpy(dest, src, sizeof(struct ArrowArrayStream));
115
+ ArrowArrayStreamMarkReleased(src);
116
+ }
117
+
118
+ /// Release the C array stream, if necessary, by calling its release callback
119
+ inline void ArrowArrayStreamRelease(struct ArrowArrayStream* stream) {
120
+ if (!ArrowArrayStreamIsReleased(stream)) {
121
+ stream->release(stream);
122
+ ARROW_C_ASSERT(ArrowArrayStreamIsReleased(stream),
123
+ "ArrowArrayStreamRelease did not cleanup release callback");
124
+ }
125
+ }
126
+
127
+ #ifdef __cplusplus
128
+ }
129
+ #endif
venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/csv/options.h"
21
+ #include "arrow/csv/reader.h"
22
+ #include "arrow/csv/writer.h"
venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+
23
+ #include "arrow/csv/options.h"
24
+ #include "arrow/status.h"
25
+ #include "arrow/util/delimiting.h"
26
+ #include "arrow/util/macros.h"
27
+ #include "arrow/util/visibility.h"
28
+
29
+ namespace arrow {
30
+ namespace csv {
31
+
32
+ ARROW_EXPORT
33
+ std::unique_ptr<Chunker> MakeChunker(const ParseOptions& options);
34
+
35
+ } // namespace csv
36
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <utility>
23
+
24
+ #include "arrow/result.h"
25
+ #include "arrow/type_fwd.h"
26
+ #include "arrow/util/type_fwd.h"
27
+ #include "arrow/util/visibility.h"
28
+
29
+ namespace arrow {
30
+ namespace csv {
31
+
32
+ class BlockParser;
33
+ struct ConvertOptions;
34
+
35
+ class ARROW_EXPORT ColumnBuilder {
36
+ public:
37
+ virtual ~ColumnBuilder() = default;
38
+
39
+ /// Spawn a task that will try to convert and append the given CSV block.
40
+ /// All calls to Append() should happen on the same thread, otherwise
41
+ /// call Insert() instead.
42
+ virtual void Append(const std::shared_ptr<BlockParser>& parser) = 0;
43
+
44
+ /// Spawn a task that will try to convert and insert the given CSV block
45
+ virtual void Insert(int64_t block_index,
46
+ const std::shared_ptr<BlockParser>& parser) = 0;
47
+
48
+ /// Return the final chunked array. The TaskGroup _must_ have finished!
49
+ virtual Result<std::shared_ptr<ChunkedArray>> Finish() = 0;
50
+
51
+ std::shared_ptr<arrow::internal::TaskGroup> task_group() { return task_group_; }
52
+
53
+ /// Construct a strictly-typed ColumnBuilder.
54
+ static Result<std::shared_ptr<ColumnBuilder>> Make(
55
+ MemoryPool* pool, const std::shared_ptr<DataType>& type, int32_t col_index,
56
+ const ConvertOptions& options,
57
+ const std::shared_ptr<arrow::internal::TaskGroup>& task_group);
58
+
59
+ /// Construct a type-inferring ColumnBuilder.
60
+ static Result<std::shared_ptr<ColumnBuilder>> Make(
61
+ MemoryPool* pool, int32_t col_index, const ConvertOptions& options,
62
+ const std::shared_ptr<arrow::internal::TaskGroup>& task_group);
63
+
64
+ /// Construct a ColumnBuilder for a column of nulls
65
+ /// (i.e. not present in the CSV file).
66
+ static Result<std::shared_ptr<ColumnBuilder>> MakeNull(
67
+ MemoryPool* pool, const std::shared_ptr<DataType>& type,
68
+ const std::shared_ptr<arrow::internal::TaskGroup>& task_group);
69
+
70
+ protected:
71
+ explicit ColumnBuilder(std::shared_ptr<arrow::internal::TaskGroup> task_group)
72
+ : task_group_(std::move(task_group)) {}
73
+
74
+ std::shared_ptr<arrow::internal::TaskGroup> task_group_;
75
+ };
76
+
77
+ } // namespace csv
78
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <utility>
23
+
24
+ #include "arrow/result.h"
25
+ #include "arrow/type_fwd.h"
26
+ #include "arrow/util/type_fwd.h"
27
+ #include "arrow/util/visibility.h"
28
+
29
+ namespace arrow {
30
+ namespace csv {
31
+
32
+ class BlockParser;
33
+ struct ConvertOptions;
34
+
35
+ class ARROW_EXPORT ColumnDecoder {
36
+ public:
37
+ virtual ~ColumnDecoder() = default;
38
+
39
+ /// Spawn a task that will try to convert and insert the given CSV block
40
+ virtual Future<std::shared_ptr<Array>> Decode(
41
+ const std::shared_ptr<BlockParser>& parser) = 0;
42
+
43
+ /// Construct a strictly-typed ColumnDecoder.
44
+ static Result<std::shared_ptr<ColumnDecoder>> Make(MemoryPool* pool,
45
+ std::shared_ptr<DataType> type,
46
+ int32_t col_index,
47
+ const ConvertOptions& options);
48
+
49
+ /// Construct a type-inferring ColumnDecoder.
50
+ /// Inference will run only on the first block, the type will be frozen afterwards.
51
+ static Result<std::shared_ptr<ColumnDecoder>> Make(MemoryPool* pool, int32_t col_index,
52
+ const ConvertOptions& options);
53
+
54
+ /// Construct a ColumnDecoder for a column of nulls
55
+ /// (i.e. not present in the CSV file).
56
+ static Result<std::shared_ptr<ColumnDecoder>> MakeNull(MemoryPool* pool,
57
+ std::shared_ptr<DataType> type);
58
+
59
+ protected:
60
+ ColumnDecoder() = default;
61
+ };
62
+
63
+ } // namespace csv
64
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+
23
+ #include "arrow/csv/options.h"
24
+ #include "arrow/result.h"
25
+ #include "arrow/type_fwd.h"
26
+ #include "arrow/util/macros.h"
27
+ #include "arrow/util/visibility.h"
28
+
29
+ namespace arrow {
30
+ namespace csv {
31
+
32
+ class BlockParser;
33
+
34
+ class ARROW_EXPORT Converter {
35
+ public:
36
+ Converter(const std::shared_ptr<DataType>& type, const ConvertOptions& options,
37
+ MemoryPool* pool);
38
+ virtual ~Converter() = default;
39
+
40
+ virtual Result<std::shared_ptr<Array>> Convert(const BlockParser& parser,
41
+ int32_t col_index) = 0;
42
+
43
+ std::shared_ptr<DataType> type() const { return type_; }
44
+
45
+ // Create a Converter for the given data type
46
+ static Result<std::shared_ptr<Converter>> Make(
47
+ const std::shared_ptr<DataType>& type, const ConvertOptions& options,
48
+ MemoryPool* pool = default_memory_pool());
49
+
50
+ protected:
51
+ ARROW_DISALLOW_COPY_AND_ASSIGN(Converter);
52
+
53
+ virtual Status Initialize() = 0;
54
+
55
+ // CAUTION: ConvertOptions can grow large (if it customizes hundreds or
56
+ // thousands of columns), so avoid copying it in each Converter.
57
+ const ConvertOptions& options_;
58
+ MemoryPool* pool_;
59
+ std::shared_ptr<DataType> type_;
60
+ };
61
+
62
+ class ARROW_EXPORT DictionaryConverter : public Converter {
63
+ public:
64
+ DictionaryConverter(const std::shared_ptr<DataType>& value_type,
65
+ const ConvertOptions& options, MemoryPool* pool);
66
+
67
+ // If the dictionary length goes above this value, conversion will fail
68
+ // with Status::IndexError.
69
+ virtual void SetMaxCardinality(int32_t max_length) = 0;
70
+
71
+ // Create a Converter for the given dictionary value type.
72
+ // The dictionary index type will always be Int32.
73
+ static Result<std::shared_ptr<DictionaryConverter>> Make(
74
+ const std::shared_ptr<DataType>& value_type, const ConvertOptions& options,
75
+ MemoryPool* pool = default_memory_pool());
76
+
77
+ protected:
78
+ std::shared_ptr<DataType> value_type_;
79
+ };
80
+
81
+ } // namespace csv
82
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <string_view>
22
+
23
+ namespace arrow {
24
+ namespace csv {
25
+
26
+ /// \brief Description of an invalid row
27
+ struct InvalidRow {
28
+ /// \brief Number of columns expected in the row
29
+ int32_t expected_columns;
30
+ /// \brief Actual number of columns found in the row
31
+ int32_t actual_columns;
32
+ /// \brief The physical row number if known or -1
33
+ ///
34
+ /// This number is one-based and also accounts for non-data rows (such as
35
+ /// CSV header rows).
36
+ int64_t number;
37
+ /// \brief View of the entire row. Memory will be freed after callback returns
38
+ const std::string_view text;
39
+ };
40
+
41
+ /// \brief Result returned by an InvalidRowHandler
42
+ enum class InvalidRowResult {
43
+ // Generate an error describing this row
44
+ Error,
45
+ // Skip over this row
46
+ Skip
47
+ };
48
+
49
+ /// \brief callback for handling a row with an invalid number of columns while parsing
50
+ /// \return result indicating if an error should be returned from the parser or the row is
51
+ /// skipped
52
+ using InvalidRowHandler = std::function<InvalidRowResult(const InvalidRow&)>;
53
+
54
+ } // namespace csv
55
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <unordered_map>
24
+ #include <vector>
25
+
26
+ #include "arrow/csv/invalid_row.h"
27
+ #include "arrow/csv/type_fwd.h"
28
+ #include "arrow/io/interfaces.h"
29
+ #include "arrow/status.h"
30
+ #include "arrow/util/visibility.h"
31
+
32
+ namespace arrow {
33
+
34
+ class DataType;
35
+ class TimestampParser;
36
+
37
+ namespace csv {
38
+
39
+ // Silly workaround for https://github.com/michaeljones/breathe/issues/453
40
+ constexpr char kDefaultEscapeChar = '\\';
41
+
42
+ struct ARROW_EXPORT ParseOptions {
43
+ // Parsing options
44
+
45
+ /// Field delimiter
46
+ char delimiter = ',';
47
+ /// Whether quoting is used
48
+ bool quoting = true;
49
+ /// Quoting character (if `quoting` is true)
50
+ char quote_char = '"';
51
+ /// Whether a quote inside a value is double-quoted
52
+ bool double_quote = true;
53
+ /// Whether escaping is used
54
+ bool escaping = false;
55
+ /// Escaping character (if `escaping` is true)
56
+ char escape_char = kDefaultEscapeChar;
57
+ /// Whether values are allowed to contain CR (0x0d) and LF (0x0a) characters
58
+ bool newlines_in_values = false;
59
+ /// Whether empty lines are ignored. If false, an empty line represents
60
+ /// a single empty value (assuming a one-column CSV file).
61
+ bool ignore_empty_lines = true;
62
+ /// A handler function for rows which do not have the correct number of columns
63
+ InvalidRowHandler invalid_row_handler;
64
+
65
+ /// Create parsing options with default values
66
+ static ParseOptions Defaults();
67
+
68
+ /// \brief Test that all set options are valid
69
+ Status Validate() const;
70
+ };
71
+
72
+ struct ARROW_EXPORT ConvertOptions {
73
+ // Conversion options
74
+
75
+ /// Whether to check UTF8 validity of string columns
76
+ bool check_utf8 = true;
77
+ /// Optional per-column types (disabling type inference on those columns)
78
+ std::unordered_map<std::string, std::shared_ptr<DataType>> column_types;
79
+ /// Recognized spellings for null values
80
+ std::vector<std::string> null_values;
81
+ /// Recognized spellings for boolean true values
82
+ std::vector<std::string> true_values;
83
+ /// Recognized spellings for boolean false values
84
+ std::vector<std::string> false_values;
85
+
86
+ /// Whether string / binary columns can have null values.
87
+ ///
88
+ /// If true, then strings in "null_values" are considered null for string columns.
89
+ /// If false, then all strings are valid string values.
90
+ bool strings_can_be_null = false;
91
+
92
+ /// Whether quoted values can be null.
93
+ ///
94
+ /// If true, then strings in "null_values" are also considered null when they
95
+ /// appear quoted in the CSV file. Otherwise, quoted values are never considered null.
96
+ bool quoted_strings_can_be_null = true;
97
+
98
+ /// Whether to try to automatically dict-encode string / binary data.
99
+ /// If true, then when type inference detects a string or binary column,
100
+ /// it is dict-encoded up to `auto_dict_max_cardinality` distinct values
101
+ /// (per chunk), after which it switches to regular encoding.
102
+ ///
103
+ /// This setting is ignored for non-inferred columns (those in `column_types`).
104
+ bool auto_dict_encode = false;
105
+ int32_t auto_dict_max_cardinality = 50;
106
+
107
+ /// Decimal point character for floating-point and decimal data
108
+ char decimal_point = '.';
109
+
110
+ // XXX Should we have a separate FilterOptions?
111
+
112
+ /// If non-empty, indicates the names of columns from the CSV file that should
113
+ /// be actually read and converted (in the vector's order).
114
+ /// Columns not in this vector will be ignored.
115
+ std::vector<std::string> include_columns;
116
+ /// If false, columns in `include_columns` but not in the CSV file will error out.
117
+ /// If true, columns in `include_columns` but not in the CSV file will produce
118
+ /// a column of nulls (whose type is selected using `column_types`,
119
+ /// or null by default)
120
+ /// This option is ignored if `include_columns` is empty.
121
+ bool include_missing_columns = false;
122
+
123
+ /// User-defined timestamp parsers, using the virtual parser interface in
124
+ /// arrow/util/value_parsing.h. More than one parser can be specified, and
125
+ /// the CSV conversion logic will try parsing values starting from the
126
+ /// beginning of this vector. If no parsers are specified, we use the default
127
+ /// built-in ISO-8601 parser.
128
+ std::vector<std::shared_ptr<TimestampParser>> timestamp_parsers;
129
+
130
+ /// Create conversion options with default values, including conventional
131
+ /// values for `null_values`, `true_values` and `false_values`
132
+ static ConvertOptions Defaults();
133
+
134
+ /// \brief Test that all set options are valid
135
+ Status Validate() const;
136
+ };
137
+
138
+ struct ARROW_EXPORT ReadOptions {
139
+ // Reader options
140
+
141
+ /// Whether to use the global CPU thread pool
142
+ bool use_threads = true;
143
+
144
+ /// \brief Block size we request from the IO layer.
145
+ ///
146
+ /// This will determine multi-threading granularity as well as
147
+ /// the size of individual record batches.
148
+ /// Minimum valid value for block size is 1
149
+ int32_t block_size = 1 << 20; // 1 MB
150
+
151
+ /// Number of header rows to skip (not including the row of column names, if any)
152
+ int32_t skip_rows = 0;
153
+
154
+ /// Number of rows to skip after the column names are read, if any
155
+ int32_t skip_rows_after_names = 0;
156
+
157
+ /// Column names for the target table.
158
+ /// If empty, fall back on autogenerate_column_names.
159
+ std::vector<std::string> column_names;
160
+
161
+ /// Whether to autogenerate column names if `column_names` is empty.
162
+ /// If true, column names will be of the form "f0", "f1"...
163
+ /// If false, column names will be read from the first CSV row after `skip_rows`.
164
+ bool autogenerate_column_names = false;
165
+
166
+ /// Create read options with default values
167
+ static ReadOptions Defaults();
168
+
169
+ /// \brief Test that all set options are valid
170
+ Status Validate() const;
171
+ };
172
+
173
+ /// \brief Quoting style for CSV writing
174
+ enum class ARROW_EXPORT QuotingStyle {
175
+ /// Only enclose values in quotes which need them, because their CSV rendering can
176
+ /// contain quotes itself (e.g. strings or binary values)
177
+ Needed,
178
+ /// Enclose all valid values in quotes. Nulls are not quoted. May cause readers to
179
+ /// interpret all values as strings if schema is inferred.
180
+ AllValid,
181
+ /// Do not enclose any values in quotes. Prevents values from containing quotes ("),
182
+ /// cell delimiters (,) or line endings (\\r, \\n), (following RFC4180). If values
183
+ /// contain these characters, an error is caused when attempting to write.
184
+ None
185
+ };
186
+
187
+ struct ARROW_EXPORT WriteOptions {
188
+ /// Whether to write an initial header line with column names
189
+ bool include_header = true;
190
+
191
+ /// \brief Maximum number of rows processed at a time
192
+ ///
193
+ /// The CSV writer converts and writes data in batches of N rows.
194
+ /// This number can impact performance.
195
+ int32_t batch_size = 1024;
196
+
197
+ /// Field delimiter
198
+ char delimiter = ',';
199
+
200
+ /// \brief The string to write for null values. Quotes are not allowed in this string.
201
+ std::string null_string;
202
+
203
+ /// \brief IO context for writing.
204
+ io::IOContext io_context;
205
+
206
+ /// \brief The end of line character to use for ending rows
207
+ std::string eol = "\n";
208
+
209
+ /// \brief Quoting style
210
+ QuotingStyle quoting_style = QuotingStyle::Needed;
211
+
212
+ /// Create write options with default values
213
+ static WriteOptions Defaults();
214
+
215
+ /// \brief Test that all set options are valid
216
+ Status Validate() const;
217
+ };
218
+
219
+ } // namespace csv
220
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <cstddef>
22
+ #include <cstdint>
23
+ #include <memory>
24
+ #include <string_view>
25
+ #include <vector>
26
+
27
+ #include "arrow/buffer.h"
28
+ #include "arrow/csv/options.h"
29
+ #include "arrow/csv/type_fwd.h"
30
+ #include "arrow/status.h"
31
+ #include "arrow/util/macros.h"
32
+ #include "arrow/util/visibility.h"
33
+
34
+ namespace arrow {
35
+
36
+ class MemoryPool;
37
+
38
+ namespace csv {
39
+
40
+ /// Skip at most num_rows from the given input. The input pointer is updated
41
+ /// and the number of actually skipped rows is returns (may be less than
42
+ /// requested if the input is too short).
43
+ ARROW_EXPORT
44
+ int32_t SkipRows(const uint8_t* data, uint32_t size, int32_t num_rows,
45
+ const uint8_t** out_data);
46
+
47
+ class BlockParserImpl;
48
+
49
+ namespace detail {
50
+
51
+ struct ParsedValueDesc {
52
+ uint32_t offset : 31;
53
+ bool quoted : 1;
54
+ };
55
+
56
+ class ARROW_EXPORT DataBatch {
57
+ public:
58
+ explicit DataBatch(int32_t num_cols) : num_cols_(num_cols) {}
59
+
60
+ /// \brief Return the number of parsed rows (not skipped)
61
+ int32_t num_rows() const { return num_rows_; }
62
+ /// \brief Return the number of parsed columns
63
+ int32_t num_cols() const { return num_cols_; }
64
+ /// \brief Return the total size in bytes of parsed data
65
+ uint32_t num_bytes() const { return parsed_size_; }
66
+ /// \brief Return the number of skipped rows
67
+ int32_t num_skipped_rows() const { return static_cast<int32_t>(skipped_rows_.size()); }
68
+
69
+ template <typename Visitor>
70
+ Status VisitColumn(int32_t col_index, int64_t first_row, Visitor&& visit) const {
71
+ using detail::ParsedValueDesc;
72
+
73
+ int32_t batch_row = 0;
74
+ for (size_t buf_index = 0; buf_index < values_buffers_.size(); ++buf_index) {
75
+ const auto& values_buffer = values_buffers_[buf_index];
76
+ const auto values = reinterpret_cast<const ParsedValueDesc*>(values_buffer->data());
77
+ const auto max_pos =
78
+ static_cast<int32_t>(values_buffer->size() / sizeof(ParsedValueDesc)) - 1;
79
+ for (int32_t pos = col_index; pos < max_pos; pos += num_cols_, ++batch_row) {
80
+ auto start = values[pos].offset;
81
+ auto stop = values[pos + 1].offset;
82
+ auto quoted = values[pos + 1].quoted;
83
+ Status status = visit(parsed_ + start, stop - start, quoted);
84
+ if (ARROW_PREDICT_FALSE(!status.ok())) {
85
+ return DecorateWithRowNumber(std::move(status), first_row, batch_row);
86
+ }
87
+ }
88
+ }
89
+ return Status::OK();
90
+ }
91
+
92
+ template <typename Visitor>
93
+ Status VisitLastRow(Visitor&& visit) const {
94
+ using detail::ParsedValueDesc;
95
+
96
+ const auto& values_buffer = values_buffers_.back();
97
+ const auto values = reinterpret_cast<const ParsedValueDesc*>(values_buffer->data());
98
+ const auto start_pos =
99
+ static_cast<int32_t>(values_buffer->size() / sizeof(ParsedValueDesc)) -
100
+ num_cols_ - 1;
101
+ for (int32_t col_index = 0; col_index < num_cols_; ++col_index) {
102
+ auto start = values[start_pos + col_index].offset;
103
+ auto stop = values[start_pos + col_index + 1].offset;
104
+ auto quoted = values[start_pos + col_index + 1].quoted;
105
+ ARROW_RETURN_NOT_OK(visit(parsed_ + start, stop - start, quoted));
106
+ }
107
+ return Status::OK();
108
+ }
109
+
110
+ protected:
111
+ Status DecorateWithRowNumber(Status&& status, int64_t first_row,
112
+ int32_t batch_row) const {
113
+ if (first_row >= 0) {
114
+ // `skipped_rows_` is in ascending order by construction, so use bisection
115
+ // to find out how many rows were skipped before `batch_row`.
116
+ const auto skips_before =
117
+ std::upper_bound(skipped_rows_.begin(), skipped_rows_.end(), batch_row) -
118
+ skipped_rows_.begin();
119
+ status = status.WithMessage("Row #", batch_row + skips_before + first_row, ": ",
120
+ status.message());
121
+ }
122
+ // Use return_if so that when extra context is enabled it will be added
123
+ ARROW_RETURN_IF_(true, std::move(status), ARROW_STRINGIFY(status));
124
+ return std::move(status);
125
+ }
126
+
127
+ // The number of rows in this batch (not including any skipped ones)
128
+ int32_t num_rows_ = 0;
129
+ // The number of columns
130
+ int32_t num_cols_ = 0;
131
+
132
+ // XXX should we ensure the parsed buffer is padded with 8 or 16 excess zero bytes?
133
+ // It may help with null parsing...
134
+ std::vector<std::shared_ptr<Buffer>> values_buffers_;
135
+ std::shared_ptr<Buffer> parsed_buffer_;
136
+ const uint8_t* parsed_ = NULLPTR;
137
+ int32_t parsed_size_ = 0;
138
+
139
+ // Record the current num_rows_ each time a row is skipped
140
+ std::vector<int32_t> skipped_rows_;
141
+
142
+ friend class ::arrow::csv::BlockParserImpl;
143
+ };
144
+
145
+ } // namespace detail
146
+
147
+ constexpr int32_t kMaxParserNumRows = 100000;
148
+
149
+ /// \class BlockParser
150
+ /// \brief A reusable block-based parser for CSV data
151
+ ///
152
+ /// The parser takes a block of CSV data and delimits rows and fields,
153
+ /// unquoting and unescaping them on the fly. Parsed data is own by the
154
+ /// parser, so the original buffer can be discarded after Parse() returns.
155
+ ///
156
+ /// If the block is truncated (i.e. not all data can be parsed), it is up
157
+ /// to the caller to arrange the next block to start with the trailing data.
158
+ /// Also, if the previous block ends with CR (0x0d) and a new block starts
159
+ /// with LF (0x0a), the parser will consider the leading newline as an empty
160
+ /// line; the caller should therefore strip it.
161
+ class ARROW_EXPORT BlockParser {
162
+ public:
163
+ explicit BlockParser(ParseOptions options, int32_t num_cols = -1,
164
+ int64_t first_row = -1, int32_t max_num_rows = kMaxParserNumRows);
165
+ explicit BlockParser(MemoryPool* pool, ParseOptions options, int32_t num_cols = -1,
166
+ int64_t first_row = -1, int32_t max_num_rows = kMaxParserNumRows);
167
+ ~BlockParser();
168
+
169
+ /// \brief Parse a block of data
170
+ ///
171
+ /// Parse a block of CSV data, ingesting up to max_num_rows rows.
172
+ /// The number of bytes actually parsed is returned in out_size.
173
+ Status Parse(std::string_view data, uint32_t* out_size);
174
+
175
+ /// \brief Parse sequential blocks of data
176
+ ///
177
+ /// Only the last block is allowed to be truncated.
178
+ Status Parse(const std::vector<std::string_view>& data, uint32_t* out_size);
179
+
180
+ /// \brief Parse the final block of data
181
+ ///
182
+ /// Like Parse(), but called with the final block in a file.
183
+ /// The last row may lack a trailing line separator.
184
+ Status ParseFinal(std::string_view data, uint32_t* out_size);
185
+
186
+ /// \brief Parse the final sequential blocks of data
187
+ ///
188
+ /// Only the last block is allowed to be truncated.
189
+ Status ParseFinal(const std::vector<std::string_view>& data, uint32_t* out_size);
190
+
191
+ /// \brief Return the number of parsed rows
192
+ int32_t num_rows() const { return parsed_batch().num_rows(); }
193
+ /// \brief Return the number of parsed columns
194
+ int32_t num_cols() const { return parsed_batch().num_cols(); }
195
+ /// \brief Return the total size in bytes of parsed data
196
+ uint32_t num_bytes() const { return parsed_batch().num_bytes(); }
197
+
198
+ /// \brief Return the total number of rows including rows which were skipped
199
+ int32_t total_num_rows() const {
200
+ return parsed_batch().num_rows() + parsed_batch().num_skipped_rows();
201
+ }
202
+
203
+ /// \brief Return the row number of the first row in the block or -1 if unsupported
204
+ int64_t first_row_num() const;
205
+
206
+ /// \brief Visit parsed values in a column
207
+ ///
208
+ /// The signature of the visitor is
209
+ /// Status(const uint8_t* data, uint32_t size, bool quoted)
210
+ template <typename Visitor>
211
+ Status VisitColumn(int32_t col_index, Visitor&& visit) const {
212
+ return parsed_batch().VisitColumn(col_index, first_row_num(),
213
+ std::forward<Visitor>(visit));
214
+ }
215
+
216
+ template <typename Visitor>
217
+ Status VisitLastRow(Visitor&& visit) const {
218
+ return parsed_batch().VisitLastRow(std::forward<Visitor>(visit));
219
+ }
220
+
221
+ protected:
222
+ std::unique_ptr<BlockParserImpl> impl_;
223
+
224
+ const detail::DataBatch& parsed_batch() const;
225
+ };
226
+
227
+ } // namespace csv
228
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+
22
+ #include "arrow/csv/options.h" // IWYU pragma: keep
23
+ #include "arrow/io/interfaces.h"
24
+ #include "arrow/record_batch.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/type.h"
27
+ #include "arrow/type_fwd.h"
28
+ #include "arrow/util/future.h"
29
+ #include "arrow/util/thread_pool.h"
30
+ #include "arrow/util/visibility.h"
31
+
32
+ namespace arrow {
33
+ namespace io {
34
+ class InputStream;
35
+ } // namespace io
36
+
37
+ namespace csv {
38
+
39
+ /// A class that reads an entire CSV file into a Arrow Table
40
+ class ARROW_EXPORT TableReader {
41
+ public:
42
+ virtual ~TableReader() = default;
43
+
44
+ /// Read the entire CSV file and convert it to a Arrow Table
45
+ virtual Result<std::shared_ptr<Table>> Read() = 0;
46
+ /// Read the entire CSV file and convert it to a Arrow Table
47
+ virtual Future<std::shared_ptr<Table>> ReadAsync() = 0;
48
+
49
+ /// Create a TableReader instance
50
+ static Result<std::shared_ptr<TableReader>> Make(io::IOContext io_context,
51
+ std::shared_ptr<io::InputStream> input,
52
+ const ReadOptions&,
53
+ const ParseOptions&,
54
+ const ConvertOptions&);
55
+ };
56
+
57
+ /// \brief A class that reads a CSV file incrementally
58
+ ///
59
+ /// Caveats:
60
+ /// - For now, this is always single-threaded (regardless of `ReadOptions::use_threads`.
61
+ /// - Type inference is done on the first block and types are frozen afterwards;
62
+ /// to make sure the right data types are inferred, either set
63
+ /// `ReadOptions::block_size` to a large enough value, or use
64
+ /// `ConvertOptions::column_types` to set the desired data types explicitly.
65
+ class ARROW_EXPORT StreamingReader : public RecordBatchReader {
66
+ public:
67
+ virtual ~StreamingReader() = default;
68
+
69
+ virtual Future<std::shared_ptr<RecordBatch>> ReadNextAsync() = 0;
70
+
71
+ /// \brief Return the number of bytes which have been read and processed
72
+ ///
73
+ /// The returned number includes CSV bytes which the StreamingReader has
74
+ /// finished processing, but not bytes for which some processing (e.g.
75
+ /// CSV parsing or conversion to Arrow layout) is still ongoing.
76
+ ///
77
+ /// Furthermore, the following rules apply:
78
+ /// - bytes skipped by `ReadOptions.skip_rows` are counted as being read before
79
+ /// any records are returned.
80
+ /// - bytes read while parsing the header are counted as being read before any
81
+ /// records are returned.
82
+ /// - bytes skipped by `ReadOptions.skip_rows_after_names` are counted after the
83
+ /// first batch is returned.
84
+ virtual int64_t bytes_read() const = 0;
85
+
86
+ /// Create a StreamingReader instance
87
+ ///
88
+ /// This involves some I/O as the first batch must be loaded during the creation process
89
+ /// so it is returned as a future
90
+ ///
91
+ /// Currently, the StreamingReader is not async-reentrant and does not do any fan-out
92
+ /// parsing (see ARROW-11889)
93
+ static Future<std::shared_ptr<StreamingReader>> MakeAsync(
94
+ io::IOContext io_context, std::shared_ptr<io::InputStream> input,
95
+ arrow::internal::Executor* cpu_executor, const ReadOptions&, const ParseOptions&,
96
+ const ConvertOptions&);
97
+
98
+ static Result<std::shared_ptr<StreamingReader>> Make(
99
+ io::IOContext io_context, std::shared_ptr<io::InputStream> input,
100
+ const ReadOptions&, const ParseOptions&, const ConvertOptions&);
101
+ };
102
+
103
+ /// \brief Count the logical rows of data in a CSV file (i.e. the
104
+ /// number of rows you would get if you read the file into a table).
105
+ ARROW_EXPORT
106
+ Future<int64_t> CountRowsAsync(io::IOContext io_context,
107
+ std::shared_ptr<io::InputStream> input,
108
+ arrow::internal::Executor* cpu_executor,
109
+ const ReadOptions&, const ParseOptions&);
110
+
111
+ } // namespace csv
112
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "arrow/csv/parser.h"
26
+ #include "arrow/testing/visibility.h"
27
+
28
+ namespace arrow {
29
+ namespace csv {
30
+
31
+ ARROW_TESTING_EXPORT
32
+ std::string MakeCSVData(std::vector<std::string> lines);
33
+
34
+ // Make a BlockParser from a vector of lines representing a CSV file
35
+ ARROW_TESTING_EXPORT
36
+ void MakeCSVParser(std::vector<std::string> lines, ParseOptions options, int32_t num_cols,
37
+ MemoryPool* pool, std::shared_ptr<BlockParser>* out);
38
+
39
+ ARROW_TESTING_EXPORT
40
+ void MakeCSVParser(std::vector<std::string> lines, ParseOptions options,
41
+ std::shared_ptr<BlockParser>* out);
42
+
43
+ ARROW_TESTING_EXPORT
44
+ void MakeCSVParser(std::vector<std::string> lines, std::shared_ptr<BlockParser>* out);
45
+
46
+ // Make a BlockParser from a vector of strings representing a single CSV column
47
+ ARROW_TESTING_EXPORT
48
+ void MakeColumnParser(std::vector<std::string> items, std::shared_ptr<BlockParser>* out);
49
+
50
+ ARROW_TESTING_EXPORT
51
+ Result<std::shared_ptr<Buffer>> MakeSampleCsvBuffer(
52
+ size_t num_rows, std::function<bool(size_t row_num)> is_valid = {});
53
+
54
+ } // namespace csv
55
+ } // namespace arrow