applied-ai-018 committed
Commit 0b5e147 · verified · Parent: 9ad7093

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.

Files changed (50):
  1. ckpts/universal/global_step80/zero/15.post_attention_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step80/zero/15.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step80/zero/15.post_attention_layernorm.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step80/zero/4.attention.dense.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/fp32.pt +3 -0
  6. venv/lib/python3.10/site-packages/pyarrow/include/arrow/array.h +49 -0
  7. venv/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h +587 -0
  8. venv/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h +33 -0
  9. venv/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h +275 -0
  10. venv/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h +145 -0
  11. venv/lib/python3.10/site-packages/pyarrow/include/arrow/config.h +98 -0
  12. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/api.h +39 -0
  13. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset.h +481 -0
  14. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset_writer.h +103 -0
  15. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h +275 -0
  16. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_base.h +495 -0
  17. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_csv.h +144 -0
  18. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_ipc.h +123 -0
  19. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_json.h +98 -0
  20. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_orc.h +75 -0
  21. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_parquet.h +404 -0
  22. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/parquet_encryption_config.h +75 -0
  23. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/partition.h +432 -0
  24. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/pch.h +27 -0
  25. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/plan.h +33 -0
  26. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/projector.h +32 -0
  27. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/scanner.h +578 -0
  28. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/type_fwd.h +113 -0
  29. venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/visibility.h +50 -0
  30. venv/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h +311 -0
  31. venv/lib/python3.10/site-packages/pyarrow/include/arrow/device.h +394 -0
  32. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/api.h +22 -0
  33. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/pch.h +23 -0
  34. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/api.h +26 -0
  35. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_set.h +481 -0
  36. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_types.h +90 -0
  37. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/options.h +135 -0
  38. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/relation.h +71 -0
  39. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/serde.h +331 -0
  40. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_plan_builder.h +76 -0
  41. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_util.h +45 -0
  42. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/type_fwd.h +32 -0
  43. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/util.h +83 -0
  44. venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/visibility.h +52 -0
  45. venv/lib/python3.10/site-packages/pyarrow/include/arrow/extension_type.h +165 -0
  46. venv/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h +34 -0
  47. venv/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h +358 -0
  48. venv/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h +697 -0
  49. venv/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem_library.h +39 -0
  50. venv/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h +246 -0
ckpts/universal/global_step80/zero/15.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c23166eb81bb3a411f2b97b4ad7128d6f0f05f30922cacb913d26a0e6ca646d7
+size 9372
ckpts/universal/global_step80/zero/15.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:494b25875afc7e2bb53e51cc39629f4ff1542e6743ede976d721305a5d9fed2a
+size 9387
ckpts/universal/global_step80/zero/15.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd7e987d75c52fbf2da70a8316c8e09c6d401c43d75a5ba2d4d455ef4221430c
+size 9293
ckpts/universal/global_step80/zero/4.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5698387d11700a3ecc5e073c81e6f3852865dc6929adf5b6b9a11d9477aaba7f
+size 16778396
ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4a7549c5f55c16a91fa4b5bb73efa0ef09c6f3dd0f704f14afc6adacf0ff265
+size 50332749
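The five .pt checkpoint files above are committed as Git LFS pointers rather than raw tensor payloads: each pointer is three "key value" text lines carrying the spec version, the sha256 object id, and the payload size in bytes. A minimal C++ sketch of reading those fields (illustrative only, not part of this commit; the program and its name are hypothetical):

    // lfs_pointer.cc -- hypothetical example, not part of this commit.
    // Parses the three "key value" lines of a Git LFS pointer file.
    #include <fstream>
    #include <iostream>
    #include <map>
    #include <string>

    int main(int argc, char** argv) {
      if (argc < 2) {
        std::cerr << "usage: lfs_pointer <pointer-file>\n";
        return 1;
      }
      std::ifstream in(argv[1]);
      std::map<std::string, std::string> fields;
      std::string line;
      while (std::getline(in, line)) {
        // Each line looks like "size 9372" or "oid sha256:<hex>".
        std::string::size_type space = line.find(' ');
        if (space != std::string::npos) {
          fields[line.substr(0, space)] = line.substr(space + 1);
        }
      }
      std::cout << "oid:  " << fields["oid"] << "\n"
                << "size: " << fields["size"] << " bytes\n";
      return 0;
    }

Run against the exp_avg.pt pointer above, this would print its sha256 oid and a size of 9372 bytes.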
venv/lib/python3.10/site-packages/pyarrow/include/arrow/array.h ADDED
@@ -0,0 +1,49 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Kitchen-sink public API for arrow::Array data structures. C++ library code
+// (especially header files) in Apache Arrow should use more specific headers
+// unless it's a file that uses most or all Array types in which case using
+// arrow/array.h is fine.
+
+#pragma once
+
+/// \defgroup numeric-arrays Concrete classes for numeric arrays
+/// @{
+/// @}
+
+/// \defgroup binary-arrays Concrete classes for binary/string arrays
+/// @{
+/// @}
+
+/// \defgroup nested-arrays Concrete classes for nested arrays
+/// @{
+/// @}
+
+/// \defgroup run-end-encoded-arrays Concrete classes for run-end encoded arrays
+/// @{
+/// @}
+
+#include "arrow/array/array_base.h"       // IWYU pragma: keep
+#include "arrow/array/array_binary.h"     // IWYU pragma: keep
+#include "arrow/array/array_decimal.h"    // IWYU pragma: keep
+#include "arrow/array/array_dict.h"       // IWYU pragma: keep
+#include "arrow/array/array_nested.h"     // IWYU pragma: keep
+#include "arrow/array/array_primitive.h"  // IWYU pragma: keep
+#include "arrow/array/array_run_end.h"    // IWYU pragma: keep
+#include "arrow/array/data.h"             // IWYU pragma: keep
+#include "arrow/array/util.h"             // IWYU pragma: keep
venv/lib/python3.10/site-packages/pyarrow/include/arrow/buffer.h ADDED
@@ -0,0 +1,587 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <optional>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+#include "arrow/device.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/span.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+// ----------------------------------------------------------------------
+// Buffer classes
+
+/// \class Buffer
+/// \brief Object containing a pointer to a piece of contiguous memory with a
+/// particular size.
+///
+/// Buffers have two related notions of length: size and capacity. Size is
+/// the number of bytes that might have valid data. Capacity is the number
+/// of bytes that were allocated for the buffer in total.
+///
+/// The Buffer base class does not own its memory, but subclasses often do.
+///
+/// The following invariant is always true: Size <= Capacity
+class ARROW_EXPORT Buffer {
+ public:
+  ARROW_DISALLOW_COPY_AND_ASSIGN(Buffer);
+
+  /// \brief Construct from buffer and size without copying memory
+  ///
+  /// \param[in] data a memory buffer
+  /// \param[in] size buffer size
+  ///
+  /// \note The passed memory must be kept alive through some other means
+  Buffer(const uint8_t* data, int64_t size)
+      : is_mutable_(false),
+        is_cpu_(true),
+        data_(data),
+        size_(size),
+        capacity_(size),
+        device_type_(DeviceAllocationType::kCPU) {
+    SetMemoryManager(default_cpu_memory_manager());
+  }
+
+  Buffer(const uint8_t* data, int64_t size, std::shared_ptr<MemoryManager> mm,
+         std::shared_ptr<Buffer> parent = NULLPTR,
+         std::optional<DeviceAllocationType> device_type_override = std::nullopt)
+      : is_mutable_(false),
+        data_(data),
+        size_(size),
+        capacity_(size),
+        parent_(std::move(parent)) {
+    // SetMemoryManager will also set device_type_
+    SetMemoryManager(std::move(mm));
+    // If a device type is specified, use that instead. Example of when this can be
+    // useful: the CudaMemoryManager can set device_type_ to kCUDA, but you can specify
+    // device_type_override=kCUDA_HOST as the device type to override it.
+    if (device_type_override != std::nullopt) {
+      device_type_ = *device_type_override;
+    }
+  }
+
+  Buffer(uintptr_t address, int64_t size, std::shared_ptr<MemoryManager> mm,
+         std::shared_ptr<Buffer> parent = NULLPTR)
+      : Buffer(reinterpret_cast<const uint8_t*>(address), size, std::move(mm),
+               std::move(parent)) {}
+
+  /// \brief Construct from string_view without copying memory
+  ///
+  /// \param[in] data a string_view object
+  ///
+  /// \note The memory viewed by data must not be deallocated in the lifetime of the
+  /// Buffer; temporary rvalue strings must be stored in an lvalue somewhere
+  explicit Buffer(std::string_view data)
+      : Buffer(reinterpret_cast<const uint8_t*>(data.data()),
+               static_cast<int64_t>(data.size())) {}
+
+  virtual ~Buffer() = default;
+
+  /// An offset into data that is owned by another buffer, but we want to be
+  /// able to retain a valid pointer to it even after other shared_ptr's to the
+  /// parent buffer have been destroyed
+  ///
+  /// This method makes no assertions about alignment or padding of the buffer but
+  /// in general we expected buffers to be aligned and padded to 64 bytes. In the future
+  /// we might add utility methods to help determine if a buffer satisfies this contract.
+  Buffer(const std::shared_ptr<Buffer>& parent, const int64_t offset, const int64_t size)
+      : Buffer(parent->data_ + offset, size) {
+    parent_ = parent;
+    SetMemoryManager(parent->memory_manager_);
+  }
+
+  uint8_t operator[](std::size_t i) const { return data_[i]; }
+
+  /// \brief Construct a new std::string with a hexadecimal representation of the buffer.
+  /// \return std::string
+  std::string ToHexString();
+
+  /// Return true if both buffers are the same size and contain the same bytes
+  /// up to the number of compared bytes
+  bool Equals(const Buffer& other, int64_t nbytes) const;
+
+  /// Return true if both buffers are the same size and contain the same bytes
+  bool Equals(const Buffer& other) const;
+
+  /// Copy a section of the buffer into a new Buffer.
+  Result<std::shared_ptr<Buffer>> CopySlice(
+      const int64_t start, const int64_t nbytes,
+      MemoryPool* pool = default_memory_pool()) const;
+
+  /// Zero bytes in padding, i.e. bytes between size_ and capacity_.
+  void ZeroPadding() {
+#ifndef NDEBUG
+    CheckMutable();
+#endif
+    // A zero-capacity buffer can have a null data pointer
+    if (capacity_ != 0) {
+      memset(mutable_data() + size_, 0, static_cast<size_t>(capacity_ - size_));
+    }
+  }
+
+  /// \brief Construct an immutable buffer that takes ownership of the contents
+  /// of an std::string (without copying it).
+  ///
+  /// \param[in] data a string to own
+  /// \return a new Buffer instance
+  static std::shared_ptr<Buffer> FromString(std::string data);
+
+  /// \brief Construct an immutable buffer that takes ownership of the contents
+  /// of an std::vector (without copying it). Only vectors of TrivialType objects
+  /// (integers, floating point numbers, ...) can be wrapped by this function.
+  ///
+  /// \param[in] vec a vector to own
+  /// \return a new Buffer instance
+  template <typename T>
+  static std::shared_ptr<Buffer> FromVector(std::vector<T> vec) {
+    static_assert(std::is_trivial_v<T>,
+                  "Buffer::FromVector can only wrap vectors of trivial objects");
+
+    if (vec.empty()) {
+      return std::shared_ptr<Buffer>{new Buffer()};
+    }
+
+    auto* data = reinterpret_cast<uint8_t*>(vec.data());
+    auto size_in_bytes = static_cast<int64_t>(vec.size() * sizeof(T));
+    return std::shared_ptr<Buffer>{
+        new Buffer{data, size_in_bytes},
+        // Keep the vector's buffer alive inside the shared_ptr's destructor until after
+        // we have deleted the Buffer. Note we can't use this trick in FromString since
+        // std::string's data is inline for short strings so moving invalidates pointers
+        // into the string's buffer.
+        [vec = std::move(vec)](Buffer* buffer) { delete buffer; }};
+  }
+
+  /// \brief Create buffer referencing typed memory with some length without
+  /// copying
+  /// \param[in] data the typed memory as C array
+  /// \param[in] length the number of values in the array
+  /// \return a new shared_ptr<Buffer>
+  template <typename T, typename SizeType = int64_t>
+  static std::shared_ptr<Buffer> Wrap(const T* data, SizeType length) {
+    return std::make_shared<Buffer>(reinterpret_cast<const uint8_t*>(data),
+                                    static_cast<int64_t>(sizeof(T) * length));
+  }
+
+  /// \brief Create buffer referencing std::vector with some length without
+  /// copying
+  /// \param[in] data the vector to be referenced. If this vector is changed,
+  /// the buffer may become invalid
+  /// \return a new shared_ptr<Buffer>
+  template <typename T>
+  static std::shared_ptr<Buffer> Wrap(const std::vector<T>& data) {
+    return std::make_shared<Buffer>(reinterpret_cast<const uint8_t*>(data.data()),
+                                    static_cast<int64_t>(sizeof(T) * data.size()));
+  }
+
+  /// \brief Copy buffer contents into a new std::string
+  /// \return std::string
+  /// \note Can throw std::bad_alloc if buffer is large
+  std::string ToString() const;
+
+  /// \brief View buffer contents as a std::string_view
+  /// \return std::string_view
+  explicit operator std::string_view() const {
+    return {reinterpret_cast<const char*>(data_), static_cast<size_t>(size_)};
+  }
+
+  /// \brief Return a pointer to the buffer's data
+  ///
+  /// The buffer has to be a CPU buffer (`is_cpu()` is true).
+  /// Otherwise, an assertion may be thrown or a null pointer may be returned.
+  ///
+  /// To get the buffer's data address regardless of its device, call `address()`.
+  const uint8_t* data() const {
+#ifndef NDEBUG
+    CheckCPU();
+#endif
+    return ARROW_PREDICT_TRUE(is_cpu_) ? data_ : NULLPTR;
+  }
+
+  /// \brief Return a pointer to the buffer's data cast to a specific type
+  ///
+  /// The buffer has to be a CPU buffer (`is_cpu()` is true).
+  /// Otherwise, an assertion may be thrown or a null pointer may be returned.
+  template <typename T>
+  const T* data_as() const {
+    return reinterpret_cast<const T*>(data());
+  }
+
+  /// \brief Return the buffer's data as a span
+  template <typename T>
+  util::span<const T> span_as() const {
+    return util::span(data_as<T>(), static_cast<size_t>(size() / sizeof(T)));
+  }
+
+  /// \brief Return a writable pointer to the buffer's data
+  ///
+  /// The buffer has to be a mutable CPU buffer (`is_cpu()` and `is_mutable()`
+  /// are true). Otherwise, an assertion may be thrown or a null pointer may
+  /// be returned.
+  ///
+  /// To get the buffer's mutable data address regardless of its device, call
+  /// `mutable_address()`.
+  uint8_t* mutable_data() {
+#ifndef NDEBUG
+    CheckCPU();
+    CheckMutable();
+#endif
+    return ARROW_PREDICT_TRUE(is_cpu_ && is_mutable_) ? const_cast<uint8_t*>(data_)
+                                                      : NULLPTR;
+  }
+
+  /// \brief Return a writable pointer to the buffer's data cast to a specific type
+  ///
+  /// The buffer has to be a mutable CPU buffer (`is_cpu()` and `is_mutable()`
+  /// are true). Otherwise, an assertion may be thrown or a null pointer may
+  /// be returned.
+  template <typename T>
+  T* mutable_data_as() {
+    return reinterpret_cast<T*>(mutable_data());
+  }
+
+  /// \brief Return the buffer's mutable data as a span
+  template <typename T>
+  util::span<T> mutable_span_as() {
+    return util::span(mutable_data_as<T>(), static_cast<size_t>(size() / sizeof(T)));
+  }
+
+  /// \brief Return the device address of the buffer's data
+  uintptr_t address() const { return reinterpret_cast<uintptr_t>(data_); }
+
+  /// \brief Return a writable device address to the buffer's data
+  ///
+  /// The buffer has to be a mutable buffer (`is_mutable()` is true).
+  /// Otherwise, an assertion may be thrown or 0 may be returned.
+  uintptr_t mutable_address() const {
+#ifndef NDEBUG
+    CheckMutable();
+#endif
+    return ARROW_PREDICT_TRUE(is_mutable_) ? reinterpret_cast<uintptr_t>(data_) : 0;
+  }
+
+  /// \brief Return the buffer's size in bytes
+  int64_t size() const { return size_; }
+
+  /// \brief Return the buffer's capacity (number of allocated bytes)
+  int64_t capacity() const { return capacity_; }
+
+  /// \brief Whether the buffer is directly CPU-accessible
+  ///
+  /// If this function returns true, you can read directly from the buffer's
+  /// `data()` pointer. Otherwise, you'll have to `View()` or `Copy()` it.
+  bool is_cpu() const { return is_cpu_; }
+
+  /// \brief Whether the buffer is mutable
+  ///
+  /// If this function returns true, you are allowed to modify buffer contents
+  /// using the pointer returned by `mutable_data()` or `mutable_address()`.
+  bool is_mutable() const { return is_mutable_; }
+
+  const std::shared_ptr<Device>& device() const { return memory_manager_->device(); }
+
+  const std::shared_ptr<MemoryManager>& memory_manager() const { return memory_manager_; }
+
+  DeviceAllocationType device_type() const { return device_type_; }
+
+  std::shared_ptr<Buffer> parent() const { return parent_; }
+
+  /// \brief Get a RandomAccessFile for reading a buffer
+  ///
+  /// The returned file object reads from this buffer's underlying memory.
+  static Result<std::shared_ptr<io::RandomAccessFile>> GetReader(std::shared_ptr<Buffer>);
+
+  /// \brief Get a OutputStream for writing to a buffer
+  ///
+  /// The buffer must be mutable. The returned stream object writes into the buffer's
+  /// underlying memory (but it won't resize it).
+  static Result<std::shared_ptr<io::OutputStream>> GetWriter(std::shared_ptr<Buffer>);
+
+  /// \brief Copy buffer
+  ///
+  /// The buffer contents will be copied into a new buffer allocated by the
+  /// given MemoryManager. This function supports cross-device copies.
+  static Result<std::shared_ptr<Buffer>> Copy(std::shared_ptr<Buffer> source,
+                                              const std::shared_ptr<MemoryManager>& to);
+
+  /// \brief Copy a non-owned buffer
+  ///
+  /// This is useful for cases where the source memory area is externally managed
+  /// (its lifetime not tied to the source Buffer), otherwise please use Copy().
+  static Result<std::unique_ptr<Buffer>> CopyNonOwned(
+      const Buffer& source, const std::shared_ptr<MemoryManager>& to);
+
+  /// \brief View buffer
+  ///
+  /// Return a Buffer that reflects this buffer, seen potentially from another
+  /// device, without making an explicit copy of the contents. The underlying
+  /// mechanism is typically implemented by the kernel or device driver, and may
+  /// involve lazy caching of parts of the buffer contents on the destination
+  /// device's memory.
+  ///
+  /// If a non-copy view is unsupported for the buffer on the given device,
+  /// nullptr is returned. An error can be returned if some low-level
+  /// operation fails (such as an out-of-memory condition).
+  static Result<std::shared_ptr<Buffer>> View(std::shared_ptr<Buffer> source,
+                                              const std::shared_ptr<MemoryManager>& to);
+
+  /// \brief View or copy buffer
+  ///
+  /// Try to view buffer contents on the given MemoryManager's device, but
+  /// fall back to copying if a no-copy view isn't supported.
+  static Result<std::shared_ptr<Buffer>> ViewOrCopy(
+      std::shared_ptr<Buffer> source, const std::shared_ptr<MemoryManager>& to);
+
+  virtual std::shared_ptr<Device::SyncEvent> device_sync_event() const { return NULLPTR; }
+
+ protected:
+  bool is_mutable_;
+  bool is_cpu_;
+  const uint8_t* data_;
+  int64_t size_;
+  int64_t capacity_;
+  DeviceAllocationType device_type_;
+
+  // null by default, but may be set
+  std::shared_ptr<Buffer> parent_;
+
+ private:
+  // private so that subclasses are forced to call SetMemoryManager()
+  std::shared_ptr<MemoryManager> memory_manager_;
+
+ protected:
+  Buffer();
+
+  void CheckMutable() const;
+  void CheckCPU() const;
+
+  void SetMemoryManager(std::shared_ptr<MemoryManager> mm) {
+    memory_manager_ = std::move(mm);
+    is_cpu_ = memory_manager_->is_cpu();
+    device_type_ = memory_manager_->device()->device_type();
+  }
+};
+
+/// \defgroup buffer-slicing-functions Functions for slicing buffers
+///
+/// @{
+
+/// \brief Construct a view on a buffer at the given offset and length.
+///
+/// This function cannot fail and does not check for errors (except in debug builds)
+static inline std::shared_ptr<Buffer> SliceBuffer(const std::shared_ptr<Buffer>& buffer,
+                                                  const int64_t offset,
+                                                  const int64_t length) {
+  return std::make_shared<Buffer>(buffer, offset, length);
+}
+
+/// \brief Construct a view on a buffer at the given offset, up to the buffer's end.
+///
+/// This function cannot fail and does not check for errors (except in debug builds)
+static inline std::shared_ptr<Buffer> SliceBuffer(const std::shared_ptr<Buffer>& buffer,
+                                                  const int64_t offset) {
+  int64_t length = buffer->size() - offset;
+  return SliceBuffer(buffer, offset, length);
+}
+
+/// \brief Input-checking version of SliceBuffer
+///
+/// An Invalid Status is returned if the requested slice falls out of bounds.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> SliceBufferSafe(const std::shared_ptr<Buffer>& buffer,
+                                                int64_t offset);
+/// \brief Input-checking version of SliceBuffer
+///
+/// An Invalid Status is returned if the requested slice falls out of bounds.
+/// Note that unlike SliceBuffer, `length` isn't clamped to the available buffer size.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> SliceBufferSafe(const std::shared_ptr<Buffer>& buffer,
+                                                int64_t offset, int64_t length);
+
+/// \brief Like SliceBuffer, but construct a mutable buffer slice.
+///
+/// If the parent buffer is not mutable, behavior is undefined (it may abort
+/// in debug builds).
+ARROW_EXPORT
+std::shared_ptr<Buffer> SliceMutableBuffer(const std::shared_ptr<Buffer>& buffer,
+                                           const int64_t offset, const int64_t length);
+
+/// \brief Like SliceBuffer, but construct a mutable buffer slice.
+///
+/// If the parent buffer is not mutable, behavior is undefined (it may abort
+/// in debug builds).
+static inline std::shared_ptr<Buffer> SliceMutableBuffer(
+    const std::shared_ptr<Buffer>& buffer, const int64_t offset) {
+  int64_t length = buffer->size() - offset;
+  return SliceMutableBuffer(buffer, offset, length);
+}
+
+/// \brief Input-checking version of SliceMutableBuffer
+///
+/// An Invalid Status is returned if the requested slice falls out of bounds.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> SliceMutableBufferSafe(
+    const std::shared_ptr<Buffer>& buffer, int64_t offset);
+/// \brief Input-checking version of SliceMutableBuffer
+///
+/// An Invalid Status is returned if the requested slice falls out of bounds.
+/// Note that unlike SliceBuffer, `length` isn't clamped to the available buffer size.
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> SliceMutableBufferSafe(
+    const std::shared_ptr<Buffer>& buffer, int64_t offset, int64_t length);
+
+/// @}
+
+/// \class MutableBuffer
+/// \brief A Buffer whose contents can be mutated. May or may not own its data.
+class ARROW_EXPORT MutableBuffer : public Buffer {
+ public:
+  MutableBuffer(uint8_t* data, const int64_t size) : Buffer(data, size) {
+    is_mutable_ = true;
+  }
+
+  MutableBuffer(uint8_t* data, const int64_t size, std::shared_ptr<MemoryManager> mm)
+      : Buffer(data, size, std::move(mm)) {
+    is_mutable_ = true;
+  }
+
+  MutableBuffer(const std::shared_ptr<Buffer>& parent, const int64_t offset,
+                const int64_t size);
+
+  /// \brief Create buffer referencing typed memory with some length
+  /// \param[in] data the typed memory as C array
+  /// \param[in] length the number of values in the array
+  /// \return a new shared_ptr<Buffer>
+  template <typename T, typename SizeType = int64_t>
+  static std::shared_ptr<Buffer> Wrap(T* data, SizeType length) {
+    return std::make_shared<MutableBuffer>(reinterpret_cast<uint8_t*>(data),
+                                           static_cast<int64_t>(sizeof(T) * length));
+  }
+
+ protected:
+  MutableBuffer() : Buffer(NULLPTR, 0) {}
+};
+
+/// \class ResizableBuffer
+/// \brief A mutable buffer that can be resized
+class ARROW_EXPORT ResizableBuffer : public MutableBuffer {
+ public:
+  /// Change buffer reported size to indicated size, allocating memory if
+  /// necessary. This will ensure that the capacity of the buffer is a multiple
+  /// of 64 bytes as defined in Layout.md.
+  /// Consider using ZeroPadding afterwards, to conform to the Arrow layout
+  /// specification.
+  ///
+  /// @param new_size The new size for the buffer.
+  /// @param shrink_to_fit Whether to shrink the capacity if new size < current size
+  virtual Status Resize(const int64_t new_size, bool shrink_to_fit) = 0;
+  Status Resize(const int64_t new_size) {
+    return Resize(new_size, /*shrink_to_fit=*/true);
+  }
+
+  /// Ensure that buffer has enough memory allocated to fit the indicated
+  /// capacity (and meets the 64 byte padding requirement in Layout.md).
+  /// It does not change buffer's reported size and doesn't zero the padding.
+  virtual Status Reserve(const int64_t new_capacity) = 0;
+
+  template <class T>
+  Status TypedResize(const int64_t new_nb_elements, bool shrink_to_fit = true) {
+    return Resize(sizeof(T) * new_nb_elements, shrink_to_fit);
+  }
+
+  template <class T>
+  Status TypedReserve(const int64_t new_nb_elements) {
+    return Reserve(sizeof(T) * new_nb_elements);
+  }
+
+ protected:
+  ResizableBuffer(uint8_t* data, int64_t size) : MutableBuffer(data, size) {}
+  ResizableBuffer(uint8_t* data, int64_t size, std::shared_ptr<MemoryManager> mm)
+      : MutableBuffer(data, size, std::move(mm)) {}
+};
+
+/// \defgroup buffer-allocation-functions Functions for allocating buffers
+///
+/// @{
+
+/// \brief Allocate a fixed size mutable buffer from a memory pool, zero its padding.
+///
+/// \param[in] size size of buffer to allocate
+/// \param[in] pool a memory pool
+ARROW_EXPORT
+Result<std::unique_ptr<Buffer>> AllocateBuffer(const int64_t size,
+                                               MemoryPool* pool = NULLPTR);
+ARROW_EXPORT
+Result<std::unique_ptr<Buffer>> AllocateBuffer(const int64_t size, int64_t alignment,
+                                               MemoryPool* pool = NULLPTR);
+
+/// \brief Allocate a resizeable buffer from a memory pool, zero its padding.
+///
+/// \param[in] size size of buffer to allocate
+/// \param[in] pool a memory pool
+ARROW_EXPORT
+Result<std::unique_ptr<ResizableBuffer>> AllocateResizableBuffer(
+    const int64_t size, MemoryPool* pool = NULLPTR);
+ARROW_EXPORT
+Result<std::unique_ptr<ResizableBuffer>> AllocateResizableBuffer(
+    const int64_t size, const int64_t alignment, MemoryPool* pool = NULLPTR);
+
+/// \brief Allocate a bitmap buffer from a memory pool
+/// no guarantee on values is provided.
+///
+/// \param[in] length size in bits of bitmap to allocate
+/// \param[in] pool memory pool to allocate memory from
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> AllocateBitmap(int64_t length,
+                                               MemoryPool* pool = NULLPTR);
+
+/// \brief Allocate a zero-initialized bitmap buffer from a memory pool
+///
+/// \param[in] length size in bits of bitmap to allocate
+/// \param[in] pool memory pool to allocate memory from
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> AllocateEmptyBitmap(int64_t length,
+                                                    MemoryPool* pool = NULLPTR);
+
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> AllocateEmptyBitmap(int64_t length, int64_t alignment,
+                                                    MemoryPool* pool = NULLPTR);
+
+/// \brief Concatenate multiple buffers into a single buffer
+///
+/// \param[in] buffers to be concatenated
+/// \param[in] pool memory pool to allocate the new buffer from
+ARROW_EXPORT
+Result<std::shared_ptr<Buffer>> ConcatenateBuffers(const BufferVector& buffers,
+                                                   MemoryPool* pool = NULLPTR);
+
+/// @}
+
+}  // namespace arrow
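A short sketch exercising the Buffer API declared above — owning, slicing, and allocating (illustrative only, not part of this commit; assumes an installed Arrow C++ build):

    #include <cstring>
    #include <iostream>
    #include <memory>

    #include "arrow/buffer.h"
    #include "arrow/result.h"
    #include "arrow/status.h"

    arrow::Status Demo() {
      // Take ownership of a string's bytes (the string is moved, not copied).
      std::shared_ptr<arrow::Buffer> owned = arrow::Buffer::FromString("hello arrow");

      // Zero-copy view of the last five bytes: "arrow".
      std::shared_ptr<arrow::Buffer> slice = arrow::SliceBuffer(owned, 6, 5);
      std::cout << slice->ToString() << "\n";

      // Allocate a mutable, padded buffer from the default memory pool.
      ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::Buffer> scratch,
                            arrow::AllocateBuffer(16));
      std::memset(scratch->mutable_data(), 0, 16);
      std::cout << scratch->is_mutable() << " " << scratch->is_cpu() << "\n";  // 1 1
      return arrow::Status::OK();
    }

    int main() { return Demo().ok() ? 0 : 1; }

Note the slice stays valid even if `owned` is reset afterwards: the slicing constructor stores the parent in parent_, keeping the underlying memory alive.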
venv/lib/python3.10/site-packages/pyarrow/include/arrow/builder.h ADDED
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/array/builder_adaptive.h"   // IWYU pragma: keep
+#include "arrow/array/builder_base.h"       // IWYU pragma: keep
+#include "arrow/array/builder_binary.h"     // IWYU pragma: keep
+#include "arrow/array/builder_decimal.h"    // IWYU pragma: keep
+#include "arrow/array/builder_dict.h"       // IWYU pragma: keep
+#include "arrow/array/builder_nested.h"     // IWYU pragma: keep
+#include "arrow/array/builder_primitive.h"  // IWYU pragma: keep
+#include "arrow/array/builder_run_end.h"    // IWYU pragma: keep
+#include "arrow/array/builder_time.h"       // IWYU pragma: keep
+#include "arrow/array/builder_union.h"      // IWYU pragma: keep
+#include "arrow/status.h"
+#include "arrow/util/visibility.h"
venv/lib/python3.10/site-packages/pyarrow/include/arrow/chunked_array.h ADDED
@@ -0,0 +1,275 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/chunk_resolver.h"
+#include "arrow/compare.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+class DataType;
+class MemoryPool;
+namespace stl {
+template <typename T, typename V>
+class ChunkedArrayIterator;
+}  // namespace stl
+
+/// \class ChunkedArray
+/// \brief A data structure managing a list of primitive Arrow arrays logically
+/// as one large array
+///
+/// Data chunking is treated throughout this project largely as an
+/// implementation detail for performance and memory use optimization.
+/// ChunkedArray allows Array objects to be collected and interpreted
+/// as a single logical array without requiring an expensive concatenation
+/// step.
+///
+/// In some cases, data produced by a function may exceed the capacity of an
+/// Array (like BinaryArray or StringArray) and so returning multiple Arrays is
+/// the only possibility. In these cases, we recommend returning a ChunkedArray
+/// instead of vector of Arrays or some alternative.
+///
+/// When data is processed in parallel, it may not be practical or possible to
+/// create large contiguous memory allocations and write output into them. With
+/// some data types, like binary and string types, it is not possible at all to
+/// produce non-chunked array outputs without requiring a concatenation step at
+/// the end of processing.
+///
+/// Application developers may tune chunk sizes based on analysis of
+/// performance profiles but many developer-users will not need to be
+/// especially concerned with the chunking details.
+///
+/// Preserving the chunk layout/sizes in processing steps is generally not
+/// considered to be a contract in APIs. A function may decide to alter the
+/// chunking of its result. Similarly, APIs accepting multiple ChunkedArray
+/// inputs should not expect the chunk layout to be the same in each input.
+class ARROW_EXPORT ChunkedArray {
+ public:
+  ChunkedArray(ChunkedArray&&) = default;
+  ChunkedArray& operator=(ChunkedArray&&) = default;
+
+  /// \brief Construct a chunked array from a single Array
+  explicit ChunkedArray(std::shared_ptr<Array> chunk)
+      : ChunkedArray(ArrayVector{std::move(chunk)}) {}
+
+  /// \brief Construct a chunked array from a vector of arrays and an optional data type
+  ///
+  /// The vector elements must have the same data type.
+  /// If the data type is passed explicitly, the vector may be empty.
+  /// If the data type is omitted, the vector must be non-empty.
+  explicit ChunkedArray(ArrayVector chunks, std::shared_ptr<DataType> type = NULLPTR);
+
+  // \brief Constructor with basic input validation.
+  static Result<std::shared_ptr<ChunkedArray>> Make(
+      ArrayVector chunks, std::shared_ptr<DataType> type = NULLPTR);
+
+  /// \brief Create an empty ChunkedArray of a given type
+  ///
+  /// The output ChunkedArray will have one chunk with an empty
+  /// array of the given type.
+  ///
+  /// \param[in] type the data type of the empty ChunkedArray
+  /// \param[in] pool the memory pool to allocate memory from
+  /// \return the resulting ChunkedArray
+  static Result<std::shared_ptr<ChunkedArray>> MakeEmpty(
+      std::shared_ptr<DataType> type, MemoryPool* pool = default_memory_pool());
+
+  /// \return the total length of the chunked array; computed on construction
+  int64_t length() const { return length_; }
+
+  /// \return the total number of nulls among all chunks
+  int64_t null_count() const { return null_count_; }
+
+  /// \return the total number of chunks in the chunked array
+  int num_chunks() const { return static_cast<int>(chunks_.size()); }
+
+  /// \return chunk a particular chunk from the chunked array
+  const std::shared_ptr<Array>& chunk(int i) const { return chunks_[i]; }
+
+  /// \return an ArrayVector of chunks
+  const ArrayVector& chunks() const { return chunks_; }
+
+  /// \brief Construct a zero-copy slice of the chunked array with the
+  /// indicated offset and length
+  ///
+  /// \param[in] offset the position of the first element in the constructed
+  /// slice
+  /// \param[in] length the length of the slice. If there are not enough
+  /// elements in the chunked array, the length will be adjusted accordingly
+  ///
+  /// \return a new object wrapped in std::shared_ptr<ChunkedArray>
+  std::shared_ptr<ChunkedArray> Slice(int64_t offset, int64_t length) const;
+
+  /// \brief Slice from offset until end of the chunked array
+  std::shared_ptr<ChunkedArray> Slice(int64_t offset) const;
+
+  /// \brief Flatten this chunked array as a vector of chunked arrays, one
+  /// for each struct field
+  ///
+  /// \param[in] pool The pool for buffer allocations, if any
+  Result<std::vector<std::shared_ptr<ChunkedArray>>> Flatten(
+      MemoryPool* pool = default_memory_pool()) const;
+
+  /// Construct a zero-copy view of this chunked array with the given
+  /// type. Calls Array::View on each constituent chunk. Always succeeds if
+  /// there are zero chunks
+  Result<std::shared_ptr<ChunkedArray>> View(const std::shared_ptr<DataType>& type) const;
+
+  /// \brief Return the type of the chunked array
+  const std::shared_ptr<DataType>& type() const { return type_; }
+
+  /// \brief Return a Scalar containing the value of this array at index
+  Result<std::shared_ptr<Scalar>> GetScalar(int64_t index) const;
+
+  /// \brief Determine if two chunked arrays are equal.
+  ///
+  /// Two chunked arrays can be equal only if they have equal datatypes.
+  /// However, they may be equal even if they have different chunkings.
+  bool Equals(const ChunkedArray& other,
+              const EqualOptions& opts = EqualOptions::Defaults()) const;
+  /// \brief Determine if two chunked arrays are equal.
+  bool Equals(const std::shared_ptr<ChunkedArray>& other,
+              const EqualOptions& opts = EqualOptions::Defaults()) const;
+  /// \brief Determine if two chunked arrays approximately equal
+  bool ApproxEquals(const ChunkedArray& other,
+                    const EqualOptions& = EqualOptions::Defaults()) const;
+
+  /// \return PrettyPrint representation suitable for debugging
+  std::string ToString() const;
+
+  /// \brief Perform cheap validation checks to determine obvious inconsistencies
+  /// within the chunk array's internal data.
+  ///
+  /// This is O(k*m) where k is the number of array descendents,
+  /// and m is the number of chunks.
+  ///
+  /// \return Status
+  Status Validate() const;
+
+  /// \brief Perform extensive validation checks to determine inconsistencies
+  /// within the chunk array's internal data.
+  ///
+  /// This is O(k*n) where k is the number of array descendents,
+  /// and n is the length in elements.
+  ///
+  /// \return Status
+  Status ValidateFull() const;
+
+ protected:
+  ArrayVector chunks_;
+  std::shared_ptr<DataType> type_;
+  int64_t length_;
+  int64_t null_count_;
+
+ private:
+  template <typename T, typename V>
+  friend class ::arrow::stl::ChunkedArrayIterator;
+  internal::ChunkResolver chunk_resolver_;
+  ARROW_DISALLOW_COPY_AND_ASSIGN(ChunkedArray);
+};
+
+namespace internal {
+
+/// \brief EXPERIMENTAL: Utility for incremental iteration over contiguous
+/// pieces of potentially differently-chunked ChunkedArray objects
+class ARROW_EXPORT MultipleChunkIterator {
+ public:
+  MultipleChunkIterator(const ChunkedArray& left, const ChunkedArray& right)
+      : left_(left),
+        right_(right),
+        pos_(0),
+        length_(left.length()),
+        chunk_idx_left_(0),
+        chunk_idx_right_(0),
+        chunk_pos_left_(0),
+        chunk_pos_right_(0) {}
+
+  bool Next(std::shared_ptr<Array>* next_left, std::shared_ptr<Array>* next_right);
+
+  int64_t position() const { return pos_; }
+
+ private:
+  const ChunkedArray& left_;
+  const ChunkedArray& right_;
+
+  // The amount of the entire ChunkedArray consumed
+  int64_t pos_;
+
+  // Length of the chunked array(s)
+  int64_t length_;
+
+  // Current left chunk
+  int chunk_idx_left_;
+
+  // Current right chunk
+  int chunk_idx_right_;
+
+  // Offset into the current left chunk
+  int64_t chunk_pos_left_;
+
+  // Offset into the current right chunk
+  int64_t chunk_pos_right_;
+};
+
+/// \brief Evaluate binary function on two ChunkedArray objects having possibly
+/// different chunk layouts. The passed binary function / functor should have
+/// the following signature.
+///
+///    Status(const Array&, const Array&, int64_t)
+///
+/// The third argument is the absolute position relative to the start of each
+/// ChunkedArray. The function is executed against each contiguous pair of
+/// array segments, slicing if necessary.
+///
+/// For example, if two arrays have chunk sizes
+///
+///   left: [10, 10, 20]
+///   right: [15, 10, 15]
+///
+/// Then the following invocations take place (pseudocode)
+///
+///   func(left.chunk[0][0:10], right.chunk[0][0:10], 0)
+///   func(left.chunk[1][0:5], right.chunk[0][10:15], 10)
+///   func(left.chunk[1][5:10], right.chunk[1][0:5], 15)
+///   func(left.chunk[2][0:5], right.chunk[1][5:10], 20)
+///   func(left.chunk[2][5:20], right.chunk[2][:], 25)
+template <typename Action>
+Status ApplyBinaryChunked(const ChunkedArray& left, const ChunkedArray& right,
+                          Action&& action) {
+  MultipleChunkIterator iterator(left, right);
+  std::shared_ptr<Array> left_piece, right_piece;
+  while (iterator.Next(&left_piece, &right_piece)) {
+    ARROW_RETURN_NOT_OK(action(*left_piece, *right_piece, iterator.position()));
+  }
+  return Status::OK();
+}
+
+}  // namespace internal
+}  // namespace arrow
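A sketch of the ChunkedArray API above — two physical chunks viewed as one logical array, with a zero-copy slice across the chunk boundary (illustrative only, not part of this commit; the MakeInt64s helper is hypothetical):

    #include <initializer_list>
    #include <iostream>
    #include <memory>

    #include "arrow/array.h"
    #include "arrow/builder.h"
    #include "arrow/chunked_array.h"

    // Hypothetical helper: build an Int64 array from a list of values.
    std::shared_ptr<arrow::Array> MakeInt64s(std::initializer_list<int64_t> vals) {
      arrow::Int64Builder b;
      for (int64_t v : vals) {
        if (!b.Append(v).ok()) return nullptr;
      }
      std::shared_ptr<arrow::Array> out;
      return b.Finish(&out).ok() ? out : nullptr;
    }

    int main() {
      // Two differently sized chunks seen as one logical array of length 5.
      auto maybe = arrow::ChunkedArray::Make(
          {MakeInt64s({1, 2, 3}), MakeInt64s({4, 5})});
      if (!maybe.ok()) return 1;
      std::shared_ptr<arrow::ChunkedArray> arr = maybe.ValueOrDie();
      std::cout << arr->length() << " values in " << arr->num_chunks()
                << " chunks\n";  // 5 values in 2 chunks

      // Zero-copy slice crossing the chunk boundary: [3, 4].
      std::shared_ptr<arrow::ChunkedArray> mid = arr->Slice(2, 2);
      std::cout << mid->ToString() << "\n";
      return 0;
    }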
venv/lib/python3.10/site-packages/pyarrow/include/arrow/compare.h ADDED
@@ -0,0 +1,145 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Functions for comparing Arrow data structures
+
+#pragma once
+
+#include <cstdint>
+#include <iosfwd>
+
+#include "arrow/util/macros.h"
+#include "arrow/util/visibility.h"
+
+namespace arrow {
+
+class Array;
+class DataType;
+class Tensor;
+class SparseTensor;
+struct Scalar;
+
+static constexpr double kDefaultAbsoluteTolerance = 1E-5;
+
+/// A container of options for equality comparisons
+class EqualOptions {
+ public:
+  /// Whether or not NaNs are considered equal.
+  bool nans_equal() const { return nans_equal_; }
+
+  /// Return a new EqualOptions object with the "nans_equal" property changed.
+  EqualOptions nans_equal(bool v) const {
+    auto res = EqualOptions(*this);
+    res.nans_equal_ = v;
+    return res;
+  }
+
+  /// Whether or not zeros with differing signs are considered equal.
+  bool signed_zeros_equal() const { return signed_zeros_equal_; }
+
+  /// Return a new EqualOptions object with the "signed_zeros_equal" property changed.
+  EqualOptions signed_zeros_equal(bool v) const {
+    auto res = EqualOptions(*this);
+    res.signed_zeros_equal_ = v;
+    return res;
+  }
+
+  /// The absolute tolerance for approximate comparisons of floating-point values.
+  double atol() const { return atol_; }
+
+  /// Return a new EqualOptions object with the "atol" property changed.
+  EqualOptions atol(double v) const {
+    auto res = EqualOptions(*this);
+    res.atol_ = v;
+    return res;
+  }
+
+  /// The ostream to which a diff will be formatted if arrays disagree.
+  /// If this is null (the default) no diff will be formatted.
+  std::ostream* diff_sink() const { return diff_sink_; }
+
+  /// Return a new EqualOptions object with the "diff_sink" property changed.
+  /// This option will be ignored if diff formatting of the types of compared arrays is
+  /// not supported.
+  EqualOptions diff_sink(std::ostream* diff_sink) const {
+    auto res = EqualOptions(*this);
+    res.diff_sink_ = diff_sink;
+    return res;
+  }
+
+  static EqualOptions Defaults() { return {}; }
+
+ protected:
+  double atol_ = kDefaultAbsoluteTolerance;
+  bool nans_equal_ = false;
+  bool signed_zeros_equal_ = true;
+
+  std::ostream* diff_sink_ = NULLPTR;
+};
+
+/// Returns true if the arrays are exactly equal
+ARROW_EXPORT bool ArrayEquals(const Array& left, const Array& right,
+                              const EqualOptions& = EqualOptions::Defaults());
+
+/// Returns true if the arrays are approximately equal. For non-floating point
+/// types, this is equivalent to ArrayEquals(left, right)
+ARROW_EXPORT bool ArrayApproxEquals(const Array& left, const Array& right,
+                                    const EqualOptions& = EqualOptions::Defaults());
+
+/// Returns true if indicated equal-length segment of arrays are exactly equal
+ARROW_EXPORT bool ArrayRangeEquals(const Array& left, const Array& right,
+                                   int64_t start_idx, int64_t end_idx,
+                                   int64_t other_start_idx,
+                                   const EqualOptions& = EqualOptions::Defaults());
+
+/// Returns true if indicated equal-length segment of arrays are approximately equal
+ARROW_EXPORT bool ArrayRangeApproxEquals(const Array& left, const Array& right,
+                                         int64_t start_idx, int64_t end_idx,
+                                         int64_t other_start_idx,
+                                         const EqualOptions& = EqualOptions::Defaults());
+
+ARROW_EXPORT bool TensorEquals(const Tensor& left, const Tensor& right,
+                               const EqualOptions& = EqualOptions::Defaults());
+
+/// EXPERIMENTAL: Returns true if the given sparse tensors are exactly equal
+ARROW_EXPORT bool SparseTensorEquals(const SparseTensor& left, const SparseTensor& right,
+                                     const EqualOptions& = EqualOptions::Defaults());
+
+/// Returns true if the type metadata are exactly equal
+/// \param[in] left a DataType
+/// \param[in] right a DataType
+/// \param[in] check_metadata whether to compare KeyValueMetadata for child
+/// fields
+ARROW_EXPORT bool TypeEquals(const DataType& left, const DataType& right,
+                             bool check_metadata = true);
+
+/// Returns true if scalars are equal
+/// \param[in] left a Scalar
+/// \param[in] right a Scalar
+/// \param[in] options comparison options
+ARROW_EXPORT bool ScalarEquals(const Scalar& left, const Scalar& right,
+                               const EqualOptions& options = EqualOptions::Defaults());
+
+/// Returns true if scalars are approximately equal
+/// \param[in] left a Scalar
+/// \param[in] right a Scalar
+/// \param[in] options comparison options
+ARROW_EXPORT bool ScalarApproxEquals(
+    const Scalar& left, const Scalar& right,
+    const EqualOptions& options = EqualOptions::Defaults());
+
+}  // namespace arrow
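A sketch of the comparison helpers above, showing how EqualOptions' fluent setters combine: exact equality fails on NaNs and a 1e-7 perturbation, while approximate equality with nans_equal(true) passes under the default 1e-5 tolerance (illustrative only, not part of this commit):

    #include <cmath>
    #include <iostream>
    #include <memory>

    #include "arrow/array.h"
    #include "arrow/builder.h"
    #include "arrow/compare.h"

    int main() {
      arrow::DoubleBuilder b1, b2;
      (void)b1.AppendValues({1.0, std::nan("")});
      (void)b2.AppendValues({1.0 + 1e-7, std::nan("")});
      std::shared_ptr<arrow::Array> a1, a2;
      (void)b1.Finish(&a1);
      (void)b2.Finish(&a2);

      // Exact equality: false (values differ, and NaN != NaN by default).
      std::cout << arrow::ArrayEquals(*a1, *a2) << "\n";  // 0

      // Approximate equality with NaNs treated as equal: true.
      auto opts = arrow::EqualOptions::Defaults().nans_equal(true).atol(1e-5);
      std::cout << arrow::ArrayApproxEquals(*a1, *a2, opts) << "\n";  // 1
      return 0;
    }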
venv/lib/python3.10/site-packages/pyarrow/include/arrow/config.h ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <optional>
+ #include <string>
+
+ #include "arrow/status.h"
+ #include "arrow/util/config.h" // IWYU pragma: export
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+
+ struct BuildInfo {
+ /// The packed version number, e.g. 1002003 (decimal) for Arrow 1.2.3
+ int version;
+ /// The "major" version number, e.g. 1 for Arrow 1.2.3
+ int version_major;
+ /// The "minor" version number, e.g. 2 for Arrow 1.2.3
+ int version_minor;
+ /// The "patch" version number, e.g. 3 for Arrow 1.2.3
+ int version_patch;
+ /// The version string, e.g. "1.2.3"
+ std::string version_string;
+ std::string so_version;
+ std::string full_so_version;
+
+ /// The CMake compiler identifier, e.g. "GNU"
+ std::string compiler_id;
+ std::string compiler_version;
+ std::string compiler_flags;
+
+ /// The git changeset id, if available
+ std::string git_id;
+ /// The git changeset description, if available
+ std::string git_description;
+ std::string package_kind;
+
+ /// The uppercase build type, e.g. "DEBUG" or "RELEASE"
+ std::string build_type;
+ };
+
+ struct RuntimeInfo {
+ /// The enabled SIMD level
+ ///
+ /// This can be less than `detected_simd_level` if the ARROW_USER_SIMD_LEVEL
+ /// environment variable is set to another value.
+ std::string simd_level;
+
+ /// The SIMD level available on the OS and CPU
+ std::string detected_simd_level;
+
+ /// Whether using the OS-based timezone database
+ /// This is set at compile-time.
+ bool using_os_timezone_db;
+
+ /// The path to the timezone database; by default None.
+ std::optional<std::string> timezone_db_path;
+ };
+
+ /// \brief Get runtime build info.
+ ///
+ /// The returned values correspond to the exact loaded version of the Arrow library,
+ /// rather than the values frozen at application compile-time through the `ARROW_*`
+ /// preprocessor definitions.
+ ARROW_EXPORT
+ const BuildInfo& GetBuildInfo();
+
+ /// \brief Get runtime info.
+ ///
+ ARROW_EXPORT
+ RuntimeInfo GetRuntimeInfo();
+
+ struct GlobalOptions {
+ /// Path to text timezone database. This is only configurable on Windows,
+ /// which does not have a compatible OS timezone database.
+ std::optional<std::string> timezone_db_path;
+ };
+
+ ARROW_EXPORT
+ Status Initialize(const GlobalOptions& options) noexcept;
+
+ } // namespace arrow
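
An illustrative sketch of querying the loaded library through the API declared above (not part of the diff; assumes an installed Arrow C++ build):

#include <iostream>
#include <arrow/config.h>

int main() {
  // Values reflect the shared library actually loaded at runtime,
  // not the headers the application was compiled against.
  const arrow::BuildInfo& build = arrow::GetBuildInfo();
  std::cout << "Arrow " << build.version_string << " built with "
            << build.compiler_id << " " << build.compiler_version << "\n";

  arrow::RuntimeInfo runtime = arrow::GetRuntimeInfo();
  std::cout << "SIMD level: " << runtime.simd_level
            << " (detected: " << runtime.detected_simd_level << ")\n";
  return 0;
}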
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/api.h ADDED
@@ -0,0 +1,39 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include "arrow/compute/expression.h"
+ #include "arrow/dataset/dataset.h"
+ #include "arrow/dataset/discovery.h"
+ #include "arrow/dataset/file_base.h"
+ #ifdef ARROW_CSV
+ #include "arrow/dataset/file_csv.h"
+ #endif
+ #ifdef ARROW_JSON
+ #include "arrow/dataset/file_json.h"
+ #endif
+ #include "arrow/dataset/file_ipc.h"
+ #ifdef ARROW_ORC
+ #include "arrow/dataset/file_orc.h"
+ #endif
+ #ifdef ARROW_PARQUET
+ #include "arrow/dataset/file_parquet.h"
+ #endif
+ #include "arrow/dataset/scanner.h"
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset.h ADDED
@@ -0,0 +1,481 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <functional>
+ #include <memory>
+ #include <optional>
+ #include <string>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/compute/expression.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/util/async_generator_fwd.h"
+ #include "arrow/util/future.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/mutex.h"
+
+ namespace arrow {
+
+ namespace internal {
+ class Executor;
+ } // namespace internal
+
+ namespace dataset {
+
+ using RecordBatchGenerator = std::function<Future<std::shared_ptr<RecordBatch>>()>;
+
+ /// \brief Description of a column to scan
+ struct ARROW_DS_EXPORT FragmentSelectionColumn {
+ /// \brief The path to the column to load
+ FieldPath path;
+ /// \brief The type of the column in the dataset schema
+ ///
+ /// A format may choose to ignore this field completely. For example, when
+ /// reading from IPC the reader can just return the column in the data type
+ /// that is stored on disk. There is no point in doing anything special.
+ ///
+ /// However, some formats may be capable of casting on the fly. For example,
+ /// when reading from CSV, if we know the target type of the column, we can
+ /// convert from string to the target type as we read.
+ DataType* requested_type;
+ };
+
+ /// \brief A list of columns that should be loaded from a fragment
+ ///
+ /// The paths in this selection should be referring to the fragment schema. This class
+ /// contains a virtual destructor as it is expected evolution strategies will need to
+ /// extend this to add any information needed to later evolve the batches.
+ ///
+ /// For example, in the basic evolution strategy, we keep track of which columns
+ /// were missing from the file so that we can fill those in with null when evolving.
+ class ARROW_DS_EXPORT FragmentSelection {
+ public:
+ explicit FragmentSelection(std::vector<FragmentSelectionColumn> columns)
+ : columns_(std::move(columns)) {}
+ virtual ~FragmentSelection() = default;
+ /// The columns that should be loaded from the fragment
+ const std::vector<FragmentSelectionColumn>& columns() const { return columns_; }
+
+ private:
+ std::vector<FragmentSelectionColumn> columns_;
+ };
+
+ /// \brief Instructions for scanning a particular fragment
+ ///
+ /// The fragment scan request is derived from ScanV2Options. The main
+ /// difference is that the scan options are based on the dataset schema
+ /// while the fragment request is based on the fragment schema.
+ struct ARROW_DS_EXPORT FragmentScanRequest {
+ /// \brief A row filter
+ ///
+ /// The filter expression should be written against the fragment schema.
+ ///
+ /// \see ScanV2Options for details on how this filter should be applied
+ compute::Expression filter = compute::literal(true);
+
+ /// \brief The columns to scan
+ ///
+ /// These indices refer to the fragment schema
+ ///
+ /// Note: This is NOT a simple list of top-level column indices.
+ /// For more details \see ScanV2Options
+ ///
+ /// If possible a fragment should only read from disk the data needed
+ /// to satisfy these columns. If a format cannot partially read a nested
+ /// column (e.g. JSON) then it must apply the column selection (in memory)
+ /// before returning the scanned batch.
+ std::shared_ptr<FragmentSelection> fragment_selection;
+ /// \brief Options specific to the format being scanned
+ const FragmentScanOptions* format_scan_options;
+ };
+
+ /// \brief An iterator-like object that can yield batches created from a fragment
+ class ARROW_DS_EXPORT FragmentScanner {
+ public:
+ /// This instance will only be destroyed after all ongoing scan futures
+ /// have been completed.
+ ///
+ /// This means any callbacks created as part of the scan can safely
+ /// capture `this`
+ virtual ~FragmentScanner() = default;
+ /// \brief Scan a batch of data from the file
+ /// \param batch_number The index of the batch to read
+ virtual Future<std::shared_ptr<RecordBatch>> ScanBatch(int batch_number) = 0;
+ /// \brief Calculate an estimate of how many data bytes the given batch will represent
+ ///
+ /// "Data bytes" should be the total size of all the buffers once the data has been
+ /// decoded into the Arrow format.
+ virtual int64_t EstimatedDataBytes(int batch_number) = 0;
+ /// \brief The number of batches in the fragment to scan
+ virtual int NumBatches() = 0;
+ };
+
+ /// \brief Information learned about a fragment through inspection
+ ///
+ /// This information can be used to figure out which fields need
+ /// to be read from a file and how the data read in should be evolved
+ /// to match the dataset schema.
+ ///
+ /// For example, from a CSV file we can inspect and learn the column
+ /// names and use those column names to determine which columns to load
+ /// from the CSV file.
+ struct ARROW_DS_EXPORT InspectedFragment {
+ explicit InspectedFragment(std::vector<std::string> column_names)
+ : column_names(std::move(column_names)) {}
+ std::vector<std::string> column_names;
+ };
+
+ /// \brief A granular piece of a Dataset, such as an individual file.
+ ///
+ /// A Fragment can be read/scanned separately from other fragments. It yields a
+ /// collection of RecordBatches when scanned.
+ ///
+ /// Note that Fragments have well-defined physical schemas which are reconciled by
+ /// the Datasets which contain them; these physical schemas may differ from a parent
+ /// Dataset's schema and the physical schemas of sibling Fragments.
+ class ARROW_DS_EXPORT Fragment : public std::enable_shared_from_this<Fragment> {
+ public:
+ /// \brief An expression that represents no known partition information
+ static const compute::Expression kNoPartitionInformation;
+
+ /// \brief Return the physical schema of the Fragment.
+ ///
+ /// The physical schema is also called the writer schema.
+ /// This method is blocking and may suffer from high-latency filesystems.
+ /// The schema is cached after being read once, or may be specified at construction.
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchema();
+
+ /// An asynchronous version of Scan
+ virtual Result<RecordBatchGenerator> ScanBatchesAsync(
+ const std::shared_ptr<ScanOptions>& options) = 0;
+
+ /// \brief Inspect a fragment to learn basic information
+ ///
+ /// This will be called before a scan and a fragment should attach whatever
+ /// information will be needed to figure out an evolution strategy. This information
+ /// will then be passed to the call to BeginScan
+ virtual Future<std::shared_ptr<InspectedFragment>> InspectFragment(
+ const FragmentScanOptions* format_options, compute::ExecContext* exec_context);
+
+ /// \brief Start a scan operation
+ virtual Future<std::shared_ptr<FragmentScanner>> BeginScan(
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
+ const FragmentScanOptions* format_options, compute::ExecContext* exec_context);
+
+ /// \brief Count the number of rows in this fragment matching the filter using metadata
+ /// only. That is, this method may perform I/O, but will not load data.
+ ///
+ /// If this is not possible, resolve with an empty optional. The fragment can perform
+ /// I/O (e.g. to read metadata) before deciding whether it can satisfy the request.
+ virtual Future<std::optional<int64_t>> CountRows(
+ compute::Expression predicate, const std::shared_ptr<ScanOptions>& options);
+
+ virtual std::string type_name() const = 0;
+ virtual std::string ToString() const { return type_name(); }
+
+ /// \brief An expression which evaluates to true for all data viewed by this
+ /// Fragment.
+ const compute::Expression& partition_expression() const {
+ return partition_expression_;
+ }
+
+ virtual ~Fragment() = default;
+
+ protected:
+ Fragment() = default;
+ explicit Fragment(compute::Expression partition_expression,
+ std::shared_ptr<Schema> physical_schema);
+
+ virtual Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() = 0;
+
+ util::Mutex physical_schema_mutex_;
+ compute::Expression partition_expression_ = compute::literal(true);
+ std::shared_ptr<Schema> physical_schema_;
+ };
+
+ /// \brief Per-scan options for fragment(s) in a dataset.
+ ///
+ /// These options are not intrinsic to the format or fragment itself, but do affect
+ /// the results of a scan. These are options which make sense to change between
+ /// repeated reads of the same dataset, such as format-specific conversion options
+ /// (that do not affect the schema).
+ ///
+ /// \ingroup dataset-scanning
+ class ARROW_DS_EXPORT FragmentScanOptions {
+ public:
+ virtual std::string type_name() const = 0;
+ virtual std::string ToString() const { return type_name(); }
+ virtual ~FragmentScanOptions() = default;
+ };
+
+ /// \defgroup dataset-implementations Concrete implementations
+ ///
+ /// @{
+
+ /// \brief A trivial Fragment that yields ScanTask out of a fixed set of
+ /// RecordBatch.
+ class ARROW_DS_EXPORT InMemoryFragment : public Fragment {
+ public:
+ class Scanner;
+ InMemoryFragment(std::shared_ptr<Schema> schema, RecordBatchVector record_batches,
+ compute::Expression = compute::literal(true));
+ explicit InMemoryFragment(RecordBatchVector record_batches,
+ compute::Expression = compute::literal(true));
+
+ Result<RecordBatchGenerator> ScanBatchesAsync(
+ const std::shared_ptr<ScanOptions>& options) override;
+ Future<std::optional<int64_t>> CountRows(
+ compute::Expression predicate,
+ const std::shared_ptr<ScanOptions>& options) override;
+
+ Future<std::shared_ptr<InspectedFragment>> InspectFragment(
+ const FragmentScanOptions* format_options,
+ compute::ExecContext* exec_context) override;
+ Future<std::shared_ptr<FragmentScanner>> BeginScan(
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
+ const FragmentScanOptions* format_options,
+ compute::ExecContext* exec_context) override;
+
+ std::string type_name() const override { return "in-memory"; }
+
+ protected:
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() override;
+
+ RecordBatchVector record_batches_;
+ };
+
+ /// @}
+
+ using FragmentGenerator = AsyncGenerator<std::shared_ptr<Fragment>>;
+
+ /// \brief Rules for converting the dataset schema to and from fragment schemas
+ class ARROW_DS_EXPORT FragmentEvolutionStrategy {
+ public:
+ /// This instance will only be destroyed when all scan operations for the
+ /// fragment have completed.
+ virtual ~FragmentEvolutionStrategy() = default;
+ /// \brief A guarantee that applies to all batches of this fragment
+ ///
+ /// For example, if a fragment is missing one of the fields in the dataset
+ /// schema then a typical evolution strategy is to set that field to null.
+ ///
+ /// So if the column at index 3 is missing then the guarantee is
+ /// FieldRef(3) == null
+ ///
+ /// Individual field guarantees should be AND'd together and returned
+ /// as a single expression.
+ virtual Result<compute::Expression> GetGuarantee(
+ const std::vector<FieldPath>& dataset_schema_selection) const = 0;
+
+ /// \brief Return a fragment schema selection given a dataset schema selection
+ ///
+ /// For example, if the user wants fields 2 & 4 of the dataset schema and
+ /// in this fragment the field 2 is missing and the field 4 is at index 1 then
+ /// this should return {1}
+ virtual Result<std::unique_ptr<FragmentSelection>> DevolveSelection(
+ const std::vector<FieldPath>& dataset_schema_selection) const = 0;
+
+ /// \brief Return a filter expression bound to the fragment schema given
+ /// a filter expression bound to the dataset schema
+ ///
+ /// The dataset scan filter will first be simplified by the guarantee returned
+ /// by GetGuarantee. This means an evolution that only handles dropping or casting
+ /// fields doesn't need to do anything here except return the given filter.
+ ///
+ /// On the other hand, an evolution that is doing some kind of aliasing will likely
+ /// need to convert field references in the filter to the aliased field references
+ /// where appropriate.
+ virtual Result<compute::Expression> DevolveFilter(
+ const compute::Expression& filter) const = 0;
+
+ /// \brief Convert a batch from the fragment schema to the dataset schema
+ ///
+ /// Typically this involves casting columns from the data type stored on disk
+ /// to the data type of the dataset schema. For example, this fragment might
+ /// have columns stored as int32 and the dataset schema might have int64 for
+ /// the column. In this case we should cast the column from int32 to int64.
+ ///
+ /// Note: A fragment may perform this cast as the data is read from disk. In
+ /// that case a cast might not be needed.
+ virtual Result<compute::ExecBatch> EvolveBatch(
+ const std::shared_ptr<RecordBatch>& batch,
+ const std::vector<FieldPath>& dataset_selection,
+ const FragmentSelection& selection) const = 0;
+
+ /// \brief Return a string description of this strategy
+ virtual std::string ToString() const = 0;
+ };
+
+ /// \brief Lookup to create a FragmentEvolutionStrategy for a given fragment
+ class ARROW_DS_EXPORT DatasetEvolutionStrategy {
+ public:
+ virtual ~DatasetEvolutionStrategy() = default;
+ /// \brief Create a strategy for evolving from the given fragment
+ /// to the schema of the given dataset
+ virtual std::unique_ptr<FragmentEvolutionStrategy> GetStrategy(
+ const Dataset& dataset, const Fragment& fragment,
+ const InspectedFragment& inspected_fragment) = 0;
+
+ /// \brief Return a string description of this strategy
+ virtual std::string ToString() const = 0;
+ };
+
+ ARROW_DS_EXPORT std::unique_ptr<DatasetEvolutionStrategy>
+ MakeBasicDatasetEvolutionStrategy();
+
+ /// \brief A container of zero or more Fragments.
+ ///
+ /// A Dataset acts as a union of Fragments, e.g. files deeply nested in a
+ /// directory. A Dataset has a schema to which Fragments must align during a
+ /// scan operation. This is analogous to Avro's reader and writer schema.
+ class ARROW_DS_EXPORT Dataset : public std::enable_shared_from_this<Dataset> {
+ public:
+ /// \brief Begin to build a new Scan operation against this Dataset
+ Result<std::shared_ptr<ScannerBuilder>> NewScan();
+
+ /// \brief GetFragments returns an iterator of Fragments given a predicate.
+ Result<FragmentIterator> GetFragments(compute::Expression predicate);
+ Result<FragmentIterator> GetFragments();
+
+ /// \brief Async versions of `GetFragments`.
+ Result<FragmentGenerator> GetFragmentsAsync(compute::Expression predicate);
+ Result<FragmentGenerator> GetFragmentsAsync();
+
+ const std::shared_ptr<Schema>& schema() const { return schema_; }
+
+ /// \brief An expression which evaluates to true for all data viewed by this Dataset.
+ /// May be null, which indicates no information is available.
+ const compute::Expression& partition_expression() const {
+ return partition_expression_;
+ }
+
+ /// \brief The name identifying the kind of Dataset
+ virtual std::string type_name() const = 0;
+
+ /// \brief Return a copy of this Dataset with a different schema.
+ ///
+ /// The copy will view the same Fragments. If the new schema is not compatible with the
+ /// original dataset's schema then an error will be raised.
+ virtual Result<std::shared_ptr<Dataset>> ReplaceSchema(
+ std::shared_ptr<Schema> schema) const = 0;
+
+ /// \brief Rules used by this dataset to handle schema evolution
+ DatasetEvolutionStrategy* evolution_strategy() { return evolution_strategy_.get(); }
+
+ virtual ~Dataset() = default;
+
+ protected:
+ explicit Dataset(std::shared_ptr<Schema> schema) : schema_(std::move(schema)) {}
+
+ Dataset(std::shared_ptr<Schema> schema, compute::Expression partition_expression);
+
+ virtual Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) = 0;
+ /// \brief Default non-virtual implementation method for the base
+ /// `GetFragmentsAsyncImpl` method, which creates a fragment generator for
+ /// the dataset, possibly filtering results with a predicate (forwarding to
+ /// the synchronous `GetFragmentsImpl` method and moving the computations
+ /// to the background, using the IO thread pool).
+ ///
+ /// Currently, `executor` is always the same as `internal::GetCPUThreadPool()`,
+ /// which means the results from the underlying fragment generator will be
+ /// transferred to the default CPU thread pool. The generator itself is
+ /// offloaded to run on the default IO thread pool.
+ virtual Result<FragmentGenerator> GetFragmentsAsyncImpl(
+ compute::Expression predicate, arrow::internal::Executor* executor);
+
+ std::shared_ptr<Schema> schema_;
+ compute::Expression partition_expression_ = compute::literal(true);
+ std::unique_ptr<DatasetEvolutionStrategy> evolution_strategy_ =
+ MakeBasicDatasetEvolutionStrategy();
+ };
+
+ /// \addtogroup dataset-implementations
+ ///
+ /// @{
+
+ /// \brief A Source which yields fragments wrapping a stream of record batches.
+ ///
+ /// The record batches must match the schema provided to the source at construction.
+ class ARROW_DS_EXPORT InMemoryDataset : public Dataset {
+ public:
+ class RecordBatchGenerator {
+ public:
+ virtual ~RecordBatchGenerator() = default;
+ virtual RecordBatchIterator Get() const = 0;
+ };
+
+ /// Construct a dataset from a schema and a factory of record batch iterators.
+ InMemoryDataset(std::shared_ptr<Schema> schema,
+ std::shared_ptr<RecordBatchGenerator> get_batches)
+ : Dataset(std::move(schema)), get_batches_(std::move(get_batches)) {}
+
+ /// Convenience constructor taking a fixed list of batches
+ InMemoryDataset(std::shared_ptr<Schema> schema, RecordBatchVector batches);
+
+ /// Convenience constructor taking a Table
+ explicit InMemoryDataset(std::shared_ptr<Table> table);
+
+ std::string type_name() const override { return "in-memory"; }
+
+ Result<std::shared_ptr<Dataset>> ReplaceSchema(
+ std::shared_ptr<Schema> schema) const override;
+
+ protected:
+ Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;
+
+ std::shared_ptr<RecordBatchGenerator> get_batches_;
+ };
+
+ /// \brief A Dataset wrapping child Datasets.
+ class ARROW_DS_EXPORT UnionDataset : public Dataset {
+ public:
+ /// \brief Construct a UnionDataset wrapping child Datasets.
+ ///
+ /// \param[in] schema the schema of the resulting dataset.
+ /// \param[in] children one or more child Datasets. Their schemas must be identical to
+ /// schema.
+ static Result<std::shared_ptr<UnionDataset>> Make(std::shared_ptr<Schema> schema,
+ DatasetVector children);
+
+ const DatasetVector& children() const { return children_; }
+
+ std::string type_name() const override { return "union"; }
+
+ Result<std::shared_ptr<Dataset>> ReplaceSchema(
+ std::shared_ptr<Schema> schema) const override;
+
+ protected:
+ Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;
+
+ explicit UnionDataset(std::shared_ptr<Schema> schema, DatasetVector children)
+ : Dataset(std::move(schema)), children_(std::move(children)) {}
+
+ DatasetVector children_;
+
+ friend class UnionDatasetFactory;
+ };
+
+ /// @}
+
+ } // namespace dataset
+ } // namespace arrow
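
An illustrative round-trip through the Dataset API declared above (a sketch, not part of the diff): wrap a Table in an InMemoryDataset, build a scanner, and materialize the scan back into a Table.

#include <arrow/api.h>
#include <arrow/dataset/api.h>

arrow::Result<std::shared_ptr<arrow::Table>> RoundTrip(
    std::shared_ptr<arrow::Table> table) {
  // InMemoryDataset exposes the table as in-memory fragments.
  auto dataset = std::make_shared<arrow::dataset::InMemoryDataset>(std::move(table));
  ARROW_ASSIGN_OR_RAISE(auto builder, dataset->NewScan());
  ARROW_ASSIGN_OR_RAISE(auto scanner, builder->Finish());
  return scanner->ToTable();
}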
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/dataset_writer.h ADDED
@@ -0,0 +1,103 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <string>
+
+ #include "arrow/dataset/file_base.h"
+ #include "arrow/record_batch.h"
+ #include "arrow/status.h"
+ #include "arrow/util/async_util.h"
+ #include "arrow/util/future.h"
+
+ namespace arrow {
+ namespace dataset {
+ namespace internal {
+
+ // This lines up with our other defaults in the scanner and execution plan
+ constexpr uint64_t kDefaultDatasetWriterMaxRowsQueued = 8 * 1024 * 1024;
+
+ /// \brief Utility class that manages a set of writers to different paths
+ ///
+ /// Writers may be closed and reopened (and a new file created) based on the dataset
+ /// write options (for example, max_rows_per_file or max_open_files)
+ ///
+ /// The dataset writer enforces its own back pressure based on the # of rows (as opposed
+ /// to # of batches which is how it is typically enforced elsewhere) and # of files.
+ class ARROW_DS_EXPORT DatasetWriter {
+ public:
+ /// \brief Create a dataset writer
+ ///
+ /// Will fail if basename_template is invalid or if there is existing data and
+ /// existing_data_behavior is kError
+ ///
+ /// \param write_options options to control how the data should be written
+ /// \param max_rows_queued max # of rows allowed to be queued before the dataset_writer
+ /// will ask for backpressure
+ static Result<std::unique_ptr<DatasetWriter>> Make(
+ FileSystemDatasetWriteOptions write_options, util::AsyncTaskScheduler* scheduler,
+ std::function<void()> pause_callback, std::function<void()> resume_callback,
+ std::function<void()> finish_callback,
+ uint64_t max_rows_queued = kDefaultDatasetWriterMaxRowsQueued);
+
+ ~DatasetWriter();
+
+ /// \brief Write a batch to the dataset
+ /// \param[in] batch The batch to write
+ /// \param[in] directory The directory to write to
+ ///
+ /// Note: The written filename will be {directory}/{filename_factory(i)} where i is a
+ /// counter controlled by `max_open_files` and `max_rows_per_file`
+ ///
+ /// If multiple WriteRecordBatch calls arrive with the same `directory` then the batches
+ /// may be written to the same file.
+ ///
+ /// The returned future will be marked finished when the record batch has been queued
+ /// to be written. If the returned future is unfinished then this indicates the dataset
+ /// writer's queue is full and the data provider should pause.
+ ///
+ /// This method is NOT async reentrant. The returned future will only be unfinished
+ /// if back pressure needs to be applied. Async reentrancy is not necessary for
+ /// concurrent writes to happen. Calling this method again before the previous future
+ /// completes will not just violate max_rows_queued but likely lead to race conditions.
+ ///
+ /// One thing to note is that the ordering of your data can affect your maximum
+ /// potential parallelism. If this seems odd then consider a dataset where the first
+ /// 1000 batches go to the same directory and then the 1001st batch goes to a different
+ /// directory. The only way to get two parallel writes immediately would be to queue
+ /// all 1000 pending writes to the first directory.
+ void WriteRecordBatch(std::shared_ptr<RecordBatch> batch, const std::string& directory,
+ const std::string& prefix = "");
+
+ /// Finish all pending writes and close any open files
+ void Finish();
+
+ protected:
+ DatasetWriter(FileSystemDatasetWriteOptions write_options,
+ util::AsyncTaskScheduler* scheduler, std::function<void()> pause_callback,
+ std::function<void()> resume_callback,
+ std::function<void()> finish_callback,
+ uint64_t max_rows_queued = kDefaultDatasetWriterMaxRowsQueued);
+
+ class DatasetWriterImpl;
+ std::unique_ptr<DatasetWriterImpl> impl_;
+ };
+
+ } // namespace internal
+ } // namespace dataset
+ } // namespace arrow
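
A hypothetical call-pattern sketch for the DatasetWriter declared above (note this class lives in arrow::dataset::internal and is not public API; `write_options` and `scheduler` are assumed to be constructed elsewhere):

#include <arrow/dataset/dataset_writer.h>

arrow::Status WriteAll(arrow::dataset::FileSystemDatasetWriteOptions write_options,
                       arrow::util::AsyncTaskScheduler* scheduler,
                       const arrow::RecordBatchVector& batches) {
  namespace di = arrow::dataset::internal;
  // The pause/resume callbacks would normally throttle the batch producer;
  // no-op lambdas are used here for brevity.
  ARROW_ASSIGN_OR_RAISE(
      auto writer,
      di::DatasetWriter::Make(std::move(write_options), scheduler,
                              /*pause_callback=*/[] {}, /*resume_callback=*/[] {},
                              /*finish_callback=*/[] {}));
  for (const auto& batch : batches) {
    writer->WriteRecordBatch(batch, /*directory=*/"");
  }
  writer->Finish();  // queues the flush/close of any open files
  return arrow::Status::OK();
}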
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/discovery.h ADDED
@@ -0,0 +1,275 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ /// Logic for automatically determining the structure of a multi-file
+ /// dataset, with possible partitioning according to the available
+ /// partitioning scheme
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+ #include <variant>
+ #include <vector>
+
+ #include "arrow/dataset/partition.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/filesystem/type_fwd.h"
+ #include "arrow/result.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace dataset {
+
+ /// \defgroup dataset-discovery Discovery API
+ ///
+ /// @{
+
+ struct InspectOptions {
+ /// See `fragments` property.
+ static constexpr int kInspectAllFragments = -1;
+
+ /// Indicate how many fragments should be inspected to infer the unified dataset
+ /// schema. Limiting the number of fragments accessed improves the latency of
+ /// the discovery process when dealing with a high number of fragments and/or
+ /// high latency file systems.
+ ///
+ /// The default value of `1` inspects the schema of the first (in no particular
+ /// order) fragment only. If the dataset has a uniform schema for all fragments,
+ /// this default is the optimal value. In order to inspect all fragments and
+ /// robustly unify their potentially varying schemas, set this option to
+ /// `kInspectAllFragments`. A value of `0` disables inspection of fragments
+ /// altogether so only the partitioning schema will be inspected.
+ int fragments = 1;
+
+ /// Control how to unify types. By default, types are merged strictly (the
+ /// type must match exactly, except nulls can be merged with other types).
+ Field::MergeOptions field_merge_options = Field::MergeOptions::Defaults();
+ };
+
+ struct FinishOptions {
+ /// Finalize the dataset with this given schema. If the schema is not
+ /// provided, infer the schema via Inspect; see the `inspect_options`
+ /// property.
+ std::shared_ptr<Schema> schema = NULLPTR;
+
+ /// If the schema is not provided, it will be discovered by passing the
+ /// following options to `DatasetDiscovery::Inspect`.
+ InspectOptions inspect_options{};
+
+ /// Indicate if the given Schema, when specified, should be validated against
+ /// the fragments' schemas. `inspect_options` will control how many fragments
+ /// are checked.
+ bool validate_fragments = false;
+ };
+
+ /// \brief DatasetFactory provides a way to inspect/discover a Dataset's expected
+ /// schema before materializing said Dataset.
+ class ARROW_DS_EXPORT DatasetFactory {
+ public:
+ /// \brief Get the schemas of the Fragments and Partitioning.
+ virtual Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
+ InspectOptions options) = 0;
+
+ /// \brief Get unified schema for the resulting Dataset.
+ Result<std::shared_ptr<Schema>> Inspect(InspectOptions options = {});
+
+ /// \brief Create a Dataset
+ Result<std::shared_ptr<Dataset>> Finish();
+ /// \brief Create a Dataset with the given schema (see \a InspectOptions::schema)
+ Result<std::shared_ptr<Dataset>> Finish(std::shared_ptr<Schema> schema);
+ /// \brief Create a Dataset with the given options
+ virtual Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) = 0;
+
+ /// \brief Optional root partition for the resulting Dataset.
+ const compute::Expression& root_partition() const { return root_partition_; }
+ /// \brief Set the root partition for the resulting Dataset.
+ Status SetRootPartition(compute::Expression partition) {
+ root_partition_ = std::move(partition);
+ return Status::OK();
+ }
+
+ virtual ~DatasetFactory() = default;
+
+ protected:
+ DatasetFactory();
+
+ compute::Expression root_partition_;
+ };
+
+ /// @}
+
+ /// \brief DatasetFactory provides a way to inspect/discover a Dataset's
+ /// expected schema before materialization.
+ /// \ingroup dataset-implementations
+ class ARROW_DS_EXPORT UnionDatasetFactory : public DatasetFactory {
+ public:
+ static Result<std::shared_ptr<DatasetFactory>> Make(
+ std::vector<std::shared_ptr<DatasetFactory>> factories);
+
+ /// \brief Return the list of child DatasetFactory
+ const std::vector<std::shared_ptr<DatasetFactory>>& factories() const {
+ return factories_;
+ }
+
+ /// \brief Get the schemas of the Datasets.
+ ///
+ /// Instead of applying options globally, it applies at each child factory.
+ /// This will not respect `options.fragments` exactly, but will respect the
+ /// spirit of peeking the first fragments or all of them.
+ Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
+ InspectOptions options) override;
+
+ /// \brief Create a Dataset.
+ Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) override;
+
+ protected:
+ explicit UnionDatasetFactory(std::vector<std::shared_ptr<DatasetFactory>> factories);
+
+ std::vector<std::shared_ptr<DatasetFactory>> factories_;
+ };
+
+ /// \ingroup dataset-filesystem
+ struct FileSystemFactoryOptions {
+ /// Either an explicit Partitioning or a PartitioningFactory to discover one.
+ ///
+ /// If a factory is provided, it will be used to infer a schema for partition fields
+ /// based on file and directory paths then construct a Partitioning. The default
+ /// is a Partitioning which will yield no partition information.
+ ///
+ /// The (explicit or discovered) partitioning will be applied to discovered files
+ /// and the resulting partition information embedded in the Dataset.
+ PartitioningOrFactory partitioning{Partitioning::Default()};
+
+ /// For the purposes of applying the partitioning, paths will be stripped
+ /// of the partition_base_dir. Files not matching the partition_base_dir
+ /// prefix will be skipped for partition discovery. The ignored files will still
+ /// be part of the Dataset, but will not have partition information.
+ ///
+ /// Example:
+ /// partition_base_dir = "/dataset";
+ ///
+ /// - "/dataset/US/sales.csv" -> "US/sales.csv" will be given to the partitioning
+ ///
+ /// - "/home/john/late_sales.csv" -> Will be ignored for partition discovery.
+ ///
+ /// This is useful for partitioning which parses directories when ordering
+ /// is important, e.g. DirectoryPartitioning.
+ std::string partition_base_dir;
+
+ /// Invalid files (via selector or explicitly) will be excluded by checking
+ /// with the FileFormat::IsSupported method. This will incur IO for each file
+ /// in a serial and single-threaded fashion. Disabling this feature will skip the
+ /// IO, but unsupported files may be present in the Dataset
+ /// (resulting in an error at scan time).
+ bool exclude_invalid_files = false;
+
+ /// When discovering from a Selector (and not from an explicit file list), ignore
+ /// files and directories matching any of these prefixes.
+ ///
+ /// Example (with selector = "/dataset/**"):
+ /// selector_ignore_prefixes = {"_", ".DS_STORE" };
+ ///
+ /// - "/dataset/data.csv" -> not ignored
+ /// - "/dataset/_metadata" -> ignored
+ /// - "/dataset/.DS_STORE" -> ignored
+ /// - "/dataset/_hidden/dat" -> ignored
+ /// - "/dataset/nested/.DS_STORE" -> ignored
+ std::vector<std::string> selector_ignore_prefixes = {
+ ".",
+ "_",
+ };
+ };
+
+ /// \brief FileSystemDatasetFactory creates a Dataset from a vector of
+ /// fs::FileInfo or a fs::FileSelector.
+ /// \ingroup dataset-filesystem
+ class ARROW_DS_EXPORT FileSystemDatasetFactory : public DatasetFactory {
+ public:
+ /// \brief Build a FileSystemDatasetFactory from an explicit list of
+ /// paths.
+ ///
+ /// \param[in] filesystem passed to FileSystemDataset
+ /// \param[in] paths passed to FileSystemDataset
+ /// \param[in] format passed to FileSystemDataset
+ /// \param[in] options see FileSystemFactoryOptions for more information.
+ static Result<std::shared_ptr<DatasetFactory>> Make(
+ std::shared_ptr<fs::FileSystem> filesystem, const std::vector<std::string>& paths,
+ std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);
+
+ /// \brief Build a FileSystemDatasetFactory from a fs::FileSelector.
+ ///
+ /// The selector will expand to a vector of FileInfo. The expansion/crawling
+ /// is performed in this function call. Thus, the finalized Dataset is
+ /// working with a snapshot of the filesystem.
+ ///
+ /// If options.partition_base_dir is not provided, it will be overwritten
+ /// with selector.base_dir.
+ ///
+ /// \param[in] filesystem passed to FileSystemDataset
+ /// \param[in] selector used to crawl and search files
+ /// \param[in] format passed to FileSystemDataset
+ /// \param[in] options see FileSystemFactoryOptions for more information.
+ static Result<std::shared_ptr<DatasetFactory>> Make(
+ std::shared_ptr<fs::FileSystem> filesystem, fs::FileSelector selector,
+ std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);
+
+ /// \brief Build a FileSystemDatasetFactory from a URI including filesystem
+ /// information.
+ ///
+ /// \param[in] uri passed to FileSystemDataset
+ /// \param[in] format passed to FileSystemDataset
+ /// \param[in] options see FileSystemFactoryOptions for more information.
+ static Result<std::shared_ptr<DatasetFactory>> Make(std::string uri,
+ std::shared_ptr<FileFormat> format,
+ FileSystemFactoryOptions options);
+
+ /// \brief Build a FileSystemDatasetFactory from an explicit list of
+ /// file information.
+ ///
+ /// \param[in] filesystem passed to FileSystemDataset
+ /// \param[in] files passed to FileSystemDataset
+ /// \param[in] format passed to FileSystemDataset
+ /// \param[in] options see FileSystemFactoryOptions for more information.
+ static Result<std::shared_ptr<DatasetFactory>> Make(
+ std::shared_ptr<fs::FileSystem> filesystem, const std::vector<fs::FileInfo>& files,
+ std::shared_ptr<FileFormat> format, FileSystemFactoryOptions options);
+
+ Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
+ InspectOptions options) override;
+
+ Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) override;
+
+ protected:
+ FileSystemDatasetFactory(std::vector<fs::FileInfo> files,
+ std::shared_ptr<fs::FileSystem> filesystem,
+ std::shared_ptr<FileFormat> format,
+ FileSystemFactoryOptions options);
+
+ Result<std::shared_ptr<Schema>> PartitionSchema();
+
+ std::vector<fs::FileInfo> files_;
+ std::shared_ptr<fs::FileSystem> fs_;
+ std::shared_ptr<FileFormat> format_;
+ FileSystemFactoryOptions options_;
+ };
+
+ } // namespace dataset
+ } // namespace arrow
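
An illustrative discovery sketch using the FileSystemDatasetFactory declared above (a sketch, not part of the diff; assumes an Arrow build with Parquet enabled and Hive-style key=value directory partitioning in the data):

#include <arrow/dataset/api.h>
#include <arrow/filesystem/api.h>

arrow::Result<std::shared_ptr<arrow::dataset::Dataset>> DiscoverDataset(
    std::shared_ptr<arrow::fs::FileSystem> fs, const std::string& base_dir) {
  // Crawl the directory tree once; the factory works on this snapshot.
  arrow::fs::FileSelector selector;
  selector.base_dir = base_dir;
  selector.recursive = true;

  arrow::dataset::FileSystemFactoryOptions options;
  // Discover key=value style partition fields from directory names.
  options.partitioning = arrow::dataset::HivePartitioning::MakeFactory();

  ARROW_ASSIGN_OR_RAISE(
      auto factory,
      arrow::dataset::FileSystemDatasetFactory::Make(
          std::move(fs), std::move(selector),
          std::make_shared<arrow::dataset::ParquetFileFormat>(), std::move(options)));
  return factory->Finish();  // unified schema inferred per InspectOptions defaults
}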
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_base.h ADDED
@@ -0,0 +1,495 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+ #include <vector>
27
+
28
+ #include "arrow/buffer.h"
29
+ #include "arrow/dataset/dataset.h"
30
+ #include "arrow/dataset/partition.h"
31
+ #include "arrow/dataset/scanner.h"
32
+ #include "arrow/dataset/type_fwd.h"
33
+ #include "arrow/dataset/visibility.h"
34
+ #include "arrow/filesystem/filesystem.h"
35
+ #include "arrow/io/file.h"
36
+ #include "arrow/type_fwd.h"
37
+ #include "arrow/util/compression.h"
38
+
39
+ namespace arrow {
40
+
41
+ namespace dataset {
42
+
43
+ /// \defgroup dataset-file-formats File formats for reading and writing datasets
44
+ /// \defgroup dataset-filesystem File system datasets
45
+ ///
46
+ /// @{
47
+
48
+ /// \brief The path and filesystem where an actual file is located or a buffer which can
49
+ /// be read like a file
50
+ class ARROW_DS_EXPORT FileSource : public util::EqualityComparable<FileSource> {
51
+ public:
52
+ FileSource(std::string path, std::shared_ptr<fs::FileSystem> filesystem,
53
+ Compression::type compression = Compression::UNCOMPRESSED)
54
+ : file_info_(std::move(path)),
55
+ filesystem_(std::move(filesystem)),
56
+ compression_(compression) {}
57
+
58
+ FileSource(fs::FileInfo info, std::shared_ptr<fs::FileSystem> filesystem,
59
+ Compression::type compression = Compression::UNCOMPRESSED)
60
+ : file_info_(std::move(info)),
61
+ filesystem_(std::move(filesystem)),
62
+ compression_(compression) {}
63
+
64
+ explicit FileSource(std::shared_ptr<Buffer> buffer,
65
+ Compression::type compression = Compression::UNCOMPRESSED)
66
+ : buffer_(std::move(buffer)), compression_(compression) {}
67
+
68
+ using CustomOpen = std::function<Result<std::shared_ptr<io::RandomAccessFile>>()>;
69
+ FileSource(CustomOpen open, int64_t size)
70
+ : custom_open_(std::move(open)), custom_size_(size) {}
71
+
72
+ using CustomOpenWithCompression =
73
+ std::function<Result<std::shared_ptr<io::RandomAccessFile>>(Compression::type)>;
74
+ FileSource(CustomOpenWithCompression open_with_compression, int64_t size,
75
+ Compression::type compression = Compression::UNCOMPRESSED)
76
+ : custom_open_(std::bind(std::move(open_with_compression), compression)),
77
+ custom_size_(size),
78
+ compression_(compression) {}
79
+
80
+ FileSource(std::shared_ptr<io::RandomAccessFile> file, int64_t size,
81
+ Compression::type compression = Compression::UNCOMPRESSED)
82
+ : custom_open_([=] { return ToResult(file); }),
83
+ custom_size_(size),
84
+ compression_(compression) {}
85
+
86
+ explicit FileSource(std::shared_ptr<io::RandomAccessFile> file,
87
+ Compression::type compression = Compression::UNCOMPRESSED);
88
+
89
+ FileSource() : custom_open_(CustomOpen{&InvalidOpen}) {}
90
+
91
+ static std::vector<FileSource> FromPaths(const std::shared_ptr<fs::FileSystem>& fs,
92
+ std::vector<std::string> paths) {
93
+ std::vector<FileSource> sources;
94
+ for (auto&& path : paths) {
95
+ sources.emplace_back(std::move(path), fs);
96
+ }
97
+ return sources;
98
+ }
99
+
100
+ /// \brief Return the type of raw compression on the file, if any.
101
+ Compression::type compression() const { return compression_; }
102
+
103
+ /// \brief Return the file path, if any. Only valid when file source wraps a path.
104
+ const std::string& path() const {
105
+ static std::string buffer_path = "<Buffer>";
106
+ static std::string custom_open_path = "<Buffer>";
107
+ return filesystem_ ? file_info_.path() : buffer_ ? buffer_path : custom_open_path;
108
+ }
109
+
110
+ /// \brief Return the filesystem, if any. Otherwise returns nullptr
111
+ const std::shared_ptr<fs::FileSystem>& filesystem() const { return filesystem_; }
112
+
113
+ /// \brief Return the buffer containing the file, if any. Otherwise returns nullptr
114
+ const std::shared_ptr<Buffer>& buffer() const { return buffer_; }
115
+
116
+ /// \brief Get a RandomAccessFile which views this file source
117
+ Result<std::shared_ptr<io::RandomAccessFile>> Open() const;
118
+ Future<std::shared_ptr<io::RandomAccessFile>> OpenAsync() const;
119
+
120
+ /// \brief Get the size (in bytes) of the file or buffer
121
+ /// If the file is compressed this should be the compressed (on-disk) size.
122
+ int64_t Size() const;
123
+
124
+ /// \brief Get an InputStream which views this file source (and decompresses if needed)
125
+ /// \param[in] compression If nullopt, guess the compression scheme from the
126
+ /// filename, else decompress with the given codec
127
+ Result<std::shared_ptr<io::InputStream>> OpenCompressed(
128
+ std::optional<Compression::type> compression = std::nullopt) const;
129
+
130
+ /// \brief equality comparison with another FileSource
131
+ bool Equals(const FileSource& other) const;
132
+
133
+ private:
134
+ static Result<std::shared_ptr<io::RandomAccessFile>> InvalidOpen() {
135
+ return Status::Invalid("Called Open() on an uninitialized FileSource");
136
+ }
137
+
138
+ fs::FileInfo file_info_;
139
+ std::shared_ptr<fs::FileSystem> filesystem_;
140
+ std::shared_ptr<Buffer> buffer_;
141
+ CustomOpen custom_open_;
142
+ int64_t custom_size_ = 0;
143
+ Compression::type compression_ = Compression::UNCOMPRESSED;
144
+ };
145
+
146
+ /// \brief Base class for file format implementation
147
+ class ARROW_DS_EXPORT FileFormat : public std::enable_shared_from_this<FileFormat> {
148
+ public:
149
+ /// Options affecting how this format is scanned.
150
+ ///
151
+ /// The options here can be overridden at scan time.
152
+ std::shared_ptr<FragmentScanOptions> default_fragment_scan_options;
153
+
154
+ virtual ~FileFormat() = default;
155
+
156
+ /// \brief The name identifying the kind of file format
157
+ virtual std::string type_name() const = 0;
158
+
159
+ virtual bool Equals(const FileFormat& other) const = 0;
160
+
161
+ /// \brief Indicate if the FileSource is supported/readable by this format.
162
+ virtual Result<bool> IsSupported(const FileSource& source) const = 0;
163
+
164
+ /// \brief Return the schema of the file if possible.
165
+ virtual Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const = 0;
166
+
167
+ /// \brief Learn what we need about the file before we start scanning it
168
+ virtual Future<std::shared_ptr<InspectedFragment>> InspectFragment(
169
+ const FileSource& source, const FragmentScanOptions* format_options,
170
+ compute::ExecContext* exec_context) const;
171
+
172
+ virtual Result<RecordBatchGenerator> ScanBatchesAsync(
173
+ const std::shared_ptr<ScanOptions>& options,
174
+ const std::shared_ptr<FileFragment>& file) const = 0;
175
+
176
+ virtual Future<std::optional<int64_t>> CountRows(
177
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
178
+ const std::shared_ptr<ScanOptions>& options);
179
+
180
+ virtual Future<std::shared_ptr<FragmentScanner>> BeginScan(
181
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
182
+ const FragmentScanOptions* format_options,
183
+ compute::ExecContext* exec_context) const;
184
+
185
+ /// \brief Open a fragment
186
+ virtual Result<std::shared_ptr<FileFragment>> MakeFragment(
187
+ FileSource source, compute::Expression partition_expression,
188
+ std::shared_ptr<Schema> physical_schema);
189
+
190
+ /// \brief Create a FileFragment for a FileSource.
191
+ Result<std::shared_ptr<FileFragment>> MakeFragment(
192
+ FileSource source, compute::Expression partition_expression);
193
+
194
+ /// \brief Create a FileFragment for a FileSource.
195
+ Result<std::shared_ptr<FileFragment>> MakeFragment(
196
+ FileSource source, std::shared_ptr<Schema> physical_schema = NULLPTR);
197
+
198
+ /// \brief Create a writer for this format.
199
+ virtual Result<std::shared_ptr<FileWriter>> MakeWriter(
200
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
201
+ std::shared_ptr<FileWriteOptions> options,
202
+ fs::FileLocator destination_locator) const = 0;
203
+
204
+ /// \brief Get default write options for this format.
+ ///
+ /// May return null shared_ptr if this file format does not yet support
+ /// writing datasets.
+ virtual std::shared_ptr<FileWriteOptions> DefaultWriteOptions() = 0;
+
+ protected:
+ explicit FileFormat(std::shared_ptr<FragmentScanOptions> default_fragment_scan_options)
+ : default_fragment_scan_options(std::move(default_fragment_scan_options)) {}
+ };
+
+ /// \brief A Fragment that is stored in a file with a known format
+ class ARROW_DS_EXPORT FileFragment : public Fragment,
+ public util::EqualityComparable<FileFragment> {
+ public:
+ Result<RecordBatchGenerator> ScanBatchesAsync(
+ const std::shared_ptr<ScanOptions>& options) override;
+ Future<std::optional<int64_t>> CountRows(
+ compute::Expression predicate,
+ const std::shared_ptr<ScanOptions>& options) override;
+ Future<std::shared_ptr<FragmentScanner>> BeginScan(
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
+ const FragmentScanOptions* format_options,
+ compute::ExecContext* exec_context) override;
+ Future<std::shared_ptr<InspectedFragment>> InspectFragment(
+ const FragmentScanOptions* format_options,
+ compute::ExecContext* exec_context) override;
+
+ std::string type_name() const override { return format_->type_name(); }
+ std::string ToString() const override { return source_.path(); }
+
+ const FileSource& source() const { return source_; }
+ const std::shared_ptr<FileFormat>& format() const { return format_; }
+
+ bool Equals(const FileFragment& other) const;
+
+ protected:
+ FileFragment(FileSource source, std::shared_ptr<FileFormat> format,
+ compute::Expression partition_expression,
+ std::shared_ptr<Schema> physical_schema)
+ : Fragment(std::move(partition_expression), std::move(physical_schema)),
+ source_(std::move(source)),
+ format_(std::move(format)) {}
+
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() override;
+
+ FileSource source_;
+ std::shared_ptr<FileFormat> format_;
+
+ friend class FileFormat;
+ };
+
+ /// \brief A Dataset of FileFragments.
+ ///
+ /// A FileSystemDataset is composed of one or more FileFragments. The fragments
+ /// are independent and don't need to share the same format or filesystem.
+ class ARROW_DS_EXPORT FileSystemDataset : public Dataset {
+ public:
+ /// \brief Create a FileSystemDataset.
+ ///
+ /// \param[in] schema the schema of the dataset
+ /// \param[in] root_partition the partition expression of the dataset
+ /// \param[in] format the format of each FileFragment.
+ /// \param[in] filesystem the filesystem of each FileFragment, or nullptr if the
+ /// fragments wrap buffers.
+ /// \param[in] fragments list of fragments to create the dataset from.
+ /// \param[in] partitioning the Partitioning object in case the dataset is created
+ /// with a known partitioning (e.g. from a discovered partitioning
+ /// through a DatasetFactory), or nullptr if not known.
+ ///
+ /// Note that fragments wrapping files resident in differing filesystems are not
+ /// permitted; to work with multiple filesystems use a UnionDataset.
+ ///
+ /// \return A constructed dataset.
+ static Result<std::shared_ptr<FileSystemDataset>> Make(
+ std::shared_ptr<Schema> schema, compute::Expression root_partition,
+ std::shared_ptr<FileFormat> format, std::shared_ptr<fs::FileSystem> filesystem,
+ std::vector<std::shared_ptr<FileFragment>> fragments,
+ std::shared_ptr<Partitioning> partitioning = NULLPTR);
+
+ /// \brief Write a dataset.
+ static Status Write(const FileSystemDatasetWriteOptions& write_options,
+ std::shared_ptr<Scanner> scanner);
+
+ /// \brief Return the type name of the dataset.
+ std::string type_name() const override { return "filesystem"; }
+
+ /// \brief Replace the schema of the dataset.
+ Result<std::shared_ptr<Dataset>> ReplaceSchema(
+ std::shared_ptr<Schema> schema) const override;
+
+ /// \brief Return the paths of the files.
+ std::vector<std::string> files() const;
+
+ /// \brief Return the format.
+ const std::shared_ptr<FileFormat>& format() const { return format_; }
+
+ /// \brief Return the filesystem. May be nullptr if the fragments wrap buffers.
+ const std::shared_ptr<fs::FileSystem>& filesystem() const { return filesystem_; }
+
+ /// \brief Return the partitioning. May be nullptr if the dataset was not constructed
+ /// with a partitioning.
+ const std::shared_ptr<Partitioning>& partitioning() const { return partitioning_; }
+
+ std::string ToString() const;
+
+ protected:
+ struct FragmentSubtrees;
+
+ explicit FileSystemDataset(std::shared_ptr<Schema> schema)
+ : Dataset(std::move(schema)) {}
+
+ FileSystemDataset(std::shared_ptr<Schema> schema,
+ compute::Expression partition_expression)
+ : Dataset(std::move(schema), partition_expression) {}
+
+ Result<FragmentIterator> GetFragmentsImpl(compute::Expression predicate) override;
+
+ void SetupSubtreePruning();
+
+ std::shared_ptr<FileFormat> format_;
+ std::shared_ptr<fs::FileSystem> filesystem_;
+ std::vector<std::shared_ptr<FileFragment>> fragments_;
+ std::shared_ptr<Partitioning> partitioning_;
+
+ std::shared_ptr<FragmentSubtrees> subtrees_;
+ };
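+
+ // Usage sketch (hypothetical, for illustration only): wiring the pieces above
+ // together. `my_schema`, `fs`, and `fragments` are assumed to already exist;
+ // error handling is elided.
+ //
+ //   auto format = std::make_shared<ParquetFileFormat>();
+ //   ARROW_ASSIGN_OR_RAISE(
+ //       auto dataset,
+ //       FileSystemDataset::Make(my_schema, compute::literal(true), format, fs,
+ //                               fragments, Partitioning::Default()));
+ //   ARROW_ASSIGN_OR_RAISE(auto builder, dataset->NewScan());
+ //   ARROW_ASSIGN_OR_RAISE(auto scanner, builder->Finish());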
+
+ /// \brief Options for writing a file of this format.
+ class ARROW_DS_EXPORT FileWriteOptions {
+ public:
+ virtual ~FileWriteOptions() = default;
+
+ const std::shared_ptr<FileFormat>& format() const { return format_; }
+
+ std::string type_name() const { return format_->type_name(); }
+
+ protected:
+ explicit FileWriteOptions(std::shared_ptr<FileFormat> format)
+ : format_(std::move(format)) {}
+
+ std::shared_ptr<FileFormat> format_;
+ };
+
+ /// \brief A writer for this format.
+ class ARROW_DS_EXPORT FileWriter {
+ public:
+ virtual ~FileWriter() = default;
+
+ /// \brief Write the given batch.
+ virtual Status Write(const std::shared_ptr<RecordBatch>& batch) = 0;
+
+ /// \brief Write all batches from the reader.
+ Status Write(RecordBatchReader* batches);
+
+ /// \brief Indicate that writing is done.
+ virtual Future<> Finish();
+
+ const std::shared_ptr<FileFormat>& format() const { return options_->format(); }
+ const std::shared_ptr<Schema>& schema() const { return schema_; }
+ const std::shared_ptr<FileWriteOptions>& options() const { return options_; }
+ const fs::FileLocator& destination() const { return destination_locator_; }
+
+ /// \brief After Finish() is called, provides the number of bytes written to the file.
+ Result<int64_t> GetBytesWritten() const;
+
+ protected:
+ FileWriter(std::shared_ptr<Schema> schema, std::shared_ptr<FileWriteOptions> options,
+ std::shared_ptr<io::OutputStream> destination,
+ fs::FileLocator destination_locator)
+ : schema_(std::move(schema)),
+ options_(std::move(options)),
+ destination_(std::move(destination)),
+ destination_locator_(std::move(destination_locator)) {}
+
+ virtual Future<> FinishInternal() = 0;
+
+ std::shared_ptr<Schema> schema_;
+ std::shared_ptr<FileWriteOptions> options_;
+ std::shared_ptr<io::OutputStream> destination_;
+ fs::FileLocator destination_locator_;
+ std::optional<int64_t> bytes_written_;
+ };
+
+ /// \brief Options for writing a dataset.
+ struct ARROW_DS_EXPORT FileSystemDatasetWriteOptions {
+ /// Options for individual fragment writing.
+ std::shared_ptr<FileWriteOptions> file_write_options;
+
+ /// FileSystem into which a dataset will be written.
+ std::shared_ptr<fs::FileSystem> filesystem;
+
+ /// Root directory into which the dataset will be written.
+ std::string base_dir;
+
+ /// Partitioning used to generate fragment paths.
+ std::shared_ptr<Partitioning> partitioning;
+
+ /// Maximum number of partitions any batch may be written into; the default is 1024.
+ int max_partitions = 1024;
+
+ /// Template string used to generate fragment basenames.
+ /// {i} will be replaced by an auto-incremented integer.
+ std::string basename_template;
+
+ /// A functor which will be applied to an incremented counter. The result will be
+ /// inserted into the basename_template in place of {i}.
+ ///
+ /// This can be used, for example, to left-pad the file counter.
+ std::function<std::string(int)> basename_template_functor;
+
+ /// If greater than 0 then this will limit the maximum number of files that can be left
+ /// open. If an attempt is made to open too many files then the least recently used file
+ /// will be closed. If this setting is set too low you may end up fragmenting your data
+ /// into many small files.
+ ///
+ /// The default is 900, which also allows some number of files to be open by the
+ /// scanner before hitting the default Linux limit of 1024.
+ uint32_t max_open_files = 900;
+
+ /// If greater than 0 then this will limit how many rows are placed in any single file.
+ /// Otherwise there will be no limit and one file will be created in each output
+ /// directory unless files need to be closed to respect max_open_files.
+ uint64_t max_rows_per_file = 0;
+
+ /// If greater than 0 then this will cause the dataset writer to batch incoming data
+ /// and only write the row groups to the disk when sufficient rows have accumulated.
+ /// The final row group size may be less than this value, and other options such as
+ /// `max_open_files` or `max_rows_per_file` may lead to smaller row group sizes.
+ uint64_t min_rows_per_group = 0;
+
+ /// If greater than 0 then the dataset writer may split up large incoming batches into
+ /// multiple row groups. If this value is set then min_rows_per_group should also be
+ /// set or else you may end up with very small row groups (e.g. if the incoming row
+ /// group size is just barely larger than this value).
+ uint64_t max_rows_per_group = 1 << 20;
+
+ /// Controls what happens if an output directory already exists.
+ ExistingDataBehavior existing_data_behavior = ExistingDataBehavior::kError;
+
+ /// \brief If false the dataset writer will not create directories.
+ /// This is mainly intended for filesystems that do not require directories, such as S3.
+ bool create_dir = true;
+
+ /// Callback to be invoked against all FileWriters before
+ /// they are finalized with FileWriter::Finish().
+ std::function<Status(FileWriter*)> writer_pre_finish = [](FileWriter*) {
+ return Status::OK();
+ };
+
+ /// Callback to be invoked against all FileWriters after they have
+ /// called FileWriter::Finish().
+ std::function<Status(FileWriter*)> writer_post_finish = [](FileWriter*) {
+ return Status::OK();
+ };
+
+ const std::shared_ptr<FileFormat>& format() const {
+ return file_write_options->format();
+ }
+ };
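+
+ // Configuration sketch (hypothetical): writing a scanned dataset with
+ // zero-padded file counters via basename_template_functor. `scanner`, `fs`,
+ // and `format` are assumed to already exist.
+ //
+ //   FileSystemDatasetWriteOptions write_options;
+ //   write_options.file_write_options = format->DefaultWriteOptions();
+ //   write_options.filesystem = fs;
+ //   write_options.base_dir = "/tmp/my_dataset";
+ //   write_options.partitioning = Partitioning::Default();
+ //   write_options.basename_template = "part-{i}.arrow";
+ //   write_options.basename_template_functor = [](int i) {
+ //     std::string s = std::to_string(i);
+ //     return std::string(6 - std::min<size_t>(6, s.size()), '0') + s;
+ //   };
+ //   ARROW_RETURN_NOT_OK(FileSystemDataset::Write(write_options, scanner));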
+
+ /// \brief Wraps FileSystemDatasetWriteOptions for consumption as acero::ExecNodeOptions
+ class ARROW_DS_EXPORT WriteNodeOptions : public acero::ExecNodeOptions {
+ public:
+ explicit WriteNodeOptions(
+ FileSystemDatasetWriteOptions options,
+ std::shared_ptr<const KeyValueMetadata> custom_metadata = NULLPTR)
+ : write_options(std::move(options)), custom_metadata(std::move(custom_metadata)) {}
+
+ /// \brief Options to control how to write the dataset
+ FileSystemDatasetWriteOptions write_options;
+ /// \brief Optional schema to attach to all written batches
+ ///
+ /// By default, we will use the output schema of the input.
+ ///
+ /// This can be used to alter schema metadata, field nullability, or field metadata.
+ /// However, this cannot be used to change the type of data. If the custom schema does
+ /// not have the same number of fields and the same data types as the input then the
+ /// plan will fail.
+ std::shared_ptr<Schema> custom_schema;
+ /// \brief Optional metadata to attach to written batches
+ std::shared_ptr<const KeyValueMetadata> custom_metadata;
+ };
+
+ /// @}
+
+ namespace internal {
+ ARROW_DS_EXPORT void InitializeDatasetWriter(arrow::acero::ExecFactoryRegistry* registry);
+ }
+
+ } // namespace dataset
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_csv.h ADDED
@@ -0,0 +1,144 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+
+ #include "arrow/csv/options.h"
+ #include "arrow/dataset/dataset.h"
+ #include "arrow/dataset/file_base.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/ipc/type_fwd.h"
+ #include "arrow/status.h"
+ #include "arrow/util/compression.h"
+
+ namespace arrow {
+ namespace dataset {
+
+ constexpr char kCsvTypeName[] = "csv";
+
+ /// \addtogroup dataset-file-formats
+ ///
+ /// @{
+
+ /// \brief A FileFormat implementation that reads from and writes to CSV files
+ class ARROW_DS_EXPORT CsvFileFormat : public FileFormat {
+ public:
+ // TODO(ARROW-18328) Remove this, moved to CsvFragmentScanOptions
+ /// Options affecting the parsing of CSV files
+ csv::ParseOptions parse_options = csv::ParseOptions::Defaults();
+
+ CsvFileFormat();
+
+ std::string type_name() const override { return kCsvTypeName; }
+
+ bool Equals(const FileFormat& other) const override;
+
+ Result<bool> IsSupported(const FileSource& source) const override;
+
+ /// \brief Return the schema of the file if possible.
+ Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
+
+ Future<std::shared_ptr<FragmentScanner>> BeginScan(
+ const FragmentScanRequest& request, const InspectedFragment& inspected_fragment,
+ const FragmentScanOptions* format_options,
+ compute::ExecContext* exec_context) const override;
+
+ Result<RecordBatchGenerator> ScanBatchesAsync(
+ const std::shared_ptr<ScanOptions>& scan_options,
+ const std::shared_ptr<FileFragment>& file) const override;
+
+ Future<std::shared_ptr<InspectedFragment>> InspectFragment(
+ const FileSource& source, const FragmentScanOptions* format_options,
+ compute::ExecContext* exec_context) const override;
+
+ Future<std::optional<int64_t>> CountRows(
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
+ const std::shared_ptr<ScanOptions>& options) override;
+
+ Result<std::shared_ptr<FileWriter>> MakeWriter(
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
+ std::shared_ptr<FileWriteOptions> options,
+ fs::FileLocator destination_locator) const override;
+
+ std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
+ };
+
+ /// \brief Per-scan options for CSV fragments
+ struct ARROW_DS_EXPORT CsvFragmentScanOptions : public FragmentScanOptions {
+ std::string type_name() const override { return kCsvTypeName; }
+
+ using StreamWrapFunc = std::function<Result<std::shared_ptr<io::InputStream>>(
+ std::shared_ptr<io::InputStream>)>;
+
+ /// CSV conversion options
+ csv::ConvertOptions convert_options = csv::ConvertOptions::Defaults();
+
+ /// CSV reading options
+ ///
+ /// Note that use_threads is always ignored.
+ csv::ReadOptions read_options = csv::ReadOptions::Defaults();
+
+ /// CSV parse options
+ csv::ParseOptions parse_options = csv::ParseOptions::Defaults();
+
+ /// Optional stream wrapping function
+ ///
+ /// If defined, all open dataset file fragments will be passed
+ /// through this function. One possible use case is to transparently
+ /// transcode all input files from a given character set to utf8.
+ StreamWrapFunc stream_transform_func{};
+ };
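+
+ // Usage sketch (hypothetical): per-scan CSV options, including the stream
+ // wrapping hook described above. `scanner_builder` is assumed to exist; the
+ // transcoding body is left as a placeholder.
+ //
+ //   auto csv_scan_options = std::make_shared<CsvFragmentScanOptions>();
+ //   csv_scan_options->parse_options.delimiter = '|';
+ //   csv_scan_options->stream_transform_func =
+ //       [](std::shared_ptr<io::InputStream> in)
+ //           -> Result<std::shared_ptr<io::InputStream>> {
+ //     // ... wrap `in` in a transcoding stream here if needed ...
+ //     return in;
+ //   };
+ //   ARROW_RETURN_NOT_OK(scanner_builder->FragmentScanOptions(csv_scan_options));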
+
+ class ARROW_DS_EXPORT CsvFileWriteOptions : public FileWriteOptions {
+ public:
+ /// Options passed to csv::MakeCSVWriter.
+ std::shared_ptr<csv::WriteOptions> write_options;
+
+ protected:
+ explicit CsvFileWriteOptions(std::shared_ptr<FileFormat> format)
+ : FileWriteOptions(std::move(format)) {}
+
+ friend class CsvFileFormat;
+ };
+
+ class ARROW_DS_EXPORT CsvFileWriter : public FileWriter {
+ public:
+ Status Write(const std::shared_ptr<RecordBatch>& batch) override;
+
+ private:
+ CsvFileWriter(std::shared_ptr<io::OutputStream> destination,
+ std::shared_ptr<ipc::RecordBatchWriter> writer,
+ std::shared_ptr<Schema> schema,
+ std::shared_ptr<CsvFileWriteOptions> options,
+ fs::FileLocator destination_locator);
+
+ Future<> FinishInternal() override;
+
+ std::shared_ptr<io::OutputStream> destination_;
+ std::shared_ptr<ipc::RecordBatchWriter> batch_writer_;
+
+ friend class CsvFileFormat;
+ };
+
+ /// @}
+
+ } // namespace dataset
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_ipc.h ADDED
@@ -0,0 +1,123 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+
+ #include "arrow/dataset/file_base.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/io/type_fwd.h"
+ #include "arrow/ipc/type_fwd.h"
+ #include "arrow/result.h"
+
+ namespace arrow {
+ namespace dataset {
+
+ /// \addtogroup dataset-file-formats
+ ///
+ /// @{
+
+ constexpr char kIpcTypeName[] = "ipc";
+
+ /// \brief A FileFormat implementation that reads from and writes to IPC files
+ class ARROW_DS_EXPORT IpcFileFormat : public FileFormat {
+ public:
+ std::string type_name() const override { return kIpcTypeName; }
+
+ IpcFileFormat();
+
+ bool Equals(const FileFormat& other) const override {
+ return type_name() == other.type_name();
+ }
+
+ Result<bool> IsSupported(const FileSource& source) const override;
+
+ /// \brief Return the schema of the file if possible.
+ Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
+
+ Result<RecordBatchGenerator> ScanBatchesAsync(
+ const std::shared_ptr<ScanOptions>& options,
+ const std::shared_ptr<FileFragment>& file) const override;
+
+ Future<std::optional<int64_t>> CountRows(
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
+ const std::shared_ptr<ScanOptions>& options) override;
+
+ Result<std::shared_ptr<FileWriter>> MakeWriter(
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
+ std::shared_ptr<FileWriteOptions> options,
+ fs::FileLocator destination_locator) const override;
+
+ std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
+ };
+
+ /// \brief Per-scan options for IPC fragments
+ class ARROW_DS_EXPORT IpcFragmentScanOptions : public FragmentScanOptions {
+ public:
+ std::string type_name() const override { return kIpcTypeName; }
+
+ /// Options passed to the IPC file reader.
+ /// included_fields, memory_pool, and use_threads are ignored.
+ std::shared_ptr<ipc::IpcReadOptions> options;
+ /// If present, the async scanner will enable I/O coalescing.
+ /// This is ignored by the sync scanner.
+ std::shared_ptr<io::CacheOptions> cache_options;
+ };
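+
+ // Usage sketch (hypothetical): enabling I/O coalescing for async IPC scans,
+ // which mainly helps high-latency filesystems such as S3. `scanner_builder`
+ // is assumed to exist.
+ //
+ //   auto ipc_scan_options = std::make_shared<IpcFragmentScanOptions>();
+ //   ipc_scan_options->cache_options =
+ //       std::make_shared<io::CacheOptions>(io::CacheOptions::LazyDefaults());
+ //   ARROW_RETURN_NOT_OK(scanner_builder->FragmentScanOptions(ipc_scan_options));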
+
+ class ARROW_DS_EXPORT IpcFileWriteOptions : public FileWriteOptions {
+ public:
+ /// Options passed to ipc::MakeFileWriter. use_threads is ignored
+ std::shared_ptr<ipc::IpcWriteOptions> options;
+
+ /// custom_metadata written to the file's footer
+ std::shared_ptr<const KeyValueMetadata> metadata;
+
+ protected:
+ explicit IpcFileWriteOptions(std::shared_ptr<FileFormat> format)
+ : FileWriteOptions(std::move(format)) {}
+
+ friend class IpcFileFormat;
+ };
+
+ class ARROW_DS_EXPORT IpcFileWriter : public FileWriter {
+ public:
+ Status Write(const std::shared_ptr<RecordBatch>& batch) override;
+
+ private:
+ IpcFileWriter(std::shared_ptr<io::OutputStream> destination,
+ std::shared_ptr<ipc::RecordBatchWriter> writer,
+ std::shared_ptr<Schema> schema,
+ std::shared_ptr<IpcFileWriteOptions> options,
+ fs::FileLocator destination_locator);
+
+ Future<> FinishInternal() override;
+
+ std::shared_ptr<io::OutputStream> destination_;
+ std::shared_ptr<ipc::RecordBatchWriter> batch_writer_;
+
+ friend class IpcFileFormat;
+ };
+
+ /// @}
+
+ } // namespace dataset
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_json.h ADDED
@@ -0,0 +1,98 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+ #include <optional>
+ #include <string>
+
+ #include "arrow/dataset/dataset.h"
+ #include "arrow/dataset/file_base.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/ipc/type_fwd.h"
+ #include "arrow/json/options.h"
+ #include "arrow/result.h"
+ #include "arrow/status.h"
+ #include "arrow/util/future.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow::dataset {
+
+ /// \addtogroup dataset-file-formats
+ ///
+ /// @{
+
+ constexpr char kJsonTypeName[] = "json";
+
+ /// \brief A FileFormat implementation that reads from JSON files
+ class ARROW_DS_EXPORT JsonFileFormat : public FileFormat {
+ public:
+ JsonFileFormat();
+
+ std::string type_name() const override { return kJsonTypeName; }
+
+ bool Equals(const FileFormat& other) const override;
+
+ Result<bool> IsSupported(const FileSource& source) const override;
+
+ Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
+
+ Future<std::shared_ptr<InspectedFragment>> InspectFragment(
+ const FileSource& source, const FragmentScanOptions* format_options,
+ compute::ExecContext* exec_context) const override;
+
+ Future<std::shared_ptr<FragmentScanner>> BeginScan(
+ const FragmentScanRequest& scan_request, const InspectedFragment& inspected,
+ const FragmentScanOptions* format_options,
+ compute::ExecContext* exec_context) const override;
+
+ Result<RecordBatchGenerator> ScanBatchesAsync(
+ const std::shared_ptr<ScanOptions>& scan_options,
+ const std::shared_ptr<FileFragment>& file) const override;
+
+ Future<std::optional<int64_t>> CountRows(
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
+ const std::shared_ptr<ScanOptions>& scan_options) override;
+
+ Result<std::shared_ptr<FileWriter>> MakeWriter(
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
+ std::shared_ptr<FileWriteOptions> options,
+ fs::FileLocator destination_locator) const override {
+ return Status::NotImplemented("Writing JSON files is not currently supported");
+ }
+
+ std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override { return NULLPTR; }
+ };
+
+ /// \brief Per-scan options for JSON fragments
+ struct ARROW_DS_EXPORT JsonFragmentScanOptions : public FragmentScanOptions {
+ std::string type_name() const override { return kJsonTypeName; }
+
+ /// @brief Options that affect JSON parsing
+ ///
+ /// Note: `explicit_schema` and `unexpected_field_behavior` are ignored.
+ json::ParseOptions parse_options = json::ParseOptions::Defaults();
+
+ /// @brief Options that affect JSON reading
+ json::ReadOptions read_options = json::ReadOptions::Defaults();
+ };
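+
+ // Usage sketch (hypothetical): tuning JSON reads for large files.
+ // `scanner_builder` is assumed to exist.
+ //
+ //   auto json_scan_options = std::make_shared<JsonFragmentScanOptions>();
+ //   json_scan_options->read_options.block_size = 1 << 24;  // 16 MiB blocks
+ //   json_scan_options->parse_options.newlines_in_values = true;
+ //   ARROW_RETURN_NOT_OK(scanner_builder->FragmentScanOptions(json_scan_options));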
+
+ /// @}
+
+ } // namespace arrow::dataset
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_orc.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <string>
24
+
25
+ #include "arrow/dataset/file_base.h"
26
+ #include "arrow/dataset/type_fwd.h"
27
+ #include "arrow/dataset/visibility.h"
28
+ #include "arrow/io/type_fwd.h"
29
+ #include "arrow/result.h"
30
+
31
+ namespace arrow {
32
+ namespace dataset {
33
+
34
+ /// \addtogroup dataset-file-formats
35
+ ///
36
+ /// @{
37
+
38
+ constexpr char kOrcTypeName[] = "orc";
39
+
40
+ /// \brief A FileFormat implementation that reads from and writes to ORC files
41
+ class ARROW_DS_EXPORT OrcFileFormat : public FileFormat {
42
+ public:
43
+ OrcFileFormat();
44
+
45
+ std::string type_name() const override { return kOrcTypeName; }
46
+
47
+ bool Equals(const FileFormat& other) const override {
48
+ return type_name() == other.type_name();
49
+ }
50
+
51
+ Result<bool> IsSupported(const FileSource& source) const override;
52
+
53
+ /// \brief Return the schema of the file if possible.
54
+ Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
55
+
56
+ Result<RecordBatchGenerator> ScanBatchesAsync(
57
+ const std::shared_ptr<ScanOptions>& options,
58
+ const std::shared_ptr<FileFragment>& file) const override;
59
+
60
+ Future<std::optional<int64_t>> CountRows(
61
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
62
+ const std::shared_ptr<ScanOptions>& options) override;
63
+
64
+ Result<std::shared_ptr<FileWriter>> MakeWriter(
65
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
66
+ std::shared_ptr<FileWriteOptions> options,
67
+ fs::FileLocator destination_locator) const override;
68
+
69
+ std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
70
+ };
71
+
72
+ /// @}
73
+
74
+ } // namespace dataset
75
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/file_parquet.h ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <optional>
24
+ #include <string>
25
+ #include <unordered_set>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "arrow/dataset/discovery.h"
30
+ #include "arrow/dataset/file_base.h"
31
+ #include "arrow/dataset/type_fwd.h"
32
+ #include "arrow/dataset/visibility.h"
33
+ #include "arrow/io/caching.h"
34
+
35
+ namespace parquet {
36
+ class ParquetFileReader;
37
+ class Statistics;
38
+ class ColumnChunkMetaData;
39
+ class RowGroupMetaData;
40
+ class FileMetaData;
41
+ class FileDecryptionProperties;
42
+ class FileEncryptionProperties;
43
+
44
+ class ReaderProperties;
45
+ class ArrowReaderProperties;
46
+
47
+ class WriterProperties;
48
+ class ArrowWriterProperties;
49
+
50
+ namespace arrow {
51
+ class FileReader;
52
+ class FileWriter;
53
+ struct SchemaManifest;
54
+ } // namespace arrow
55
+ } // namespace parquet
56
+
57
+ namespace arrow {
58
+ namespace dataset {
59
+
60
+ struct ParquetDecryptionConfig;
61
+ struct ParquetEncryptionConfig;
62
+
63
+ /// \addtogroup dataset-file-formats
64
+ ///
65
+ /// @{
66
+
67
+ constexpr char kParquetTypeName[] = "parquet";
68
+
69
+ /// \brief A FileFormat implementation that reads from Parquet files
70
+ class ARROW_DS_EXPORT ParquetFileFormat : public FileFormat {
71
+ public:
72
+ ParquetFileFormat();
73
+
74
+ /// Convenience constructor which copies properties from a parquet::ReaderProperties.
75
+ /// memory_pool will be ignored.
76
+ explicit ParquetFileFormat(const parquet::ReaderProperties& reader_properties);
77
+
78
+ std::string type_name() const override { return kParquetTypeName; }
79
+
80
+ bool Equals(const FileFormat& other) const override;
81
+
82
+ struct ReaderOptions {
83
+ /// \defgroup parquet-file-format-arrow-reader-properties properties which correspond
84
+ /// to members of parquet::ArrowReaderProperties.
85
+ ///
86
+ /// We don't embed parquet::ReaderProperties directly because column names (rather
87
+ /// than indices) are used to indicate dictionary columns, and other options are
88
+ /// deferred to scan time.
89
+ ///
90
+ /// @{
91
+ std::unordered_set<std::string> dict_columns;
92
+ arrow::TimeUnit::type coerce_int96_timestamp_unit = arrow::TimeUnit::NANO;
93
+ /// @}
94
+ } reader_options;
95
+
96
+ Result<bool> IsSupported(const FileSource& source) const override;
97
+
98
+ /// \brief Return the schema of the file if possible.
99
+ Result<std::shared_ptr<Schema>> Inspect(const FileSource& source) const override;
100
+
101
+ Result<RecordBatchGenerator> ScanBatchesAsync(
102
+ const std::shared_ptr<ScanOptions>& options,
103
+ const std::shared_ptr<FileFragment>& file) const override;
104
+
105
+ Future<std::optional<int64_t>> CountRows(
106
+ const std::shared_ptr<FileFragment>& file, compute::Expression predicate,
107
+ const std::shared_ptr<ScanOptions>& options) override;
108
+
109
+ using FileFormat::MakeFragment;
110
+
111
+ /// \brief Create a Fragment targeting all RowGroups.
112
+ Result<std::shared_ptr<FileFragment>> MakeFragment(
113
+ FileSource source, compute::Expression partition_expression,
114
+ std::shared_ptr<Schema> physical_schema) override;
115
+
116
+ /// \brief Create a Fragment, restricted to the specified row groups.
117
+ Result<std::shared_ptr<ParquetFileFragment>> MakeFragment(
118
+ FileSource source, compute::Expression partition_expression,
119
+ std::shared_ptr<Schema> physical_schema, std::vector<int> row_groups);
120
+
121
+ /// \brief Return a FileReader on the given source.
122
+ Result<std::shared_ptr<parquet::arrow::FileReader>> GetReader(
123
+ const FileSource& source, const std::shared_ptr<ScanOptions>& options) const;
124
+
125
+ Result<std::shared_ptr<parquet::arrow::FileReader>> GetReader(
126
+ const FileSource& source, const std::shared_ptr<ScanOptions>& options,
127
+ const std::shared_ptr<parquet::FileMetaData>& metadata) const;
128
+
129
+ Future<std::shared_ptr<parquet::arrow::FileReader>> GetReaderAsync(
130
+ const FileSource& source, const std::shared_ptr<ScanOptions>& options) const;
131
+
132
+ Future<std::shared_ptr<parquet::arrow::FileReader>> GetReaderAsync(
133
+ const FileSource& source, const std::shared_ptr<ScanOptions>& options,
134
+ const std::shared_ptr<parquet::FileMetaData>& metadata) const;
135
+
136
+ Result<std::shared_ptr<FileWriter>> MakeWriter(
137
+ std::shared_ptr<io::OutputStream> destination, std::shared_ptr<Schema> schema,
138
+ std::shared_ptr<FileWriteOptions> options,
139
+ fs::FileLocator destination_locator) const override;
140
+
141
+ std::shared_ptr<FileWriteOptions> DefaultWriteOptions() override;
142
+ };
143
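+
+ // Usage sketch (hypothetical): creating a fragment restricted to specific row
+ // groups. `source` and `partition_expr` are assumed to exist; a null
+ // physical_schema defers schema inspection to first use.
+ //
+ //   auto format = std::make_shared<ParquetFileFormat>();
+ //   format->reader_options.dict_columns.insert("category");
+ //   ARROW_ASSIGN_OR_RAISE(
+ //       auto fragment,
+ //       format->MakeFragment(source, partition_expr, /*physical_schema=*/nullptr,
+ //                            /*row_groups=*/{0, 2, 5}));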
+
+ /// \brief A FileFragment with parquet logic.
+ ///
+ /// ParquetFileFragment provides a lazy (with respect to IO) interface to
+ /// scan parquet files. Any heavy IO calls are deferred to the Scan() method.
+ ///
+ /// The caller can provide an optional list of selected RowGroups to limit the
+ /// number of scanned RowGroups, or to partition the scans across multiple
+ /// threads.
+ ///
+ /// Metadata can be explicitly provided, enabling pushdown predicate benefits without
+ /// the potentially heavy IO of loading Metadata from the file system. This can yield a
+ /// significant performance boost when scanning high-latency file systems.
+ class ARROW_DS_EXPORT ParquetFileFragment : public FileFragment {
+ public:
+ Result<FragmentVector> SplitByRowGroup(compute::Expression predicate);
+
+ /// \brief Return the RowGroups selected by this fragment.
+ const std::vector<int>& row_groups() const {
+ if (row_groups_) return *row_groups_;
+ static std::vector<int> empty;
+ return empty;
+ }
+
+ /// \brief Return the FileMetaData associated with this fragment.
+ std::shared_ptr<parquet::FileMetaData> metadata();
+
+ /// \brief Ensure this fragment's FileMetaData is in memory.
+ Status EnsureCompleteMetadata(parquet::arrow::FileReader* reader = NULLPTR);
+
+ /// \brief Return a fragment which selects a filtered subset of this fragment's
+ /// RowGroups.
+ Result<std::shared_ptr<Fragment>> Subset(compute::Expression predicate);
+ Result<std::shared_ptr<Fragment>> Subset(std::vector<int> row_group_ids);
+
+ static std::optional<compute::Expression> EvaluateStatisticsAsExpression(
+ const Field& field, const parquet::Statistics& statistics);
+
+ static std::optional<compute::Expression> EvaluateStatisticsAsExpression(
+ const Field& field, const FieldRef& field_ref,
+ const parquet::Statistics& statistics);
+
+ private:
+ ParquetFileFragment(FileSource source, std::shared_ptr<FileFormat> format,
+ compute::Expression partition_expression,
+ std::shared_ptr<Schema> physical_schema,
+ std::optional<std::vector<int>> row_groups);
+
+ Status SetMetadata(std::shared_ptr<parquet::FileMetaData> metadata,
+ std::shared_ptr<parquet::arrow::SchemaManifest> manifest,
+ std::shared_ptr<parquet::FileMetaData> original_metadata = {});
+
+ // Overridden to opportunistically set metadata since a reader must be opened anyway.
+ Result<std::shared_ptr<Schema>> ReadPhysicalSchemaImpl() override {
+ ARROW_RETURN_NOT_OK(EnsureCompleteMetadata());
+ return physical_schema_;
+ }
+
+ /// Return a filtered subset of row group indices.
+ Result<std::vector<int>> FilterRowGroups(compute::Expression predicate);
+ /// Simplify the predicate against the statistics of each row group.
+ Result<std::vector<compute::Expression>> TestRowGroups(compute::Expression predicate);
+ /// Try to count rows matching the predicate using metadata. Expects
+ /// metadata to be present, and expects the predicate to have been
+ /// simplified against the partition expression already.
+ Result<std::optional<int64_t>> TryCountRows(compute::Expression predicate);
+
+ ParquetFileFormat& parquet_format_;
+
+ /// Indices of row groups selected by this fragment,
+ /// or std::nullopt if all row groups are selected.
+ std::optional<std::vector<int>> row_groups_;
+
+ // The expressions (combined for all columns for which statistics have been
+ // processed) are stored per column group.
+ std::vector<compute::Expression> statistics_expressions_;
+ // Statistics status is tracked by Parquet schema column index
+ // (i.e. not by Arrow schema field index).
+ std::vector<bool> statistics_expressions_complete_;
+ std::shared_ptr<parquet::FileMetaData> metadata_;
+ std::shared_ptr<parquet::arrow::SchemaManifest> manifest_;
+ // The FileMetaData that owns the SchemaDescriptor pointed by SchemaManifest.
+ std::shared_ptr<parquet::FileMetaData> original_metadata_;
+
+ friend class ParquetFileFormat;
+ friend class ParquetDatasetFactory;
+ };
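+
+ // Usage sketch (hypothetical): predicate pushdown against row-group
+ // statistics. `parquet_fragment` is assumed to be a
+ // std::shared_ptr<ParquetFileFragment> and `cp` aliases arrow::compute.
+ //
+ //   namespace cp = arrow::compute;
+ //   auto predicate = cp::greater(cp::field_ref("x"), cp::literal(0));
+ //   ARROW_RETURN_NOT_OK(parquet_fragment->EnsureCompleteMetadata());
+ //   ARROW_ASSIGN_OR_RAISE(auto subset, parquet_fragment->Subset(predicate));
+ //   ARROW_ASSIGN_OR_RAISE(auto per_rg, parquet_fragment->SplitByRowGroup(predicate));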
+
+ /// \brief Per-scan options for Parquet fragments
+ class ARROW_DS_EXPORT ParquetFragmentScanOptions : public FragmentScanOptions {
+ public:
+ ParquetFragmentScanOptions();
+ std::string type_name() const override { return kParquetTypeName; }
+
+ /// Reader properties. Not all properties are respected: memory_pool comes from
+ /// ScanOptions.
+ std::shared_ptr<parquet::ReaderProperties> reader_properties;
+ /// Arrow reader properties. Not all properties are respected: batch_size comes from
+ /// ScanOptions. Additionally, dictionary columns come from
+ /// ParquetFileFormat::ReaderOptions::dict_columns.
+ std::shared_ptr<parquet::ArrowReaderProperties> arrow_reader_properties;
+ /// A configuration structure that provides decryption properties for a dataset
+ std::shared_ptr<ParquetDecryptionConfig> parquet_decryption_config = NULLPTR;
+ };
+
+ class ARROW_DS_EXPORT ParquetFileWriteOptions : public FileWriteOptions {
+ public:
+ /// \brief Parquet writer properties.
+ std::shared_ptr<parquet::WriterProperties> writer_properties;
+
+ /// \brief Parquet Arrow writer properties.
+ std::shared_ptr<parquet::ArrowWriterProperties> arrow_writer_properties;
+
+ /// A configuration structure that provides encryption properties for a dataset
+ std::shared_ptr<ParquetEncryptionConfig> parquet_encryption_config = NULLPTR;
+
+ protected:
+ explicit ParquetFileWriteOptions(std::shared_ptr<FileFormat> format)
+ : FileWriteOptions(std::move(format)) {}
+
+ friend class ParquetFileFormat;
+ };
+
+ class ARROW_DS_EXPORT ParquetFileWriter : public FileWriter {
+ public:
+ const std::shared_ptr<parquet::arrow::FileWriter>& parquet_writer() const {
+ return parquet_writer_;
+ }
+
+ Status Write(const std::shared_ptr<RecordBatch>& batch) override;
+
+ private:
+ ParquetFileWriter(std::shared_ptr<io::OutputStream> destination,
+ std::shared_ptr<parquet::arrow::FileWriter> writer,
+ std::shared_ptr<ParquetFileWriteOptions> options,
+ fs::FileLocator destination_locator);
+
+ Future<> FinishInternal() override;
+
+ std::shared_ptr<parquet::arrow::FileWriter> parquet_writer_;
+
+ friend class ParquetFileFormat;
+ };
+
+ /// \brief Options for making a FileSystemDataset from a Parquet _metadata file.
+ struct ParquetFactoryOptions {
+ /// Either an explicit Partitioning or a PartitioningFactory to discover one.
+ ///
+ /// If a factory is provided, it will be used to infer a schema for partition fields
+ /// based on file and directory paths then construct a Partitioning. The default
+ /// is a Partitioning which will yield no partition information.
+ ///
+ /// The (explicit or discovered) partitioning will be applied to discovered files
+ /// and the resulting partition information embedded in the Dataset.
+ PartitioningOrFactory partitioning{Partitioning::Default()};
+
+ /// For the purposes of applying the partitioning, paths will be stripped
+ /// of the partition_base_dir. Files not matching the partition_base_dir
+ /// prefix will be skipped for partition discovery. The ignored files will still
+ /// be part of the Dataset, but will not have partition information.
+ ///
+ /// Example:
+ /// partition_base_dir = "/dataset";
+ ///
+ /// - "/dataset/US/sales.csv" -> "US/sales.csv" will be given to the partitioning
+ ///
+ /// - "/home/john/late_sales.csv" -> Will be ignored for partition discovery.
+ ///
+ /// This is useful for partitionings that parse directories where ordering
+ /// is important, e.g. DirectoryPartitioning.
+ std::string partition_base_dir;
+
+ /// Assert that all ColumnChunk paths are consistent. The parquet spec allows for
+ /// ColumnChunk data to be stored in multiple files, but ParquetDatasetFactory
+ /// supports only a single file with all ColumnChunk data. If this flag is set,
+ /// construction of a ParquetDatasetFactory will raise an error if ColumnChunk
+ /// data is not resident in a single file.
+ bool validate_column_chunk_paths = false;
+ };
+
+ /// \brief Create a FileSystemDataset from a custom `_metadata` cache file.
+ ///
+ /// Dask and other systems will generate a cache metadata file by concatenating
+ /// the RowGroupMetaData of multiple parquet files into a single parquet file
+ /// that only contains metadata and no ColumnChunk data.
+ ///
+ /// ParquetDatasetFactory creates a FileSystemDataset composed of
+ /// ParquetFileFragments, where each fragment is pre-populated with the exact
+ /// number of row groups and statistics for each column.
+ class ARROW_DS_EXPORT ParquetDatasetFactory : public DatasetFactory {
+ public:
+ /// \brief Create a ParquetDatasetFactory from a metadata path.
+ ///
+ /// The `metadata_path` will be read from `filesystem`. Each RowGroup
+ /// contained in the metadata file will be relative to `dirname(metadata_path)`.
+ ///
+ /// \param[in] metadata_path path of the metadata parquet file
+ /// \param[in] filesystem from which to open/read the path
+ /// \param[in] format to read the file with.
+ /// \param[in] options see ParquetFactoryOptions
+ static Result<std::shared_ptr<DatasetFactory>> Make(
+ const std::string& metadata_path, std::shared_ptr<fs::FileSystem> filesystem,
+ std::shared_ptr<ParquetFileFormat> format, ParquetFactoryOptions options);
+
+ /// \brief Create a ParquetDatasetFactory from a metadata source.
+ ///
+ /// Similar to the previous Make definition, but the metadata can be a Buffer
+ /// and the base_path is explicit instead of inferred from the metadata
+ /// path.
+ ///
+ /// \param[in] metadata source to open the metadata parquet file from
+ /// \param[in] base_path used as the prefix of every parquet file referenced
+ /// \param[in] filesystem from which to read the files referenced.
+ /// \param[in] format to read the file with.
+ /// \param[in] options see ParquetFactoryOptions
+ static Result<std::shared_ptr<DatasetFactory>> Make(
+ const FileSource& metadata, const std::string& base_path,
+ std::shared_ptr<fs::FileSystem> filesystem,
+ std::shared_ptr<ParquetFileFormat> format, ParquetFactoryOptions options);
+
+ Result<std::vector<std::shared_ptr<Schema>>> InspectSchemas(
+ InspectOptions options) override;
+
+ Result<std::shared_ptr<Dataset>> Finish(FinishOptions options) override;
+
+ protected:
+ ParquetDatasetFactory(
+ std::shared_ptr<fs::FileSystem> filesystem,
+ std::shared_ptr<ParquetFileFormat> format,
+ std::shared_ptr<parquet::FileMetaData> metadata,
+ std::shared_ptr<parquet::arrow::SchemaManifest> manifest,
+ std::shared_ptr<Schema> physical_schema, std::string base_path,
+ ParquetFactoryOptions options,
+ std::vector<std::pair<std::string, std::vector<int>>> paths_with_row_group_ids)
+ : filesystem_(std::move(filesystem)),
+ format_(std::move(format)),
+ metadata_(std::move(metadata)),
+ manifest_(std::move(manifest)),
+ physical_schema_(std::move(physical_schema)),
+ base_path_(std::move(base_path)),
+ options_(std::move(options)),
+ paths_with_row_group_ids_(std::move(paths_with_row_group_ids)) {}
+
+ std::shared_ptr<fs::FileSystem> filesystem_;
+ std::shared_ptr<ParquetFileFormat> format_;
+ std::shared_ptr<parquet::FileMetaData> metadata_;
+ std::shared_ptr<parquet::arrow::SchemaManifest> manifest_;
+ std::shared_ptr<Schema> physical_schema_;
+ std::string base_path_;
+ ParquetFactoryOptions options_;
+ std::vector<std::pair<std::string, std::vector<int>>> paths_with_row_group_ids_;
+
+ private:
+ Result<std::vector<std::shared_ptr<FileFragment>>> CollectParquetFragments(
+ const Partitioning& partitioning);
+
+ Result<std::shared_ptr<Schema>> PartitionSchema();
+ };
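+
+ // Usage sketch (hypothetical): building a dataset from a Dask-style
+ // `_metadata` file; `fs` is assumed to exist and the path is illustrative.
+ //
+ //   ARROW_ASSIGN_OR_RAISE(
+ //       auto factory,
+ //       ParquetDatasetFactory::Make("/data/my_dataset/_metadata", fs,
+ //                                   std::make_shared<ParquetFileFormat>(),
+ //                                   ParquetFactoryOptions{}));
+ //   ARROW_ASSIGN_OR_RAISE(auto dataset, factory->Finish());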
+
+ /// @}
+
+ } // namespace dataset
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/parquet_encryption_config.h ADDED
@@ -0,0 +1,75 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include "arrow/dataset/type_fwd.h"
+
+ namespace parquet::encryption {
+ class CryptoFactory;
+ struct KmsConnectionConfig;
+ struct EncryptionConfiguration;
+ struct DecryptionConfiguration;
+ } // namespace parquet::encryption
+
+ namespace arrow {
+ namespace dataset {
+
+ /// \brief Core configuration class encapsulating parameters for high-level encryption
+ /// within the Parquet framework.
+ ///
+ /// ParquetEncryptionConfig serves as a bridge, passing encryption-related
+ /// parameters to the appropriate components within the Parquet library. It holds
+ /// references to objects defining the encryption strategy, Key Management Service
+ /// (KMS) configuration, and specific encryption configurations for Parquet data.
+ struct ARROW_DS_EXPORT ParquetEncryptionConfig {
+ /// Shared pointer to a CryptoFactory object, responsible for creating cryptographic
+ /// components like encryptors and decryptors.
+ std::shared_ptr<parquet::encryption::CryptoFactory> crypto_factory;
+
+ /// Shared pointer to a KmsConnectionConfig object, holding configuration parameters
+ /// for connecting to a Key Management Service (KMS).
+ std::shared_ptr<parquet::encryption::KmsConnectionConfig> kms_connection_config;
+
+ /// Shared pointer to an EncryptionConfiguration object, defining specific encryption
+ /// settings for Parquet data, like keys for different columns.
+ std::shared_ptr<parquet::encryption::EncryptionConfiguration> encryption_config;
+ };
+
+ /// \brief Core configuration class encapsulating parameters for high-level decryption
+ /// within the Parquet framework.
+ ///
+ /// ParquetDecryptionConfig is designed to pass decryption-related parameters to the
+ /// appropriate decryption components within the Parquet library. It holds references to
+ /// objects defining the decryption strategy, Key Management Service (KMS) configuration,
+ /// and specific decryption configurations for reading encrypted Parquet data.
+ struct ARROW_DS_EXPORT ParquetDecryptionConfig {
+ /// Shared pointer to a CryptoFactory object, pivotal in creating cryptographic
+ /// components for the decryption process.
+ std::shared_ptr<parquet::encryption::CryptoFactory> crypto_factory;
+
+ /// Shared pointer to a KmsConnectionConfig object, containing parameters for
+ /// connecting to a Key Management Service (KMS) during decryption.
+ std::shared_ptr<parquet::encryption::KmsConnectionConfig> kms_connection_config;
+
+ /// Shared pointer to a DecryptionConfiguration object, specifying decryption settings
+ /// for reading encrypted Parquet data.
+ std::shared_ptr<parquet::encryption::DecryptionConfiguration> decryption_config;
+ };
+
+ } // namespace dataset
+ } // namespace arrow
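+
+ // Wiring sketch (hypothetical): `crypto_factory`, `kms_config`, `enc_config`,
+ // and `parquet_write_options` (a ParquetFileWriteOptions) are placeholders
+ // assumed to exist; see parquet::encryption::CryptoFactory for the real
+ // construction API.
+ //
+ //   ParquetEncryptionConfig encryption_config;
+ //   encryption_config.crypto_factory = crypto_factory;
+ //   encryption_config.kms_connection_config = kms_config;
+ //   encryption_config.encryption_config = enc_config;
+ //   parquet_write_options->parquet_encryption_config =
+ //       std::make_shared<ParquetEncryptionConfig>(std::move(encryption_config));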
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/partition.h ADDED
@@ -0,0 +1,432 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <functional>
+ #include <iosfwd>
+ #include <memory>
+ #include <optional>
+ #include <string>
+ #include <unordered_map>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/compute/expression.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/dataset/visibility.h"
+ #include "arrow/util/compare.h"
+
+ namespace arrow {
+
+ namespace dataset {
+
+ constexpr char kFilenamePartitionSep = '_';
+
+ struct ARROW_DS_EXPORT PartitionPathFormat {
+ std::string directory, filename;
+ };
+
+ // ----------------------------------------------------------------------
+ // Partitioning
+
+ /// \defgroup dataset-partitioning Partitioning API
+ ///
+ /// @{
+
+ /// \brief Interface for parsing partition expressions from string partition
+ /// identifiers.
+ ///
+ /// For example, the identifier "foo=5" might be parsed to an equality expression
+ /// between the "foo" field and the value 5.
+ ///
+ /// Some partitionings may store the field names in a metadata
+ /// store instead of in file paths, for example
+ /// dataset_root/2009/11/... could be used when the partition fields
+ /// are "year" and "month".
+ ///
+ /// Paths are consumed from left to right. Paths must be relative to
+ /// the root of a partition; path prefixes must be removed before passing
+ /// the path to a partitioning for parsing.
+ class ARROW_DS_EXPORT Partitioning : public util::EqualityComparable<Partitioning> {
+ public:
+ virtual ~Partitioning() = default;
+
+ /// \brief The name identifying the kind of partitioning
+ virtual std::string type_name() const = 0;
+
+ /// \brief Return whether the partitionings are equal
+ virtual bool Equals(const Partitioning& other) const {
+ return schema_->Equals(other.schema_, /*check_metadata=*/false);
+ }
+
+ /// \brief If the input batch shares any fields with this partitioning,
+ /// produce sub-batches which satisfy mutually exclusive Expressions.
+ struct PartitionedBatches {
+ RecordBatchVector batches;
+ std::vector<compute::Expression> expressions;
+ };
+ virtual Result<PartitionedBatches> Partition(
+ const std::shared_ptr<RecordBatch>& batch) const = 0;
+
+ /// \brief Parse a path into a partition expression
+ virtual Result<compute::Expression> Parse(const std::string& path) const = 0;
+
+ virtual Result<PartitionPathFormat> Format(const compute::Expression& expr) const = 0;
+
+ /// \brief A default Partitioning which is a DirectoryPartitioning
+ /// with an empty schema.
+ static std::shared_ptr<Partitioning> Default();
+
+ /// \brief The partition schema.
+ const std::shared_ptr<Schema>& schema() const { return schema_; }
+
+ protected:
+ explicit Partitioning(std::shared_ptr<Schema> schema) : schema_(std::move(schema)) {}
+
+ std::shared_ptr<Schema> schema_;
+ };
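+
+ // Usage sketch (hypothetical): the parse/format round trip, using the
+ // DirectoryPartitioning declared further below; the schema is illustrative.
+ //
+ //   auto part = std::make_shared<DirectoryPartitioning>(
+ //       schema({field("year", int16()), field("month", int8())}));
+ //   ARROW_ASSIGN_OR_RAISE(auto expr, part->Parse("/2009/11"));
+ //   // expr is equivalent to ("year" == 2009 and "month" == 11)
+ //   ARROW_ASSIGN_OR_RAISE(auto path, part->Format(expr));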
+
106
+ /// \brief The encoding of partition segments.
107
+ enum class SegmentEncoding : int8_t {
108
+ /// No encoding.
109
+ None = 0,
110
+ /// Segment values are URL-encoded.
111
+ Uri = 1,
112
+ };
113
+
114
+ ARROW_DS_EXPORT
115
+ std::ostream& operator<<(std::ostream& os, SegmentEncoding segment_encoding);
116
+
117
+ /// \brief Options for key-value based partitioning (hive/directory).
118
+ struct ARROW_DS_EXPORT KeyValuePartitioningOptions {
119
+ /// After splitting a path into components, decode the path components
120
+ /// before parsing according to this scheme.
121
+ SegmentEncoding segment_encoding = SegmentEncoding::Uri;
122
+ };
123
+
124
+ /// \brief Options for inferring a partitioning.
125
+ struct ARROW_DS_EXPORT PartitioningFactoryOptions {
126
+ /// When inferring a schema for partition fields, yield dictionary encoded types
127
+ /// instead of plain. This can be more efficient when materializing virtual
128
+ /// columns, and Expressions parsed by the finished Partitioning will include
129
+ /// dictionaries of all unique inspected values for each field.
130
+ bool infer_dictionary = false;
131
+ /// Optionally, an expected schema can be provided, in which case inference
132
+ /// will only check discovered fields against the schema and update internal
133
+ /// state (such as dictionaries).
134
+ std::shared_ptr<Schema> schema;
135
+ /// After splitting a path into components, decode the path components
136
+ /// before parsing according to this scheme.
137
+ SegmentEncoding segment_encoding = SegmentEncoding::Uri;
138
+
139
+ KeyValuePartitioningOptions AsPartitioningOptions() const;
140
+ };
141
+
142
+ /// \brief Options for inferring a hive-style partitioning.
143
+ struct ARROW_DS_EXPORT HivePartitioningFactoryOptions : PartitioningFactoryOptions {
144
+ /// The hive partitioning scheme maps null to a hard coded fallback string.
145
+ std::string null_fallback;
146
+
147
+ HivePartitioningOptions AsHivePartitioningOptions() const;
148
+ };
149
+
150
+ /// \brief PartitioningFactory provides creation of a partitioning when the
151
+ /// specific schema must be inferred from available paths (no explicit schema is known).
152
+ class ARROW_DS_EXPORT PartitioningFactory {
153
+ public:
154
+ virtual ~PartitioningFactory() = default;
155
+
156
+ /// \brief The name identifying the kind of partitioning
157
+ virtual std::string type_name() const = 0;
158
+
159
+ /// Get the schema for the resulting Partitioning.
160
+ /// This may reset internal state, for example dictionaries of unique representations.
161
+ virtual Result<std::shared_ptr<Schema>> Inspect(
162
+ const std::vector<std::string>& paths) = 0;
163
+
164
+ /// Create a partitioning using the provided schema
165
+ /// (fields may be dropped).
166
+ virtual Result<std::shared_ptr<Partitioning>> Finish(
167
+ const std::shared_ptr<Schema>& schema) const = 0;
168
+ };
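A hypothetical inference round-trip with this interface, assuming the DirectoryPartitioning::MakeFactory helper declared later in this header and example paths of the documented "/2009/11" form:

    #include "arrow/api.h"
    #include "arrow/dataset/partition.h"

    // Infer a partition schema from paths, then build the Partitioning.
    arrow::Result<std::shared_ptr<arrow::dataset::Partitioning>> InferExample() {
      auto factory =
          arrow::dataset::DirectoryPartitioning::MakeFactory({"year", "month"});
      ARROW_ASSIGN_OR_RAISE(auto schema, factory->Inspect({"/2009/11", "/2010/01"}));
      return factory->Finish(schema);
    }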
169
+
170
+ /// \brief Subclass for the common case of a partitioning which yields an equality
171
+ /// expression for each segment
172
+ class ARROW_DS_EXPORT KeyValuePartitioning : public Partitioning {
173
+ public:
174
+ /// An unconverted equality expression consisting of a field name and the representation
175
+ /// of a scalar value
176
+ struct Key {
177
+ std::string name;
178
+ std::optional<std::string> value;
179
+ };
180
+
181
+ Result<PartitionedBatches> Partition(
182
+ const std::shared_ptr<RecordBatch>& batch) const override;
183
+
184
+ Result<compute::Expression> Parse(const std::string& path) const override;
185
+
186
+ Result<PartitionPathFormat> Format(const compute::Expression& expr) const override;
187
+
188
+ const ArrayVector& dictionaries() const { return dictionaries_; }
189
+
190
+ SegmentEncoding segment_encoding() const { return options_.segment_encoding; }
191
+
192
+ bool Equals(const Partitioning& other) const override;
193
+
194
+ protected:
195
+ KeyValuePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries,
196
+ KeyValuePartitioningOptions options)
197
+ : Partitioning(std::move(schema)),
198
+ dictionaries_(std::move(dictionaries)),
199
+ options_(options) {
200
+ if (dictionaries_.empty()) {
201
+ dictionaries_.resize(schema_->num_fields());
202
+ }
203
+ }
204
+
205
+ virtual Result<std::vector<Key>> ParseKeys(const std::string& path) const = 0;
206
+
207
+ virtual Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const = 0;
208
+
209
+ /// Convert a Key to a full expression.
210
+ Result<compute::Expression> ConvertKey(const Key& key) const;
211
+
212
+ Result<std::vector<std::string>> FormatPartitionSegments(
213
+ const ScalarVector& values) const;
214
+ Result<std::vector<Key>> ParsePartitionSegments(
215
+ const std::vector<std::string>& segments) const;
216
+
217
+ ArrayVector dictionaries_;
218
+ KeyValuePartitioningOptions options_;
219
+ };
220
+
221
+ /// \brief DirectoryPartitioning parses one segment of a path for each field in its
222
+ /// schema. All fields are required, so paths passed to DirectoryPartitioning::Parse
223
+ /// must contain segments for each field.
224
+ ///
225
+ /// For example given schema<year:int16, month:int8> the path "/2009/11" would be
226
+ /// parsed to ("year"_ == 2009 and "month"_ == 11)
227
+ class ARROW_DS_EXPORT DirectoryPartitioning : public KeyValuePartitioning {
228
+ public:
229
+ /// If a field in schema is of dictionary type, the corresponding element of
230
+ /// dictionaries must contain the dictionary of values for that field.
231
+ explicit DirectoryPartitioning(std::shared_ptr<Schema> schema,
232
+ ArrayVector dictionaries = {},
233
+ KeyValuePartitioningOptions options = {});
234
+
235
+ std::string type_name() const override { return "directory"; }
236
+
237
+ bool Equals(const Partitioning& other) const override;
238
+
239
+ /// \brief Create a factory for a directory partitioning.
240
+ ///
241
+ /// \param[in] field_names The names for the partition fields. Types will be
242
+ /// inferred.
243
+ static std::shared_ptr<PartitioningFactory> MakeFactory(
244
+ std::vector<std::string> field_names, PartitioningFactoryOptions = {});
245
+
246
+ private:
247
+ Result<std::vector<Key>> ParseKeys(const std::string& path) const override;
248
+
249
+ Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
250
+ };
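A small sketch of the example documented in the class comment above (the schema and path come from that comment):

    #include "arrow/api.h"
    #include "arrow/dataset/partition.h"

    // Parse the documented example path "/2009/11" against schema<year, month>.
    arrow::Result<arrow::compute::Expression> ParseDirectoryExample() {
      arrow::dataset::DirectoryPartitioning partitioning(
          arrow::schema({arrow::field("year", arrow::int16()),
                         arrow::field("month", arrow::int8())}));
      // Yields ("year" == 2009 and "month" == 11) per the class comment.
      return partitioning.Parse("/2009/11");
    }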
251
+
252
+ /// \brief The default fallback used for null values in a Hive-style partitioning.
253
+ static constexpr char kDefaultHiveNullFallback[] = "__HIVE_DEFAULT_PARTITION__";
254
+
255
+ struct ARROW_DS_EXPORT HivePartitioningOptions : public KeyValuePartitioningOptions {
256
+ std::string null_fallback = kDefaultHiveNullFallback;
257
+
258
+ static HivePartitioningOptions DefaultsWithNullFallback(std::string fallback) {
259
+ HivePartitioningOptions options;
260
+ options.null_fallback = std::move(fallback);
261
+ return options;
262
+ }
263
+ };
264
+
265
+ /// \brief Multi-level, directory based partitioning
266
+ /// originating from Apache Hive with all data files stored in the
267
+ /// leaf directories. Data is partitioned by static values of a
268
+ /// particular column in the schema. Partition keys are represented in
269
+ /// the form $key=$value in directory names.
270
+ /// Field order is ignored, as are missing or unrecognized field names.
271
+ ///
272
+ /// For example given schema<year:int16, month:int8, day:int8> the path
273
+ /// "/day=321/ignored=3.4/year=2009" parses to ("year"_ == 2009 and "day"_ == 321)
274
+ class ARROW_DS_EXPORT HivePartitioning : public KeyValuePartitioning {
275
+ public:
276
+ /// If a field in schema is of dictionary type, the corresponding element of
277
+ /// dictionaries must contain the dictionary of values for that field.
278
+ explicit HivePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries = {},
279
+ std::string null_fallback = kDefaultHiveNullFallback)
280
+ : KeyValuePartitioning(std::move(schema), std::move(dictionaries),
281
+ KeyValuePartitioningOptions()),
282
+ hive_options_(
283
+ HivePartitioningOptions::DefaultsWithNullFallback(std::move(null_fallback))) {
284
+ }
285
+
286
+ explicit HivePartitioning(std::shared_ptr<Schema> schema, ArrayVector dictionaries,
287
+ HivePartitioningOptions options)
288
+ : KeyValuePartitioning(std::move(schema), std::move(dictionaries), options),
289
+ hive_options_(options) {}
290
+
291
+ std::string type_name() const override { return "hive"; }
292
+ std::string null_fallback() const { return hive_options_.null_fallback; }
293
+ const HivePartitioningOptions& options() const { return hive_options_; }
294
+
295
+ static Result<std::optional<Key>> ParseKey(const std::string& segment,
296
+ const HivePartitioningOptions& options);
297
+
298
+ bool Equals(const Partitioning& other) const override;
299
+
300
+ /// \brief Create a factory for a hive partitioning.
301
+ static std::shared_ptr<PartitioningFactory> MakeFactory(
302
+ HivePartitioningFactoryOptions = {});
303
+
304
+ private:
305
+ const HivePartitioningOptions hive_options_;
306
+ Result<std::vector<Key>> ParseKeys(const std::string& path) const override;
307
+
308
+ Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
309
+ };
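The documented hive-style example as a hedged sketch (schema and path taken from the class comment above):

    #include "arrow/api.h"
    #include "arrow/dataset/partition.h"

    // Parse a hive-style path; field order and unrecognized keys are ignored.
    arrow::Result<arrow::compute::Expression> ParseHiveExample() {
      arrow::dataset::HivePartitioning partitioning(
          arrow::schema({arrow::field("year", arrow::int16()),
                         arrow::field("month", arrow::int8()),
                         arrow::field("day", arrow::int8())}));
      // Yields ("year" == 2009 and "day" == 321); "ignored=3.4" is dropped.
      return partitioning.Parse("/day=321/ignored=3.4/year=2009");
    }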
310
+
311
+ /// \brief Implementation provided by lambda or other callable
312
+ class ARROW_DS_EXPORT FunctionPartitioning : public Partitioning {
313
+ public:
314
+ using ParseImpl = std::function<Result<compute::Expression>(const std::string&)>;
315
+
316
+ using FormatImpl =
317
+ std::function<Result<PartitionPathFormat>(const compute::Expression&)>;
318
+
319
+ FunctionPartitioning(std::shared_ptr<Schema> schema, ParseImpl parse_impl,
320
+ FormatImpl format_impl = NULLPTR, std::string name = "function")
321
+ : Partitioning(std::move(schema)),
322
+ parse_impl_(std::move(parse_impl)),
323
+ format_impl_(std::move(format_impl)),
324
+ name_(std::move(name)) {}
325
+
326
+ std::string type_name() const override { return name_; }
327
+
328
+ bool Equals(const Partitioning& other) const override { return false; }
329
+
330
+ Result<compute::Expression> Parse(const std::string& path) const override {
331
+ return parse_impl_(path);
332
+ }
333
+
334
+ Result<PartitionPathFormat> Format(const compute::Expression& expr) const override {
335
+ if (format_impl_) {
336
+ return format_impl_(expr);
337
+ }
338
+ return Status::NotImplemented("formatting paths from ", type_name(), " Partitioning");
339
+ }
340
+
341
+ Result<PartitionedBatches> Partition(
342
+ const std::shared_ptr<RecordBatch>& batch) const override {
343
+ return Status::NotImplemented("partitioning batches from ", type_name(),
344
+ " Partitioning");
345
+ }
346
+
347
+ private:
348
+ ParseImpl parse_impl_;
349
+ FormatImpl format_impl_;
350
+ std::string name_;
351
+ };
352
+
353
+ class ARROW_DS_EXPORT FilenamePartitioning : public KeyValuePartitioning {
354
+ public:
355
+ /// \brief Construct a FilenamePartitioning from its components.
356
+ ///
357
+ /// If a field in schema is of dictionary type, the corresponding element of
358
+ /// dictionaries must contain the dictionary of values for that field.
359
+ explicit FilenamePartitioning(std::shared_ptr<Schema> schema,
360
+ ArrayVector dictionaries = {},
361
+ KeyValuePartitioningOptions options = {});
362
+
363
+ std::string type_name() const override { return "filename"; }
364
+
365
+ /// \brief Create a factory for a filename partitioning.
366
+ ///
367
+ /// \param[in] field_names The names for the partition fields. Types will be
368
+ /// inferred.
369
+ static std::shared_ptr<PartitioningFactory> MakeFactory(
370
+ std::vector<std::string> field_names, PartitioningFactoryOptions = {});
371
+
372
+ bool Equals(const Partitioning& other) const override;
373
+
374
+ private:
375
+ Result<std::vector<Key>> ParseKeys(const std::string& path) const override;
376
+
377
+ Result<PartitionPathFormat> FormatValues(const ScalarVector& values) const override;
378
+ };
379
+
380
+ ARROW_DS_EXPORT std::string StripPrefix(const std::string& path,
381
+ const std::string& prefix);
382
+
383
+ /// \brief Strips the prefix and the filename from a path, returning the directory portion in between
384
+ ///
385
+ /// e.g., `StripPrefixAndFilename("/data/year=2019/c.txt", "/data") ->
386
+ /// {"year=2019","c.txt"}`
387
+ ARROW_DS_EXPORT std::string StripPrefixAndFilename(const std::string& path,
388
+ const std::string& prefix);
389
+
390
+ /// \brief Vector version of StripPrefixAndFilename.
391
+ ARROW_DS_EXPORT std::vector<std::string> StripPrefixAndFilename(
392
+ const std::vector<std::string>& paths, const std::string& prefix);
393
+
394
+ /// \brief Vector version of StripPrefixAndFilename.
395
+ ARROW_DS_EXPORT std::vector<std::string> StripPrefixAndFilename(
396
+ const std::vector<fs::FileInfo>& files, const std::string& prefix);
397
+
398
+ /// \brief Either a Partitioning or a PartitioningFactory
399
+ class ARROW_DS_EXPORT PartitioningOrFactory {
400
+ public:
401
+ explicit PartitioningOrFactory(std::shared_ptr<Partitioning> partitioning)
402
+ : partitioning_(std::move(partitioning)) {}
403
+
404
+ explicit PartitioningOrFactory(std::shared_ptr<PartitioningFactory> factory)
405
+ : factory_(std::move(factory)) {}
406
+
407
+ PartitioningOrFactory& operator=(std::shared_ptr<Partitioning> partitioning) {
408
+ return *this = PartitioningOrFactory(std::move(partitioning));
409
+ }
410
+
411
+ PartitioningOrFactory& operator=(std::shared_ptr<PartitioningFactory> factory) {
412
+ return *this = PartitioningOrFactory(std::move(factory));
413
+ }
414
+
415
+ /// \brief The partitioning (if given).
416
+ const std::shared_ptr<Partitioning>& partitioning() const { return partitioning_; }
417
+
418
+ /// \brief The partition factory (if given).
419
+ const std::shared_ptr<PartitioningFactory>& factory() const { return factory_; }
420
+
421
+ /// \brief Get the partition schema, inferring it with the given factory if needed.
422
+ Result<std::shared_ptr<Schema>> GetOrInferSchema(const std::vector<std::string>& paths);
423
+
424
+ private:
425
+ std::shared_ptr<PartitioningFactory> factory_;
426
+ std::shared_ptr<Partitioning> partitioning_;
427
+ };
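A sketch of holding a factory and letting GetOrInferSchema run inference; the field name and example paths are assumptions:

    #include "arrow/api.h"
    #include "arrow/dataset/partition.h"

    // Wrap a factory; GetOrInferSchema will infer from the given paths.
    arrow::Result<std::shared_ptr<arrow::Schema>> OrFactoryExample() {
      arrow::dataset::PartitioningOrFactory por(
          arrow::dataset::DirectoryPartitioning::MakeFactory({"year"}));
      return por.GetOrInferSchema({"/2009", "/2010"});
    }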
428
+
429
+ /// @}
430
+
431
+ } // namespace dataset
432
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/pch.h ADDED
@@ -0,0 +1,27 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Often-used headers, for precompiling.
19
+ // If updating this header, please make sure you check compilation speed
20
+ // before checking in. Adding headers which are not used extremely often
21
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
22
+
23
+ // This API is EXPERIMENTAL.
24
+
25
+ #include "arrow/dataset/dataset.h"
26
+ #include "arrow/dataset/scanner.h"
27
+ #include "arrow/pch.h"
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/plan.h ADDED
@@ -0,0 +1,33 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #include "arrow/dataset/visibility.h"
21
+
22
+ namespace arrow {
23
+ namespace dataset {
24
+ namespace internal {
25
+
26
+ /// Register dataset-based exec nodes with the exec node registry
27
+ ///
28
+ /// This function must be called before using dataset ExecNode factories
29
+ ARROW_DS_EXPORT void Initialize();
30
+
31
+ } // namespace internal
32
+ } // namespace dataset
33
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/projector.h ADDED
@@ -0,0 +1,32 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include "arrow/dataset/visibility.h"
23
+ #include "arrow/type_fwd.h"
24
+
25
+ namespace arrow {
26
+ namespace dataset {
27
+
28
+ // FIXME this is superseded by compute::Expression::Bind
29
+ ARROW_DS_EXPORT Status CheckProjectable(const Schema& from, const Schema& to);
30
+
31
+ } // namespace dataset
32
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/scanner.h ADDED
@@ -0,0 +1,578 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+ #include <vector>
27
+
28
+ #include "arrow/acero/options.h"
29
+ #include "arrow/compute/expression.h"
30
+ #include "arrow/compute/type_fwd.h"
31
+ #include "arrow/dataset/dataset.h"
32
+ #include "arrow/dataset/projector.h"
33
+ #include "arrow/dataset/type_fwd.h"
34
+ #include "arrow/dataset/visibility.h"
35
+ #include "arrow/io/interfaces.h"
36
+ #include "arrow/memory_pool.h"
37
+ #include "arrow/type_fwd.h"
38
+ #include "arrow/util/async_generator.h"
39
+ #include "arrow/util/iterator.h"
40
+ #include "arrow/util/thread_pool.h"
41
+ #include "arrow/util/type_fwd.h"
42
+
43
+ namespace arrow {
44
+
45
+ using RecordBatchGenerator = std::function<Future<std::shared_ptr<RecordBatch>>()>;
46
+
47
+ namespace dataset {
48
+
49
+ /// \defgroup dataset-scanning Scanning API
50
+ ///
51
+ /// @{
52
+
53
+ constexpr int64_t kDefaultBatchSize = 1 << 17; // 128Ki rows
54
+ // With the default batch and fragment readahead below (16 * 4), this will yield 64 batches ~ 8Mi rows
55
+ constexpr int32_t kDefaultBatchReadahead = 16;
56
+ constexpr int32_t kDefaultFragmentReadahead = 4;
57
+ constexpr int32_t kDefaultBytesReadahead = 1 << 25; // 32MiB
58
+
59
+ /// Scan-specific options, which can be changed between scans of the same dataset.
60
+ struct ARROW_DS_EXPORT ScanOptions {
61
+ /// A row filter (which will be pushed down to partitioning/reading if supported).
62
+ compute::Expression filter = compute::literal(true);
63
+ /// A projection expression (which can add/remove/rename columns).
64
+ compute::Expression projection;
65
+
66
+ /// Schema with which batches will be read from fragments. This is also known as the
67
+ /// "reader schema" it will be used (for example) in constructing CSV file readers to
68
+ /// identify column types for parsing. Usually only a subset of its fields (see
69
+ /// MaterializedFields) will be materialized during a scan.
70
+ std::shared_ptr<Schema> dataset_schema;
71
+
72
+ /// Schema of projected record batches. This is independent of dataset_schema as its
73
+ /// fields are derived from the projection. For example, let
74
+ ///
75
+ /// dataset_schema = {"a": int32, "b": int32, "id": utf8}
76
+ /// projection = project({equal(field_ref("a"), field_ref("b"))}, {"a_plus_b"})
77
+ ///
78
+ /// (no filter specified). In this case, the projected_schema would be
79
+ ///
80
+ /// {"a_plus_b": int32}
81
+ std::shared_ptr<Schema> projected_schema;
82
+
83
+ /// Maximum row count for scanned batches.
84
+ int64_t batch_size = kDefaultBatchSize;
85
+
86
+ /// How many batches to read ahead within a fragment.
87
+ ///
88
+ /// Set to 0 to disable batch readahead
89
+ ///
90
+ /// Note: May not be supported by all formats
91
+ /// Note: Will be ignored if use_threads is set to false
92
+ int32_t batch_readahead = kDefaultBatchReadahead;
93
+
94
+ /// How many files to read ahead
95
+ ///
96
+ /// Set to 0 to disable fragment readahead
97
+ ///
98
+ /// Note: May not be enforced by all scanners
99
+ /// Note: Will be ignored if use_threads is set to false
100
+ int32_t fragment_readahead = kDefaultFragmentReadahead;
101
+
102
+ /// A pool from which materialized and scanned arrays will be allocated.
103
+ MemoryPool* pool = arrow::default_memory_pool();
104
+
105
+ /// IOContext for any IO tasks
106
+ ///
107
+ /// Note: The IOContext executor will be ignored if use_threads is set to false
108
+ io::IOContext io_context;
109
+
110
+ /// If true the scanner will scan in parallel
111
+ ///
112
+ /// Note: If true, this will use threads from both the cpu_executor and the
113
+ /// io_context.executor
114
+ /// Note: This must be true in order for any readahead to happen
115
+ bool use_threads = false;
116
+
117
+ /// Fragment-specific scan options.
118
+ std::shared_ptr<FragmentScanOptions> fragment_scan_options;
119
+
120
+ /// Return a vector of FieldRefs that require materialization.
121
+ ///
122
+ /// This is usually the union of the fields referenced in the projection and the
123
+ /// filter expression. Examples:
124
+ ///
125
+ /// - `SELECT a, b WHERE a < 2 && c > 1` => ["a", "b", "a", "c"]
126
+ /// - `SELECT a + b < 3 WHERE a > 1` => ["a", "b", "a"]
127
+ ///
128
+ /// This is needed for expression where a field may not be directly
129
+ /// used in the final projection but is still required to evaluate the
130
+ /// expression.
131
+ ///
132
+ /// This is used by Fragment implementations to apply the column
133
+ /// sub-selection optimization.
134
+ std::vector<FieldRef> MaterializedFields() const;
135
+
136
+ /// Parameters which control when the plan should pause for a slow consumer
137
+ acero::BackpressureOptions backpressure =
138
+ acero::BackpressureOptions::DefaultBackpressure();
139
+ };
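A minimal configuration sketch for this struct; the column name "a" and the chosen batch size are assumptions:

    #include "arrow/api.h"
    #include "arrow/compute/expression.h"
    #include "arrow/dataset/scanner.h"

    // Configure a scan: push-down filter, smaller batches, parallel execution.
    std::shared_ptr<arrow::dataset::ScanOptions> MakeScanOptions() {
      auto options = std::make_shared<arrow::dataset::ScanOptions>();
      options->filter = arrow::compute::greater(arrow::compute::field_ref("a"),
                                                arrow::compute::literal(1));
      options->batch_size = 1 << 15;  // 32Ki rows per batch
      options->use_threads = true;    // required for any readahead to apply
      return options;
    }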
140
+
141
+ /// Scan-specific options, which can be changed between scans of the same dataset.
142
+ ///
143
+ /// A dataset consists of one or more individual fragments. A fragment is anything
144
+ /// that is independently scannable, often a file.
145
+ ///
146
+ /// Batches from all fragments will be converted to a single schema. This unified
147
+ /// schema is referred to as the "dataset schema" and is the output schema for
148
+ /// this node.
149
+ ///
150
+ /// Individual fragments may have schemas that are different from the dataset
151
+ /// schema. This is sometimes referred to as the physical or fragment schema.
152
+ /// Conversion from the fragment schema to the dataset schema is a process
153
+ /// known as evolution.
154
+ struct ARROW_DS_EXPORT ScanV2Options : public acero::ExecNodeOptions {
155
+ explicit ScanV2Options(std::shared_ptr<Dataset> dataset)
156
+ : dataset(std::move(dataset)) {}
157
+
158
+ /// \brief The dataset to scan
159
+ std::shared_ptr<Dataset> dataset;
160
+ /// \brief A row filter
161
+ ///
162
+ /// The filter expression should be written against the dataset schema.
163
+ /// The filter must be unbound.
164
+ ///
165
+ /// This is an opportunistic pushdown filter. Filtering capabilities will
166
+ /// vary between formats. If a format is not capable of applying the filter
167
+ /// then it will ignore it.
168
+ ///
169
+ /// Each fragment will do its best to filter the data based on the information
170
+ /// (partitioning guarantees, statistics) available to it. If it is able to
171
+ /// apply some filtering then it will indicate what filtering it was able to
172
+ /// apply by attaching a guarantee to the batch.
173
+ ///
174
+ /// For example, if a filter is x < 50 && y > 40 then a batch may be able to
175
+ /// apply a guarantee x < 50. Post-scan filtering would then only need to
176
+ /// consider y > 40 (for this specific batch). The next batch may not be able
177
+ /// to attach any guarantee and both clauses would need to be applied to that batch.
178
+ ///
179
+ /// A single guarantee-aware filtering operation should generally be applied to all
180
+ /// resulting batches. The scan node is not responsible for this.
181
+ ///
182
+ /// Fields that are referenced by the filter should be included in the `columns` vector.
183
+ /// The scan node will not automatically fetch fields referenced by the filter
184
+ /// expression. \see AddFieldsNeededForFilter
185
+ ///
186
+ /// If the filter references fields that are not included in `columns` this may or may
187
+ /// not be an error, depending on the format.
188
+ compute::Expression filter = compute::literal(true);
189
+
190
+ /// \brief The columns to scan
191
+ ///
192
+ /// This is not a simple list of top-level column indices but instead a set of paths
193
+ /// allowing for partial selection of columns
194
+ ///
195
+ /// These paths refer to the dataset schema
196
+ ///
197
+ /// For example, consider the following dataset schema:
198
+ /// schema({
199
+ /// field("score", int32()),
200
+ /// "marker", struct_({
201
+ /// field("color", utf8()),
202
+ /// field("location", struct_({
203
+ /// field("x", float64()),
204
+ /// field("y", float64())
205
+ /// }))
206
+ /// }))
207
+ /// })
208
+ ///
209
+ /// If `columns` is {{0}, {1,1,0}} then the output schema is:
210
+ /// schema({field("score", int32()), field("x", float64())})
211
+ ///
212
+ /// If `columns` is {{1,1,1}, {1,1}} then the output schema is:
213
+ /// schema({
214
+ /// field("y", float64()),
215
+ /// field("location", struct_({
216
+ /// field("x", float64()),
217
+ /// field("y", float64())
218
+ /// }))
219
+ /// })
220
+ std::vector<FieldPath> columns;
221
+
222
+ /// \brief Target number of bytes to read ahead in a fragment
223
+ ///
224
+ /// This limit involves some amount of estimation. Formats typically only know
225
+ /// batch boundaries in terms of rows (not decoded bytes) and so an estimation
226
+ /// must be done to guess the average row size. Other formats like CSV and JSON
227
+ /// must make even more generalized guesses.
228
+ ///
229
+ /// This is a best-effort guide. Some formats may need to read ahead further,
230
+ /// for example, if scanning a parquet file that has batches with 100MiB of data
231
+ /// then the actual readahead will be at least 100MiB
232
+ ///
233
+ /// Set to 0 to disable readahead. When disabled, the scanner will read the
234
+ /// dataset one batch at a time
235
+ ///
236
+ /// This limit applies across all fragments. If the limit is 32MiB and the
237
+ /// fragment readahead allows for 20 fragments to be read at once then the
238
+ /// total readahead will still be 32MiB and NOT 20 * 32MiB.
239
+ int32_t target_bytes_readahead = kDefaultBytesReadahead;
240
+
241
+ /// \brief Number of fragments to read ahead
242
+ ///
243
+ /// Higher readahead will potentially lead to more efficient I/O but will lead
244
+ /// to the scan operation using more RAM. The default is fairly conservative
245
+ /// and designed for fast local disks (or slow local spinning disks which cannot
246
+ /// handle much parallelism anyways). When using a highly parallel remote filesystem
247
+ /// you will likely want to increase these values.
248
+ ///
249
+ /// Set to 0 to disable fragment readahead. When disabled the dataset will be scanned
250
+ /// one fragment at a time.
251
+ int32_t fragment_readahead = kDefaultFragmentReadahead;
252
+ /// \brief Options specific to the file format
253
+ const FragmentScanOptions* format_options = NULLPTR;
254
+
255
+ /// \brief Utility method to get a selection representing all columns in a dataset
256
+ static std::vector<FieldPath> AllColumns(const Schema& dataset_schema);
257
+
258
+ /// \brief Utility method to add fields needed for the current filter
259
+ ///
260
+ /// This method adds any fields that are needed by `filter` which are not already
261
+ /// included in the list of columns. Any new fields added will be added to the end
262
+ /// in no particular order.
263
+ static Status AddFieldsNeededForFilter(ScanV2Options* options);
264
+ };
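A sketch tying these members together; the column name "x" and the filter bound are assumptions:

    #include "arrow/compute/expression.h"
    #include "arrow/dataset/dataset.h"
    #include "arrow/dataset/scanner.h"

    // Select all columns, set an unbound filter, then pull in any filter
    // fields that the column selection missed.
    arrow::Status ConfigureScanV2(std::shared_ptr<arrow::dataset::Dataset> dataset) {
      arrow::dataset::ScanV2Options options(dataset);
      options.columns = arrow::dataset::ScanV2Options::AllColumns(*dataset->schema());
      options.filter = arrow::compute::less(arrow::compute::field_ref("x"),
                                            arrow::compute::literal(50));
      return arrow::dataset::ScanV2Options::AddFieldsNeededForFilter(&options);
    }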
265
+
266
+ /// \brief Describes a projection
267
+ struct ARROW_DS_EXPORT ProjectionDescr {
268
+ /// \brief The projection expression itself
269
+ /// This expression must be a call to make_struct
270
+ compute::Expression expression;
271
+ ///
272
+
273
+ /// This can be calculated from the input schema and the expression but it
274
+ /// is cached here for convenience.
275
+ std::shared_ptr<Schema> schema;
276
+
277
+ /// \brief Create a ProjectionDescr by binding an expression to the dataset schema
278
+ ///
279
+ /// expression must return a struct type
280
+ static Result<ProjectionDescr> FromStructExpression(
281
+ const compute::Expression& expression, const Schema& dataset_schema);
282
+
283
+ /// \brief Create a ProjectionDescr from expressions/names for each field
284
+ static Result<ProjectionDescr> FromExpressions(std::vector<compute::Expression> exprs,
285
+ std::vector<std::string> names,
286
+ const Schema& dataset_schema);
287
+
288
+ /// \brief Create a default projection referencing fields in the dataset schema
289
+ static Result<ProjectionDescr> FromNames(std::vector<std::string> names,
290
+ const Schema& dataset_schema);
291
+
292
+ /// \brief Make a projection that projects every field in the dataset schema
293
+ static Result<ProjectionDescr> Default(const Schema& dataset_schema);
294
+ };
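A sketch using FromNames together with the SetProjection helper declared just below; "a" and "b" are assumed field names:

    #include "arrow/api.h"
    #include "arrow/dataset/scanner.h"

    // Build a default projection over named fields and install it on the options.
    arrow::Status ApplyProjection(const std::shared_ptr<arrow::Schema>& dataset_schema,
                                  arrow::dataset::ScanOptions* options) {
      ARROW_ASSIGN_OR_RAISE(
          auto projection,
          arrow::dataset::ProjectionDescr::FromNames({"a", "b"}, *dataset_schema));
      arrow::dataset::SetProjection(options, std::move(projection));
      return arrow::Status::OK();
    }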
295
+
296
+ /// \brief Utility method to set the projection expression and schema
297
+ ARROW_DS_EXPORT void SetProjection(ScanOptions* options, ProjectionDescr projection);
298
+
299
+ /// \brief Combines a record batch with the fragment that the record batch originated
300
+ /// from
301
+ ///
302
+ /// Knowing the source fragment can be useful for debugging & understanding loaded
303
+ /// data
304
+ struct TaggedRecordBatch {
305
+ std::shared_ptr<RecordBatch> record_batch;
306
+ std::shared_ptr<Fragment> fragment;
307
+ };
308
+ using TaggedRecordBatchGenerator = std::function<Future<TaggedRecordBatch>()>;
309
+ using TaggedRecordBatchIterator = Iterator<TaggedRecordBatch>;
310
+
311
+ /// \brief Combines a tagged batch with positional information
312
+ ///
313
+ /// This is returned when scanning batches in an unordered fashion. This information is
314
+ /// needed if you ever want to reassemble the batches in order
315
+ struct EnumeratedRecordBatch {
316
+ Enumerated<std::shared_ptr<RecordBatch>> record_batch;
317
+ Enumerated<std::shared_ptr<Fragment>> fragment;
318
+ };
319
+ using EnumeratedRecordBatchGenerator = std::function<Future<EnumeratedRecordBatch>()>;
320
+ using EnumeratedRecordBatchIterator = Iterator<EnumeratedRecordBatch>;
321
+
322
+ /// @}
323
+
324
+ } // namespace dataset
325
+
326
+ template <>
327
+ struct IterationTraits<dataset::TaggedRecordBatch> {
328
+ static dataset::TaggedRecordBatch End() {
329
+ return dataset::TaggedRecordBatch{NULLPTR, NULLPTR};
330
+ }
331
+ static bool IsEnd(const dataset::TaggedRecordBatch& val) {
332
+ return val.record_batch == NULLPTR;
333
+ }
334
+ };
335
+
336
+ template <>
337
+ struct IterationTraits<dataset::EnumeratedRecordBatch> {
338
+ static dataset::EnumeratedRecordBatch End() {
339
+ return dataset::EnumeratedRecordBatch{
340
+ IterationEnd<Enumerated<std::shared_ptr<RecordBatch>>>(),
341
+ IterationEnd<Enumerated<std::shared_ptr<dataset::Fragment>>>()};
342
+ }
343
+ static bool IsEnd(const dataset::EnumeratedRecordBatch& val) {
344
+ return IsIterationEnd(val.fragment);
345
+ }
346
+ };
347
+
348
+ namespace dataset {
349
+
350
+ /// \addtogroup dataset-scanning
351
+ ///
352
+ /// @{
353
+
354
+ /// \brief A scanner glues together several dataset classes to load in data.
355
+ /// The dataset contains a collection of fragments and partitioning rules.
356
+ ///
357
+ /// The fragments identify independently loadable units of data (i.e. each fragment has
358
+ /// a potentially unique schema and possibly even format. It should be possible to read
359
+ /// fragments in parallel if desired).
360
+ ///
361
+ /// The fragment's format contains the logic necessary to actually create a task to load
362
+ /// the fragment into memory. That task may or may not support parallel execution of
363
+ /// its own.
364
+ ///
365
+ /// The scanner is then responsible for creating scan tasks from every fragment in the
366
+ /// dataset and (potentially) sequencing the loaded record batches together.
367
+ ///
368
+ /// The scanner should not buffer the entire dataset in memory (unless asked) instead
369
+ /// yielding record batches as soon as they are ready to scan. Various readahead
370
+ /// properties control how much data is allowed to be scanned before pausing to let a
371
+ /// slow consumer catch up.
372
+ ///
373
+ /// Today the scanner also handles projection & filtering although that may change in
374
+ /// the future.
375
+ class ARROW_DS_EXPORT Scanner {
376
+ public:
377
+ virtual ~Scanner() = default;
378
+
379
+ /// \brief Apply a visitor to each RecordBatch as it is scanned. If multiple threads
380
+ /// are used (via use_threads), the visitor will be invoked from those threads and is
381
+ /// responsible for any synchronization.
382
+ virtual Status Scan(std::function<Status(TaggedRecordBatch)> visitor) = 0;
383
+ /// \brief Convert a Scanner into a Table.
384
+ ///
385
+ /// Use this convenience utility with care. This will serially materialize the
386
+ /// Scan result in memory before creating the Table.
387
+ virtual Result<std::shared_ptr<Table>> ToTable() = 0;
388
+ /// \brief Scan the dataset into a stream of record batches. Each batch is tagged
389
+ /// with the fragment it originated from. The batches will arrive in order. The
390
+ /// order of fragments is determined by the dataset.
391
+ ///
392
+ /// Note: The scanner will perform some readahead but will avoid materializing too
393
+ /// much in memory (this is governed by the readahead options and use_threads option).
394
+ /// If the readahead queue fills up then I/O will pause until the calling thread catches
395
+ /// up.
396
+ virtual Result<TaggedRecordBatchIterator> ScanBatches() = 0;
397
+ virtual Result<TaggedRecordBatchGenerator> ScanBatchesAsync() = 0;
398
+ virtual Result<TaggedRecordBatchGenerator> ScanBatchesAsync(
399
+ ::arrow::internal::Executor* cpu_thread_pool) = 0;
400
+ /// \brief Scan the dataset into a stream of record batches. Unlike ScanBatches this
401
+ /// method may allow record batches to be returned out of order. This allows for more
402
+ /// efficient scanning: some fragments may be accessed more quickly than others (e.g.
403
+ /// may be cached in RAM or just happen to get scheduled earlier by the I/O)
404
+ ///
405
+ /// To make up for the out-of-order iteration each batch is further tagged with
406
+ /// positional information.
407
+ virtual Result<EnumeratedRecordBatchIterator> ScanBatchesUnordered() = 0;
408
+ virtual Result<EnumeratedRecordBatchGenerator> ScanBatchesUnorderedAsync() = 0;
409
+ virtual Result<EnumeratedRecordBatchGenerator> ScanBatchesUnorderedAsync(
410
+ ::arrow::internal::Executor* cpu_thread_pool) = 0;
411
+ /// \brief A convenience to synchronously load the given rows by index.
412
+ ///
413
+ /// Will only consume as many batches as needed from ScanBatches().
414
+ virtual Result<std::shared_ptr<Table>> TakeRows(const Array& indices) = 0;
415
+ /// \brief Get the first N rows.
416
+ virtual Result<std::shared_ptr<Table>> Head(int64_t num_rows) = 0;
417
+ /// \brief Count rows matching a predicate.
418
+ ///
419
+ /// This method will push down the predicate and compute the result based on fragment
420
+ /// metadata if possible.
421
+ virtual Result<int64_t> CountRows() = 0;
422
+ virtual Future<int64_t> CountRowsAsync() = 0;
423
+ /// \brief Convert the Scanner to a RecordBatchReader so it can be
424
+ /// easily used with APIs that expect a reader.
425
+ virtual Result<std::shared_ptr<RecordBatchReader>> ToRecordBatchReader() = 0;
426
+
427
+ /// \brief Get the options for this scan.
428
+ const std::shared_ptr<ScanOptions>& options() const { return scan_options_; }
429
+ /// \brief Get the dataset that this scanner will scan
430
+ virtual const std::shared_ptr<Dataset>& dataset() const = 0;
431
+
432
+ protected:
433
+ explicit Scanner(std::shared_ptr<ScanOptions> scan_options)
434
+ : scan_options_(std::move(scan_options)) {}
435
+
436
+ Result<EnumeratedRecordBatchIterator> AddPositioningToInOrderScan(
437
+ TaggedRecordBatchIterator scan);
438
+
439
+ const std::shared_ptr<ScanOptions> scan_options_;
440
+ };
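A sketch of the visitor form of scanning, assuming a scanner obtained from the ScannerBuilder declared below:

    #include "arrow/dataset/scanner.h"

    // Visit every batch as it is produced; with use_threads the visitor may be
    // invoked from scanner threads (see the Scan() doc comment above).
    arrow::Status VisitBatches(const std::shared_ptr<arrow::dataset::Scanner>& scanner) {
      return scanner->Scan([](arrow::dataset::TaggedRecordBatch tagged) {
        // tagged.record_batch holds the data, tagged.fragment its source.
        return arrow::Status::OK();
      });
    }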
441
+
442
+ /// \brief ScannerBuilder is a factory class to construct a Scanner. It is used
443
+ /// to pass information, notably a potential filter expression and a subset of
444
+ /// columns to materialize.
445
+ class ARROW_DS_EXPORT ScannerBuilder {
446
+ public:
447
+ explicit ScannerBuilder(std::shared_ptr<Dataset> dataset);
448
+
449
+ ScannerBuilder(std::shared_ptr<Dataset> dataset,
450
+ std::shared_ptr<ScanOptions> scan_options);
451
+
452
+ ScannerBuilder(std::shared_ptr<Schema> schema, std::shared_ptr<Fragment> fragment,
453
+ std::shared_ptr<ScanOptions> scan_options);
454
+
455
+ /// \brief Make a scanner from a record batch reader.
456
+ ///
457
+ /// The resulting scanner can be scanned only once. This is intended
458
+ /// to support writing data from streaming sources or other sources
459
+ /// that can be iterated only once.
460
+ static std::shared_ptr<ScannerBuilder> FromRecordBatchReader(
461
+ std::shared_ptr<RecordBatchReader> reader);
462
+
463
+ /// \brief Set the subset of columns to materialize.
464
+ ///
465
+ /// Columns which are not referenced may not be read from fragments.
466
+ ///
467
+ /// \param[in] columns list of columns to project. Order and duplicates will
468
+ /// be preserved.
469
+ ///
470
+ /// \return Failure if any column name does not exist in the dataset's
471
+ /// Schema.
472
+ Status Project(std::vector<std::string> columns);
473
+
474
+ /// \brief Set expressions which will be evaluated to produce the materialized
475
+ /// columns.
476
+ ///
477
+ /// Columns which are not referenced may not be read from fragments.
478
+ ///
479
+ /// \param[in] exprs expressions to evaluate to produce columns.
480
+ /// \param[in] names list of names for the resulting columns.
481
+ ///
482
+ /// \return Failure if any referenced column does not exist in the dataset's
483
+ /// Schema.
484
+ Status Project(std::vector<compute::Expression> exprs, std::vector<std::string> names);
485
+
486
+ /// \brief Set the filter expression to return only rows matching the filter.
487
+ ///
488
+ /// The predicate will be passed down to Sources and corresponding
489
+ /// Fragments to exploit predicate pushdown if possible using
490
+ /// partition information or Fragment internal metadata, e.g. Parquet statistics.
491
+ /// Columns which are not referenced may not be read from fragments.
492
+ ///
493
+ /// \param[in] filter expression to filter rows with.
494
+ ///
495
+ /// \return Failure if any referenced column does not exist in the dataset's
496
+ /// Schema.
497
+ Status Filter(const compute::Expression& filter);
498
+
499
+ /// \brief Indicate if the Scanner should make use of the available
500
+ /// ThreadPool found in ScanOptions.
501
+ Status UseThreads(bool use_threads = true);
502
+
503
+ /// \brief Set the maximum number of rows per RecordBatch.
504
+ ///
505
+ /// \param[in] batch_size the maximum number of rows.
506
+ /// \returns An error if the batch size is not greater than 0.
507
+ ///
508
+ /// This option provides a control limiting the memory owned by any RecordBatch.
509
+ Status BatchSize(int64_t batch_size);
510
+
511
+ /// \brief Set the number of batches to read ahead within a fragment.
512
+ ///
513
+ /// \param[in] batch_readahead How many batches to read ahead within a fragment
514
+ /// \returns an error if this number is less than 0.
515
+ ///
516
+ /// This option provides a control on the RAM vs I/O tradeoff.
517
+ /// It might not be supported by all file formats, in which case it will
518
+ /// simply be ignored.
519
+ Status BatchReadahead(int32_t batch_readahead);
520
+
521
+ /// \brief Set the number of fragments to read ahead
522
+ ///
523
+ /// \param[in] fragment_readahead How many fragments to read ahead
524
+ /// \returns an error if this number is less than 0.
525
+ ///
526
+ /// This option provides a control on the RAM vs I/O tradeoff.
527
+ Status FragmentReadahead(int32_t fragment_readahead);
528
+
529
+ /// \brief Set the pool from which materialized and scanned arrays will be allocated.
530
+ Status Pool(MemoryPool* pool);
531
+
532
+ /// \brief Set fragment-specific scan options.
533
+ Status FragmentScanOptions(std::shared_ptr<FragmentScanOptions> fragment_scan_options);
534
+
535
+ /// \brief Override default backpressure configuration
536
+ Status Backpressure(acero::BackpressureOptions backpressure);
537
+
538
+ /// \brief Return the current scan options for the builder.
539
+ Result<std::shared_ptr<ScanOptions>> GetScanOptions();
540
+
541
+ /// \brief Return the constructed now-immutable Scanner object
542
+ Result<std::shared_ptr<Scanner>> Finish();
543
+
544
+ const std::shared_ptr<Schema>& schema() const;
545
+ const std::shared_ptr<Schema>& projected_schema() const;
546
+
547
+ private:
548
+ std::shared_ptr<Dataset> dataset_;
549
+ std::shared_ptr<ScanOptions> scan_options_ = std::make_shared<ScanOptions>();
550
+ };
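An end-to-end sketch of the builder; the column names are assumptions:

    #include "arrow/api.h"
    #include "arrow/compute/expression.h"
    #include "arrow/dataset/scanner.h"

    // Project two columns, filter, scan in parallel, and materialize a Table.
    arrow::Result<std::shared_ptr<arrow::Table>> ScanToTable(
        std::shared_ptr<arrow::dataset::Dataset> dataset) {
      arrow::dataset::ScannerBuilder builder(std::move(dataset));
      ARROW_RETURN_NOT_OK(builder.Project({"a", "b"}));
      ARROW_RETURN_NOT_OK(builder.Filter(arrow::compute::greater(
          arrow::compute::field_ref("a"), arrow::compute::literal(1))));
      ARROW_RETURN_NOT_OK(builder.UseThreads(true));
      ARROW_ASSIGN_OR_RAISE(auto scanner, builder.Finish());
      return scanner->ToTable();
    }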
551
+
552
+ /// \brief Construct a source ExecNode which yields batches from a dataset scan.
553
+ ///
554
+ /// Does not construct associated filter or project nodes.
555
+ /// Yielded batches will be augmented with fragment/batch indices to enable stable
556
+ /// ordering for simple ExecPlans.
557
+ class ARROW_DS_EXPORT ScanNodeOptions : public acero::ExecNodeOptions {
558
+ public:
559
+ explicit ScanNodeOptions(std::shared_ptr<Dataset> dataset,
560
+ std::shared_ptr<ScanOptions> scan_options,
561
+ bool require_sequenced_output = false)
562
+ : dataset(std::move(dataset)),
563
+ scan_options(std::move(scan_options)),
564
+ require_sequenced_output(require_sequenced_output) {}
565
+
566
+ std::shared_ptr<Dataset> dataset;
567
+ std::shared_ptr<ScanOptions> scan_options;
568
+ bool require_sequenced_output;
569
+ };
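A hedged sketch of driving this node through Acero, assuming dataset::internal::Initialize() from plan.h (added in this same commit) has registered the "scan" factory and using acero::DeclarationToTable to run the plan:

    #include "arrow/acero/exec_plan.h"
    #include "arrow/dataset/plan.h"
    #include "arrow/dataset/scanner.h"

    // Build and run a single-node plan whose source is a dataset scan.
    arrow::Result<std::shared_ptr<arrow::Table>> RunScanNode(
        std::shared_ptr<arrow::dataset::Dataset> dataset,
        std::shared_ptr<arrow::dataset::ScanOptions> scan_options) {
      arrow::dataset::internal::Initialize();  // registers the "scan" exec node
      arrow::acero::Declaration scan(
          "scan", arrow::dataset::ScanNodeOptions(dataset, scan_options));
      return arrow::acero::DeclarationToTable(std::move(scan));
    }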
570
+
571
+ /// @}
572
+
573
+ namespace internal {
574
+ ARROW_DS_EXPORT void InitializeScanner(arrow::acero::ExecFactoryRegistry* registry);
575
+ ARROW_DS_EXPORT void InitializeScannerV2(arrow::acero::ExecFactoryRegistry* registry);
576
+ } // namespace internal
577
+ } // namespace dataset
578
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/type_fwd.h ADDED
@@ -0,0 +1,113 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #include <memory>
23
+ #include <vector>
24
+
25
+ #include "arrow/compute/type_fwd.h" // IWYU pragma: export
26
+ #include "arrow/dataset/visibility.h"
27
+ #include "arrow/filesystem/type_fwd.h" // IWYU pragma: export
28
+ #include "arrow/type_fwd.h" // IWYU pragma: export
29
+
30
+ namespace arrow {
31
+ namespace dataset {
32
+
33
+ class Dataset;
34
+ class DatasetFactory;
35
+ using DatasetVector = std::vector<std::shared_ptr<Dataset>>;
36
+
37
+ class UnionDataset;
38
+ class UnionDatasetFactory;
39
+
40
+ class Fragment;
41
+ using FragmentIterator = Iterator<std::shared_ptr<Fragment>>;
42
+ using FragmentVector = std::vector<std::shared_ptr<Fragment>>;
43
+
44
+ class FragmentScanOptions;
45
+
46
+ class FileSource;
47
+ class FileFormat;
48
+ class FileFragment;
49
+ class FileWriter;
50
+ class FileWriteOptions;
51
+ class FileSystemDataset;
52
+ class FileSystemDatasetFactory;
53
+ struct FileSystemDatasetWriteOptions;
54
+ class WriteNodeOptions;
55
+
56
+ /// \brief Controls what happens if files exist in an output directory during a dataset
57
+ /// write
58
+ enum class ExistingDataBehavior : int8_t {
59
+ /// Deletes all files in a directory the first time that directory is encountered
60
+ kDeleteMatchingPartitions,
61
+ /// Ignores existing files, overwriting any that happen to have the same name as an
62
+ /// output file
63
+ kOverwriteOrIgnore,
64
+ /// Returns an error if there are any files or subdirectories in the output directory
65
+ kError,
66
+ };
67
+
68
+ class InMemoryDataset;
69
+
70
+ class CsvFileFormat;
71
+ class CsvFileWriter;
72
+ class CsvFileWriteOptions;
73
+ struct CsvFragmentScanOptions;
74
+
75
+ class JsonFileFormat;
76
+ class JsonFileWriter;
77
+ class JsonFileWriteOptions;
78
+ struct JsonFragmentScanOptions;
79
+
80
+ class IpcFileFormat;
81
+ class IpcFileWriter;
82
+ class IpcFileWriteOptions;
83
+ class IpcFragmentScanOptions;
84
+
85
+ class ParquetFileFormat;
86
+ class ParquetFileFragment;
87
+ class ParquetFragmentScanOptions;
88
+ class ParquetFileWriter;
89
+ class ParquetFileWriteOptions;
90
+
91
+ class Partitioning;
92
+ class PartitioningFactory;
93
+ class PartitioningOrFactory;
94
+ struct KeyValuePartitioningOptions;
95
+ class DirectoryPartitioning;
96
+ class HivePartitioning;
97
+ struct HivePartitioningOptions;
98
+ class FilenamePartitioning;
99
+ struct FilenamePartitioningOptions;
100
+
101
+ class ScanNodeOptions;
102
+ struct ScanOptions;
103
+
104
+ class Scanner;
105
+
106
+ class ScannerBuilder;
107
+
108
+ class ScanTask;
109
+ using ScanTaskVector = std::vector<std::shared_ptr<ScanTask>>;
110
+ using ScanTaskIterator = Iterator<std::shared_ptr<ScanTask>>;
111
+
112
+ } // namespace dataset
113
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/dataset/visibility.h ADDED
@@ -0,0 +1,50 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ #if defined(_WIN32) || defined(__CYGWIN__)
23
+ #if defined(_MSC_VER)
24
+ #pragma warning(push)
25
+ #pragma warning(disable : 4251)
26
+ #else
27
+ #pragma GCC diagnostic ignored "-Wattributes"
28
+ #endif
29
+
30
+ #ifdef ARROW_DS_STATIC
31
+ #define ARROW_DS_EXPORT
32
+ #elif defined(ARROW_DS_EXPORTING)
33
+ #define ARROW_DS_EXPORT __declspec(dllexport)
34
+ #else
35
+ #define ARROW_DS_EXPORT __declspec(dllimport)
36
+ #endif
37
+
38
+ #define ARROW_DS_NO_EXPORT
39
+ #else // Not Windows
40
+ #ifndef ARROW_DS_EXPORT
41
+ #define ARROW_DS_EXPORT __attribute__((visibility("default")))
42
+ #endif
43
+ #ifndef ARROW_DS_NO_EXPORT
44
+ #define ARROW_DS_NO_EXPORT __attribute__((visibility("hidden")))
45
+ #endif
46
+ #endif // Non-Windows
47
+
48
+ #if defined(_MSC_VER)
49
+ #pragma warning(pop)
50
+ #endif
venv/lib/python3.10/site-packages/pyarrow/include/arrow/datum.h ADDED
@@ -0,0 +1,311 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <type_traits>
24
+ #include <utility>
25
+ #include <variant>
26
+ #include <vector>
27
+
28
+ #include "arrow/array/data.h"
29
+ #include "arrow/scalar.h"
30
+ #include "arrow/type.h"
31
+ #include "arrow/type_traits.h"
32
+ #include "arrow/util/checked_cast.h"
33
+ #include "arrow/util/macros.h"
34
+ #include "arrow/util/visibility.h"
35
+
36
+ namespace arrow {
37
+
38
+ class Array;
39
+ class ChunkedArray;
40
+ class RecordBatch;
41
+ class Table;
42
+
43
+ /// \class Datum
44
+ /// \brief Variant type for various Arrow C++ data structures
45
+ struct ARROW_EXPORT Datum {
46
+ /// \brief The kind of datum stored
47
+ enum Kind { NONE, SCALAR, ARRAY, CHUNKED_ARRAY, RECORD_BATCH, TABLE };
48
+
49
+ /// \brief A placeholder type to represent empty datum
50
+ struct Empty {};
51
+
52
+ /// \brief Datums variants may have a length. This special value indicate that the
53
+ /// current variant does not have a length.
54
+ static constexpr int64_t kUnknownLength = -1;
55
+
56
+ /// \brief Storage of the actual datum.
57
+ ///
58
+ /// Note: For arrays, ArrayData is stored instead of Array for easier processing
59
+ std::variant<Empty, std::shared_ptr<Scalar>, std::shared_ptr<ArrayData>,
60
+ std::shared_ptr<ChunkedArray>, std::shared_ptr<RecordBatch>,
+ std::shared_ptr<Table>>
+ value;
+
+ /// \brief Empty datum, to be populated elsewhere
+ Datum() = default;
+
+ Datum(const Datum& other) = default;
+ Datum& operator=(const Datum& other) = default;
+ Datum(Datum&& other) = default;
+ Datum& operator=(Datum&& other) = default;
+
+ /// \brief Construct from a Scalar
+ Datum(std::shared_ptr<Scalar> value) // NOLINT implicit conversion
+ : value(std::move(value)) {}
+
+ /// \brief Construct from an ArrayData
+ Datum(std::shared_ptr<ArrayData> value) // NOLINT implicit conversion
+ : value(std::move(value)) {}
+
+ /// \brief Construct from an ArrayData
+ Datum(ArrayData arg) // NOLINT implicit conversion
+ : value(std::make_shared<ArrayData>(std::move(arg))) {}
+
+ /// \brief Construct from an Array
+ Datum(const Array& value); // NOLINT implicit conversion
+
+ /// \brief Construct from an Array
+ Datum(const std::shared_ptr<Array>& value); // NOLINT implicit conversion
+
+ /// \brief Construct from a ChunkedArray
+ Datum(std::shared_ptr<ChunkedArray> value); // NOLINT implicit conversion
+
+ /// \brief Construct from a RecordBatch
+ Datum(std::shared_ptr<RecordBatch> value); // NOLINT implicit conversion
+
+ /// \brief Construct from a Table
+ Datum(std::shared_ptr<Table> value); // NOLINT implicit conversion
+
+ /// \brief Construct from a ChunkedArray.
+ ///
+ /// This can be expensive, prefer the shared_ptr<ChunkedArray> constructor
+ explicit Datum(const ChunkedArray& value);
+
+ /// \brief Construct from a RecordBatch.
+ ///
+ /// This can be expensive, prefer the shared_ptr<RecordBatch> constructor
+ explicit Datum(const RecordBatch& value);
+
+ /// \brief Construct from a Table.
+ ///
+ /// This can be expensive, prefer the shared_ptr<Table> constructor
+ explicit Datum(const Table& value);
+
+ /// \brief Cast from concrete subtypes of Array or Scalar to Datum
+ template <typename T, bool IsArray = std::is_base_of_v<Array, T>,
+ bool IsScalar = std::is_base_of_v<Scalar, T>,
+ typename = enable_if_t<IsArray || IsScalar>>
+ Datum(std::shared_ptr<T> value) // NOLINT implicit conversion
+ : Datum(std::shared_ptr<typename std::conditional<IsArray, Array, Scalar>::type>(
+ std::move(value))) {}
+
+ /// \brief Cast from concrete subtypes of Array or Scalar to Datum
+ template <typename T, typename TV = typename std::remove_reference_t<T>,
+ bool IsArray = std::is_base_of_v<Array, T>,
+ bool IsScalar = std::is_base_of_v<Scalar, T>,
+ typename = enable_if_t<IsArray || IsScalar>>
+ Datum(T&& value) // NOLINT implicit conversion
+ : Datum(std::make_shared<TV>(std::forward<T>(value))) {}
+
+ /// \brief Copy from concrete subtypes of Scalar.
+ ///
+ /// The concrete scalar type must be copyable (not all of them are).
+ template <typename T, typename = enable_if_t<std::is_base_of_v<Scalar, T>>>
+ Datum(const T& value) // NOLINT implicit conversion
+ : Datum(std::make_shared<T>(value)) {}
+
+ // Convenience constructors
+ /// \brief Convenience constructor storing a bool scalar.
+ explicit Datum(bool value);
+ /// \brief Convenience constructor storing an int8 scalar.
+ explicit Datum(int8_t value);
+ /// \brief Convenience constructor storing a uint8 scalar.
+ explicit Datum(uint8_t value);
+ /// \brief Convenience constructor storing an int16 scalar.
+ explicit Datum(int16_t value);
+ /// \brief Convenience constructor storing a uint16 scalar.
+ explicit Datum(uint16_t value);
+ /// \brief Convenience constructor storing an int32 scalar.
+ explicit Datum(int32_t value);
+ /// \brief Convenience constructor storing a uint32 scalar.
+ explicit Datum(uint32_t value);
+ /// \brief Convenience constructor storing an int64 scalar.
+ explicit Datum(int64_t value);
+ /// \brief Convenience constructor storing a uint64 scalar.
+ explicit Datum(uint64_t value);
+ /// \brief Convenience constructor storing a float scalar.
+ explicit Datum(float value);
+ /// \brief Convenience constructor storing a double scalar.
+ explicit Datum(double value);
+ /// \brief Convenience constructor storing a string scalar.
+ explicit Datum(std::string value);
+ /// \brief Convenience constructor storing a string scalar.
+ explicit Datum(const char* value);
+
+ /// \brief Convenience constructor for a DurationScalar from std::chrono::duration
+ template <template <typename, typename> class StdDuration, typename Rep,
+ typename Period,
+ typename = decltype(DurationScalar{StdDuration<Rep, Period>{}})>
+ explicit Datum(StdDuration<Rep, Period> d) : Datum{DurationScalar(d)} {}
+
+ /// \brief The kind of data stored in Datum
+ Datum::Kind kind() const {
+ switch (this->value.index()) {
+ case 0:
+ return Datum::NONE;
+ case 1:
+ return Datum::SCALAR;
+ case 2:
+ return Datum::ARRAY;
+ case 3:
+ return Datum::CHUNKED_ARRAY;
+ case 4:
+ return Datum::RECORD_BATCH;
+ case 5:
+ return Datum::TABLE;
+ default:
+ return Datum::NONE;
+ }
+ }
+
+ /// \brief Retrieve the stored array as ArrayData
+ ///
+ /// Use make_array() if an Array is desired (which is more expensive).
+ /// \throws std::bad_variant_access if the datum is not an array
+ const std::shared_ptr<ArrayData>& array() const {
+ return std::get<std::shared_ptr<ArrayData>>(this->value);
+ }
+
+ /// \brief The sum of bytes in each buffer referenced by the datum
+ /// Note: Scalars report a size of 0
+ /// \see arrow::util::TotalBufferSize for caveats
+ int64_t TotalBufferSize() const;
+
+ /// \brief Get the stored ArrayData in mutable form
+ ///
+ /// For internal use primarily. Keep in mind a shared_ptr<Datum> may have multiple
+ /// owners.
+ ArrayData* mutable_array() const { return this->array().get(); }
+
+ /// \brief Retrieve the stored array as Array
+ /// \throws std::bad_variant_access if the datum is not an array
+ std::shared_ptr<Array> make_array() const;
+
+ /// \brief Retrieve the chunked array stored
+ /// \throws std::bad_variant_access if the datum is not a chunked array
+ const std::shared_ptr<ChunkedArray>& chunked_array() const {
+ return std::get<std::shared_ptr<ChunkedArray>>(this->value);
+ }
+
+ /// \brief Retrieve the record batch stored
+ /// \throws std::bad_variant_access if the datum is not a record batch
+ const std::shared_ptr<RecordBatch>& record_batch() const {
+ return std::get<std::shared_ptr<RecordBatch>>(this->value);
+ }
+
+ /// \brief Retrieve the table stored
+ /// \throws std::bad_variant_access if the datum is not a table
+ const std::shared_ptr<Table>& table() const {
+ return std::get<std::shared_ptr<Table>>(this->value);
+ }
+
+ /// \brief Retrieve the scalar stored
+ /// \throws std::bad_variant_access if the datum is not a scalar
+ const std::shared_ptr<Scalar>& scalar() const {
+ return std::get<std::shared_ptr<Scalar>>(this->value);
+ }
+
+ /// \brief Retrieve the datum as its concrete array type
+ /// \throws std::bad_variant_access if the datum is not an array
+ /// \tparam ExactType the expected array type, may cause undefined behavior if it is not
+ /// the type of the stored array
+ template <typename ExactType>
+ std::shared_ptr<ExactType> array_as() const {
+ return internal::checked_pointer_cast<ExactType>(this->make_array());
+ }
+
+ /// \brief Retrieve the datum as its concrete scalar type
+ /// \throws std::bad_variant_access if the datum is not a scalar
+ /// \tparam ExactType the expected scalar type, may cause undefined behavior if it is
+ /// not the type of the stored scalar
+ template <typename ExactType>
+ const ExactType& scalar_as() const {
+ return internal::checked_cast<const ExactType&>(*this->scalar());
+ }
+
+ /// \brief True if Datum contains an array
+ bool is_array() const { return this->kind() == Datum::ARRAY; }
+
+ /// \brief True if Datum contains a chunked array
+ bool is_chunked_array() const { return this->kind() == Datum::CHUNKED_ARRAY; }
+
+ /// \brief True if Datum contains an array or a chunked array
+ bool is_arraylike() const {
+ return this->kind() == Datum::ARRAY || this->kind() == Datum::CHUNKED_ARRAY;
+ }
+
+ /// \brief True if Datum contains a scalar
+ bool is_scalar() const { return this->kind() == Datum::SCALAR; }
+
+ /// \brief True if Datum contains a scalar or array-like data
+ bool is_value() const { return this->is_arraylike() || this->is_scalar(); }
+
+ /// \brief Return the null count.
+ ///
+ /// Only valid for scalar and array-like data.
+ int64_t null_count() const;
+
+ /// \brief The value type of the variant, if any
+ ///
+ /// \return nullptr if no type
+ const std::shared_ptr<DataType>& type() const;
+
+ /// \brief The schema of the variant, if any
+ ///
+ /// \return nullptr if no schema
+ const std::shared_ptr<Schema>& schema() const;
+
+ /// \brief The value length of the variant, if any
+ ///
+ /// \return kUnknownLength if no type
+ int64_t length() const;
+
+ /// \brief The array chunks of the variant, if any
+ ///
+ /// \return empty if not arraylike
+ ArrayVector chunks() const;
+
+ /// \brief True if the two data are equal
+ bool Equals(const Datum& other) const;
+
+ bool operator==(const Datum& other) const { return Equals(other); }
+ bool operator!=(const Datum& other) const { return !Equals(other); }
+
+ std::string ToString() const;
+ };
+
+ ARROW_EXPORT void PrintTo(const Datum&, std::ostream*);
+
+ ARROW_EXPORT std::string ToString(Datum::Kind kind);
+
+ } // namespace arrow
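The Datum class above is the tagged union Arrow's compute layer uses to pass around any value kind (scalar, array, chunked array, record batch, or table). A minimal usage sketch, assuming a linked Arrow C++ build; it is illustrative only, not part of the vendored header, and the file name is hypothetical:

// datum_sketch.cc (hypothetical file name, illustrative only)
#include <cassert>
#include <cstdint>
#include <iostream>

#include "arrow/datum.h"

int main() {
  arrow::Datum empty;                   // default-constructed: kind() == NONE
  assert(empty.kind() == arrow::Datum::NONE);

  arrow::Datum answer(int64_t{42});     // convenience int64 scalar constructor
  assert(answer.is_scalar());
  assert(answer.kind() == arrow::Datum::SCALAR);
  assert(!answer.is_arraylike());       // neither ARRAY nor CHUNKED_ARRAY
  std::cout << answer.ToString() << std::endl;
  return 0;
}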
venv/lib/python3.10/site-packages/pyarrow/include/arrow/device.h ADDED
@@ -0,0 +1,394 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+ #include <functional>
+ #include <memory>
+ #include <string>
+
+ #include "arrow/io/type_fwd.h"
+ #include "arrow/result.h"
+ #include "arrow/status.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/compare.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+
+ /// \brief EXPERIMENTAL: Device type enum which matches up with C Data Device types
+ enum class DeviceAllocationType : char {
+ kCPU = 1,
+ kCUDA = 2,
+ kCUDA_HOST = 3,
+ kOPENCL = 4,
+ kVULKAN = 7,
+ kMETAL = 8,
+ kVPI = 9,
+ kROCM = 10,
+ kROCM_HOST = 11,
+ kEXT_DEV = 12,
+ kCUDA_MANAGED = 13,
+ kONEAPI = 14,
+ kWEBGPU = 15,
+ kHEXAGON = 16,
+ };
+
+ class MemoryManager;
+
+ /// \brief EXPERIMENTAL: Abstract interface for hardware devices
+ ///
+ /// This object represents a device with access to some memory spaces.
+ /// When handling a Buffer or raw memory address, it allows deciding in which
+ /// context the raw memory address should be interpreted
+ /// (e.g. CPU-accessible memory, or embedded memory on some particular GPU).
+ class ARROW_EXPORT Device : public std::enable_shared_from_this<Device>,
+ public util::EqualityComparable<Device> {
+ public:
+ virtual ~Device();
+
+ /// \brief A shorthand for this device's type.
+ ///
+ /// The returned value is different for each device class, but is the
+ /// same for all instances of a given class. It can be used as a replacement
+ /// for RTTI.
+ virtual const char* type_name() const = 0;
+
+ /// \brief A human-readable description of the device.
+ ///
+ /// The returned value should be detailed enough to distinguish between
+ /// different instances, where necessary.
+ virtual std::string ToString() const = 0;
+
+ /// \brief Whether this instance points to the same device as another one.
+ virtual bool Equals(const Device&) const = 0;
+
+ /// \brief A device ID to identify this device if there are multiple of this type.
+ ///
+ /// If there is no "device_id" equivalent (such as for the main CPU device on
+ /// non-numa systems) returns -1.
+ virtual int64_t device_id() const { return -1; }
+
+ /// \brief Whether this device is the main CPU device.
+ ///
+ /// This shorthand method is very useful when deciding whether a memory address
+ /// is CPU-accessible.
+ bool is_cpu() const { return is_cpu_; }
+
+ /// \brief Return a MemoryManager instance tied to this device
+ ///
+ /// The returned instance uses default parameters for this device type's
+ /// MemoryManager implementation. Some devices also allow constructing
+ /// MemoryManager instances with non-default parameters.
+ virtual std::shared_ptr<MemoryManager> default_memory_manager() = 0;
+
+ /// \brief Return the DeviceAllocationType of this device
+ virtual DeviceAllocationType device_type() const = 0;
+
+ class SyncEvent;
+
+ /// \brief EXPERIMENTAL: An opaque wrapper for Device-specific streams
+ ///
+ /// In essence this is just a wrapper around a void* to represent the
+ /// standard concept of a stream/queue on a device. Derived classes
+ /// should be trivially constructible from their device-specific counterparts.
+ class ARROW_EXPORT Stream {
+ public:
+ using release_fn_t = std::function<void(void*)>;
+
+ virtual ~Stream() = default;
+
+ virtual const void* get_raw() const { return stream_.get(); }
+
+ /// \brief Make the stream wait on the provided event.
+ ///
+ /// Tells the stream that it should wait until the synchronization
+ /// event is completed without blocking the CPU.
+ virtual Status WaitEvent(const SyncEvent&) = 0;
+
+ /// \brief Blocks the current thread until a stream's remaining tasks are completed
+ virtual Status Synchronize() const = 0;
+
+ protected:
+ explicit Stream(void* stream, release_fn_t release_stream)
+ : stream_{stream, release_stream} {}
+
+ std::unique_ptr<void, release_fn_t> stream_;
+ };
+
+ virtual Result<std::shared_ptr<Stream>> MakeStream() { return NULLPTR; }
+
+ /// \brief Create a new device stream
+ ///
+ /// This should create the appropriate stream type for the device,
+ /// derived from Device::Stream to allow for stream ordered events
+ /// and memory allocations.
+ virtual Result<std::shared_ptr<Stream>> MakeStream(unsigned int flags) {
+ return NULLPTR;
+ }
+
+ /// @brief Wrap an existing device stream alongside a release function
+ ///
+ /// @param device_stream a pointer to the stream to wrap
+ /// @param release_fn a function to call during destruction, `nullptr` or
+ /// a no-op function can be passed to indicate ownership is maintained
+ /// externally
+ virtual Result<std::shared_ptr<Stream>> WrapStream(void* device_stream,
+ Stream::release_fn_t release_fn) {
+ return NULLPTR;
+ }
+
+ /// \brief EXPERIMENTAL: An object that provides event/stream sync primitives
+ class ARROW_EXPORT SyncEvent {
+ public:
+ using release_fn_t = std::function<void(void*)>;
+
+ virtual ~SyncEvent() = default;
+
+ void* get_raw() { return sync_event_.get(); }
+
+ /// @brief Block until sync event is completed.
+ virtual Status Wait() = 0;
+
+ /// @brief Record the wrapped event on the stream so it triggers
+ /// the event when the stream gets to that point in its queue.
+ virtual Status Record(const Stream&) = 0;
+
+ protected:
+ /// If creating this with a passed in event, the caller must ensure
+ /// that the event lives until clear_event is called on this as it
+ /// won't own it.
+ explicit SyncEvent(void* sync_event, release_fn_t release_sync_event)
+ : sync_event_{sync_event, release_sync_event} {}
+
+ std::unique_ptr<void, release_fn_t> sync_event_;
+ };
+
+ protected:
+ ARROW_DISALLOW_COPY_AND_ASSIGN(Device);
+ explicit Device(bool is_cpu = false) : is_cpu_(is_cpu) {}
+
+ bool is_cpu_;
+ };
+
+ /// \brief EXPERIMENTAL: An object that provides memory management primitives
+ ///
+ /// A MemoryManager is always tied to a particular Device instance.
+ /// It can also have additional parameters (such as a MemoryPool to
+ /// allocate CPU memory).
+ class ARROW_EXPORT MemoryManager : public std::enable_shared_from_this<MemoryManager> {
+ public:
+ virtual ~MemoryManager();
+
+ /// \brief The device this MemoryManager is tied to
+ const std::shared_ptr<Device>& device() const { return device_; }
+
+ /// \brief Whether this MemoryManager is tied to the main CPU device.
+ ///
+ /// This shorthand method is very useful when deciding whether a memory address
+ /// is CPU-accessible.
+ bool is_cpu() const { return device_->is_cpu(); }
+
+ /// \brief Create a RandomAccessFile to read a particular buffer.
+ ///
+ /// The given buffer must be tied to this MemoryManager.
+ ///
+ /// See also the Buffer::GetReader shorthand.
+ virtual Result<std::shared_ptr<io::RandomAccessFile>> GetBufferReader(
+ std::shared_ptr<Buffer> buf) = 0;
+
+ /// \brief Create an OutputStream to write to a particular buffer.
+ ///
+ /// The given buffer must be mutable and tied to this MemoryManager.
+ /// The returned stream object writes into the buffer's underlying memory
+ /// (but it won't resize it).
+ ///
+ /// See also the Buffer::GetWriter shorthand.
+ virtual Result<std::shared_ptr<io::OutputStream>> GetBufferWriter(
+ std::shared_ptr<Buffer> buf) = 0;
+
+ /// \brief Allocate a (mutable) Buffer
+ ///
+ /// The buffer will be allocated in the device's memory.
+ virtual Result<std::unique_ptr<Buffer>> AllocateBuffer(int64_t size) = 0;
+
+ /// \brief Copy a Buffer to a destination MemoryManager
+ ///
+ /// See also the Buffer::Copy shorthand.
+ static Result<std::shared_ptr<Buffer>> CopyBuffer(
+ const std::shared_ptr<Buffer>& source, const std::shared_ptr<MemoryManager>& to);
+
+ /// \brief Copy a non-owned Buffer to a destination MemoryManager
+ ///
+ /// This is useful for cases where the source memory area is externally managed
+ /// (its lifetime not tied to the source Buffer), otherwise please use CopyBuffer().
+ static Result<std::unique_ptr<Buffer>> CopyNonOwned(
+ const Buffer& source, const std::shared_ptr<MemoryManager>& to);
+
+ /// \brief Make a no-copy Buffer view in a destination MemoryManager
+ ///
+ /// See also the Buffer::View shorthand.
+ static Result<std::shared_ptr<Buffer>> ViewBuffer(
+ const std::shared_ptr<Buffer>& source, const std::shared_ptr<MemoryManager>& to);
+
+ /// \brief Create a new SyncEvent.
+ ///
+ /// This version should construct the appropriate event for the device and
+ /// provide the unique_ptr with the correct deleter for the event type.
+ /// If the device does not require or work with any synchronization, it is
+ /// allowed for it to return a nullptr.
+ virtual Result<std::shared_ptr<Device::SyncEvent>> MakeDeviceSyncEvent();
+
+ /// \brief Wrap an event into a SyncEvent.
+ ///
+ /// @param sync_event passed in sync_event (should be a pointer to the appropriate type)
+ /// @param release_sync_event destructor to free sync_event. `nullptr` may be
+ /// passed to indicate that no destruction/freeing is necessary
+ virtual Result<std::shared_ptr<Device::SyncEvent>> WrapDeviceSyncEvent(
+ void* sync_event, Device::SyncEvent::release_fn_t release_sync_event);
+
+ protected:
+ ARROW_DISALLOW_COPY_AND_ASSIGN(MemoryManager);
+
+ explicit MemoryManager(const std::shared_ptr<Device>& device) : device_(device) {}
+
+ // Default implementations always return nullptr, should be overridden
+ // by subclasses that support data transfer.
+ // (returning nullptr means unsupported copy / view)
+ // In CopyBufferFrom and ViewBufferFrom, the `from` parameter is guaranteed to
+ // be equal to `buf->memory_manager()`.
+ virtual Result<std::shared_ptr<Buffer>> CopyBufferFrom(
+ const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& from);
+ virtual Result<std::shared_ptr<Buffer>> CopyBufferTo(
+ const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& to);
+ virtual Result<std::unique_ptr<Buffer>> CopyNonOwnedFrom(
+ const Buffer& buf, const std::shared_ptr<MemoryManager>& from);
+ virtual Result<std::unique_ptr<Buffer>> CopyNonOwnedTo(
+ const Buffer& buf, const std::shared_ptr<MemoryManager>& to);
+ virtual Result<std::shared_ptr<Buffer>> ViewBufferFrom(
+ const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& from);
+ virtual Result<std::shared_ptr<Buffer>> ViewBufferTo(
+ const std::shared_ptr<Buffer>& buf, const std::shared_ptr<MemoryManager>& to);
+
+ std::shared_ptr<Device> device_;
+ };
+
+ // ----------------------------------------------------------------------
+ // CPU backend implementation
+
+ class ARROW_EXPORT CPUDevice : public Device {
+ public:
+ const char* type_name() const override;
+ std::string ToString() const override;
+ bool Equals(const Device&) const override;
+ DeviceAllocationType device_type() const override { return DeviceAllocationType::kCPU; }
+
+ std::shared_ptr<MemoryManager> default_memory_manager() override;
+
+ /// \brief Return the global CPUDevice instance
+ static std::shared_ptr<Device> Instance();
+
+ /// \brief Create a MemoryManager
+ ///
+ /// The returned MemoryManager will use the given MemoryPool for allocations.
+ static std::shared_ptr<MemoryManager> memory_manager(MemoryPool* pool);
+
+ protected:
+ CPUDevice() : Device(true) {}
+ };
+
+ class ARROW_EXPORT CPUMemoryManager : public MemoryManager {
+ public:
+ Result<std::shared_ptr<io::RandomAccessFile>> GetBufferReader(
+ std::shared_ptr<Buffer> buf) override;
+ Result<std::shared_ptr<io::OutputStream>> GetBufferWriter(
+ std::shared_ptr<Buffer> buf) override;
+
+ Result<std::unique_ptr<Buffer>> AllocateBuffer(int64_t size) override;
+
+ /// \brief Return the MemoryPool associated with this MemoryManager.
+ MemoryPool* pool() const { return pool_; }
+
+ protected:
+ CPUMemoryManager(const std::shared_ptr<Device>& device, MemoryPool* pool)
+ : MemoryManager(device), pool_(pool) {}
+
+ static std::shared_ptr<MemoryManager> Make(const std::shared_ptr<Device>& device,
+ MemoryPool* pool = default_memory_pool());
+
+ Result<std::shared_ptr<Buffer>> CopyBufferFrom(
+ const std::shared_ptr<Buffer>& buf,
+ const std::shared_ptr<MemoryManager>& from) override;
+ Result<std::shared_ptr<Buffer>> CopyBufferTo(
+ const std::shared_ptr<Buffer>& buf,
+ const std::shared_ptr<MemoryManager>& to) override;
+ Result<std::unique_ptr<Buffer>> CopyNonOwnedFrom(
+ const Buffer& buf, const std::shared_ptr<MemoryManager>& from) override;
+ Result<std::unique_ptr<Buffer>> CopyNonOwnedTo(
+ const Buffer& buf, const std::shared_ptr<MemoryManager>& to) override;
+ Result<std::shared_ptr<Buffer>> ViewBufferFrom(
+ const std::shared_ptr<Buffer>& buf,
+ const std::shared_ptr<MemoryManager>& from) override;
+ Result<std::shared_ptr<Buffer>> ViewBufferTo(
+ const std::shared_ptr<Buffer>& buf,
+ const std::shared_ptr<MemoryManager>& to) override;
+
+ MemoryPool* pool_;
+
+ friend std::shared_ptr<MemoryManager> CPUDevice::memory_manager(MemoryPool* pool);
+ ARROW_FRIEND_EXPORT friend std::shared_ptr<MemoryManager> default_cpu_memory_manager();
+ };
+
+ /// \brief Return the default CPU MemoryManager instance
+ ///
+ /// The returned singleton instance uses the default MemoryPool.
+ /// This function is a faster spelling of
+ /// `CPUDevice::Instance()->default_memory_manager()`.
+ ARROW_EXPORT
+ std::shared_ptr<MemoryManager> default_cpu_memory_manager();
+
+ using DeviceMapper =
+ std::function<Result<std::shared_ptr<MemoryManager>>(int64_t device_id)>;
+
+ /// \brief Register a function to retrieve a MemoryManager for a Device type
+ ///
+ /// This registers the device type globally. A specific device type can only
+ /// be registered once. This method is thread-safe.
+ ///
+ /// Currently, this registry is only used for importing data through the C Device
+ /// Data Interface (for the default Device to MemoryManager mapper in
+ /// arrow::ImportDeviceArray/ImportDeviceRecordBatch).
+ ///
+ /// \param[in] device_type the device type for which to register a MemoryManager
+ /// \param[in] mapper function that takes a device id and returns the appropriate
+ /// MemoryManager for the registered device type and given device id
+ /// \return Status
+ ARROW_EXPORT
+ Status RegisterDeviceMapper(DeviceAllocationType device_type, DeviceMapper mapper);
+
+ /// \brief Get the registered function to retrieve a MemoryManager for the
+ /// given Device type
+ ///
+ /// \param[in] device_type the device type
+ /// \return function that takes a device id and returns the appropriate
+ /// MemoryManager for the registered device type and given device id
+ ARROW_EXPORT
+ Result<DeviceMapper> GetDeviceMapper(DeviceAllocationType device_type);
+
+ } // namespace arrow
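Device and MemoryManager together let callers allocate and copy buffers without hard-coding where the memory lives: the device answers whether an address is CPU-accessible, and its memory manager performs the actual allocation. A minimal sketch of the CPU path, assuming a linked Arrow C++ build (illustrative only, not part of the vendored header; the file name is hypothetical):

// device_sketch.cc (hypothetical file name, illustrative only)
#include <memory>

#include "arrow/buffer.h"
#include "arrow/device.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Status AllocateOnCpu() {
  // The singleton manager declared above; it uses the default MemoryPool.
  std::shared_ptr<arrow::MemoryManager> mm = arrow::default_cpu_memory_manager();
  // Allocate 1 KiB in the memory space this manager is tied to (here: the CPU).
  ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::Buffer> buf, mm->AllocateBuffer(1024));
  if (mm->is_cpu()) {
    // CPU-accessible, so the raw address may be dereferenced directly.
    buf->mutable_data()[0] = 0;
  }
  return arrow::Status::OK();
}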
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/api.h ADDED
@@ -0,0 +1,22 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include "arrow/engine/substrait/api.h"
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/pch.h ADDED
@@ -0,0 +1,23 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // Often-used headers, for precompiling.
+ // If updating this header, please make sure you check compilation speed
+ // before checking in. Adding headers which are not used extremely often
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
+
+ #include "arrow/pch.h"
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/api.h ADDED
@@ -0,0 +1,26 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include "arrow/engine/substrait/extension_set.h"
+ #include "arrow/engine/substrait/extension_types.h"
+ #include "arrow/engine/substrait/options.h"
+ #include "arrow/engine/substrait/relation.h"
+ #include "arrow/engine/substrait/serde.h"
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_set.h ADDED
@@ -0,0 +1,481 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <cstddef>
+ #include <cstdint>
+ #include <functional>
+ #include <memory>
+ #include <optional>
+ #include <string>
+ #include <string_view>
+ #include <unordered_map>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/compute/api_aggregate.h"
+ #include "arrow/compute/expression.h"
+ #include "arrow/engine/substrait/type_fwd.h"
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/result.h"
+ #include "arrow/status.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace engine {
+
+ constexpr const char* kSubstraitArithmeticFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_arithmetic.yaml";
+ constexpr const char* kSubstraitBooleanFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_boolean.yaml";
+ constexpr const char* kSubstraitComparisonFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_comparison.yaml";
+ constexpr const char* kSubstraitDatetimeFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_datetime.yaml";
+ constexpr const char* kSubstraitLogarithmicFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_logarithmic.yaml";
+ constexpr const char* kSubstraitRoundingFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_rounding.yaml";
+ constexpr const char* kSubstraitStringFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_string.yaml";
+ constexpr const char* kSubstraitAggregateGenericFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_aggregate_generic.yaml";
+
+ /// If a function call contains this URI then the function is looked up
+ /// in the registry directly, all arguments are mapped as value arguments,
+ /// and any options are ignored.
+ constexpr const char* kArrowSimpleExtensionFunctionsUri =
+ "urn:arrow:substrait_simple_extension_function";
+
+ struct ARROW_ENGINE_EXPORT Id {
+ std::string_view uri, name;
+ bool empty() const { return uri.empty() && name.empty(); }
+ std::string ToString() const;
+ };
+ struct ARROW_ENGINE_EXPORT IdHashEq {
+ size_t operator()(Id id) const;
+ bool operator()(Id l, Id r) const;
+ };
+
+ /// \brief Owning storage for ids
+ ///
+ /// Substrait plans may reuse URIs and names in many places. For convenience
+ /// and performance Substrait ids are typically passed around as views. As we
+ /// convert a plan from Substrait to Arrow we need to copy these strings out of
+ /// the Substrait buffer and into owned storage. This class serves as that owned
+ /// storage.
+ class ARROW_ENGINE_EXPORT IdStorage {
+ public:
+ virtual ~IdStorage() = default;
+ /// \brief Get an equivalent id pointing into this storage
+ ///
+ /// This operation will copy the ids into storage if they do not already exist
+ virtual Id Emplace(Id id) = 0;
+ /// \brief Get an equivalent view pointing into this storage for a URI
+ ///
+ /// If no URI is found then the uri will be copied into storage
+ virtual std::string_view EmplaceUri(std::string_view uri) = 0;
+ /// \brief Get an equivalent id pointing into this storage
+ ///
+ /// If no id is found then nullopt will be returned
+ virtual std::optional<Id> Find(Id id) const = 0;
+ /// \brief Get an equivalent view pointing into this storage for a URI
+ ///
+ /// If no URI is found then nullopt will be returned
+ virtual std::optional<std::string_view> FindUri(std::string_view uri) const = 0;
+
+ static std::unique_ptr<IdStorage> Make();
+ };
+
+ /// \brief Describes a Substrait call
+ ///
+ /// Substrait call expressions contain a list of arguments which can either
+ /// be enum arguments (which are serialized as strings), value arguments (which
+ /// are Arrow expressions), or type arguments (not yet implemented)
+ class ARROW_ENGINE_EXPORT SubstraitCall {
+ public:
+ SubstraitCall(Id id, std::shared_ptr<DataType> output_type, bool output_nullable,
+ bool is_hash = false)
+ : id_(id),
+ output_type_(std::move(output_type)),
+ output_nullable_(output_nullable),
+ is_hash_(is_hash) {}
+
+ const Id& id() const { return id_; }
+ const std::shared_ptr<DataType>& output_type() const { return output_type_; }
+ bool output_nullable() const { return output_nullable_; }
+ bool is_hash() const { return is_hash_; }
+ const std::unordered_map<std::string, std::vector<std::string>>& options() const {
+ return options_;
+ }
+
+ bool HasEnumArg(int index) const;
+ Result<std::string_view> GetEnumArg(int index) const;
+ void SetEnumArg(int index, std::string enum_arg);
+ Result<compute::Expression> GetValueArg(int index) const;
+ bool HasValueArg(int index) const;
+ void SetValueArg(int index, compute::Expression value_arg);
+ std::optional<std::vector<std::string> const*> GetOption(
+ std::string_view option_name) const;
+ void SetOption(std::string_view option_name,
+ const std::vector<std::string_view>& option_preferences);
+ bool HasOptions() const;
+ int size() const { return size_; }
+
+ private:
+ Id id_;
+ std::shared_ptr<DataType> output_type_;
+ bool output_nullable_;
+ // Only needed when converting from Substrait -> Arrow aggregates. The
+ // Arrow function name depends on whether or not there are any groups
+ bool is_hash_;
+ std::unordered_map<int, std::string> enum_args_;
+ std::unordered_map<int, compute::Expression> value_args_;
+ std::unordered_map<std::string, std::vector<std::string>> options_;
+ int size_ = 0;
+ };
+
+ /// Substrait identifies functions and custom data types using a (uri, name) pair.
+ ///
+ /// This registry is a bidirectional mapping between Substrait IDs and their
+ /// corresponding Arrow counterparts (arrow::DataType and function names in a function
+ /// registry)
+ ///
+ /// Substrait extension types and variations must be registered with their
+ /// corresponding arrow::DataType before they can be used!
+ ///
+ /// Conceptually this can be thought of as two pairs of `unordered_map`s. One pair to
+ /// go back and forth between Substrait ID and arrow::DataType and another pair to go
+ /// back and forth between Substrait ID and Arrow function names.
+ ///
+ /// Unlike an ExtensionSet this registry is not created automatically when consuming
+ /// Substrait plans and must be configured ahead of time (although there is a default
+ /// instance).
+ class ARROW_ENGINE_EXPORT ExtensionIdRegistry {
+ public:
+ using ArrowToSubstraitCall =
+ std::function<Result<SubstraitCall>(const arrow::compute::Expression::Call&)>;
+ using SubstraitCallToArrow =
+ std::function<Result<arrow::compute::Expression>(const SubstraitCall&)>;
+ using ArrowToSubstraitAggregate =
+ std::function<Result<SubstraitCall>(const arrow::compute::Aggregate&)>;
+ using SubstraitAggregateToArrow =
+ std::function<Result<arrow::compute::Aggregate>(const SubstraitCall&)>;
+
+ /// \brief A mapping between a Substrait ID and an arrow::DataType
+ struct TypeRecord {
+ Id id;
+ const std::shared_ptr<DataType>& type;
+ };
+
+ /// \brief Return a uri view owned by this registry
+ ///
+ /// If the URI has never been emplaced it will return nullopt
+ virtual std::optional<std::string_view> FindUri(std::string_view uri) const = 0;
+ /// \brief Return an id view owned by this registry
+ ///
+ /// If the id has never been emplaced it will return nullopt
+ virtual std::optional<Id> FindId(Id id) const = 0;
+ virtual std::optional<TypeRecord> GetType(const DataType&) const = 0;
+ virtual std::optional<TypeRecord> GetType(Id) const = 0;
+ virtual Status CanRegisterType(Id, const std::shared_ptr<DataType>& type) const = 0;
+ virtual Status RegisterType(Id, std::shared_ptr<DataType>) = 0;
+ /// \brief Register a converter that converts an Arrow call to a Substrait call
+ ///
+ /// Note that there may not be 1:1 parity between ArrowToSubstraitCall and
+ /// SubstraitCallToArrow because some standard functions (e.g. add) may map to
+ /// multiple Arrow functions (e.g. add, add_checked)
+ virtual Status AddArrowToSubstraitCall(std::string arrow_function_name,
+ ArrowToSubstraitCall conversion_func) = 0;
+ /// \brief Check to see if a converter can be registered
+ ///
+ /// \return Status::OK if there are no conflicts, otherwise an error is returned
+ virtual Status CanAddArrowToSubstraitCall(
+ const std::string& arrow_function_name) const = 0;
+
+ /// \brief Register a converter that converts an Arrow aggregate to a Substrait
+ /// aggregate
+ virtual Status AddArrowToSubstraitAggregate(
+ std::string arrow_function_name, ArrowToSubstraitAggregate conversion_func) = 0;
+ /// \brief Check to see if a converter can be registered
+ ///
+ /// \return Status::OK if there are no conflicts, otherwise an error is returned
+ virtual Status CanAddArrowToSubstraitAggregate(
+ const std::string& arrow_function_name) const = 0;
+
+ /// \brief Register a converter that converts a Substrait call to an Arrow call
+ virtual Status AddSubstraitCallToArrow(Id substrait_function_id,
+ SubstraitCallToArrow conversion_func) = 0;
+ /// \brief Check to see if a converter can be registered
+ ///
+ /// \return Status::OK if there are no conflicts, otherwise an error is returned
+ virtual Status CanAddSubstraitCallToArrow(Id substrait_function_id) const = 0;
+ /// \brief Register a simple mapping function
+ ///
+ /// All calls to the function must pass only value arguments. The arguments
+ /// will be converted to expressions and passed to the Arrow function
+ virtual Status AddSubstraitCallToArrow(Id substrait_function_id,
+ std::string arrow_function_name) = 0;
+
+ /// \brief Register a converter that converts a Substrait aggregate to an Arrow
+ /// aggregate
+ virtual Status AddSubstraitAggregateToArrow(
+ Id substrait_function_id, SubstraitAggregateToArrow conversion_func) = 0;
+ /// \brief Check to see if a converter can be registered
+ ///
+ /// \return Status::OK if there are no conflicts, otherwise an error is returned
+ virtual Status CanAddSubstraitAggregateToArrow(Id substrait_function_id) const = 0;
+
+ /// \brief Return a list of Substrait functions that have a converter
+ ///
+ /// The function ids are encoded as strings using the pattern {uri}#{name}
+ virtual std::vector<std::string> GetSupportedSubstraitFunctions() const = 0;
+
+ /// \brief Find a converter to map Arrow calls to Substrait calls
+ /// \return A converter function or an invalid status if no converter is registered
+ virtual Result<ArrowToSubstraitCall> GetArrowToSubstraitCall(
+ const std::string& arrow_function_name) const = 0;
+
+ /// \brief Find a converter to map Arrow aggregates to Substrait aggregates
+ /// \return A converter function or an invalid status if no converter is registered
+ virtual Result<ArrowToSubstraitAggregate> GetArrowToSubstraitAggregate(
+ const std::string& arrow_function_name) const = 0;
+
+ /// \brief Find a converter to map a Substrait aggregate to an Arrow aggregate
+ /// \return A converter function or an invalid status if no converter is registered
+ virtual Result<SubstraitAggregateToArrow> GetSubstraitAggregateToArrow(
+ Id substrait_function_id) const = 0;
+
+ /// \brief Find a converter to map a Substrait call to an Arrow call
+ /// \return A converter function or an invalid status if no converter is registered
+ virtual Result<SubstraitCallToArrow> GetSubstraitCallToArrow(
+ Id substrait_function_id) const = 0;
+
+ /// \brief Similar to \see GetSubstraitCallToArrow but only uses the name
+ ///
+ /// There may be multiple functions with the same name and this will return
+ /// the first. This is slower than GetSubstraitCallToArrow and should only
+ /// be used when the plan does not include a URI (or the URI is "/")
+ virtual Result<SubstraitCallToArrow> GetSubstraitCallToArrowFallback(
+ std::string_view function_name) const = 0;
+
+ /// \brief Similar to \see GetSubstraitAggregateToArrow but only uses the name
+ ///
+ /// \see GetSubstraitCallToArrowFallback for details on the fallback behavior
+ virtual Result<SubstraitAggregateToArrow> GetSubstraitAggregateToArrowFallback(
+ std::string_view function_name) const = 0;
+ };
+
+ constexpr std::string_view kArrowExtTypesUri =
+ "https://github.com/apache/arrow/blob/main/format/substrait/"
+ "extension_types.yaml";
+ // Extension types that don't match 1:1 with a data type (or the data type is
+ // parameterized)
+ constexpr std::string_view kTimeNanosTypeName = "time_nanos";
+ constexpr Id kTimeNanosId = {kArrowExtTypesUri, kTimeNanosTypeName};
+
+ /// A default registry with all supported functions and data types registered
+ ///
+ /// Note: Function support is currently very minimal, see ARROW-15538
+ ARROW_ENGINE_EXPORT ExtensionIdRegistry* default_extension_id_registry();
+
+ /// \brief Make a nested registry with a given parent.
+ ///
+ /// A nested registry supports registering types and functions in addition to those
+ /// already registered in its parent registry. No conflicts in IDs and names used for
+ /// lookup are allowed. Normally, the given parent is the default registry.
+ ///
+ /// One use case for a nested registry is for dynamic registration of functions defined
+ /// within a Substrait plan while keeping these registrations specific to the plan. When
+ /// the Substrait plan is disposed of, normally after its execution, the nested registry
+ /// can be disposed of as well.
+ ARROW_ENGINE_EXPORT std::shared_ptr<ExtensionIdRegistry> nested_extension_id_registry(
+ const ExtensionIdRegistry* parent);
+
+ /// \brief A set of extensions used within a plan
+ ///
+ /// Each time an extension is used within a Substrait plan the extension
+ /// must be included in an extension set that is defined at the root of the
+ /// plan.
+ ///
+ /// The plan refers to a specific extension using an "anchor" which is an
+ /// arbitrary integer invented by the producer that has no meaning beyond a
+ /// plan but which should be consistent within a plan.
+ ///
+ /// To support serialization and deserialization this type serves as a
+ /// bidirectional map between Substrait ID and "anchor"s.
+ ///
+ /// When deserializing a Substrait plan the extension set should be extracted
+ /// after the plan has been converted from Protobuf and before the plan
+ /// is converted to an execution plan.
+ ///
+ /// The extension set can be kept and reused during serialization if a perfect
+ /// round trip is required. If serialization is not needed or round tripping
+ /// is not required then the extension set can be safely discarded after the
+ /// plan has been converted into an execution plan.
+ ///
+ /// When converting an execution plan into a Substrait plan an extension set
+ /// can be automatically generated or a previously generated extension set can
+ /// be used.
+ ///
+ /// ExtensionSet does not own strings; it only refers to strings in an
+ /// ExtensionIdRegistry.
+ class ARROW_ENGINE_EXPORT ExtensionSet {
+ public:
+ struct FunctionRecord {
+ Id id;
+ std::string_view name;
+ };
+
+ struct TypeRecord {
+ Id id;
+ std::shared_ptr<DataType> type;
+ };
+
+ /// Construct an empty ExtensionSet to be populated during serialization.
+ explicit ExtensionSet(const ExtensionIdRegistry* = default_extension_id_registry());
+ ARROW_DEFAULT_MOVE_AND_ASSIGN(ExtensionSet);
+
+ /// Construct an ExtensionSet with explicit extension ids for efficient referencing
+ /// during deserialization. Note that input vectors need not be densely packed; an empty
+ /// (default constructed) Id may be used as a placeholder to indicate an unused
+ /// _anchor/_reference. This factory will be used to wrap the extensions declared in a
+ /// substrait::Plan before deserializing the plan's relations.
+ ///
+ /// Views will be replaced with equivalent views pointing to memory owned by the
+ /// registry.
+ ///
+ /// Note: This is an advanced operation. The order of the ids, types, and functions
+ /// must match the anchor numbers chosen for a plan.
+ ///
+ /// An extension set should instead be created using
+ /// arrow::engine::GetExtensionSetFromPlan
+ static Result<ExtensionSet> Make(
+ std::unordered_map<uint32_t, std::string_view> uris,
+ std::unordered_map<uint32_t, Id> type_ids,
+ std::unordered_map<uint32_t, Id> function_ids,
+ const ConversionOptions& conversion_options,
+ const ExtensionIdRegistry* = default_extension_id_registry());
+
+ const std::unordered_map<uint32_t, std::string_view>& uris() const { return uris_; }
+
+ /// \brief Returns a data type given an anchor
+ ///
+ /// This is used when converting a Substrait plan to an Arrow execution plan.
+ ///
+ /// If the anchor does not exist in this extension set an error will be returned.
+ Result<TypeRecord> DecodeType(uint32_t anchor) const;
+
+ /// \brief Returns the number of custom type records in this extension set
+ ///
+ /// Note: the types are currently stored as a sparse vector, so this may return a value
+ /// larger than the actual number of types. This behavior may change in the future; see
+ /// ARROW-15583.
+ std::size_t num_types() const { return types_.size(); }
+
+ /// \brief Lookup the anchor for a given type
+ ///
+ /// This operation is used when converting an Arrow execution plan to a Substrait plan.
+ /// If the type has been previously encoded then the same anchor value will be returned.
+ ///
+ /// If the type has not been previously encoded then a new anchor value will be created.
+ ///
+ /// If the type does not exist in the extension id registry then an error will be
+ /// returned.
+ ///
+ /// \return An anchor that can be used to refer to the type within a plan
+ Result<uint32_t> EncodeType(const DataType& type);
+
+ /// \brief Return a function id given an anchor
+ ///
+ /// This is used when converting a Substrait plan to an Arrow execution plan.
+ ///
+ /// If the anchor does not exist in this extension set an error will be returned.
+ Result<Id> DecodeFunction(uint32_t anchor) const;
+
+ /// \brief Lookup the anchor for a given function
+ ///
+ /// This operation is used when converting an Arrow execution plan to a Substrait plan.
+ /// If the function has been previously encoded then the same anchor value will be
+ /// returned.
+ ///
+ /// If the function has not been previously encoded then a new anchor value will be
+ /// created.
+ ///
+ /// If the function name is not in the extension id registry then an error will be
+ /// returned.
+ ///
+ /// \return An anchor that can be used to refer to the function within a plan
+ Result<uint32_t> EncodeFunction(Id function_id);
+
+ /// \brief Stores a plan-specific id that is not known to the registry
+ ///
+ /// This is used when converting an Arrow execution plan to a Substrait plan.
+ ///
+ /// If the function is a UDF, something that wasn't known to the registry,
+ /// then we need long term storage of the function name (the ids are just
+ /// views)
+ Id RegisterPlanSpecificId(Id id);
+
+ /// \brief Return the number of custom functions in this extension set
+ std::size_t num_functions() const { return functions_.size(); }
+
+ const ExtensionIdRegistry* registry() const { return registry_; }
+
+ private:
+ const ExtensionIdRegistry* registry_;
+ // If the registry is not aware of an id then we probably can't do anything
+ // with it. However, in some cases, these may represent extensions or features
+ // that we can safely ignore. For example, we can usually safely ignore
+ // extension type variations if we assume the plan is valid. These ignorable
+ // ids are stored here.
+ std::unique_ptr<IdStorage> plan_specific_ids_ = IdStorage::Make();
+
+ // Map from anchor values to URI values referenced by this extension set
+ std::unordered_map<uint32_t, std::string_view> uris_;
+ // Map from anchor values to type definitions, used during Substrait->Arrow
+ // and populated from the Substrait extension set
+ std::unordered_map<uint32_t, TypeRecord> types_;
+ // Map from anchor values to function ids, used during Substrait->Arrow
+ // and populated from the Substrait extension set
+ std::unordered_map<uint32_t, Id> functions_;
+ // Map from type names to anchor values. Used during Arrow->Substrait
+ // and built as the plan is created.
+ std::unordered_map<Id, uint32_t, IdHashEq, IdHashEq> types_map_;
+ // Map from function names to anchor values. Used during Arrow->Substrait
+ // and built as the plan is created.
+ std::unordered_map<Id, uint32_t, IdHashEq, IdHashEq> functions_map_;
+
+ Status CheckHasUri(std::string_view uri);
+ void AddUri(std::pair<uint32_t, std::string_view> uri);
+ Status AddUri(Id id);
+ };
+
+ } // namespace engine
+ } // namespace arrow
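The registry and the extension set divide the work: ExtensionIdRegistry holds the long-lived (uri, name) to Arrow mappings, while an ExtensionSet tracks which of those ids a single plan actually references, keyed by anchor. A small sketch that queries the default registry declared above, assuming the Substrait engine module is built and linked (illustrative only, not part of the vendored header; the file name is hypothetical):

// extension_set_sketch.cc (hypothetical file name, illustrative only)
#include <iostream>
#include <string>

#include "arrow/engine/substrait/extension_set.h"

void ListConvertibleSubstraitFunctions() {
  arrow::engine::ExtensionIdRegistry* registry =
      arrow::engine::default_extension_id_registry();
  // Each entry is encoded as {uri}#{name}, per the comment on
  // GetSupportedSubstraitFunctions above.
  for (const std::string& id : registry->GetSupportedSubstraitFunctions()) {
    std::cout << id << std::endl;
  }
}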
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_types.h ADDED
@@ -0,0 +1,90 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <cstdint>
+ #include <memory>
+ #include <optional>
+
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/type_fwd.h"
+
+ namespace arrow {
+ namespace engine {
+
+ // arrow::ExtensionTypes are provided to wrap uuid, fixed_char, varchar, interval_year,
+ // and interval_day which are first-class types in substrait but do not appear in
+ // the arrow type system.
+ //
+ // Note that these are not automatically registered with arrow::RegisterExtensionType(),
+ // which means among other things that serialization of these types to IPC would fail.
+
+ /// fixed_size_binary(16) for storing Universally Unique IDentifiers
+ ARROW_ENGINE_EXPORT
+ std::shared_ptr<DataType> uuid();
+
+ /// fixed_size_binary(length) constrained to contain only valid UTF-8
+ ARROW_ENGINE_EXPORT
+ std::shared_ptr<DataType> fixed_char(int32_t length);
+
+ /// utf8() constrained to be shorter than `length`
+ ARROW_ENGINE_EXPORT
+ std::shared_ptr<DataType> varchar(int32_t length);
+
+ /// fixed_size_list(int32(), 2) storing a number of [years, months]
+ ARROW_ENGINE_EXPORT
+ std::shared_ptr<DataType> interval_year();
+
+ /// fixed_size_list(int32(), 2) storing a number of [days, seconds]
+ ARROW_ENGINE_EXPORT
+ std::shared_ptr<DataType> interval_day();
+
+ /// constructs the appropriate timestamp type given the precision
+ /// and no time zone
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<DataType>> precision_timestamp(int precision);
+
+ /// constructs the appropriate timestamp type given the precision
+ /// and the UTC time zone
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<DataType>> precision_timestamp_tz(int precision);
+
+ /// Return true if t is Uuid, otherwise false
+ ARROW_ENGINE_EXPORT
+ bool UnwrapUuid(const DataType&);
+
+ /// Return FixedChar length if t is FixedChar, otherwise nullopt
+ ARROW_ENGINE_EXPORT
+ std::optional<int32_t> UnwrapFixedChar(const DataType&);
+
+ /// Return Varchar (max) length if t is VarChar, otherwise nullopt
+ ARROW_ENGINE_EXPORT
+ std::optional<int32_t> UnwrapVarChar(const DataType& t);
+
+ /// Return true if t is IntervalYear, otherwise false
+ ARROW_ENGINE_EXPORT
+ bool UnwrapIntervalYear(const DataType&);
+
+ /// Return true if t is IntervalDay, otherwise false
+ ARROW_ENGINE_EXPORT
+ bool UnwrapIntervalDay(const DataType&);
+
+ } // namespace engine
+ } // namespace arrow
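A short sketch of how these wrapper types round-trip, assuming the Substrait engine module is built and linked (illustrative only, not part of the vendored header; the file name is hypothetical):

// extension_types_sketch.cc (hypothetical file name, illustrative only)
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>

#include "arrow/engine/substrait/extension_types.h"
#include "arrow/type.h"  // complete DataType definition

void RoundTripFixedChar() {
  std::shared_ptr<arrow::DataType> t = arrow::engine::fixed_char(/*length=*/10);
  // UnwrapFixedChar recovers the declared length for FixedChar types...
  std::optional<int32_t> len = arrow::engine::UnwrapFixedChar(*t);
  assert(len.has_value() && *len == 10);
  // ...and yields nullopt for any other type, e.g. the uuid wrapper.
  assert(!arrow::engine::UnwrapFixedChar(*arrow::engine::uuid()).has_value());
}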
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/options.h ADDED
@@ -0,0 +1,135 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <functional>
+ #include <string>
+ #include <vector>
+
+ #include "arrow/acero/exec_plan.h"
+ #include "arrow/acero/options.h"
+ #include "arrow/compute/type_fwd.h"
+ #include "arrow/engine/substrait/type_fwd.h"
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/type_fwd.h"
+
+ namespace arrow {
+ namespace engine {
+
+ /// How strictly to adhere to the input structure when converting between Substrait and
+ /// Acero representations of a plan. This allows the user to trade conversion accuracy
+ /// for performance and lenience.
+ enum class ARROW_ENGINE_EXPORT ConversionStrictness {
+ /// When a primitive is used at the input that doesn't have an exact match at the
+ /// output, reject the conversion. This effectively asserts that there is no (known)
+ /// information loss in the conversion, and that plans should either round-trip back and
+ /// forth exactly or not at all. This option is primarily intended for testing and
+ /// debugging.
+ EXACT_ROUNDTRIP,
+
+ /// When a primitive is used at the input that doesn't have an exact match at the
+ /// output, attempt to model it with some collection of primitives at the output. This
+ /// means that even if the incoming plan is completely optimal by some metric, the
+ /// returned plan is fairly likely to not be optimal anymore, and round-trips back and
+ /// forth may make the plan increasingly suboptimal. However, every primitive at the
+ /// output can be (manually) traced back to exactly one primitive at the input, which
+ /// may be useful when debugging.
+ PRESERVE_STRUCTURE,
+
+ /// Behaves like PRESERVE_STRUCTURE, but prefers performance over structural accuracy.
+ /// Basic optimizations *may* be applied, in order to attempt to not regress in terms of
+ /// plan performance: if the incoming plan was already aggressively optimized, the goal
+ /// is for the output plan to not be less performant. In practical use cases, this is
+ /// probably the option you want.
+ ///
+ /// Note that no guarantees are made on top of PRESERVE_STRUCTURE. Past and future
+ /// versions of Arrow may even ignore this option entirely and treat it exactly like
+ /// PRESERVE_STRUCTURE.
+ BEST_EFFORT,
+ };
+
+ using NamedTableProvider = std::function<Result<acero::Declaration>(
+ const std::vector<std::string>&, const Schema&)>;
+ static NamedTableProvider kDefaultNamedTableProvider;
+
+ using NamedTapProvider = std::function<Result<acero::Declaration>(
+ const std::string&, std::vector<acero::Declaration::Input>, const std::string&,
+ std::shared_ptr<Schema>)>;
+
+ class ARROW_ENGINE_EXPORT ExtensionDetails {
+ public:
+ virtual ~ExtensionDetails() = default;
+ };
+
+ class ARROW_ENGINE_EXPORT ExtensionProvider {
+ public:
+ virtual ~ExtensionProvider() = default;
+ virtual Result<DeclarationInfo> MakeRel(const ConversionOptions& conv_opts,
+ const std::vector<DeclarationInfo>& inputs,
+ const ExtensionDetails& ext_details,
+ const ExtensionSet& ext_set) = 0;
+ };
+
+ /// \brief Get the default extension provider
+ ARROW_ENGINE_EXPORT std::shared_ptr<ExtensionProvider> default_extension_provider();
+ /// \brief Set the default extension provider
+ ///
+ /// \param[in] provider the new provider to be set as default
+ ARROW_ENGINE_EXPORT void set_default_extension_provider(
+ const std::shared_ptr<ExtensionProvider>& provider);
+
+ ARROW_ENGINE_EXPORT NamedTapProvider default_named_tap_provider();
+
+ ARROW_ENGINE_EXPORT void set_default_named_tap_provider(NamedTapProvider provider);
+
+ /// Options that control the conversion between Substrait and Acero representations of a
+ /// plan.
+ struct ARROW_ENGINE_EXPORT ConversionOptions {
+ ConversionOptions()
+ : strictness(ConversionStrictness::BEST_EFFORT),
+ named_table_provider(kDefaultNamedTableProvider),
+ named_tap_provider(default_named_tap_provider()),
+ extension_provider(default_extension_provider()),
+ allow_arrow_extensions(false) {}
+
+ /// \brief How strictly the converter should adhere to the structure of the input.
+ ConversionStrictness strictness;
+ /// \brief A custom strategy to be used for providing named tables
+ ///
+ /// The default behavior will return an invalid status if the plan has any
+ /// named table relations.
+ NamedTableProvider named_table_provider;
+ /// \brief A custom strategy to be used for obtaining a tap declaration
+ ///
+ /// The default provider returns an error
+ NamedTapProvider named_tap_provider;
+ /// \brief A custom strategy to be used for providing relation infos.
+ ///
+ /// The default behavior will provide for relations known to Arrow.
+ std::shared_ptr<ExtensionProvider> extension_provider;
+ /// \brief If true then Arrow-specific types and functions will be allowed
+ ///
+ /// Set to false to create plans that are more likely to be compatible with non-Arrow
+ /// engines
+ bool allow_arrow_extensions;
+ };
+
+ } // namespace engine
+ } // namespace arrow
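To make ConversionOptions concrete, the following sketch (not from the header; my_table is a placeholder the caller supplies) wires a NamedTableProvider that resolves every named-table relation in a plan to one in-memory table via Acero's table_source factory:

#include "arrow/acero/options.h"
#include "arrow/engine/substrait/options.h"
#include "arrow/table.h"

arrow::engine::ConversionOptions MakeOptions(std::shared_ptr<arrow::Table> my_table) {
  arrow::engine::ConversionOptions opts;
  opts.strictness = arrow::engine::ConversionStrictness::BEST_EFFORT;
  // Serve the same in-memory table for any named-table relation in the plan.
  opts.named_table_provider =
      [my_table](const std::vector<std::string>& names, const arrow::Schema&)
      -> arrow::Result<arrow::acero::Declaration> {
    if (names.empty()) return arrow::Status::Invalid("empty table name");
    return arrow::acero::Declaration(
        "table_source", arrow::acero::TableSourceNodeOptions(my_table));
  };
  return opts;
}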
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/relation.h ADDED
@@ -0,0 +1,71 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+
+ #include "arrow/acero/exec_plan.h"
+ #include "arrow/compute/api_aggregate.h"
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/type_fwd.h"
+
+ namespace arrow {
+ namespace engine {
+
+ /// Execution information resulting from converting a Substrait relation.
+ struct ARROW_ENGINE_EXPORT DeclarationInfo {
+ /// The compute declaration produced thus far.
+ acero::Declaration declaration;
+
+ std::shared_ptr<Schema> output_schema;
+ };
+
+ /// Information resulting from converting a Substrait plan
+ struct ARROW_ENGINE_EXPORT PlanInfo {
+ /// The root declaration.
+ ///
+ /// Only plans containing a single top-level relation are supported and so this will
+ /// represent that relation.
+ ///
+ /// This should technically be a RelRoot but some producers use a simple Rel here and so
+ /// Acero currently supports that case.
+ DeclarationInfo root;
+ /// The names of the output fields
+ ///
+ /// If `root` was created from a simple Rel then this will be empty
+ std::vector<std::string> names;
+ };
+
+ /// An expression whose output has a name
+ struct ARROW_ENGINE_EXPORT NamedExpression {
+ /// An expression
+ compute::Expression expression;
+ /// An optional name to assign to the output, may be the empty string
+ std::string name;
+ };
+
+ /// A collection of expressions bound to a common schema
+ struct ARROW_ENGINE_EXPORT BoundExpressions {
+ /// The expressions
+ std::vector<NamedExpression> named_expressions;
+ /// The schema that all the expressions are bound to
+ std::shared_ptr<Schema> schema;
+ };
+
+ } // namespace engine
+ } // namespace arrow
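As a small, hand-written illustration (not part of the header), a BoundExpressions value for a one-column schema could be assembled like this; the actual binding to the schema happens when the expressions are serialized:

#include "arrow/api.h"
#include "arrow/compute/expression.h"
#include "arrow/engine/substrait/relation.h"

arrow::engine::BoundExpressions MakeBound() {
  namespace cp = arrow::compute;
  arrow::engine::BoundExpressions bound;
  bound.schema = arrow::schema({arrow::field("x", arrow::int32())});
  // One named expression, "x + 1"; the name may also be left empty.
  bound.named_expressions.push_back(
      {cp::call("add", {cp::field_ref("x"), cp::literal(1)}), "x_plus_one"});
  return bound;
}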
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/serde.h ADDED
@@ -0,0 +1,331 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <functional>
+ #include <memory>
+ #include <string>
+ #include <string_view>
+ #include <vector>
+
+ #include "arrow/compute/type_fwd.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/engine/substrait/options.h"
+ #include "arrow/engine/substrait/relation.h"
+ #include "arrow/engine/substrait/type_fwd.h"
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/result.h"
+ #include "arrow/status.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace engine {
+
+ /// \brief Serialize an Acero Plan to a binary protobuf Substrait message
+ ///
+ /// \param[in] declaration the Acero declaration to serialize.
+ /// This declaration is the sink relation of the Acero plan.
+ /// \param[in,out] ext_set the extension mapping to use; may be updated to add new mappings
+ /// \param[in] conversion_options options to control how the conversion is done
+ ///
+ /// \return a buffer containing the protobuf serialization of the Acero relation
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<Buffer>> SerializePlan(
+ const acero::Declaration& declaration, ExtensionSet* ext_set,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Serialize expressions to a Substrait message
+ ///
+ /// \param[in] bound_expressions the expressions to serialize.
+ /// \param[in] conversion_options options to control how the conversion is done
+ /// \param[in,out] ext_set the extension mapping to use, optional, only needed
+ /// if you want to control the value of function anchors
+ /// to mirror a previous serialization / deserialization.
+ /// Will be updated if new functions are encountered
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<Buffer>> SerializeExpressions(
+ const BoundExpressions& bound_expressions,
+ const ConversionOptions& conversion_options = {}, ExtensionSet* ext_set = NULLPTR);
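A plausible round trip through SerializeExpressions and its counterpart DeserializeExpressions (declared further down this header) looks like the following sketch, assuming `bound` was built as in the relation.h example:

arrow::Status RoundTripExpressions(const arrow::engine::BoundExpressions& bound) {
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> buf,
                        arrow::engine::SerializeExpressions(bound));
  ARROW_ASSIGN_OR_RAISE(arrow::engine::BoundExpressions restored,
                        arrow::engine::DeserializeExpressions(*buf));
  // restored.schema and the expression structure should mirror the input.
  return arrow::Status::OK();
}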
+
+ /// Factory function type for generating the node that consumes the batches produced by
+ /// each toplevel Substrait relation when deserializing a Substrait Plan.
+ using ConsumerFactory = std::function<std::shared_ptr<acero::SinkNodeConsumer>()>;
+
+ /// \brief Deserializes a Substrait Plan message to a list of ExecNode declarations
+ ///
+ /// The output of each top-level Substrait relation will be sent to a caller supplied
+ /// consumer function provided by consumer_factory
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+ /// message
+ /// \param[in] consumer_factory factory function for generating the node that consumes
+ /// the batches produced by each toplevel Substrait relation
+ /// \param[in] registry an extension-id-registry to use, or null for the default one.
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// Plan is returned here.
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return a vector of ExecNode declarations, one for each toplevel relation in the
+ /// Substrait Plan
+ ARROW_ENGINE_EXPORT Result<std::vector<acero::Declaration>> DeserializePlans(
+ const Buffer& buf, const ConsumerFactory& consumer_factory,
+ const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Deserializes a single-relation Substrait Plan message to an execution plan
+ ///
+ /// The output of the plan's single Substrait relation will be sent to the caller
+ /// supplied consumer
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+ /// message
+ /// \param[in] consumer node that consumes the batches produced by each toplevel Substrait
+ /// relation
+ /// \param[in] registry an extension-id-registry to use, or null for the default one.
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// Plan is returned here.
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return an ExecPlan for the Substrait Plan
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<acero::ExecPlan>> DeserializePlan(
+ const Buffer& buf, const std::shared_ptr<acero::SinkNodeConsumer>& consumer,
+ const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+ const ConversionOptions& conversion_options = {});
+
+ /// Factory function type for generating the write options of a node consuming the batches
+ /// produced by each toplevel Substrait relation when deserializing a Substrait Plan.
+ using WriteOptionsFactory = std::function<std::shared_ptr<dataset::WriteNodeOptions>()>;
+
+ /// \brief Deserializes a Substrait Plan message to a list of ExecNode declarations
+ ///
+ /// The output of each top-level Substrait relation will be written to a filesystem.
+ /// `write_options_factory` can be used to control write behavior.
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+ /// message
+ /// \param[in] write_options_factory factory function for generating the write options of
+ /// a node consuming the batches produced by each toplevel Substrait relation
+ /// \param[in] registry an extension-id-registry to use, or null for the default one.
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// Plan is returned here.
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return a vector of ExecNode declarations, one for each toplevel relation in the
+ /// Substrait Plan
+ ARROW_ENGINE_EXPORT Result<std::vector<acero::Declaration>> DeserializePlans(
+ const Buffer& buf, const WriteOptionsFactory& write_options_factory,
+ const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Deserializes a single-relation Substrait Plan message to an execution plan
+ ///
+ /// The output of the single Substrait relation will be written to a filesystem.
+ /// `write_options` can be used to control write behavior.
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+ /// message
+ /// \param[in] write_options write options of a node consuming the batches produced by
+ /// each toplevel Substrait relation
+ /// \param[in] registry an extension-id-registry to use, or null for the default one.
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// Plan is returned here.
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return an ExecPlan for the Substrait Plan
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<acero::ExecPlan>> DeserializePlan(
+ const Buffer& buf, const std::shared_ptr<dataset::WriteNodeOptions>& write_options,
+ const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Deserializes a Substrait Plan message to a Declaration
+ ///
+ /// The plan will not contain any sink nodes and will be suitable for use in any
+ /// of the arrow::compute::DeclarationToXyz methods.
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+ /// message
+ /// \param[in] registry an extension-id-registry to use, or null for the default one.
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// Plan is returned here.
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return A declaration representing the Substrait plan
+ ARROW_ENGINE_EXPORT Result<PlanInfo> DeserializePlan(
+ const Buffer& buf, const ExtensionIdRegistry* registry = NULLPTR,
+ ExtensionSet* ext_set_out = NULLPTR,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Deserialize a Substrait ExtendedExpression message to the corresponding Arrow
+ /// type
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a collection of bound
+ /// expressions
+ /// \param[in] registry an extension-id-registry to use, or null for the default one
+ /// \param[in] conversion_options options to control how the conversion is done
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// message is returned here.
+ /// \return A collection of expressions and a common input schema they are bound to
+ ARROW_ENGINE_EXPORT Result<BoundExpressions> DeserializeExpressions(
+ const Buffer& buf, const ExtensionIdRegistry* registry = NULLPTR,
+ const ConversionOptions& conversion_options = {},
+ ExtensionSet* ext_set_out = NULLPTR);
+
+ /// \brief Deserializes a Substrait Type message to the corresponding Arrow type
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Type
+ /// message
+ /// \param[in] ext_set the extension mapping to use, normally provided by the
+ /// surrounding Plan message
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return the corresponding Arrow data type
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<DataType>> DeserializeType(
+ const Buffer& buf, const ExtensionSet& ext_set,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Serializes an Arrow type to a Substrait Type message
+ ///
+ /// \param[in] type the Arrow data type to serialize
+ /// \param[in,out] ext_set the extension mapping to use; may be updated to add a
+ /// mapping for the given type
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return a buffer containing the protobuf serialization of the corresponding Substrait
+ /// Type message
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<Buffer>> SerializeType(
+ const DataType& type, ExtensionSet* ext_set,
+ const ConversionOptions& conversion_options = {});
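A type-level round trip using the pair above; this sketch assumes ExtensionSet's default constructor targets the default extension-id registry (an assumption, as the constructor is declared in extension_set.h rather than here):

arrow::Status RoundTripType() {
  arrow::engine::ExtensionSet ext_set;  // assumed: collects mappings as needed
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> buf,
                        arrow::engine::SerializeType(*arrow::int32(), &ext_set));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::DataType> ty,
                        arrow::engine::DeserializeType(*buf, ext_set));
  return ty->Equals(*arrow::int32())
             ? arrow::Status::OK()
             : arrow::Status::Invalid("round trip changed the type");
}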
+
+ /// \brief Deserializes a Substrait NamedStruct message to an Arrow schema
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait
+ /// NamedStruct message
+ /// \param[in] ext_set the extension mapping to use, normally provided by the
+ /// surrounding Plan message
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return the corresponding Arrow schema
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<Schema>> DeserializeSchema(
+ const Buffer& buf, const ExtensionSet& ext_set,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Serializes an Arrow schema to a Substrait NamedStruct message
+ ///
+ /// \param[in] schema the Arrow schema to serialize
+ /// \param[in,out] ext_set the extension mapping to use; may be updated to add
+ /// mappings for the types used in the schema
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return a buffer containing the protobuf serialization of the corresponding Substrait
+ /// NamedStruct message
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<Buffer>> SerializeSchema(
+ const Schema& schema, ExtensionSet* ext_set,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Deserializes a Substrait Expression message to a compute expression
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait
+ /// Expression message
+ /// \param[in] ext_set the extension mapping to use, normally provided by the
+ /// surrounding Plan message
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return the corresponding Arrow compute expression
+ ARROW_ENGINE_EXPORT
+ Result<compute::Expression> DeserializeExpression(
+ const Buffer& buf, const ExtensionSet& ext_set,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Serializes an Arrow compute expression to a Substrait Expression message
+ ///
+ /// \param[in] expr the Arrow compute expression to serialize
+ /// \param[in,out] ext_set the extension mapping to use; may be updated to add
+ /// mappings for the types used in the expression
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return a buffer containing the protobuf serialization of the corresponding Substrait
+ /// Expression message
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<Buffer>> SerializeExpression(
+ const compute::Expression& expr, ExtensionSet* ext_set,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Serialize an Acero Declaration to a binary protobuf Substrait message
+ ///
+ /// \param[in] declaration the Acero declaration to serialize
+ /// \param[in,out] ext_set the extension mapping to use; may be updated to add new mappings
+ /// \param[in] conversion_options options to control how the conversion is done
+ ///
+ /// \return a buffer containing the protobuf serialization of the Acero relation
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> SerializeRelation(
+ const acero::Declaration& declaration, ExtensionSet* ext_set,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Deserializes a Substrait Rel (relation) message to an ExecNode declaration
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait
+ /// Rel message
+ /// \param[in] ext_set the extension mapping to use, normally provided by the
+ /// surrounding Plan message
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return the corresponding ExecNode declaration
+ ARROW_ENGINE_EXPORT Result<acero::Declaration> DeserializeRelation(
+ const Buffer& buf, const ExtensionSet& ext_set,
+ const ConversionOptions& conversion_options = {});
+
+ namespace internal {
+
+ /// \brief Checks whether two protobuf serializations of a particular Substrait message
+ /// type are equivalent
+ ///
+ /// Note that a binary comparison of the two buffers is insufficient. One reason for this
+ /// is that the fields of a message can be specified in any order in the serialization.
+ ///
+ /// \param[in] message_name the name of the Substrait message type to check
+ /// \param[in] l_buf buffer containing the first protobuf serialization to compare
+ /// \param[in] r_buf buffer containing the second protobuf serialization to compare
+ /// \return success if equivalent, failure if not
+ ARROW_ENGINE_EXPORT
+ Status CheckMessagesEquivalent(std::string_view message_name, const Buffer& l_buf,
+ const Buffer& r_buf);
+
+ /// \brief Utility function to convert a JSON serialization of a Substrait message to
+ /// its binary serialization
+ ///
+ /// \param[in] type_name the name of the Substrait message type to convert
+ /// \param[in] json the JSON string to convert
+ /// \param[in] ignore_unknown_fields if true then unknown fields will be ignored and
+ /// will not cause an error
+ ///
+ /// This should generally be true to allow consumption of plans from newer
+ /// producers but setting to false can be useful if you are testing
+ /// conformance to a specific Substrait version
+ /// \return a buffer filled with the binary protobuf serialization of the message
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<Buffer>> SubstraitFromJSON(std::string_view type_name,
+ std::string_view json,
+ bool ignore_unknown_fields = true);
+
+ /// \brief Utility function to convert a binary protobuf serialization of a Substrait
+ /// message to JSON
+ ///
+ /// \param[in] type_name the name of the Substrait message type to convert
+ /// \param[in] buf the buffer containing the binary protobuf serialization of the message
+ /// \return a JSON string representing the message
+ ARROW_ENGINE_EXPORT
+ Result<std::string> SubstraitToJSON(std::string_view type_name, const Buffer& buf);
+
+ } // namespace internal
+ } // namespace engine
+ } // namespace arrow
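Pulling the serde entry points together, here is a sketch (mine) of consuming a plan with the sink-less DeserializePlan overload and Acero's DeclarationToTable helper:

#include "arrow/acero/exec_plan.h"
#include "arrow/engine/substrait/serde.h"

arrow::Result<std::shared_ptr<arrow::Table>> RunPlan(const arrow::Buffer& buf) {
  ARROW_ASSIGN_OR_RAISE(arrow::engine::PlanInfo plan,
                        arrow::engine::DeserializePlan(buf));
  // No sink node is attached, so the DeclarationTo* helpers can be used.
  return arrow::acero::DeclarationToTable(std::move(plan.root.declaration));
}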
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_plan_builder.h ADDED
@@ -0,0 +1,76 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // These utilities are for internal / unit test use only.
+ // They allow for the construction of simple Substrait plans
+ // programmatically without first requiring the construction
+ // of an ExecPlan
+
+ // These utilities have to be here, and not in a test_util.cc
+ // file (or in a unit test) because only one .so is allowed
+ // to include each .pb.h file or else protobuf will encounter
+ // global namespace conflicts.
+
+ #include <memory>
+ #include <string>
+ #include <unordered_map>
+ #include <vector>
+
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/result.h"
+ #include "arrow/type_fwd.h"
+
+ namespace arrow {
+ namespace engine {
+
+ struct Id;
+
+ namespace internal {
+
+ /// \brief Create a scan->project->sink plan for tests
+ ///
+ /// The plan will project one additional column using the function
+ /// defined by `function_id`, `arguments`, and `data_types`. `arguments`
+ /// and `data_types` should have the same length but only one of each
+ /// should be defined at each index.
+ ///
+ /// If `data_types` is defined at an index then the plan will create a
+ /// direct reference (starting at index 0 and increasing by 1 for each
+ /// argument of this type).
+ ///
+ /// If `arguments` is defined at an index then the plan will create an
+ /// enum argument with that value.
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> CreateScanProjectSubstrait(
+ Id function_id, const std::shared_ptr<Table>& input_table,
+ const std::vector<std::string>& arguments,
+ const std::unordered_map<std::string, std::vector<std::string>>& options,
+ const std::vector<std::shared_ptr<DataType>>& data_types,
+ const DataType& output_type);
+
+ /// \brief Create a scan->aggregate->sink plan for tests
+ ///
+ /// The plan will create an aggregate with one grouping set (defined by
+ /// `key_idxs`) and one measure. The measure will be a function
+ /// defined by `function_id` and direct references to `arg_idxs`.
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> CreateScanAggSubstrait(
+ Id function_id, const std::shared_ptr<Table>& input_table,
+ const std::vector<int>& key_idxs, const std::vector<int>& arg_idxs,
+ const DataType& output_type);
+
+ } // namespace internal
+ } // namespace engine
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_util.h ADDED
@@ -0,0 +1,45 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include "arrow/testing/gtest_util.h"
+ #include "arrow/util/vector.h"
+
+ #include <functional>
+ #include <random>
+ #include <string>
+ #include <string_view>
+ #include <vector>
+
+ #include "arrow/acero/exec_plan.h"
+ #include "arrow/compute/exec.h"
+ #include "arrow/compute/kernel.h"
+ #include "arrow/testing/visibility.h"
+ #include "arrow/util/async_generator.h"
+ #include "arrow/util/pcg_random.h"
+
+ namespace arrow {
+ namespace engine {
+
+ Result<std::shared_ptr<Table>> SortTableOnAllFields(const std::shared_ptr<Table>& tab);
+
+ void AssertTablesEqualIgnoringOrder(const std::shared_ptr<Table>& exp,
+ const std::shared_ptr<Table>& act);
+
+ } // namespace engine
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/type_fwd.h ADDED
@@ -0,0 +1,32 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ namespace arrow {
+ namespace engine {
+
+ class ExtensionIdRegistry;
+ class ExtensionSet;
+
+ struct ConversionOptions;
+ struct DeclarationInfo;
+
+ } // namespace engine
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/util.h ADDED
@@ -0,0 +1,83 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <functional>
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ #include "arrow/compute/type_fwd.h"
+ #include "arrow/engine/substrait/options.h"
+ #include "arrow/engine/substrait/type_fwd.h"
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/result.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/iterator.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+
+ namespace engine {
+
+ using PythonTableProvider =
+ std::function<Result<std::shared_ptr<Table>>(const std::vector<std::string>&)>;
+
+ /// \brief Utility method to run a Substrait plan
+ /// \param substrait_buffer The plan to run, must be in binary protobuf format
+ /// \param registry A registry of extension functions to make available to the plan
+ /// If null then the default registry will be used.
+ /// \param func_registry A registry of functions used for executing expressions.
+ /// `registry` maps from Substrait function IDs to "names". These
+ /// names will be provided to `func_registry` to get the actual
+ /// kernel.
+ /// \param conversion_options Options to control plan deserialization
+ /// \param use_threads If True then the CPU thread pool will be used for CPU work. If
+ /// False then all work will be done on the calling thread.
+ /// \param memory_pool The memory pool the plan should use to make allocations.
+ /// \return A record batch reader that will read out the results
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<RecordBatchReader>> ExecuteSerializedPlan(
+ const Buffer& substrait_buffer, const ExtensionIdRegistry* registry = NULLPTR,
+ compute::FunctionRegistry* func_registry = NULLPTR,
+ const ConversionOptions& conversion_options = {}, bool use_threads = true,
+ MemoryPool* memory_pool = default_memory_pool());
+
+ /// \brief Get a Serialized Plan from a Substrait JSON plan.
+ /// This is a helper method for Python tests.
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> SerializeJsonPlan(
+ const std::string& substrait_json);
+
+ /// \brief Make a nested registry with the default registry as parent.
+ /// See arrow::engine::nested_extension_id_registry for details.
+ ARROW_ENGINE_EXPORT std::shared_ptr<ExtensionIdRegistry> MakeExtensionIdRegistry();
+
+ ARROW_ENGINE_EXPORT const std::string& default_extension_types_uri();
+
+ // TODO(ARROW-18145) Populate these from cmake files
+ constexpr uint32_t kSubstraitMajorVersion = 0;
+ constexpr uint32_t kSubstraitMinorVersion = 44;
+ constexpr uint32_t kSubstraitPatchVersion = 0;
+
+ constexpr uint32_t kSubstraitMinimumMajorVersion = 0;
+ constexpr uint32_t kSubstraitMinimumMinorVersion = 20;
+
+ Status CheckVersion(uint32_t major_version, uint32_t minor_version);
+
+ } // namespace engine
+
+ } // namespace arrow
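End to end, ExecuteSerializedPlan pairs naturally with SerializeJsonPlan; a sketch (mine) with a caller-supplied JSON plan:

arrow::Status RunJsonPlan(const std::string& substrait_json) {
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> buf,
                        arrow::engine::SerializeJsonPlan(substrait_json));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::RecordBatchReader> reader,
                        arrow::engine::ExecuteSerializedPlan(*buf));
  // Drain the reader; a real caller would go on to use the table.
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Table> table, reader->ToTable());
  (void)table;
  return arrow::Status::OK();
}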
venv/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/visibility.h ADDED
@@ -0,0 +1,52 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // TODO(westonpace): Once we have a proper engine module this file
+ // should be renamed arrow/engine/visibility.h
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #if defined(_WIN32) || defined(__CYGWIN__)
+ #if defined(_MSC_VER)
+ #pragma warning(push)
+ #pragma warning(disable : 4251)
+ #else
+ #pragma GCC diagnostic ignored "-Wattributes"
+ #endif
+
+ #ifdef ARROW_ENGINE_STATIC
+ #define ARROW_ENGINE_EXPORT
+ #elif defined(ARROW_ENGINE_EXPORTING)
+ #define ARROW_ENGINE_EXPORT __declspec(dllexport)
+ #else
+ #define ARROW_ENGINE_EXPORT __declspec(dllimport)
+ #endif
+
+ #define ARROW_ENGINE_NO_EXPORT
+ #else // Not Windows
+ #ifndef ARROW_ENGINE_EXPORT
+ #define ARROW_ENGINE_EXPORT __attribute__((visibility("default")))
+ #endif
+ #ifndef ARROW_ENGINE_NO_EXPORT
+ #define ARROW_ENGINE_NO_EXPORT __attribute__((visibility("hidden")))
+ #endif
+ #endif // Non-Windows
+
+ #if defined(_MSC_VER)
+ #pragma warning(pop)
+ #endif
venv/lib/python3.10/site-packages/pyarrow/include/arrow/extension_type.h ADDED
@@ -0,0 +1,165 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ /// User-defined extension types.
+ /// \since 0.13.0
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+
+ #include "arrow/array/array_base.h"
+ #include "arrow/array/data.h"
+ #include "arrow/result.h"
+ #include "arrow/status.h"
+ #include "arrow/type.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/checked_cast.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/visibility.h"
+
+ namespace arrow {
+
+ /// \brief The base class for custom / user-defined types.
+ class ARROW_EXPORT ExtensionType : public DataType {
+ public:
+ static constexpr Type::type type_id = Type::EXTENSION;
+
+ static constexpr const char* type_name() { return "extension"; }
+
+ /// \brief The type of array used to represent this extension type's data
+ const std::shared_ptr<DataType>& storage_type() const { return storage_type_; }
+
+ /// \brief Return the type category of the storage type
+ Type::type storage_id() const override { return storage_type_->id(); }
+
+ DataTypeLayout layout() const override;
+
+ std::string ToString(bool show_metadata = false) const override;
+
+ std::string name() const override { return "extension"; }
+
+ /// \brief Unique name of extension type used to identify type for
+ /// serialization
+ /// \return the string name of the extension
+ virtual std::string extension_name() const = 0;
+
+ /// \brief Determine if two instances of the same extension types are
+ /// equal. Invoked from ExtensionType::Equals
+ /// \param[in] other the type to compare this type with
+ /// \return bool true if type instances are equal
+ virtual bool ExtensionEquals(const ExtensionType& other) const = 0;
+
+ /// \brief Wrap built-in Array type in a user-defined ExtensionArray instance
+ /// \param[in] data the physical storage for the extension type
+ virtual std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const = 0;
+
+ /// \brief Create an instance of the ExtensionType given the actual storage
+ /// type and the serialized representation
+ /// \param[in] storage_type the physical storage type of the extension
+ /// \param[in] serialized_data the serialized representation produced by
+ /// Serialize
+ virtual Result<std::shared_ptr<DataType>> Deserialize(
+ std::shared_ptr<DataType> storage_type,
+ const std::string& serialized_data) const = 0;
+
+ /// \brief Create a serialized representation of the extension type's
+ /// metadata. The storage type will be handled automatically in IPC code
+ /// paths
+ /// \return the serialized representation
+ virtual std::string Serialize() const = 0;
+
+ /// \brief Wrap the given storage array as an extension array
+ static std::shared_ptr<Array> WrapArray(const std::shared_ptr<DataType>& ext_type,
+ const std::shared_ptr<Array>& storage);
+
+ /// \brief Wrap the given chunked storage array as a chunked extension array
+ static std::shared_ptr<ChunkedArray> WrapArray(
+ const std::shared_ptr<DataType>& ext_type,
+ const std::shared_ptr<ChunkedArray>& storage);
+
+ protected:
+ explicit ExtensionType(std::shared_ptr<DataType> storage_type)
+ : DataType(Type::EXTENSION), storage_type_(storage_type) {}
+
+ std::shared_ptr<DataType> storage_type_;
+ };
+
+ /// \brief Base array class for user-defined extension types
+ class ARROW_EXPORT ExtensionArray : public Array {
+ public:
+ using TypeClass = ExtensionType;
+ /// \brief Construct an ExtensionArray from an ArrayData.
+ ///
+ /// The ArrayData must have the right ExtensionType.
+ explicit ExtensionArray(const std::shared_ptr<ArrayData>& data);
+
+ /// \brief Construct an ExtensionArray from a type and the underlying storage.
+ ExtensionArray(const std::shared_ptr<DataType>& type,
+ const std::shared_ptr<Array>& storage);
+
+ const ExtensionType* extension_type() const {
+ return internal::checked_cast<const ExtensionType*>(data_->type.get());
+ }
+
+ /// \brief The physical storage for the extension array
+ const std::shared_ptr<Array>& storage() const { return storage_; }
+
+ protected:
+ void SetData(const std::shared_ptr<ArrayData>& data);
+ std::shared_ptr<Array> storage_;
+ };
+
+ class ARROW_EXPORT ExtensionTypeRegistry {
+ public:
+ /// \brief Provide access to the global registry to allow code to control for
+ /// race conditions in registry teardown when some types need to be
+ /// unregistered and destroyed first
+ static std::shared_ptr<ExtensionTypeRegistry> GetGlobalRegistry();
+
+ virtual ~ExtensionTypeRegistry() = default;
+
+ virtual Status RegisterType(std::shared_ptr<ExtensionType> type) = 0;
+ virtual Status UnregisterType(const std::string& type_name) = 0;
+ virtual std::shared_ptr<ExtensionType> GetType(const std::string& type_name) = 0;
+ };
+
+ /// \brief Register an extension type globally. The name returned by the type's
+ /// extension_name() method should be unique. This method is thread-safe
+ /// \param[in] type an instance of the extension type
+ /// \return Status
+ ARROW_EXPORT
+ Status RegisterExtensionType(std::shared_ptr<ExtensionType> type);
+
+ /// \brief Delete an extension type from the global registry. This method is
+ /// thread-safe
+ /// \param[in] type_name the unique name of a registered extension type
+ /// \return Status error if the type name is unknown
+ ARROW_EXPORT
+ Status UnregisterExtensionType(const std::string& type_name);
+
+ /// \brief Retrieve an extension type from the global registry. Returns nullptr
+ /// if not found. This method is thread-safe
+ /// \return the globally-registered extension type
+ ARROW_EXPORT
+ std::shared_ptr<ExtensionType> GetExtensionType(const std::string& type_name);
+
+ ARROW_EXPORT extern const char kExtensionTypeKeyName[];
+ ARROW_EXPORT extern const char kExtensionMetadataKeyName[];
+
+ } // namespace arrow
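The subclassing contract above is easiest to see in a small example. This sketch is modeled on the well-known UUID example from the Arrow documentation, simplified here; the extension name "example.uuid" and the "uuid-v1" metadata string are made up. It wraps fixed_size_binary(16):

#include "arrow/api.h"
#include "arrow/extension_type.h"

class UuidType : public arrow::ExtensionType {
 public:
  UuidType() : arrow::ExtensionType(arrow::fixed_size_binary(16)) {}

  std::string extension_name() const override { return "example.uuid"; }

  bool ExtensionEquals(const arrow::ExtensionType& other) const override {
    return other.extension_name() == extension_name();
  }

  std::shared_ptr<arrow::Array> MakeArray(
      std::shared_ptr<arrow::ArrayData> data) const override {
    return std::make_shared<arrow::ExtensionArray>(data);
  }

  arrow::Result<std::shared_ptr<arrow::DataType>> Deserialize(
      std::shared_ptr<arrow::DataType> storage_type,
      const std::string& serialized_data) const override {
    // Validate both the storage type and the metadata produced by Serialize().
    if (!storage_type->Equals(*arrow::fixed_size_binary(16))) {
      return arrow::Status::Invalid("invalid storage type for UuidType");
    }
    if (serialized_data != "uuid-v1") {
      return arrow::Status::Invalid("unexpected UuidType metadata");
    }
    return std::make_shared<UuidType>();
  }

  // Opaque metadata carried next to the storage type in IPC.
  std::string Serialize() const override { return "uuid-v1"; }
};

// Registering makes the type round-trippable through IPC, per the note above:
//   ARROW_RETURN_NOT_OK(arrow::RegisterExtensionType(std::make_shared<UuidType>()));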
venv/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/api.h ADDED
@@ -0,0 +1,34 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include "arrow/util/config.h" // IWYU pragma: export
+
+ #include "arrow/filesystem/filesystem.h" // IWYU pragma: export
+ #ifdef ARROW_AZURE
+ #include "arrow/filesystem/azurefs.h" // IWYU pragma: export
+ #endif
+ #ifdef ARROW_GCS
+ #include "arrow/filesystem/gcsfs.h" // IWYU pragma: export
+ #endif
+ #include "arrow/filesystem/hdfs.h" // IWYU pragma: export
+ #include "arrow/filesystem/localfs.h" // IWYU pragma: export
+ #include "arrow/filesystem/mockfs.h" // IWYU pragma: export
+ #ifdef ARROW_S3
+ #include "arrow/filesystem/s3fs.h" // IWYU pragma: export
+ #endif
venv/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/azurefs.h ADDED
@@ -0,0 +1,358 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+ #include <vector>
23
+
24
+ #include "arrow/filesystem/filesystem.h"
25
+ #include "arrow/util/macros.h"
26
+ #include "arrow/util/uri.h"
27
+
28
+ namespace Azure::Core::Credentials {
29
+ class TokenCredential;
30
+ }
31
+
32
+ namespace Azure::Storage {
33
+ class StorageSharedKeyCredential;
34
+ }
35
+
36
+ namespace Azure::Storage::Blobs {
37
+ class BlobServiceClient;
38
+ }
39
+
40
+ namespace Azure::Storage::Files::DataLake {
41
+ class DataLakeFileSystemClient;
42
+ class DataLakeServiceClient;
43
+ } // namespace Azure::Storage::Files::DataLake
44
+
45
+ namespace arrow::fs {
46
+
47
+ class TestAzureFileSystem;
48
+ class TestAzureOptions;
49
+
50
+ /// Options for the AzureFileSystem implementation.
51
+ ///
52
+ /// By default, authentication is handled by the Azure SDK's credential chain
53
+ /// which may read from multiple environment variables, such as:
54
+ /// - `AZURE_TENANT_ID`
55
+ /// - `AZURE_CLIENT_ID`
56
+ /// - `AZURE_CLIENT_SECRET`
57
+ /// - `AZURE_AUTHORITY_HOST`
58
+ /// - `AZURE_CLIENT_CERTIFICATE_PATH`
59
+ /// - `AZURE_FEDERATED_TOKEN_FILE`
60
+ ///
61
+ /// Functions are provided for explicit configuration of credentials if that is preferred.
62
+ struct ARROW_EXPORT AzureOptions {
63
+ friend class TestAzureOptions;
64
+
65
+ /// \brief The name of the Azure Storage Account being accessed.
66
+ ///
67
+ /// All service URLs will be constructed using this storage account name.
68
+ /// `ConfigureAccountKeyCredential` assumes the user wants to authenticate
69
+ /// this account.
70
+ std::string account_name;
+
+ /// \brief hostname[:port] of the Azure Blob Storage Service.
+ ///
+ /// If the hostname is a relative domain name (one that starts with a '.'), then storage
+ /// account URLs will be constructed by prepending the account name to the hostname.
+ /// If the hostname is a fully qualified domain name, then the hostname will be used
+ /// as-is and the account name will follow the hostname in the URL path.
+ ///
+ /// Default: ".blob.core.windows.net"
+ std::string blob_storage_authority = ".blob.core.windows.net";
+
+ /// \brief hostname[:port] of the Azure Data Lake Storage Gen 2 Service.
+ ///
+ /// If the hostname is a relative domain name (one that starts with a '.'), then storage
+ /// account URLs will be constructed by prepending the account name to the hostname.
+ /// If the hostname is a fully qualified domain name, then the hostname will be used
+ /// as-is and the account name will follow the hostname in the URL path.
+ ///
+ /// Default: ".dfs.core.windows.net"
+ std::string dfs_storage_authority = ".dfs.core.windows.net";
+
+ /// \brief Azure Blob Storage connection transport.
+ ///
+ /// Default: "https"
+ std::string blob_storage_scheme = "https";
+
+ /// \brief Azure Data Lake Storage Gen 2 connection transport.
+ ///
+ /// Default: "https"
+ std::string dfs_storage_scheme = "https";
+
+ // TODO(GH-38598): Add support for more auth methods.
+ // std::string connection_string;
+ // std::string sas_token;
+
+ /// \brief Default metadata for OpenOutputStream.
+ ///
+ /// This will be ignored if non-empty metadata is passed to OpenOutputStream.
+ std::shared_ptr<const KeyValueMetadata> default_metadata;
+
+ private:
+ enum class CredentialKind {
+ kDefault,
+ kAnonymous,
+ kStorageSharedKey,
+ kClientSecret,
+ kManagedIdentity,
+ kWorkloadIdentity,
+ } credential_kind_ = CredentialKind::kDefault;
+
+ std::shared_ptr<Azure::Storage::StorageSharedKeyCredential>
+ storage_shared_key_credential_;
+ mutable std::shared_ptr<Azure::Core::Credentials::TokenCredential> token_credential_;
+
+ public:
+ AzureOptions();
+ ~AzureOptions();
+
+ private:
+ void ExtractFromUriSchemeAndHierPart(const Uri& uri, std::string* out_path);
+ Status ExtractFromUriQuery(const Uri& uri);
+
+ public:
+ /// \brief Construct a new AzureOptions from a URI.
+ ///
+ /// Supported formats:
+ ///
+ /// 1. abfs[s]://[:\<password\>@]\<account\>.blob.core.windows.net
+ /// [/\<container\>[/\<path\>]]
+ /// 2. abfs[s]://\<container\>[:\<password\>]@\<account\>.dfs.core.windows.net
+ /// [/path]
+ /// 3. abfs[s]://[\<account[:\<password\>]@]\<host[.domain]\>[\<:port\>]
+ /// [/\<container\>[/path]]
+ /// 4. abfs[s]://[\<account[:\<password\>]@]\<container\>[/path]
+ ///
+ /// 1. and 2. are compatible with the Azure Data Lake Storage Gen2 URIs:
+ /// https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri
+ ///
+ /// 3. is for Azure Blob Storage compatible services, including Azurite.
+ ///
+ /// 4. is a shorter version of 1. and 2.
+ ///
+ /// Note that there is no difference between abfs and abfss. HTTPS is
+ /// used with abfs by default. You can force the use of HTTP by specifying
+ /// the "enable_tls=false" query parameter.
+ ///
+ /// Supported query parameters:
+ ///
+ /// * blob_storage_authority: Set AzureOptions::blob_storage_authority
+ /// * dfs_storage_authority: Set AzureOptions::dfs_storage_authority
+ /// * enable_tls: If it's "false" or "0", HTTP is used instead of HTTPS.
+ /// * credential_kind: One of "default", "anonymous" or
+ /// "workload_identity". If "default" is specified, it's just
+ /// ignored. If "anonymous" is specified,
+ /// AzureOptions::ConfigureAnonymousCredential() is called. If
+ /// "workload_identity" is specified,
+ /// AzureOptions::ConfigureWorkloadIdentityCredential() is called.
+ /// * tenant_id: You must specify "client_id" and "client_secret"
+ /// too. AzureOptions::ConfigureClientSecretCredential() is called.
+ /// * client_id: If you don't specify "tenant_id" and
+ /// "client_secret",
+ /// AzureOptions::ConfigureManagedIdentityCredential() is
+ /// called. If you specify "tenant_id" and "client_secret" too,
+ /// AzureOptions::ConfigureClientSecretCredential() is called.
+ /// * client_secret: You must specify "tenant_id" and "client_id"
+ /// too. AzureOptions::ConfigureClientSecretCredential() is called.
+ static Result<AzureOptions> FromUri(const Uri& uri, std::string* out_path);
+ static Result<AzureOptions> FromUri(const std::string& uri, std::string* out_path);
+
+ Status ConfigureDefaultCredential();
+ Status ConfigureAnonymousCredential();
+ Status ConfigureAccountKeyCredential(const std::string& account_key);
+ Status ConfigureClientSecretCredential(const std::string& tenant_id,
+ const std::string& client_id,
+ const std::string& client_secret);
+ Status ConfigureManagedIdentityCredential(const std::string& client_id = std::string());
+ Status ConfigureWorkloadIdentityCredential();
+
+ bool Equals(const AzureOptions& other) const;
+
+ std::string AccountBlobUrl(const std::string& account_name) const;
+ std::string AccountDfsUrl(const std::string& account_name) const;
+
+ Result<std::unique_ptr<Azure::Storage::Blobs::BlobServiceClient>>
+ MakeBlobServiceClient() const;
+
+ Result<std::unique_ptr<Azure::Storage::Files::DataLake::DataLakeServiceClient>>
+ MakeDataLakeServiceClient() const;
+ };
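
The URI forms and query parameters documented above can be exercised as follows. This is a minimal sketch, not part of the header; the account, container and path names are hypothetical.

```cpp
#include <string>

#include "arrow/filesystem/azurefs.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Status ConfigureFromUriExample() {
  std::string path;
  // Format 2: an ADLS Gen2-style URI; `path` receives the in-filesystem
  // path corresponding to the URI.
  ARROW_ASSIGN_OR_RAISE(
      auto options,
      arrow::fs::AzureOptions::FromUri(
          "abfss://container@myaccount.dfs.core.windows.net/dir", &path));
  // Query parameters map onto the Configure*Credential() methods:
  ARROW_ASSIGN_OR_RAISE(
      auto anonymous_options,
      arrow::fs::AzureOptions::FromUri(
          "abfs://container@myaccount.dfs.core.windows.net/dir"
          "?credential_kind=anonymous",
          &path));
  return arrow::Status::OK();
}
```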
+
+ /// \brief FileSystem implementation backed by Azure Blob Storage (ABS) [1] and
+ /// Azure Data Lake Storage Gen2 (ADLS Gen2) [2].
+ ///
+ /// ADLS Gen2 isn't a dedicated service or account type. It's a set of capabilities that
+ /// support high throughput analytic workloads, built on Azure Blob Storage. All the data
+ /// ingested via the ADLS Gen2 APIs is persisted as blobs in the storage account.
+ /// ADLS Gen2 provides filesystem semantics, file-level security, and Hadoop
+ /// compatibility. ADLS Gen1 exists as a separate object store that will be retired on
+ /// 2024-02-29; new ADLS accounts use Gen2 instead.
+ ///
+ /// ADLS Gen2 and Blob APIs can operate on the same data, but there are
+ /// some limitations [3]. The ones that are relevant to this
+ /// implementation are listed here:
+ ///
+ /// - You can't use Blob APIs and ADLS APIs to write to the same instance of a file. If
+ /// you write to a file by using ADLS APIs, that file's blocks won't be visible
+ /// to calls to the GetBlockList Blob API. The only exception is when you're
+ /// overwriting.
+ /// - When you use the ListBlobs operation without specifying a delimiter, the results
+ /// include both directories and blobs. If you choose to use a delimiter, use only a
+ /// forward slash (/) -- the only supported delimiter.
+ /// - If you use the DeleteBlob API to delete a directory, that directory is deleted only
+ /// if it's empty. This means that you can't use the Blob API to delete directories
+ /// recursively.
+ ///
+ /// [1]: https://azure.microsoft.com/en-us/products/storage/blobs
+ /// [2]: https://azure.microsoft.com/en-us/products/storage/data-lake-storage
+ /// [3]:
+ /// https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-known-issues
+ class ARROW_EXPORT AzureFileSystem : public FileSystem {
+ private:
+ class Impl;
+ std::unique_ptr<Impl> impl_;
+
+ explicit AzureFileSystem(std::unique_ptr<Impl>&& impl);
+
+ friend class TestAzureFileSystem;
+ void ForceCachedHierarchicalNamespaceSupport(int hns_support);
+
+ public:
+ ~AzureFileSystem() override = default;
+
+ static Result<std::shared_ptr<AzureFileSystem>> Make(
+ const AzureOptions& options, const io::IOContext& = io::default_io_context());
+
+ std::string type_name() const override { return "abfs"; }
+
+ /// Return the original Azure options used to construct the filesystem
+ const AzureOptions& options() const;
+
+ bool Equals(const FileSystem& other) const override;
+
+ /// \cond FALSE
+ using FileSystem::CreateDir;
+ using FileSystem::DeleteDirContents;
+ using FileSystem::GetFileInfo;
+ using FileSystem::OpenAppendStream;
+ using FileSystem::OpenOutputStream;
+ /// \endcond
+
+ Result<FileInfo> GetFileInfo(const std::string& path) override;
+
+ Result<FileInfoVector> GetFileInfo(const FileSelector& select) override;
+
+ Status CreateDir(const std::string& path, bool recursive) override;
+
+ /// \brief Delete a directory and its contents recursively.
+ ///
+ /// Atomicity is guaranteed only on Hierarchical Namespace Storage accounts.
+ Status DeleteDir(const std::string& path) override;
+
+ /// \brief Non-atomically deletes the contents of a directory.
+ ///
+ /// This function can return a bad Status after only partially deleting the
+ /// contents of the directory.
+ Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override;
+
+ /// \brief Deletion of all the containers in the storage account (not
+ /// implemented for safety reasons).
+ ///
+ /// \return Status::NotImplemented
+ Status DeleteRootDirContents() override;
+
+ /// \brief Deletes a file.
+ ///
+ /// Supported on both flat namespace and Hierarchical Namespace storage
+ /// accounts. A check is made to guarantee the parent directory doesn't
+ /// disappear after the blob is deleted and, while this operation is running,
+ /// no other client can delete the parent directory due to the use of leases.
+ ///
+ /// This means applications can safely retry this operation without coordination to
+ /// guarantee only one client/process is trying to delete the same file.
+ Status DeleteFile(const std::string& path) override;
+
+ /// \brief Move/rename a file or directory.
+ ///
+ /// There are no files immediately at the root directory, so paths like
+ /// "/segment" always refer to a container of the storage account and are
+ /// treated as directories.
+ ///
+ /// If `dest` exists but the operation fails for some reason, `Move`
+ /// guarantees `dest` is not lost.
+ ///
+ /// Conditions for a successful move:
+ ///
+ /// 1. `src` must exist.
+ /// 2. `dest` can't contain a strict path prefix of `src`. More generally,
+ /// a directory can't be made a subdirectory of itself.
+ /// 3. If `dest` already exists and it's a file, `src` must also be a file.
+ /// `dest` is then replaced by `src`.
+ /// 4. All components of `dest` must exist, except for the last.
+ /// 5. If `dest` already exists and it's a directory, `src` must also be a
+ /// directory and `dest` must be empty. `dest` is then replaced by `src`
+ /// and its contents.
+ ///
+ /// Leases are used to guarantee the pre-condition checks and the rename
+ /// operation are atomic: other clients can't invalidate the pre-condition in
+ /// the time between the checks and the actual rename operation.
+ ///
+ /// This is possible because Move() is only supported on storage accounts with
+ /// Hierarchical Namespace Support enabled.
+ ///
+ /// ## Limitations
+ ///
+ /// - Moves are not supported on storage accounts without
+ /// Hierarchical Namespace support enabled
+ /// - Moves across different containers are not supported
+ /// - Moving a path of the form `/container` is not supported as it would
+ /// require moving all the files in a container to another container.
+ /// The only exception is a `Move("/container_a", "/container_b")` where
+ /// both containers are empty or `container_b` doesn't even exist.
+ /// The atomicity of the emptiness checks followed by the renaming operation
+ /// is guaranteed by the use of leases.
+ Status Move(const std::string& src, const std::string& dest) override;
+
+ Status CopyFile(const std::string& src, const std::string& dest) override;
+
+ Result<std::shared_ptr<io::InputStream>> OpenInputStream(
+ const std::string& path) override;
+
+ Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info) override;
+
+ Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+ const std::string& path) override;
+
+ Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+ const FileInfo& info) override;
+
+ Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata) override;
+
+ Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata) override;
+ };
+
+ } // namespace arrow::fs
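
Putting the two classes together, a minimal write sketch under the declarations above (the account and blob names are hypothetical; this assumes default credentials are available in the environment):

```cpp
#include "arrow/filesystem/azurefs.h"
#include "arrow/result.h"

arrow::Status WriteOneBlob() {
  arrow::fs::AzureOptions options;
  options.account_name = "myaccount";  // hypothetical storage account
  ARROW_RETURN_NOT_OK(options.ConfigureDefaultCredential());
  ARROW_ASSIGN_OR_RAISE(auto fs, arrow::fs::AzureFileSystem::Make(options));
  // "container/file.txt": the leading path segment names the container.
  ARROW_ASSIGN_OR_RAISE(auto out, fs->OpenOutputStream("container/file.txt"));
  ARROW_RETURN_NOT_OK(out->Write("hello", 5));
  return out->Close();
}
```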
venv/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem.h ADDED
@@ -0,0 +1,697 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <chrono>
+ #include <cstdint>
+ #include <functional>
+ #include <iosfwd>
+ #include <memory>
+ #include <string>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/filesystem/type_fwd.h"
+ #include "arrow/io/interfaces.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/compare.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/type_fwd.h"
+ #include "arrow/util/visibility.h"
+ #include "arrow/util/windows_fixup.h"
+
+ namespace arrow {
+ namespace fs {
+
+ using arrow::util::Uri;
+
+ // A system clock time point expressed as a 64-bit (or more) number of
+ // nanoseconds since the epoch.
+ using TimePoint =
+ std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds>;
+
+ ARROW_EXPORT std::string ToString(FileType);
+
+ ARROW_EXPORT std::ostream& operator<<(std::ostream& os, FileType);
+
+ static const int64_t kNoSize = -1;
+ static const TimePoint kNoTime = TimePoint(TimePoint::duration(-1));
+
+ /// \brief FileSystem entry info
+ struct ARROW_EXPORT FileInfo : public util::EqualityComparable<FileInfo> {
+ FileInfo() = default;
+ FileInfo(FileInfo&&) = default;
+ FileInfo& operator=(FileInfo&&) = default;
+ FileInfo(const FileInfo&) = default;
+ FileInfo& operator=(const FileInfo&) = default;
+
+ explicit FileInfo(std::string path, FileType type = FileType::Unknown)
+ : path_(std::move(path)), type_(type) {}
+
+ /// The file type
+ FileType type() const { return type_; }
+ void set_type(FileType type) { type_ = type; }
+
+ /// The full file path in the filesystem
+ const std::string& path() const { return path_; }
+ void set_path(std::string path) { path_ = std::move(path); }
+
+ /// The file base name (component after the last directory separator)
+ std::string base_name() const;
+
+ /// The directory base name (component before the file base name).
+ std::string dir_name() const;
+
+ /// The size in bytes, if available
+ ///
+ /// Only regular files are guaranteed to have a size.
+ int64_t size() const { return size_; }
+ void set_size(int64_t size) { size_ = size; }
+
+ /// The file extension (excluding the dot)
+ std::string extension() const;
+
+ /// The time of last modification, if available
+ TimePoint mtime() const { return mtime_; }
+ void set_mtime(TimePoint mtime) { mtime_ = mtime; }
+
+ bool IsFile() const { return type_ == FileType::File; }
+ bool IsDirectory() const { return type_ == FileType::Directory; }
+
+ bool Equals(const FileInfo& other) const {
+ return type() == other.type() && path() == other.path() && size() == other.size() &&
+ mtime() == other.mtime();
+ }
+
+ std::string ToString() const;
+
+ /// Function object implementing less-than comparison and hashing by
+ /// path, to support sorting infos, using them as keys, and other
+ /// interactions with the STL.
+ struct ByPath {
+ bool operator()(const FileInfo& l, const FileInfo& r) const {
+ return l.path() < r.path();
+ }
+
+ size_t operator()(const FileInfo& i) const {
+ return std::hash<std::string>{}(i.path());
+ }
+ };
+
+ protected:
+ std::string path_;
+ FileType type_ = FileType::Unknown;
+ int64_t size_ = kNoSize;
+ TimePoint mtime_ = kNoTime;
+ };
+
+ ARROW_EXPORT std::ostream& operator<<(std::ostream& os, const FileInfo&);
+
+ /// \brief File selector for filesystem APIs
+ struct ARROW_EXPORT FileSelector {
+ /// The directory in which to select files.
+ /// If the path exists but doesn't point to a directory, this should be an error.
+ std::string base_dir;
+ /// The behavior if `base_dir` isn't found in the filesystem. If false,
+ /// an error is returned. If true, an empty selection is returned.
+ bool allow_not_found;
+ /// Whether to recurse into subdirectories.
+ bool recursive;
+ /// The maximum number of subdirectories to recurse into.
+ int32_t max_recursion;
+
+ FileSelector() : allow_not_found(false), recursive(false), max_recursion(INT32_MAX) {}
+ };
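
For instance, a selector requesting a recursive listing under a (hypothetical) "data" directory while tolerating its absence — a sketch using the fields documented above:

```cpp
#include "arrow/filesystem/filesystem.h"

arrow::fs::FileSelector MakeDataSelector() {
  arrow::fs::FileSelector selector;
  selector.base_dir = "data";       // hypothetical base directory
  selector.allow_not_found = true;  // empty result instead of an error
  selector.recursive = true;        // max_recursion stays at INT32_MAX
  return selector;
}
```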
+
+ /// \brief FileSystem, path pair
+ struct ARROW_EXPORT FileLocator {
+ std::shared_ptr<FileSystem> filesystem;
+ std::string path;
+ };
+
+ using FileInfoVector = std::vector<FileInfo>;
+ using FileInfoGenerator = std::function<Future<FileInfoVector>()>;
+
+ } // namespace fs
+
+ template <>
+ struct IterationTraits<fs::FileInfoVector> {
+ static fs::FileInfoVector End() { return {}; }
+ static bool IsEnd(const fs::FileInfoVector& val) { return val.empty(); }
+ };
+
+ namespace fs {
+
+ /// \brief Abstract file system API
+ class ARROW_EXPORT FileSystem
+ /// \cond false
+ : public std::enable_shared_from_this<FileSystem>
+ /// \endcond
+ { // NOLINT
+ public:
+ virtual ~FileSystem();
+
+ virtual std::string type_name() const = 0;
+
+ /// EXPERIMENTAL: The IOContext associated with this filesystem.
+ const io::IOContext& io_context() const { return io_context_; }
+
+ /// Normalize path for the given filesystem
+ ///
+ /// The default implementation of this method is a no-op, but subclasses
+ /// may allow normalizing irregular path forms (such as Windows local paths).
+ virtual Result<std::string> NormalizePath(std::string path);
+
+ /// \brief Ensure a URI (or path) is compatible with the given filesystem and return the
+ /// path
+ ///
+ /// \param uri_string A URI representing a resource in the given filesystem.
+ ///
+ /// This method will check to ensure the given filesystem is compatible with the
+ /// URI. This can be useful when the user provides both a URI and a filesystem or
+ /// when a user provides multiple URIs that should be compatible with the same
+ /// filesystem.
+ ///
+ /// uri_string can be an absolute path instead of a URI. In that case it will ensure
+ /// the filesystem (if supplied) is the local filesystem (or some custom filesystem that
+ /// is capable of reading local paths) and will normalize the path's file separators.
+ ///
+ /// Note, this method only checks to ensure the URI scheme is valid. It will not detect
+ /// inconsistencies like a mismatching region or endpoint override.
+ ///
+ /// \return The path inside the filesystem that is indicated by the URI.
+ virtual Result<std::string> PathFromUri(const std::string& uri_string) const;
+
+ virtual bool Equals(const FileSystem& other) const = 0;
+
+ virtual bool Equals(const std::shared_ptr<FileSystem>& other) const {
+ return Equals(*other);
+ }
+
+ /// Get info for the given target.
+ ///
+ /// Any symlink is automatically dereferenced, recursively.
+ /// A nonexistent or unreachable file returns an Ok status and
+ /// has a FileType of value NotFound. An error status indicates
+ /// a truly exceptional condition (low-level I/O error, etc.).
+ virtual Result<FileInfo> GetFileInfo(const std::string& path) = 0;
+ /// Same, for many targets at once.
+ virtual Result<FileInfoVector> GetFileInfo(const std::vector<std::string>& paths);
+ /// Same, according to a selector.
+ ///
+ /// The selector's base directory will not be part of the results, even if
+ /// it exists.
+ /// If it doesn't exist, see `FileSelector::allow_not_found`.
+ virtual Result<FileInfoVector> GetFileInfo(const FileSelector& select) = 0;
+
+ /// Async version of GetFileInfo
+ virtual Future<FileInfoVector> GetFileInfoAsync(const std::vector<std::string>& paths);
+
+ /// Streaming async version of GetFileInfo
+ ///
+ /// The returned generator is not async-reentrant, i.e. you need to wait for
+ /// the returned future to complete before calling the generator again.
+ virtual FileInfoGenerator GetFileInfoGenerator(const FileSelector& select);
+
+ /// Create a directory and subdirectories.
+ ///
+ /// This function succeeds if the directory already exists.
+ virtual Status CreateDir(const std::string& path, bool recursive) = 0;
+ Status CreateDir(const std::string& path) { return CreateDir(path, true); }
+
+ /// Delete a directory and its contents, recursively.
+ virtual Status DeleteDir(const std::string& path) = 0;
+
+ /// Delete a directory's contents, recursively.
+ ///
+ /// Like DeleteDir, but doesn't delete the directory itself.
+ /// Passing an empty path ("" or "/") is disallowed, see DeleteRootDirContents.
+ virtual Status DeleteDirContents(const std::string& path, bool missing_dir_ok) = 0;
+ Status DeleteDirContents(const std::string& path) {
+ return DeleteDirContents(path, false);
+ }
+
+ /// Async version of DeleteDirContents.
+ virtual Future<> DeleteDirContentsAsync(const std::string& path, bool missing_dir_ok);
+
+ /// Async version of DeleteDirContents.
+ ///
+ /// This overload allows missing directories.
+ Future<> DeleteDirContentsAsync(const std::string& path);
+
+ /// EXPERIMENTAL: Delete the root directory's contents, recursively.
+ ///
+ /// Implementations may decide to raise an error if this operation is
+ /// too dangerous.
+ // NOTE: may decide to remove this if it's deemed not useful
+ virtual Status DeleteRootDirContents() = 0;
+
+ /// Delete a file.
+ virtual Status DeleteFile(const std::string& path) = 0;
+ /// Delete many files.
+ ///
+ /// The default implementation issues individual delete operations in sequence.
+ virtual Status DeleteFiles(const std::vector<std::string>& paths);
+
+ /// Move / rename a file or directory.
+ ///
+ /// If the destination exists:
+ /// - if it is a non-empty directory, an error is returned
+ /// - otherwise, if it has the same type as the source, it is replaced
+ /// - otherwise, behavior is unspecified (implementation-dependent).
+ virtual Status Move(const std::string& src, const std::string& dest) = 0;
+
+ /// Copy a file.
+ ///
+ /// If the destination exists and is a directory, an error is returned.
+ /// Otherwise, it is replaced.
+ virtual Status CopyFile(const std::string& src, const std::string& dest) = 0;
+
+ /// Open an input stream for sequential reading.
+ virtual Result<std::shared_ptr<io::InputStream>> OpenInputStream(
+ const std::string& path) = 0;
+
+ /// Open an input stream for sequential reading.
+ ///
+ /// This override assumes the given FileInfo validly represents the file's
+ /// characteristics, and may optimize access depending on them (for example
+ /// avoid querying the file size or its existence).
+ virtual Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info);
+
+ /// Open an input file for random access reading.
+ virtual Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+ const std::string& path) = 0;
+
+ /// Open an input file for random access reading.
+ ///
+ /// This override assumes the given FileInfo validly represents the file's
+ /// characteristics, and may optimize access depending on them (for example
+ /// avoid querying the file size or its existence).
+ virtual Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+ const FileInfo& info);
+
+ /// Async version of OpenInputStream
+ virtual Future<std::shared_ptr<io::InputStream>> OpenInputStreamAsync(
+ const std::string& path);
+
+ /// Async version of OpenInputStream
+ virtual Future<std::shared_ptr<io::InputStream>> OpenInputStreamAsync(
+ const FileInfo& info);
+
+ /// Async version of OpenInputFile
+ virtual Future<std::shared_ptr<io::RandomAccessFile>> OpenInputFileAsync(
+ const std::string& path);
+
+ /// Async version of OpenInputFile
+ virtual Future<std::shared_ptr<io::RandomAccessFile>> OpenInputFileAsync(
+ const FileInfo& info);
+
+ /// Open an output stream for sequential writing.
+ ///
+ /// If the target already exists, existing data is truncated.
+ virtual Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata) = 0;
+ Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(const std::string& path);
+
+ /// Open an output stream for appending.
+ ///
+ /// If the target doesn't exist, a new empty file is created.
+ ///
+ /// Note: some filesystem implementations do not support efficient appending
+ /// to an existing file, in which case this method will return NotImplemented.
+ /// Consider writing to multiple files (using e.g. the dataset layer) instead.
+ virtual Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata) = 0;
+ Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(const std::string& path);
+
+ protected:
+ explicit FileSystem(io::IOContext io_context = io::default_io_context())
+ : io_context_(std::move(io_context)) {}
+
+ io::IOContext io_context_;
+ // Whether metadata operations (such as GetFileInfo or OpenInputStream)
+ // are cheap enough that the default async variants don't bother with
+ // a thread pool.
+ bool default_async_is_sync_ = true;
+ };
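
Because every operation above is virtual, code can be written once against the abstract class and run on any backend. A minimal round-trip sketch (paths hypothetical):

```cpp
#include <memory>

#include "arrow/filesystem/filesystem.h"
#include "arrow/result.h"

arrow::Status RoundTrip(const std::shared_ptr<arrow::fs::FileSystem>& fs) {
  ARROW_RETURN_NOT_OK(fs->CreateDir("tmp"));  // recursive by default
  ARROW_ASSIGN_OR_RAISE(auto out, fs->OpenOutputStream("tmp/example.bin"));
  ARROW_RETURN_NOT_OK(out->Write("abc", 3));
  ARROW_RETURN_NOT_OK(out->Close());
  ARROW_ASSIGN_OR_RAISE(auto in, fs->OpenInputStream("tmp/example.bin"));
  ARROW_ASSIGN_OR_RAISE(auto buffer, in->Read(3));  // Result<shared_ptr<Buffer>>
  return fs->DeleteDir("tmp");
}
```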
+
+ using FileSystemFactory = std::function<Result<std::shared_ptr<FileSystem>>(
+ const Uri& uri, const io::IOContext& io_context, std::string* out_path)>;
+
+ /// \brief A FileSystem implementation that delegates to another
+ /// implementation after prepending a fixed base path.
+ ///
+ /// This is useful to expose a logical view of a subtree of a filesystem,
+ /// for example a directory in a LocalFileSystem.
+ /// This works on abstract paths, i.e. paths using forward slashes
+ /// and a single root "/". Windows paths are not guaranteed to work.
+ /// This makes no security guarantee. For example, symlinks may allow
+ /// "escaping" the subtree and accessing other parts of the underlying filesystem.
+ class ARROW_EXPORT SubTreeFileSystem : public FileSystem {
+ public:
+ // This constructor may abort if base_path is invalid.
+ explicit SubTreeFileSystem(const std::string& base_path,
+ std::shared_ptr<FileSystem> base_fs);
+ ~SubTreeFileSystem() override;
+
+ std::string type_name() const override { return "subtree"; }
+ std::string base_path() const { return base_path_; }
+ std::shared_ptr<FileSystem> base_fs() const { return base_fs_; }
+
+ Result<std::string> NormalizePath(std::string path) override;
+ Result<std::string> PathFromUri(const std::string& uri_string) const override;
+
+ bool Equals(const FileSystem& other) const override;
+
+ /// \cond FALSE
+ using FileSystem::CreateDir;
+ using FileSystem::DeleteDirContents;
+ using FileSystem::GetFileInfo;
+ using FileSystem::OpenAppendStream;
+ using FileSystem::OpenOutputStream;
+ /// \endcond
+
+ Result<FileInfo> GetFileInfo(const std::string& path) override;
+ Result<FileInfoVector> GetFileInfo(const FileSelector& select) override;
+
+ FileInfoGenerator GetFileInfoGenerator(const FileSelector& select) override;
+
+ Status CreateDir(const std::string& path, bool recursive) override;
+
+ Status DeleteDir(const std::string& path) override;
+ Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override;
+ Status DeleteRootDirContents() override;
+
+ Status DeleteFile(const std::string& path) override;
+
+ Status Move(const std::string& src, const std::string& dest) override;
+
+ Status CopyFile(const std::string& src, const std::string& dest) override;
+
+ Result<std::shared_ptr<io::InputStream>> OpenInputStream(
+ const std::string& path) override;
+ Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info) override;
+ Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+ const std::string& path) override;
+ Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+ const FileInfo& info) override;
+
+ Future<std::shared_ptr<io::InputStream>> OpenInputStreamAsync(
+ const std::string& path) override;
+ Future<std::shared_ptr<io::InputStream>> OpenInputStreamAsync(
+ const FileInfo& info) override;
+ Future<std::shared_ptr<io::RandomAccessFile>> OpenInputFileAsync(
+ const std::string& path) override;
+ Future<std::shared_ptr<io::RandomAccessFile>> OpenInputFileAsync(
+ const FileInfo& info) override;
+
+ Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata) override;
+ Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata) override;
+
+ protected:
+ SubTreeFileSystem() = default;
+
+ const std::string base_path_;
+ std::shared_ptr<FileSystem> base_fs_;
+
+ Result<std::string> PrependBase(const std::string& s) const;
+ Result<std::string> PrependBaseNonEmpty(const std::string& s) const;
+ Result<std::string> StripBase(const std::string& s) const;
+ Status FixInfo(FileInfo* info) const;
+
+ static Result<std::string> NormalizeBasePath(
+ std::string base_path, const std::shared_ptr<FileSystem>& base_fs);
+ };
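
A sketch of the delegation described above, assuming the LocalFileSystem declared in arrow/filesystem/localfs.h; the base path is hypothetical:

```cpp
#include <memory>

#include "arrow/filesystem/filesystem.h"
#include "arrow/filesystem/localfs.h"  // assumption: the local backend header

// "logs/a.txt" on the returned filesystem resolves to "/var/data/logs/a.txt"
// on the underlying local filesystem.
std::shared_ptr<arrow::fs::FileSystem> MakeDataView() {
  auto local = std::make_shared<arrow::fs::LocalFileSystem>();
  return std::make_shared<arrow::fs::SubTreeFileSystem>("/var/data", local);
}
```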
+
+ /// \brief A FileSystem implementation that delegates to another
+ /// implementation but inserts latencies at various points.
+ class ARROW_EXPORT SlowFileSystem : public FileSystem {
+ public:
+ SlowFileSystem(std::shared_ptr<FileSystem> base_fs,
+ std::shared_ptr<io::LatencyGenerator> latencies);
+ SlowFileSystem(std::shared_ptr<FileSystem> base_fs, double average_latency);
+ SlowFileSystem(std::shared_ptr<FileSystem> base_fs, double average_latency,
+ int32_t seed);
+
+ std::string type_name() const override { return "slow"; }
+ bool Equals(const FileSystem& other) const override;
+ Result<std::string> PathFromUri(const std::string& uri_string) const override;
+
+ /// \cond FALSE
+ using FileSystem::CreateDir;
+ using FileSystem::DeleteDirContents;
+ using FileSystem::GetFileInfo;
+ using FileSystem::OpenAppendStream;
+ using FileSystem::OpenOutputStream;
+ /// \endcond
+
+ Result<FileInfo> GetFileInfo(const std::string& path) override;
+ Result<FileInfoVector> GetFileInfo(const FileSelector& select) override;
+
+ Status CreateDir(const std::string& path, bool recursive) override;
+
+ Status DeleteDir(const std::string& path) override;
+ Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override;
+ Status DeleteRootDirContents() override;
+
+ Status DeleteFile(const std::string& path) override;
+
+ Status Move(const std::string& src, const std::string& dest) override;
+
+ Status CopyFile(const std::string& src, const std::string& dest) override;
+
+ Result<std::shared_ptr<io::InputStream>> OpenInputStream(
+ const std::string& path) override;
+ Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info) override;
+ Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+ const std::string& path) override;
+ Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+ const FileInfo& info) override;
+ Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata) override;
+ Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata) override;
+
+ protected:
+ std::shared_ptr<FileSystem> base_fs_;
+ std::shared_ptr<io::LatencyGenerator> latencies_;
+ };
+
+ /// \brief Ensure all registered filesystem implementations are finalized.
+ ///
+ /// Individual finalizers may wait for concurrent calls to finish so as to avoid
+ /// race conditions. After this function has been called, all filesystem APIs
+ /// will fail with an error.
+ ///
+ /// The user is responsible for synchronization of calls to this function.
+ void EnsureFinalized();
+
+ /// \defgroup filesystem-factories Functions for creating FileSystem instances
+ ///
+ /// @{
+
+ /// \brief Create a new FileSystem by URI
+ ///
+ /// Recognized schemes are "file", "mock", "hdfs", "viewfs", "s3",
+ /// "gs" and "gcs".
+ ///
+ /// Support for other schemes can be added using RegisterFileSystemFactory.
+ ///
+ /// \param[in] uri a URI-based path, ex: file:///some/local/path
+ /// \param[out] out_path (optional) Path inside the filesystem.
+ /// \return out_fs FileSystem instance.
+ ARROW_EXPORT
+ Result<std::shared_ptr<FileSystem>> FileSystemFromUri(const std::string& uri,
+ std::string* out_path = NULLPTR);
+
+ /// \brief Create a new FileSystem by URI with a custom IO context
+ ///
+ /// Recognized schemes are "file", "mock", "hdfs", "viewfs", "s3",
+ /// "gs" and "gcs".
+ ///
+ /// Support for other schemes can be added using RegisterFileSystemFactory.
+ ///
+ /// \param[in] uri a URI-based path, ex: file:///some/local/path
+ /// \param[in] io_context an IOContext which will be associated with the filesystem
+ /// \param[out] out_path (optional) Path inside the filesystem.
+ /// \return out_fs FileSystem instance.
+ ARROW_EXPORT
+ Result<std::shared_ptr<FileSystem>> FileSystemFromUri(const std::string& uri,
+ const io::IOContext& io_context,
+ std::string* out_path = NULLPTR);
+
+ /// \brief Create a new FileSystem by URI
+ ///
+ /// Support for other schemes can be added using RegisterFileSystemFactory.
+ ///
+ /// Same as FileSystemFromUri, but in addition also recognize non-URIs
+ /// and treat them as local filesystem paths. Only absolute local filesystem
+ /// paths are allowed.
+ ARROW_EXPORT
+ Result<std::shared_ptr<FileSystem>> FileSystemFromUriOrPath(
+ const std::string& uri, std::string* out_path = NULLPTR);
+
+ /// \brief Create a new FileSystem by URI with a custom IO context
+ ///
+ /// Support for other schemes can be added using RegisterFileSystemFactory.
+ ///
+ /// Same as FileSystemFromUri, but in addition also recognize non-URIs
+ /// and treat them as local filesystem paths. Only absolute local filesystem
+ /// paths are allowed.
+ ARROW_EXPORT
+ Result<std::shared_ptr<FileSystem>> FileSystemFromUriOrPath(
+ const std::string& uri, const io::IOContext& io_context,
+ std::string* out_path = NULLPTR);
+
+ /// @}
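
A typical call, sketched under the declarations above (the bucket and key are hypothetical, and the "s3" scheme requires an Arrow build with S3 support):

```cpp
#include <string>

#include "arrow/filesystem/filesystem.h"
#include "arrow/result.h"

arrow::Status FromUriExample() {
  std::string path;
  ARROW_ASSIGN_OR_RAISE(auto fs,
                        arrow::fs::FileSystemFromUri("s3://bucket/key", &path));
  // `path` now holds the part of the URI inside the filesystem ("bucket/key").
  ARROW_ASSIGN_OR_RAISE(auto info, fs->GetFileInfo(path));
  return arrow::Status::OK();
}
```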
+
+ /// \defgroup filesystem-factory-registration Helpers for FileSystem registration
+ ///
+ /// @{
+
+ /// \brief Register a FileSystem factory
+ ///
+ /// Support for custom URI schemes can be added by registering a factory
+ /// for the corresponding FileSystem.
+ ///
+ /// \param[in] scheme a Uri scheme which the factory will handle.
+ /// If a factory has already been registered for a scheme,
+ /// the new factory will be ignored.
+ /// \param[in] factory a function which can produce a FileSystem for Uris which match
+ /// scheme.
+ /// \param[in] finalizer a function which must be called to finalize the factory before
+ /// the process exits, or nullptr if no finalization is necessary.
+ /// \return raises KeyError if a name collision occurs.
+ ARROW_EXPORT Status RegisterFileSystemFactory(std::string scheme,
+ FileSystemFactory factory,
+ std::function<void()> finalizer = {});
+
+ /// \brief Register FileSystem factories from a shared library
+ ///
+ /// FileSystem implementations may be housed in separate shared libraries and only
+ /// registered when the shared library is explicitly loaded. FileSystemRegistrar is
+ /// provided to simplify definition of such libraries: each instance at namespace scope
+ /// in the library will register a factory for a scheme. Any library which uses
+ /// FileSystemRegistrars and which must be dynamically loaded should be loaded using
+ /// LoadFileSystemFactories(), which will additionally merge registries if necessary
+ /// (static linkage to arrow can produce isolated registries).
+ ARROW_EXPORT Status LoadFileSystemFactories(const char* libpath);
+
+ struct ARROW_EXPORT FileSystemRegistrar {
+ /// \brief Register a FileSystem factory at load time
+ ///
+ /// Support for custom URI schemes can be added by registering a factory for the
+ /// corresponding FileSystem. An instance of this helper can be defined at namespace
+ /// scope to cause the factory to be registered at load time.
+ ///
+ /// Global constructors will finish execution before main() starts if the registrar is
+ /// linked into the same binary as main(), or before dlopen()/LoadLibrary() returns if
+ /// the library in which the registrar is defined is dynamically loaded.
+ ///
+ /// \code
+ /// FileSystemRegistrar kSlowFileSystemModule{
+ /// "slowfile",
+ /// [](const Uri& uri, const io::IOContext& io_context, std::string* out_path)
+ /// ->Result<std::shared_ptr<FileSystem>> {
+ /// auto local_uri = "file" + uri.ToString().substr(uri.scheme().size());
+ /// ARROW_ASSIGN_OR_RAISE(auto base_fs,
+ /// FileSystemFromUri(local_uri, io_context, out_path));
+ /// double average_latency = 1;
+ /// int32_t seed = 0xDEADBEEF;
+ /// ARROW_ASSIGN_OR_RAISE(auto params, uri.query_items());
+ /// for (const auto& [key, value] : params) {
+ /// if (key == "average_latency") {
+ /// average_latency = std::stod(value);
+ /// }
+ /// if (key == "seed") {
+ /// seed = std::stoi(value, nullptr, /*base=*/16);
+ /// }
+ /// }
+ /// return std::make_shared<SlowFileSystem>(base_fs, average_latency, seed);
+ /// }};
+ /// \endcode
+ ///
+ /// \param[in] scheme a Uri scheme which the factory will handle.
+ /// If a factory has already been registered for a scheme, the
+ /// new factory will be ignored.
+ /// \param[in] factory a function which can produce a FileSystem for Uris which match
+ /// scheme.
+ /// \param[in] finalizer a function which must be called to finalize the factory before
+ /// the process exits, or nullptr if no finalization is necessary.
+ FileSystemRegistrar(std::string scheme, FileSystemFactory factory,
+ std::function<void()> finalizer = {});
+ };
+
+ /// @}
+
+ namespace internal {
+ ARROW_EXPORT void* GetFileSystemRegistry();
+ } // namespace internal
+
+ /// \brief Copy files, including from one FileSystem to another
+ ///
+ /// If a source and destination are resident in the same FileSystem, FileSystem::CopyFile
+ /// will be used; otherwise the file will be opened as a stream in both FileSystems and
+ /// chunks copied from the source to the destination. No directories will be created.
+ ARROW_EXPORT
+ Status CopyFiles(const std::vector<FileLocator>& sources,
+ const std::vector<FileLocator>& destinations,
+ const io::IOContext& io_context = io::default_io_context(),
+ int64_t chunk_size = 1024 * 1024, bool use_threads = true);
+
+ /// \brief Copy selected files, including from one FileSystem to another
+ ///
+ /// Directories will be created under the destination base directory as needed.
+ ARROW_EXPORT
+ Status CopyFiles(const std::shared_ptr<FileSystem>& source_fs,
+ const FileSelector& source_sel,
+ const std::shared_ptr<FileSystem>& destination_fs,
+ const std::string& destination_base_dir,
+ const io::IOContext& io_context = io::default_io_context(),
+ int64_t chunk_size = 1024 * 1024, bool use_threads = true);
+
+ struct FileSystemGlobalOptions {
+ /// Path to a single PEM file holding all TLS CA certificates
+ ///
+ /// If empty, the underlying TLS library's defaults will be used.
+ std::string tls_ca_file_path;
+
+ /// Path to a directory holding TLS CA certificates in individual PEM files
+ /// named along the OpenSSL "hashed" format.
+ ///
+ /// If empty, the underlying TLS library's defaults will be used.
+ std::string tls_ca_dir_path;
+ };
+
+ /// EXPERIMENTAL: optional global initialization routine
+ ///
+ /// This is for environments (such as manylinux) where the path
+ /// to TLS CA certificates needs to be configured at runtime.
+ ARROW_EXPORT
+ Status Initialize(const FileSystemGlobalOptions& options);
+
+ } // namespace fs
+ } // namespace arrow
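
The selector-based CopyFiles overload can mirror a subtree between any two backends. A sketch (the directory names are hypothetical):

```cpp
#include <memory>

#include "arrow/filesystem/filesystem.h"

arrow::Status Mirror(const std::shared_ptr<arrow::fs::FileSystem>& src,
                     const std::shared_ptr<arrow::fs::FileSystem>& dst) {
  arrow::fs::FileSelector sel;
  sel.base_dir = "dataset";
  sel.recursive = true;
  // Required directories are created under "backup" on the destination.
  return arrow::fs::CopyFiles(src, sel, dst, /*destination_base_dir=*/"backup");
}
```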
venv/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/filesystem_library.h ADDED
@@ -0,0 +1,39 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include "arrow/filesystem/filesystem.h"
+
+ namespace arrow::fs {
+ extern "C" {
+
+ // ARROW_FORCE_EXPORT ensures this function's visibility is
+ // __declspec(dllexport)/[[gnu::visibility("default")]] even when
+ // this header is #included by a non-arrow source, as in a third
+ // party filesystem implementation.
+ ARROW_FORCE_EXPORT void* arrow_filesystem_get_registry() {
+ // In the case where libarrow is linked statically both to the executable and to a
+ // dynamically loaded filesystem implementation library, the library contains a
+ // duplicate definition of the registry into which the library's instances of
+ // FileSystemRegistrar insert their factories. This function is made accessible to
+ // dlsym/GetProcAddress to enable detection of such duplicate registries and merging
+ // into the registry accessible to the executable.
+ return internal::GetFileSystemRegistry();
+ }
+ }
+ } // namespace arrow::fs
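
A third-party module would combine this header with a FileSystemRegistrar along these lines. This is a sketch only: the "myfs" scheme and the delegation to the local filesystem are purely illustrative.

```cpp
// myfs_library.cc -- hypothetical dynamically loaded filesystem module.
#include "arrow/filesystem/filesystem_library.h"  // defines arrow_filesystem_get_registry()

namespace {

// Registers a factory for the hypothetical "myfs" scheme at load time.
arrow::fs::FileSystemRegistrar kMyFsModule{
    "myfs",
    [](const arrow::util::Uri& uri, const arrow::io::IOContext& io_context,
       std::string* out_path)
        -> arrow::Result<std::shared_ptr<arrow::fs::FileSystem>> {
      // For illustration, rewrite the scheme and delegate to the local backend.
      auto local_uri = "file" + uri.ToString().substr(uri.scheme().size());
      return arrow::fs::FileSystemFromUri(local_uri, io_context, out_path);
    }};

}  // namespace
```

An application would then load the module with arrow::fs::LoadFileSystemFactories("/path/to/libmyfs.so") (the library path is hypothetical) before calling FileSystemFromUri with a "myfs://" URI.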
venv/lib/python3.10/site-packages/pyarrow/include/arrow/filesystem/gcsfs.h ADDED
@@ -0,0 +1,246 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+ #include <optional>
+ #include <string>
+ #include <vector>
+
+ #include "arrow/filesystem/filesystem.h"
+ #include "arrow/util/uri.h"
+
+ namespace arrow {
+ namespace fs {
+ namespace internal {
+
+ // Opaque wrapper for GCS's library credentials to avoid exposing in Arrow headers.
+ struct GcsCredentialsHolder;
+
+ } // namespace internal
+
+ class GcsFileSystem;
+
+ /// \brief Container for GCS Credentials and information necessary to recreate them.
+ class ARROW_EXPORT GcsCredentials {
+ public:
+ bool Equals(const GcsCredentials& other) const;
+ bool anonymous() const { return anonymous_; }
+ const std::string& access_token() const { return access_token_; }
+ TimePoint expiration() const { return expiration_; }
+ const std::string& target_service_account() const { return target_service_account_; }
+ const std::string& json_credentials() const { return json_credentials_; }
+ const std::shared_ptr<internal::GcsCredentialsHolder>& holder() const {
+ return holder_;
+ }
+
+ private:
+ GcsCredentials() = default;
+ bool anonymous_ = false;
+ std::string access_token_;
+ TimePoint expiration_;
+ std::string target_service_account_;
+ std::string json_credentials_;
+ std::shared_ptr<internal::GcsCredentialsHolder> holder_;
+ friend class GcsFileSystem;
+ friend struct GcsOptions;
+ };
+
+ /// Options for the GcsFileSystem implementation.
+ struct ARROW_EXPORT GcsOptions {
+ /// \brief Equivalent to GcsOptions::Defaults().
+ GcsOptions();
+ GcsCredentials credentials;
+
+ std::string endpoint_override;
+ std::string scheme;
+ /// \brief Location to use for creating buckets.
+ std::string default_bucket_location;
+
+ /// \brief If set, used to control the total time allowed for retrying underlying
+ /// errors.
+ ///
+ /// The default policy is to retry for up to 15 minutes.
+ std::optional<double> retry_limit_seconds;
+
+ /// \brief Default metadata for OpenOutputStream.
+ ///
+ /// This will be ignored if non-empty metadata is passed to OpenOutputStream.
+ std::shared_ptr<const KeyValueMetadata> default_metadata;
+
+ /// \brief The project to use for creating buckets.
+ ///
+ /// If not set, the library uses the GOOGLE_CLOUD_PROJECT environment
+ /// variable. Most I/O operations do not need a project id, only applications
+ /// that create new buckets need a project id.
+ std::optional<std::string> project_id;
+
+ bool Equals(const GcsOptions& other) const;
+
+ /// \brief Initialize with Google Default Credentials
+ ///
+ /// Create options configured to use [Application Default Credentials][aip/4110]. The
+ /// details of this mechanism are too involved to describe here, but suffice it to say
+ /// that applications can override any defaults using an environment variable
+ /// (`GOOGLE_APPLICATION_CREDENTIALS`), that the defaults work with most Google
+ /// Cloud Platform deployment environments (GCE, GKE, Cloud Run, etc.), and that they
+ /// have the same behavior as the `gcloud` CLI tool on your workstation.
+ ///
+ /// \see https://cloud.google.com/docs/authentication
+ ///
+ /// [aip/4110]: https://google.aip.dev/auth/4110
+ static GcsOptions Defaults();
+
+ /// \brief Initialize with anonymous credentials
+ static GcsOptions Anonymous();
+
+ /// \brief Initialize with access token
+ ///
+ /// These credentials are useful when using an out-of-band mechanism to fetch access
+ /// tokens. Note that access tokens are time limited; you will need to manually refresh
+ /// the tokens created by the out-of-band mechanism.
+ static GcsOptions FromAccessToken(const std::string& access_token,
+ TimePoint expiration);
+
+ /// \brief Initialize with service account impersonation
+ ///
+ /// Service account impersonation allows one principal (a user or service account) to
+ /// impersonate a service account. It requires that the calling principal has the
+ /// necessary permissions *on* the service account.
+ static GcsOptions FromImpersonatedServiceAccount(
+ const GcsCredentials& base_credentials, const std::string& target_service_account);
+
+ /// Creates service account credentials from a JSON object in string form.
+ ///
+ /// The @p json_object is expected to be in the format described by [aip/4112]. Such an
+ /// object contains the identity of a service account, as well as a private key that can
+ /// be used to sign tokens, showing the caller was holding the private key.
+ ///
+ /// In GCP one can create several "keys" for each service account, and these keys are
+ /// downloaded as a JSON "key file". The contents of such a file are in the format
+ /// required by this function. Remember that key files and their contents should be
+ /// treated as any other secret with security implications; think of them as passwords
+ /// (because they are!), and don't store them or output them where unauthorized persons
+ /// may read them.
+ ///
+ /// Most applications should probably use default credentials, maybe pointing them to a
+ /// file with these contents. Using this function may be useful when the JSON object is
+ /// obtained from a Cloud Secret Manager or a similar service.
+ ///
+ /// [aip/4112]: https://google.aip.dev/auth/4112
+ static GcsOptions FromServiceAccountCredentials(const std::string& json_object);
+
+ /// Initialize from URIs such as "gs://bucket/object".
+ static Result<GcsOptions> FromUri(const arrow::util::Uri& uri, std::string* out_path);
+ static Result<GcsOptions> FromUri(const std::string& uri, std::string* out_path);
+ };
+
+ /// \brief GCS-backed FileSystem implementation.
+ ///
+ /// GCS (Google Cloud Storage - https://cloud.google.com/storage) is a scalable object
+ /// storage system for any amount of data. The main abstractions in GCS are buckets and
+ /// objects. A bucket is a namespace for objects; buckets can store any number of objects,
+ /// and tens of millions or even billions are not uncommon. Each object contains a single
+ /// blob of data, up to 5TiB in size. Buckets are typically configured to keep a single
+ /// version of each object, but versioning can be enabled. Versioning is important because
+ /// objects are immutable: once created, one cannot append data to the object or modify
+ /// the object data in any way.
+ ///
+ /// GCS buckets are in a global namespace: if a Google Cloud customer creates a bucket
+ /// named `foo`, no other customer can create a bucket with the same name. Note that a
+ /// principal (a user or service account) may only list the buckets they are entitled to,
+ /// and then only within a project. It is not possible to list "all" the buckets.
+ ///
+ /// Within each bucket, objects are in a flat namespace. GCS does not have folders or
+ /// directories. However, following some conventions it is possible to emulate
+ /// directories. To this end, this class adopts the following conventions:
+ ///
+ /// - All buckets are treated as directories at the "root".
+ /// - Creating a root directory results in a new bucket being created; this may be slower
+ /// than most GCS operations.
+ /// - The class creates marker objects for a directory, using a metadata attribute to
+ /// annotate the file.
+ /// - GCS can list all the objects with a given prefix; this is used to emulate listing
+ /// of directories.
+ /// - In object lists, GCS can summarize all the objects with a common prefix as a single
+ /// entry; this is used to emulate non-recursive lists. Note that GCS list time is
+ /// proportional to the number of objects in the prefix. Listing recursively takes
+ /// almost the same time as non-recursive lists.
+ ///
+ class ARROW_EXPORT GcsFileSystem : public FileSystem {
+ public:
+ ~GcsFileSystem() override = default;
+
+ std::string type_name() const override;
+ const GcsOptions& options() const;
+
+ bool Equals(const FileSystem& other) const override;
+ Result<std::string> PathFromUri(const std::string& uri_string) const override;
+
+ Result<FileInfo> GetFileInfo(const std::string& path) override;
+ Result<FileInfoVector> GetFileInfo(const FileSelector& select) override;
+
+ Status CreateDir(const std::string& path, bool recursive) override;
+
+ Status DeleteDir(const std::string& path) override;
+
+ Status DeleteDirContents(const std::string& path, bool missing_dir_ok = false) override;
+
+ /// This is not implemented in GcsFileSystem, as it would be too dangerous.
+ Status DeleteRootDirContents() override;
+
+ Status DeleteFile(const std::string& path) override;
+
+ Status Move(const std::string& src, const std::string& dest) override;
+
+ Status CopyFile(const std::string& src, const std::string& dest) override;
+
+ Result<std::shared_ptr<io::InputStream>> OpenInputStream(
+ const std::string& path) override;
+ Result<std::shared_ptr<io::InputStream>> OpenInputStream(const FileInfo& info) override;
+
+ Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+ const std::string& path) override;
+ Result<std::shared_ptr<io::RandomAccessFile>> OpenInputFile(
+ const FileInfo& info) override;
+
+ Result<std::shared_ptr<io::OutputStream>> OpenOutputStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata) override;
+
+ ARROW_DEPRECATED(
+ "Deprecated. "
+ "OpenAppendStream is unsupported on the GCS FileSystem.")
+ Result<std::shared_ptr<io::OutputStream>> OpenAppendStream(
+ const std::string& path,
+ const std::shared_ptr<const KeyValueMetadata>& metadata) override;
+
+ /// Create a GcsFileSystem instance from the given options.
+ // TODO(ARROW-16884): make this return Result for consistency
+ static std::shared_ptr<GcsFileSystem> Make(
+ const GcsOptions& options, const io::IOContext& = io::default_io_context());
+
+ private:
+ explicit GcsFileSystem(const GcsOptions& options, const io::IOContext& io_context);
+
+ class Impl;
+ std::shared_ptr<Impl> impl_;
+ };
+
+ } // namespace fs
+ } // namespace arrow
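
End to end, the options and filesystem classes above compose as follows. A minimal read sketch (bucket and object names hypothetical); note that, per the TODO above, Make returns a bare shared_ptr rather than a Result:

```cpp
#include <string>

#include "arrow/filesystem/gcsfs.h"
#include "arrow/result.h"

arrow::Status ReadFromGcs() {
  std::string path;
  ARROW_ASSIGN_OR_RAISE(
      auto options, arrow::fs::GcsOptions::FromUri("gs://bucket/object", &path));
  auto fs = arrow::fs::GcsFileSystem::Make(options);
  ARROW_ASSIGN_OR_RAISE(auto in, fs->OpenInputStream(path));
  ARROW_ASSIGN_OR_RAISE(auto buffer, in->Read(1024));
  return arrow::Status::OK();
}
```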