applied-ai-018 commited on
Commit
9ad7093
·
verified ·
1 Parent(s): a1e6eab

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  6. venv/lib/python3.10/site-packages/pyarrow/include/arrow/api.h +47 -0
  7. venv/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h +484 -0
  8. venv/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h +164 -0
  9. venv/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h +296 -0
  10. venv/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h +407 -0
  11. venv/lib/python3.10/site-packages/pyarrow/include/arrow/result.h +508 -0
  12. venv/lib/python3.10/site-packages/pyarrow/include/arrow/scalar.h +816 -0
  13. venv/lib/python3.10/site-packages/pyarrow/include/arrow/sparse_tensor.h +617 -0
  14. venv/lib/python3.10/site-packages/pyarrow/include/arrow/status.h +471 -0
  15. venv/lib/python3.10/site-packages/pyarrow/include/arrow/visit_array_inline.h +64 -0
  16. venv/lib/python3.10/site-packages/pyarrow/include/parquet/api/io.h +20 -0
  17. venv/lib/python3.10/site-packages/pyarrow/include/parquet/api/reader.h +35 -0
  18. venv/lib/python3.10/site-packages/pyarrow/include/parquet/api/schema.h +21 -0
  19. venv/lib/python3.10/site-packages/pyarrow/include/parquet/api/writer.h +25 -0
  20. venv/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/reader.h +379 -0
  21. venv/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/schema.h +184 -0
  22. venv/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/test_util.h +524 -0
  23. venv/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/writer.h +180 -0
  24. venv/lib/python3.10/site-packages/pyarrow/include/parquet/benchmark_util.h +47 -0
  25. venv/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter.h +363 -0
  26. venv/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter_reader.h +68 -0
  27. venv/lib/python3.10/site-packages/pyarrow/include/parquet/column_page.h +171 -0
  28. venv/lib/python3.10/site-packages/pyarrow/include/parquet/column_reader.h +501 -0
  29. venv/lib/python3.10/site-packages/pyarrow/include/parquet/column_scanner.h +264 -0
  30. venv/lib/python3.10/site-packages/pyarrow/include/parquet/column_writer.h +307 -0
  31. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encoding.h +471 -0
  32. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/crypto_factory.h +152 -0
  33. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/encryption.h +510 -0
  34. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_material_store.h +57 -0
  35. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_unwrapper.h +94 -0
  36. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_wrapper.h +84 -0
  37. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_system_key_material_store.h +89 -0
  38. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_encryption_key.h +57 -0
  39. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_material.h +129 -0
  40. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_metadata.h +91 -0
  41. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_toolkit.h +106 -0
  42. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client.h +93 -0
  43. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client_factory.h +38 -0
  44. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/local_wrap_kms_client.h +94 -0
  45. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_encryption_util.h +133 -0
  46. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_in_memory_kms.h +94 -0
  47. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/two_level_cache_with_expiration.h +157 -0
  48. venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/type_fwd.h +28 -0
  49. venv/lib/python3.10/site-packages/pyarrow/include/parquet/exception.h +158 -0
  50. venv/lib/python3.10/site-packages/pyarrow/include/parquet/file_reader.h +231 -0
ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de2e4a118778979e61962be182c6a34e109e97b19ed65a8d7ee3c0eeaec1a017
3
+ size 9372
ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cd0beb0156ea8163d8d4560f1b7c52678cc3c1be4418a906291c13e358a6d84
3
+ size 9387
ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a5ad245e1625dcbee5a55def4c3422dd0f9a8750a807134eefe00304bd1a9c7
3
+ size 9293
ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e311bc820a6ada17593a8140ce8f2c6ff3698176cdc0fe3d1e182ff87080844
3
+ size 50332828
ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e04ac1bcf845beed7bf1c5a887de05500b599ff0df45f92f22182b42f9724d3
3
+ size 50332843
venv/lib/python3.10/site-packages/pyarrow/include/arrow/api.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Coarse public API while the library is in development
19
+
20
+ #pragma once
21
+
22
+ #include "arrow/array.h" // IWYU pragma: export
23
+ #include "arrow/array/array_run_end.h" // IWYU pragma: export
24
+ #include "arrow/array/concatenate.h" // IWYU pragma: export
25
+ #include "arrow/buffer.h" // IWYU pragma: export
26
+ #include "arrow/builder.h" // IWYU pragma: export
27
+ #include "arrow/chunked_array.h" // IWYU pragma: export
28
+ #include "arrow/compare.h" // IWYU pragma: export
29
+ #include "arrow/config.h" // IWYU pragma: export
30
+ #include "arrow/datum.h" // IWYU pragma: export
31
+ #include "arrow/extension_type.h" // IWYU pragma: export
32
+ #include "arrow/memory_pool.h" // IWYU pragma: export
33
+ #include "arrow/pretty_print.h" // IWYU pragma: export
34
+ #include "arrow/record_batch.h" // IWYU pragma: export
35
+ #include "arrow/result.h" // IWYU pragma: export
36
+ #include "arrow/status.h" // IWYU pragma: export
37
+ #include "arrow/table.h" // IWYU pragma: export
38
+ #include "arrow/table_builder.h" // IWYU pragma: export
39
+ #include "arrow/tensor.h" // IWYU pragma: export
40
+ #include "arrow/type.h" // IWYU pragma: export
41
+ #include "arrow/util/key_value_metadata.h" // IWYU pragma: export
42
+ #include "arrow/visit_array_inline.h" // IWYU pragma: export
43
+ #include "arrow/visit_scalar_inline.h" // IWYU pragma: export
44
+ #include "arrow/visitor.h" // IWYU pragma: export
45
+
46
+ /// \brief Top-level namespace for Apache Arrow C++ API
47
+ namespace arrow {}
venv/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h ADDED
@@ -0,0 +1,484 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <cstdint>
22
+ #include <cstring>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+
27
+ #include "arrow/buffer.h"
28
+ #include "arrow/status.h"
29
+ #include "arrow/util/bit_util.h"
30
+ #include "arrow/util/bitmap_generate.h"
31
+ #include "arrow/util/bitmap_ops.h"
32
+ #include "arrow/util/macros.h"
33
+ #include "arrow/util/ubsan.h"
34
+ #include "arrow/util/visibility.h"
35
+
36
+ namespace arrow {
37
+
38
+ // ----------------------------------------------------------------------
39
+ // Buffer builder classes
40
+
41
+ /// \class BufferBuilder
42
+ /// \brief A class for incrementally building a contiguous chunk of in-memory
43
+ /// data
44
+ class ARROW_EXPORT BufferBuilder {
45
+ public:
46
+ explicit BufferBuilder(MemoryPool* pool = default_memory_pool(),
47
+ int64_t alignment = kDefaultBufferAlignment)
48
+ : pool_(pool),
49
+ data_(/*ensure never null to make ubsan happy and avoid check penalties below*/
50
+ util::MakeNonNull<uint8_t>()),
51
+ capacity_(0),
52
+ size_(0),
53
+ alignment_(alignment) {}
54
+
55
+ /// \brief Constructs new Builder that will start using
56
+ /// the provided buffer until Finish/Reset are called.
57
+ /// The buffer is not resized.
58
+ explicit BufferBuilder(std::shared_ptr<ResizableBuffer> buffer,
59
+ MemoryPool* pool = default_memory_pool(),
60
+ int64_t alignment = kDefaultBufferAlignment)
61
+ : buffer_(std::move(buffer)),
62
+ pool_(pool),
63
+ data_(buffer_->mutable_data()),
64
+ capacity_(buffer_->capacity()),
65
+ size_(buffer_->size()),
66
+ alignment_(alignment) {}
67
+
68
+ /// \brief Resize the buffer to the nearest multiple of 64 bytes
69
+ ///
70
+ /// \param new_capacity the new capacity of the of the builder. Will be
71
+ /// rounded up to a multiple of 64 bytes for padding
72
+ /// \param shrink_to_fit if new capacity is smaller than the existing,
73
+ /// reallocate internal buffer. Set to false to avoid reallocations when
74
+ /// shrinking the builder.
75
+ /// \return Status
76
+ Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) {
77
+ if (buffer_ == NULLPTR) {
78
+ ARROW_ASSIGN_OR_RAISE(buffer_,
79
+ AllocateResizableBuffer(new_capacity, alignment_, pool_));
80
+ } else {
81
+ ARROW_RETURN_NOT_OK(buffer_->Resize(new_capacity, shrink_to_fit));
82
+ }
83
+ capacity_ = buffer_->capacity();
84
+ data_ = buffer_->mutable_data();
85
+ return Status::OK();
86
+ }
87
+
88
+ /// \brief Ensure that builder can accommodate the additional number of bytes
89
+ /// without the need to perform allocations
90
+ ///
91
+ /// \param[in] additional_bytes number of additional bytes to make space for
92
+ /// \return Status
93
+ Status Reserve(const int64_t additional_bytes) {
94
+ auto min_capacity = size_ + additional_bytes;
95
+ if (min_capacity <= capacity_) {
96
+ return Status::OK();
97
+ }
98
+ return Resize(GrowByFactor(capacity_, min_capacity), false);
99
+ }
100
+
101
+ /// \brief Return a capacity expanded by the desired growth factor
102
+ static int64_t GrowByFactor(int64_t current_capacity, int64_t new_capacity) {
103
+ // Doubling capacity except for large Reserve requests. 2x growth strategy
104
+ // (versus 1.5x) seems to have slightly better performance when using
105
+ // jemalloc, but significantly better performance when using the system
106
+ // allocator. See ARROW-6450 for further discussion
107
+ return std::max(new_capacity, current_capacity * 2);
108
+ }
109
+
110
+ /// \brief Append the given data to the buffer
111
+ ///
112
+ /// The buffer is automatically expanded if necessary.
113
+ Status Append(const void* data, const int64_t length) {
114
+ if (ARROW_PREDICT_FALSE(size_ + length > capacity_)) {
115
+ ARROW_RETURN_NOT_OK(Resize(GrowByFactor(capacity_, size_ + length), false));
116
+ }
117
+ UnsafeAppend(data, length);
118
+ return Status::OK();
119
+ }
120
+
121
+ /// \brief Append the given data to the buffer
122
+ ///
123
+ /// The buffer is automatically expanded if necessary.
124
+ Status Append(std::string_view v) { return Append(v.data(), v.size()); }
125
+
126
+ /// \brief Append copies of a value to the buffer
127
+ ///
128
+ /// The buffer is automatically expanded if necessary.
129
+ Status Append(const int64_t num_copies, uint8_t value) {
130
+ ARROW_RETURN_NOT_OK(Reserve(num_copies));
131
+ UnsafeAppend(num_copies, value);
132
+ return Status::OK();
133
+ }
134
+
135
+ // Advance pointer and zero out memory
136
+ Status Advance(const int64_t length) { return Append(length, 0); }
137
+
138
+ // Advance pointer, but don't allocate or zero memory
139
+ void UnsafeAdvance(const int64_t length) { size_ += length; }
140
+
141
+ // Unsafe methods don't check existing size
142
+ void UnsafeAppend(const void* data, const int64_t length) {
143
+ memcpy(data_ + size_, data, static_cast<size_t>(length));
144
+ size_ += length;
145
+ }
146
+
147
+ void UnsafeAppend(std::string_view v) {
148
+ UnsafeAppend(v.data(), static_cast<int64_t>(v.size()));
149
+ }
150
+
151
+ void UnsafeAppend(const int64_t num_copies, uint8_t value) {
152
+ memset(data_ + size_, value, static_cast<size_t>(num_copies));
153
+ size_ += num_copies;
154
+ }
155
+
156
+ /// \brief Return result of builder as a Buffer object.
157
+ ///
158
+ /// The builder is reset and can be reused afterwards.
159
+ ///
160
+ /// \param[out] out the finalized Buffer object
161
+ /// \param shrink_to_fit if the buffer size is smaller than its capacity,
162
+ /// reallocate to fit more tightly in memory. Set to false to avoid
163
+ /// a reallocation, at the expense of potentially more memory consumption.
164
+ /// \return Status
165
+ Status Finish(std::shared_ptr<Buffer>* out, bool shrink_to_fit = true) {
166
+ ARROW_RETURN_NOT_OK(Resize(size_, shrink_to_fit));
167
+ if (size_ != 0) buffer_->ZeroPadding();
168
+ *out = buffer_;
169
+ if (*out == NULLPTR) {
170
+ ARROW_ASSIGN_OR_RAISE(*out, AllocateBuffer(0, alignment_, pool_));
171
+ }
172
+ Reset();
173
+ return Status::OK();
174
+ }
175
+
176
+ Result<std::shared_ptr<Buffer>> Finish(bool shrink_to_fit = true) {
177
+ std::shared_ptr<Buffer> out;
178
+ ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit));
179
+ return out;
180
+ }
181
+
182
+ /// \brief Like Finish, but override the final buffer size
183
+ ///
184
+ /// This is useful after writing data directly into the builder memory
185
+ /// without calling the Append methods (basically, when using BufferBuilder
186
+ /// mostly for memory allocation).
187
+ Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
188
+ bool shrink_to_fit = true) {
189
+ size_ = final_length;
190
+ return Finish(shrink_to_fit);
191
+ }
192
+
193
+ void Reset() {
194
+ buffer_ = NULLPTR;
195
+ capacity_ = size_ = 0;
196
+ }
197
+
198
+ /// \brief Set size to a smaller value without modifying builder
199
+ /// contents. For reusable BufferBuilder classes
200
+ /// \param[in] position must be non-negative and less than or equal
201
+ /// to the current length()
202
+ void Rewind(int64_t position) { size_ = position; }
203
+
204
+ int64_t capacity() const { return capacity_; }
205
+ int64_t length() const { return size_; }
206
+ const uint8_t* data() const { return data_; }
207
+ uint8_t* mutable_data() { return data_; }
208
+ template <typename T>
209
+ const T* data_as() const {
210
+ return reinterpret_cast<const T*>(data_);
211
+ }
212
+ template <typename T>
213
+ T* mutable_data_as() {
214
+ return reinterpret_cast<T*>(data_);
215
+ }
216
+
217
+ private:
218
+ std::shared_ptr<ResizableBuffer> buffer_;
219
+ MemoryPool* pool_;
220
+ uint8_t* data_;
221
+ int64_t capacity_;
222
+ int64_t size_;
223
+ int64_t alignment_;
224
+ };
225
+
226
+ template <typename T, typename Enable = void>
227
+ class TypedBufferBuilder;
228
+
229
+ /// \brief A BufferBuilder for building a buffer of arithmetic elements
230
+ template <typename T>
231
+ class TypedBufferBuilder<
232
+ T, typename std::enable_if<std::is_arithmetic<T>::value ||
233
+ std::is_standard_layout<T>::value>::type> {
234
+ public:
235
+ explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(),
236
+ int64_t alignment = kDefaultBufferAlignment)
237
+ : bytes_builder_(pool, alignment) {}
238
+
239
+ explicit TypedBufferBuilder(std::shared_ptr<ResizableBuffer> buffer,
240
+ MemoryPool* pool = default_memory_pool())
241
+ : bytes_builder_(std::move(buffer), pool) {}
242
+
243
+ explicit TypedBufferBuilder(BufferBuilder builder)
244
+ : bytes_builder_(std::move(builder)) {}
245
+
246
+ BufferBuilder* bytes_builder() { return &bytes_builder_; }
247
+
248
+ Status Append(T value) {
249
+ return bytes_builder_.Append(reinterpret_cast<uint8_t*>(&value), sizeof(T));
250
+ }
251
+
252
+ Status Append(const T* values, int64_t num_elements) {
253
+ return bytes_builder_.Append(reinterpret_cast<const uint8_t*>(values),
254
+ num_elements * sizeof(T));
255
+ }
256
+
257
+ Status Append(const int64_t num_copies, T value) {
258
+ ARROW_RETURN_NOT_OK(Reserve(num_copies + length()));
259
+ UnsafeAppend(num_copies, value);
260
+ return Status::OK();
261
+ }
262
+
263
+ void UnsafeAppend(T value) {
264
+ bytes_builder_.UnsafeAppend(reinterpret_cast<uint8_t*>(&value), sizeof(T));
265
+ }
266
+
267
+ void UnsafeAppend(const T* values, int64_t num_elements) {
268
+ bytes_builder_.UnsafeAppend(reinterpret_cast<const uint8_t*>(values),
269
+ num_elements * sizeof(T));
270
+ }
271
+
272
+ template <typename Iter>
273
+ void UnsafeAppend(Iter values_begin, Iter values_end) {
274
+ auto num_elements = static_cast<int64_t>(std::distance(values_begin, values_end));
275
+ auto data = mutable_data() + length();
276
+ bytes_builder_.UnsafeAdvance(num_elements * sizeof(T));
277
+ std::copy(values_begin, values_end, data);
278
+ }
279
+
280
+ void UnsafeAppend(const int64_t num_copies, T value) {
281
+ auto data = mutable_data() + length();
282
+ bytes_builder_.UnsafeAdvance(num_copies * sizeof(T));
283
+ std::fill(data, data + num_copies, value);
284
+ }
285
+
286
+ Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) {
287
+ return bytes_builder_.Resize(new_capacity * sizeof(T), shrink_to_fit);
288
+ }
289
+
290
+ Status Reserve(const int64_t additional_elements) {
291
+ return bytes_builder_.Reserve(additional_elements * sizeof(T));
292
+ }
293
+
294
+ Status Advance(const int64_t length) {
295
+ return bytes_builder_.Advance(length * sizeof(T));
296
+ }
297
+
298
+ Status Finish(std::shared_ptr<Buffer>* out, bool shrink_to_fit = true) {
299
+ return bytes_builder_.Finish(out, shrink_to_fit);
300
+ }
301
+
302
+ Result<std::shared_ptr<Buffer>> Finish(bool shrink_to_fit = true) {
303
+ std::shared_ptr<Buffer> out;
304
+ ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit));
305
+ return out;
306
+ }
307
+
308
+ /// \brief Like Finish, but override the final buffer size
309
+ ///
310
+ /// This is useful after writing data directly into the builder memory
311
+ /// without calling the Append methods (basically, when using TypedBufferBuilder
312
+ /// only for memory allocation).
313
+ Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
314
+ bool shrink_to_fit = true) {
315
+ return bytes_builder_.FinishWithLength(final_length * sizeof(T), shrink_to_fit);
316
+ }
317
+
318
+ void Reset() { bytes_builder_.Reset(); }
319
+
320
+ int64_t length() const { return bytes_builder_.length() / sizeof(T); }
321
+ int64_t capacity() const { return bytes_builder_.capacity() / sizeof(T); }
322
+ const T* data() const { return reinterpret_cast<const T*>(bytes_builder_.data()); }
323
+ T* mutable_data() { return reinterpret_cast<T*>(bytes_builder_.mutable_data()); }
324
+
325
+ private:
326
+ BufferBuilder bytes_builder_;
327
+ };
328
+
329
+ /// \brief A BufferBuilder for building a buffer containing a bitmap
330
+ template <>
331
+ class TypedBufferBuilder<bool> {
332
+ public:
333
+ explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(),
334
+ int64_t alignment = kDefaultBufferAlignment)
335
+ : bytes_builder_(pool, alignment) {}
336
+
337
+ explicit TypedBufferBuilder(BufferBuilder builder)
338
+ : bytes_builder_(std::move(builder)) {}
339
+
340
+ BufferBuilder* bytes_builder() { return &bytes_builder_; }
341
+
342
+ Status Append(bool value) {
343
+ ARROW_RETURN_NOT_OK(Reserve(1));
344
+ UnsafeAppend(value);
345
+ return Status::OK();
346
+ }
347
+
348
+ Status Append(const uint8_t* valid_bytes, int64_t num_elements) {
349
+ ARROW_RETURN_NOT_OK(Reserve(num_elements));
350
+ UnsafeAppend(valid_bytes, num_elements);
351
+ return Status::OK();
352
+ }
353
+
354
+ Status Append(const int64_t num_copies, bool value) {
355
+ ARROW_RETURN_NOT_OK(Reserve(num_copies));
356
+ UnsafeAppend(num_copies, value);
357
+ return Status::OK();
358
+ }
359
+
360
+ void UnsafeAppend(bool value) {
361
+ bit_util::SetBitTo(mutable_data(), bit_length_, value);
362
+ if (!value) {
363
+ ++false_count_;
364
+ }
365
+ ++bit_length_;
366
+ }
367
+
368
+ /// \brief Append bits from an array of bytes (one value per byte)
369
+ void UnsafeAppend(const uint8_t* bytes, int64_t num_elements) {
370
+ if (num_elements == 0) return;
371
+ int64_t i = 0;
372
+ internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] {
373
+ bool value = bytes[i++];
374
+ false_count_ += !value;
375
+ return value;
376
+ });
377
+ bit_length_ += num_elements;
378
+ }
379
+
380
+ /// \brief Append bits from a packed bitmap
381
+ void UnsafeAppend(const uint8_t* bitmap, int64_t offset, int64_t num_elements) {
382
+ if (num_elements == 0) return;
383
+ internal::CopyBitmap(bitmap, offset, num_elements, mutable_data(), bit_length_);
384
+ false_count_ += num_elements - internal::CountSetBits(bitmap, offset, num_elements);
385
+ bit_length_ += num_elements;
386
+ }
387
+
388
+ void UnsafeAppend(const int64_t num_copies, bool value) {
389
+ bit_util::SetBitsTo(mutable_data(), bit_length_, num_copies, value);
390
+ false_count_ += num_copies * !value;
391
+ bit_length_ += num_copies;
392
+ }
393
+
394
+ template <bool count_falses, typename Generator>
395
+ void UnsafeAppend(const int64_t num_elements, Generator&& gen) {
396
+ if (num_elements == 0) return;
397
+
398
+ if (count_falses) {
399
+ internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] {
400
+ bool value = gen();
401
+ false_count_ += !value;
402
+ return value;
403
+ });
404
+ } else {
405
+ internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements,
406
+ std::forward<Generator>(gen));
407
+ }
408
+ bit_length_ += num_elements;
409
+ }
410
+
411
+ Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) {
412
+ const int64_t old_byte_capacity = bytes_builder_.capacity();
413
+ ARROW_RETURN_NOT_OK(
414
+ bytes_builder_.Resize(bit_util::BytesForBits(new_capacity), shrink_to_fit));
415
+ // Resize() may have chosen a larger capacity (e.g. for padding),
416
+ // so ask it again before calling memset().
417
+ const int64_t new_byte_capacity = bytes_builder_.capacity();
418
+ if (new_byte_capacity > old_byte_capacity) {
419
+ // The additional buffer space is 0-initialized for convenience,
420
+ // so that other methods can simply bump the length.
421
+ memset(mutable_data() + old_byte_capacity, 0,
422
+ static_cast<size_t>(new_byte_capacity - old_byte_capacity));
423
+ }
424
+ return Status::OK();
425
+ }
426
+
427
+ Status Reserve(const int64_t additional_elements) {
428
+ return Resize(
429
+ BufferBuilder::GrowByFactor(bit_length_, bit_length_ + additional_elements),
430
+ false);
431
+ }
432
+
433
+ Status Advance(const int64_t length) {
434
+ ARROW_RETURN_NOT_OK(Reserve(length));
435
+ bit_length_ += length;
436
+ false_count_ += length;
437
+ return Status::OK();
438
+ }
439
+
440
+ Status Finish(std::shared_ptr<Buffer>* out, bool shrink_to_fit = true) {
441
+ // set bytes_builder_.size_ == byte size of data
442
+ bytes_builder_.UnsafeAdvance(bit_util::BytesForBits(bit_length_) -
443
+ bytes_builder_.length());
444
+ bit_length_ = false_count_ = 0;
445
+ return bytes_builder_.Finish(out, shrink_to_fit);
446
+ }
447
+
448
+ Result<std::shared_ptr<Buffer>> Finish(bool shrink_to_fit = true) {
449
+ std::shared_ptr<Buffer> out;
450
+ ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit));
451
+ return out;
452
+ }
453
+
454
+ /// \brief Like Finish, but override the final buffer size
455
+ ///
456
+ /// This is useful after writing data directly into the builder memory
457
+ /// without calling the Append methods (basically, when using TypedBufferBuilder
458
+ /// only for memory allocation).
459
+ Result<std::shared_ptr<Buffer>> FinishWithLength(int64_t final_length,
460
+ bool shrink_to_fit = true) {
461
+ const auto final_byte_length = bit_util::BytesForBits(final_length);
462
+ bytes_builder_.UnsafeAdvance(final_byte_length - bytes_builder_.length());
463
+ bit_length_ = false_count_ = 0;
464
+ return bytes_builder_.FinishWithLength(final_byte_length, shrink_to_fit);
465
+ }
466
+
467
+ void Reset() {
468
+ bytes_builder_.Reset();
469
+ bit_length_ = false_count_ = 0;
470
+ }
471
+
472
+ int64_t length() const { return bit_length_; }
473
+ int64_t capacity() const { return bytes_builder_.capacity() * 8; }
474
+ const uint8_t* data() const { return bytes_builder_.data(); }
475
+ uint8_t* mutable_data() { return bytes_builder_.mutable_data(); }
476
+ int64_t false_count() const { return false_count_; }
477
+
478
+ private:
479
+ BufferBuilder bytes_builder_;
480
+ int64_t bit_length_ = 0;
481
+ int64_t false_count_ = 0;
482
+ };
483
+
484
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cassert>
22
+ #include <cstdint>
23
+ #include <vector>
24
+
25
+ #include "arrow/type_fwd.h"
26
+ #include "arrow/util/macros.h"
27
+
28
+ namespace arrow::internal {
29
+
30
+ struct ChunkLocation {
31
+ /// \brief Index of the chunk in the array of chunks
32
+ ///
33
+ /// The value is always in the range `[0, chunks.size()]`. `chunks.size()` is used
34
+ /// to represent out-of-bounds locations.
35
+ int64_t chunk_index = 0;
36
+
37
+ /// \brief Index of the value in the chunk
38
+ ///
39
+ /// The value is undefined if chunk_index >= chunks.size()
40
+ int64_t index_in_chunk = 0;
41
+ };
42
+
43
+ /// \brief An utility that incrementally resolves logical indices into
44
+ /// physical indices in a chunked array.
45
+ struct ARROW_EXPORT ChunkResolver {
46
+ private:
47
+ /// \brief Array containing `chunks.size() + 1` offsets.
48
+ ///
49
+ /// `offsets_[i]` is the starting logical index of chunk `i`. `offsets_[0]` is always 0
50
+ /// and `offsets_[chunks.size()]` is the logical length of the chunked array.
51
+ std::vector<int64_t> offsets_;
52
+
53
+ /// \brief Cache of the index of the last resolved chunk.
54
+ ///
55
+ /// \invariant `cached_chunk_ in [0, chunks.size()]`
56
+ mutable std::atomic<int64_t> cached_chunk_;
57
+
58
+ public:
59
+ explicit ChunkResolver(const ArrayVector& chunks) noexcept;
60
+ explicit ChunkResolver(const std::vector<const Array*>& chunks) noexcept;
61
+ explicit ChunkResolver(const RecordBatchVector& batches) noexcept;
62
+
63
+ ChunkResolver(ChunkResolver&& other) noexcept;
64
+ ChunkResolver& operator=(ChunkResolver&& other) noexcept;
65
+
66
+ ChunkResolver(const ChunkResolver& other) noexcept;
67
+ ChunkResolver& operator=(const ChunkResolver& other) noexcept;
68
+
69
+ /// \brief Resolve a logical index to a ChunkLocation.
70
+ ///
71
+ /// The returned ChunkLocation contains the chunk index and the within-chunk index
72
+ /// equivalent to the logical index.
73
+ ///
74
+ /// \pre index >= 0
75
+ /// \post location.chunk_index in [0, chunks.size()]
76
+ /// \param index The logical index to resolve
77
+ /// \return ChunkLocation with a valid chunk_index if index is within
78
+ /// bounds, or with chunk_index == chunks.size() if logical index is
79
+ /// `>= chunked_array.length()`.
80
+ inline ChunkLocation Resolve(int64_t index) const {
81
+ const auto cached_chunk = cached_chunk_.load(std::memory_order_relaxed);
82
+ const auto chunk_index =
83
+ ResolveChunkIndex</*StoreCachedChunk=*/true>(index, cached_chunk);
84
+ return {chunk_index, index - offsets_[chunk_index]};
85
+ }
86
+
87
+ /// \brief Resolve a logical index to a ChunkLocation.
88
+ ///
89
+ /// The returned ChunkLocation contains the chunk index and the within-chunk index
90
+ /// equivalent to the logical index.
91
+ ///
92
+ /// \pre index >= 0
93
+ /// \post location.chunk_index in [0, chunks.size()]
94
+ /// \param index The logical index to resolve
95
+ /// \param hint ChunkLocation{} or the last ChunkLocation returned by
96
+ /// this ChunkResolver.
97
+ /// \return ChunkLocation with a valid chunk_index if index is within
98
+ /// bounds, or with chunk_index == chunks.size() if logical index is
99
+ /// `>= chunked_array.length()`.
100
+ inline ChunkLocation ResolveWithChunkIndexHint(int64_t index,
101
+ ChunkLocation hint) const {
102
+ assert(hint.chunk_index < static_cast<int64_t>(offsets_.size()));
103
+ const auto chunk_index =
104
+ ResolveChunkIndex</*StoreCachedChunk=*/false>(index, hint.chunk_index);
105
+ return {chunk_index, index - offsets_[chunk_index]};
106
+ }
107
+
108
+ private:
109
+ template <bool StoreCachedChunk>
110
+ inline int64_t ResolveChunkIndex(int64_t index, int64_t cached_chunk) const {
111
+ // It is common for algorithms sequentially processing arrays to make consecutive
112
+ // accesses at a relatively small distance from each other, hence often falling in the
113
+ // same chunk.
114
+ //
115
+ // This is guaranteed when merging (assuming each side of the merge uses its
116
+ // own resolver), and is the most common case in recursive invocations of
117
+ // partitioning.
118
+ const auto num_offsets = static_cast<int64_t>(offsets_.size());
119
+ const int64_t* offsets = offsets_.data();
120
+ if (ARROW_PREDICT_TRUE(index >= offsets[cached_chunk]) &&
121
+ (cached_chunk + 1 == num_offsets || index < offsets[cached_chunk + 1])) {
122
+ return cached_chunk;
123
+ }
124
+ // lo < hi is guaranteed by `num_offsets = chunks.size() + 1`
125
+ const auto chunk_index = Bisect(index, offsets, /*lo=*/0, /*hi=*/num_offsets);
126
+ if constexpr (StoreCachedChunk) {
127
+ assert(chunk_index < static_cast<int64_t>(offsets_.size()));
128
+ cached_chunk_.store(chunk_index, std::memory_order_relaxed);
129
+ }
130
+ return chunk_index;
131
+ }
132
+
133
+ /// \brief Find the index of the chunk that contains the logical index.
134
+ ///
135
+ /// Any non-negative index is accepted. When `hi=num_offsets`, the largest
136
+ /// possible return value is `num_offsets-1` which is equal to
137
+ /// `chunks.size()`. The is returned when the logical index is out-of-bounds.
138
+ ///
139
+ /// \pre index >= 0
140
+ /// \pre lo < hi
141
+ /// \pre lo >= 0 && hi <= offsets_.size()
142
+ static inline int64_t Bisect(int64_t index, const int64_t* offsets, int64_t lo,
143
+ int64_t hi) {
144
+ // Similar to std::upper_bound(), but slightly different as our offsets
145
+ // array always starts with 0.
146
+ auto n = hi - lo;
147
+ // First iteration does not need to check for n > 1
148
+ // (lo < hi is guaranteed by the precondition).
149
+ assert(n > 1 && "lo < hi is a precondition of Bisect");
150
+ do {
151
+ const int64_t m = n >> 1;
152
+ const int64_t mid = lo + m;
153
+ if (index >= offsets[mid]) {
154
+ lo = mid;
155
+ n -= m;
156
+ } else {
157
+ n = m;
158
+ }
159
+ } while (n > 1);
160
+ return lo;
161
+ }
162
+ };
163
+
164
+ } // namespace arrow::internal
venv/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <atomic>
21
+ #include <cstdint>
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <string>
25
+
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type_fwd.h"
29
+ #include "arrow/util/visibility.h"
30
+
31
+ namespace arrow {
32
+
33
+ namespace internal {
34
+
35
+ ///////////////////////////////////////////////////////////////////////
36
+ // Helper tracking memory statistics
37
+
38
+ /// \brief Memory pool statistics
39
+ ///
40
+ /// 64-byte aligned so that all atomic values are on the same cache line.
41
+ class alignas(64) MemoryPoolStats {
42
+ private:
43
+ // All atomics are updated according to Acquire-Release ordering.
44
+ // https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
45
+ //
46
+ // max_memory_, total_allocated_bytes_, and num_allocs_ only go up (they are
47
+ // monotonically increasing) which can allow some optimizations.
48
+ std::atomic<int64_t> max_memory_{0};
49
+ std::atomic<int64_t> bytes_allocated_{0};
50
+ std::atomic<int64_t> total_allocated_bytes_{0};
51
+ std::atomic<int64_t> num_allocs_{0};
52
+
53
+ public:
54
+ int64_t max_memory() const { return max_memory_.load(std::memory_order_acquire); }
55
+
56
+ int64_t bytes_allocated() const {
57
+ return bytes_allocated_.load(std::memory_order_acquire);
58
+ }
59
+
60
+ int64_t total_bytes_allocated() const {
61
+ return total_allocated_bytes_.load(std::memory_order_acquire);
62
+ }
63
+
64
+ int64_t num_allocations() const { return num_allocs_.load(std::memory_order_acquire); }
65
+
66
+ inline void DidAllocateBytes(int64_t size) {
67
+ // Issue the load before everything else. max_memory_ is monotonically increasing,
68
+ // so we can use a relaxed load before the read-modify-write.
69
+ auto max_memory = max_memory_.load(std::memory_order_relaxed);
70
+ const auto old_bytes_allocated =
71
+ bytes_allocated_.fetch_add(size, std::memory_order_acq_rel);
72
+ // Issue store operations on values that we don't depend on to proceed
73
+ // with execution. When done, max_memory and old_bytes_allocated have
74
+ // a higher chance of being available on CPU registers. This also has the
75
+ // nice side-effect of putting 3 atomic stores close to each other in the
76
+ // instruction stream.
77
+ total_allocated_bytes_.fetch_add(size, std::memory_order_acq_rel);
78
+ num_allocs_.fetch_add(1, std::memory_order_acq_rel);
79
+
80
+ // If other threads are updating max_memory_ concurrently we leave the loop without
81
+ // updating knowing that it already reached a value even higher than ours.
82
+ const auto allocated = old_bytes_allocated + size;
83
+ while (max_memory < allocated && !max_memory_.compare_exchange_weak(
84
+ /*expected=*/max_memory, /*desired=*/allocated,
85
+ std::memory_order_acq_rel)) {
86
+ }
87
+ }
88
+
89
+ inline void DidReallocateBytes(int64_t old_size, int64_t new_size) {
90
+ if (new_size > old_size) {
91
+ DidAllocateBytes(new_size - old_size);
92
+ } else {
93
+ DidFreeBytes(old_size - new_size);
94
+ }
95
+ }
96
+
97
+ inline void DidFreeBytes(int64_t size) {
98
+ bytes_allocated_.fetch_sub(size, std::memory_order_acq_rel);
99
+ }
100
+ };
101
+
102
+ } // namespace internal
103
+
104
+ /// Base class for memory allocation on the CPU.
105
+ ///
106
+ /// Besides tracking the number of allocated bytes, the allocator also should
107
+ /// take care of the required 64-byte alignment.
108
+ class ARROW_EXPORT MemoryPool {
109
+ public:
110
+ virtual ~MemoryPool() = default;
111
+
112
+ /// \brief EXPERIMENTAL. Create a new instance of the default MemoryPool
113
+ static std::unique_ptr<MemoryPool> CreateDefault();
114
+
115
+ /// Allocate a new memory region of at least size bytes.
116
+ ///
117
+ /// The allocated region shall be 64-byte aligned.
118
+ Status Allocate(int64_t size, uint8_t** out) {
119
+ return Allocate(size, kDefaultBufferAlignment, out);
120
+ }
121
+
122
+ /// Allocate a new memory region of at least size bytes aligned to alignment.
123
+ virtual Status Allocate(int64_t size, int64_t alignment, uint8_t** out) = 0;
124
+
125
+ /// Resize an already allocated memory section.
126
+ ///
127
+ /// As by default most default allocators on a platform don't support aligned
128
+ /// reallocation, this function can involve a copy of the underlying data.
129
+ virtual Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
130
+ uint8_t** ptr) = 0;
131
+ Status Reallocate(int64_t old_size, int64_t new_size, uint8_t** ptr) {
132
+ return Reallocate(old_size, new_size, kDefaultBufferAlignment, ptr);
133
+ }
134
+
135
+ /// Free an allocated region.
136
+ ///
137
+ /// @param buffer Pointer to the start of the allocated memory region
138
+ /// @param size Allocated size located at buffer. An allocator implementation
139
+ /// may use this for tracking the amount of allocated bytes as well as for
140
+ /// faster deallocation if supported by its backend.
141
+ /// @param alignment The alignment of the allocation. Defaults to 64 bytes.
142
+ virtual void Free(uint8_t* buffer, int64_t size, int64_t alignment) = 0;
143
+ void Free(uint8_t* buffer, int64_t size) {
144
+ Free(buffer, size, kDefaultBufferAlignment);
145
+ }
146
+
147
+ /// Return unused memory to the OS
148
+ ///
149
+ /// Only applies to allocators that hold onto unused memory. This will be
150
+ /// best effort, a memory pool may not implement this feature or may be
151
+ /// unable to fulfill the request due to fragmentation.
152
+ virtual void ReleaseUnused() {}
153
+
154
+ /// The number of bytes that were allocated and not yet free'd through
155
+ /// this allocator.
156
+ virtual int64_t bytes_allocated() const = 0;
157
+
158
+ /// Return peak memory allocation in this memory pool
159
+ ///
160
+ /// \return Maximum bytes allocated. If not known (or not implemented),
161
+ /// returns -1
162
+ virtual int64_t max_memory() const;
163
+
164
+ /// The number of bytes that were allocated.
165
+ virtual int64_t total_bytes_allocated() const = 0;
166
+
167
+ /// The number of allocations or reallocations that were requested.
168
+ virtual int64_t num_allocations() const = 0;
169
+
170
+ /// The name of the backend used by this MemoryPool (e.g. "system" or "jemalloc").
171
+ virtual std::string backend_name() const = 0;
172
+
173
+ protected:
174
+ MemoryPool() = default;
175
+ };
176
+
177
+ class ARROW_EXPORT LoggingMemoryPool : public MemoryPool {
178
+ public:
179
+ explicit LoggingMemoryPool(MemoryPool* pool);
180
+ ~LoggingMemoryPool() override = default;
181
+
182
+ using MemoryPool::Allocate;
183
+ using MemoryPool::Free;
184
+ using MemoryPool::Reallocate;
185
+
186
+ Status Allocate(int64_t size, int64_t alignment, uint8_t** out) override;
187
+ Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
188
+ uint8_t** ptr) override;
189
+ void Free(uint8_t* buffer, int64_t size, int64_t alignment) override;
190
+
191
+ int64_t bytes_allocated() const override;
192
+
193
+ int64_t max_memory() const override;
194
+
195
+ int64_t total_bytes_allocated() const override;
196
+
197
+ int64_t num_allocations() const override;
198
+
199
+ std::string backend_name() const override;
200
+
201
+ private:
202
+ MemoryPool* pool_;
203
+ };
204
+
205
+ /// Derived class for memory allocation.
206
+ ///
207
+ /// Tracks the number of bytes and maximum memory allocated through its direct
208
+ /// calls. Actual allocation is delegated to MemoryPool class.
209
+ class ARROW_EXPORT ProxyMemoryPool : public MemoryPool {
210
+ public:
211
+ explicit ProxyMemoryPool(MemoryPool* pool);
212
+ ~ProxyMemoryPool() override;
213
+
214
+ using MemoryPool::Allocate;
215
+ using MemoryPool::Free;
216
+ using MemoryPool::Reallocate;
217
+
218
+ Status Allocate(int64_t size, int64_t alignment, uint8_t** out) override;
219
+ Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
220
+ uint8_t** ptr) override;
221
+ void Free(uint8_t* buffer, int64_t size, int64_t alignment) override;
222
+
223
+ int64_t bytes_allocated() const override;
224
+
225
+ int64_t max_memory() const override;
226
+
227
+ int64_t total_bytes_allocated() const override;
228
+
229
+ int64_t num_allocations() const override;
230
+
231
+ std::string backend_name() const override;
232
+
233
+ private:
234
+ class ProxyMemoryPoolImpl;
235
+ std::unique_ptr<ProxyMemoryPoolImpl> impl_;
236
+ };
237
+
238
+ /// \brief Return a process-wide memory pool based on the system allocator.
239
+ ARROW_EXPORT MemoryPool* system_memory_pool();
240
+
241
+ /// \brief Return a process-wide memory pool based on jemalloc.
242
+ ///
243
+ /// May return NotImplemented if jemalloc is not available.
244
+ ARROW_EXPORT Status jemalloc_memory_pool(MemoryPool** out);
245
+
246
+ /// \brief Set jemalloc memory page purging behavior for future-created arenas
247
+ /// to the indicated number of milliseconds. See dirty_decay_ms and
248
+ /// muzzy_decay_ms options in jemalloc for a description of what these do. The
249
+ /// default is configured to 1000 (1 second) which releases memory more
250
+ /// aggressively to the operating system than the jemalloc default of 10
251
+ /// seconds. If you set the value to 0, dirty / muzzy pages will be released
252
+ /// immediately rather than with a time decay, but this may reduce application
253
+ /// performance.
254
+ ARROW_EXPORT
255
+ Status jemalloc_set_decay_ms(int ms);
256
+
257
+ /// \brief Get basic statistics from jemalloc's mallctl.
258
+ /// See the MALLCTL NAMESPACE section in jemalloc project documentation for
259
+ /// available stats.
260
+ ARROW_EXPORT
261
+ Result<int64_t> jemalloc_get_stat(const char* name);
262
+
263
+ /// \brief Reset the counter for peak bytes allocated in the calling thread to zero.
264
+ /// This affects subsequent calls to thread.peak.read, but not the values returned by
265
+ /// thread.allocated or thread.deallocated.
266
+ ARROW_EXPORT
267
+ Status jemalloc_peak_reset();
268
+
269
+ /// \brief Print summary statistics in human-readable form to stderr.
270
+ /// See malloc_stats_print documentation in jemalloc project documentation for
271
+ /// available opt flags.
272
+ ARROW_EXPORT
273
+ Status jemalloc_stats_print(const char* opts = "");
274
+
275
+ /// \brief Print summary statistics in human-readable form using a callback
276
+ /// See malloc_stats_print documentation in jemalloc project documentation for
277
+ /// available opt flags.
278
+ ARROW_EXPORT
279
+ Status jemalloc_stats_print(std::function<void(const char*)> write_cb,
280
+ const char* opts = "");
281
+
282
+ /// \brief Get summary statistics in human-readable form.
283
+ /// See malloc_stats_print documentation in jemalloc project documentation for
284
+ /// available opt flags.
285
+ ARROW_EXPORT
286
+ Result<std::string> jemalloc_stats_string(const char* opts = "");
287
+
288
+ /// \brief Return a process-wide memory pool based on mimalloc.
289
+ ///
290
+ /// May return NotImplemented if mimalloc is not available.
291
+ ARROW_EXPORT Status mimalloc_memory_pool(MemoryPool** out);
292
+
293
+ /// \brief Return the names of the backends supported by this Arrow build.
294
+ ARROW_EXPORT std::vector<std::string> SupportedMemoryBackendNames();
295
+
296
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h ADDED
@@ -0,0 +1,407 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "arrow/compare.h"
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type_fwd.h"
29
+ #include "arrow/util/iterator.h"
30
+ #include "arrow/util/macros.h"
31
+ #include "arrow/util/visibility.h"
32
+
33
+ namespace arrow {
34
+
35
+ /// \class RecordBatch
36
+ /// \brief Collection of equal-length arrays matching a particular Schema
37
+ ///
38
+ /// A record batch is table-like data structure that is semantically a sequence
39
+ /// of fields, each a contiguous Arrow array
40
+ class ARROW_EXPORT RecordBatch {
41
+ public:
42
+ virtual ~RecordBatch() = default;
43
+
44
+ /// \param[in] schema The record batch schema
45
+ /// \param[in] num_rows length of fields in the record batch. Each array
46
+ /// should have the same length as num_rows
47
+ /// \param[in] columns the record batch fields as vector of arrays
48
+ static std::shared_ptr<RecordBatch> Make(std::shared_ptr<Schema> schema,
49
+ int64_t num_rows,
50
+ std::vector<std::shared_ptr<Array>> columns);
51
+
52
+ /// \brief Construct record batch from vector of internal data structures
53
+ /// \since 0.5.0
54
+ ///
55
+ /// This class is intended for internal use, or advanced users.
56
+ ///
57
+ /// \param schema the record batch schema
58
+ /// \param num_rows the number of semantic rows in the record batch. This
59
+ /// should be equal to the length of each field
60
+ /// \param columns the data for the batch's columns
61
+ static std::shared_ptr<RecordBatch> Make(
62
+ std::shared_ptr<Schema> schema, int64_t num_rows,
63
+ std::vector<std::shared_ptr<ArrayData>> columns);
64
+
65
+ /// \brief Create an empty RecordBatch of a given schema
66
+ ///
67
+ /// The output RecordBatch will be created with DataTypes from
68
+ /// the given schema.
69
+ ///
70
+ /// \param[in] schema the schema of the empty RecordBatch
71
+ /// \param[in] pool the memory pool to allocate memory from
72
+ /// \return the resulting RecordBatch
73
+ static Result<std::shared_ptr<RecordBatch>> MakeEmpty(
74
+ std::shared_ptr<Schema> schema, MemoryPool* pool = default_memory_pool());
75
+
76
+ /// \brief Convert record batch to struct array
77
+ ///
78
+ /// Create a struct array whose child arrays are the record batch's columns.
79
+ /// Note that the record batch's top-level field metadata cannot be reflected
80
+ /// in the resulting struct array.
81
+ Result<std::shared_ptr<StructArray>> ToStructArray() const;
82
+
83
+ /// \brief Convert record batch with one data type to Tensor
84
+ ///
85
+ /// Create a Tensor object with shape (number of rows, number of columns) and
86
+ /// strides (type size in bytes, type size in bytes * number of rows).
87
+ /// Generated Tensor will have column-major layout.
88
+ ///
89
+ /// \param[in] null_to_nan if true, convert nulls to NaN
90
+ /// \param[in] row_major if true, create row-major Tensor else column-major Tensor
91
+ /// \param[in] pool the memory pool to allocate the tensor buffer
92
+ /// \return the resulting Tensor
93
+ Result<std::shared_ptr<Tensor>> ToTensor(
94
+ bool null_to_nan = false, bool row_major = true,
95
+ MemoryPool* pool = default_memory_pool()) const;
96
+
97
+ /// \brief Construct record batch from struct array
98
+ ///
99
+ /// This constructs a record batch using the child arrays of the given
100
+ /// array, which must be a struct array.
101
+ ///
102
+ /// \param[in] array the source array, must be a StructArray
103
+ /// \param[in] pool the memory pool to allocate new validity bitmaps
104
+ ///
105
+ /// This operation will usually be zero-copy. However, if the struct array has an
106
+ /// offset or a validity bitmap then these will need to be pushed into the child arrays.
107
+ /// Pushing the offset is zero-copy but pushing the validity bitmap is not.
108
+ static Result<std::shared_ptr<RecordBatch>> FromStructArray(
109
+ const std::shared_ptr<Array>& array, MemoryPool* pool = default_memory_pool());
110
+
111
+ /// \brief Determine if two record batches are exactly equal
112
+ ///
113
+ /// \param[in] other the RecordBatch to compare with
114
+ /// \param[in] check_metadata if true, check that Schema metadata is the same
115
+ /// \param[in] opts the options for equality comparisons
116
+ /// \return true if batches are equal
117
+ bool Equals(const RecordBatch& other, bool check_metadata = false,
118
+ const EqualOptions& opts = EqualOptions::Defaults()) const;
119
+
120
+ /// \brief Determine if two record batches are approximately equal
121
+ ///
122
+ /// \param[in] other the RecordBatch to compare with
123
+ /// \param[in] opts the options for equality comparisons
124
+ /// \return true if batches are approximately equal
125
+ bool ApproxEquals(const RecordBatch& other,
126
+ const EqualOptions& opts = EqualOptions::Defaults()) const;
127
+
128
+ /// \return the record batch's schema
129
+ const std::shared_ptr<Schema>& schema() const { return schema_; }
130
+
131
+ /// \brief Replace the schema with another schema with the same types, but potentially
132
+ /// different field names and/or metadata.
133
+ Result<std::shared_ptr<RecordBatch>> ReplaceSchema(
134
+ std::shared_ptr<Schema> schema) const;
135
+
136
+ /// \brief Retrieve all columns at once
137
+ virtual const std::vector<std::shared_ptr<Array>>& columns() const = 0;
138
+
139
+ /// \brief Retrieve an array from the record batch
140
+ /// \param[in] i field index, does not boundscheck
141
+ /// \return an Array object
142
+ virtual std::shared_ptr<Array> column(int i) const = 0;
143
+
144
+ /// \brief Retrieve an array from the record batch
145
+ /// \param[in] name field name
146
+ /// \return an Array or null if no field was found
147
+ std::shared_ptr<Array> GetColumnByName(const std::string& name) const;
148
+
149
+ /// \brief Retrieve an array's internal data from the record batch
150
+ /// \param[in] i field index, does not boundscheck
151
+ /// \return an internal ArrayData object
152
+ virtual std::shared_ptr<ArrayData> column_data(int i) const = 0;
153
+
154
+ /// \brief Retrieve all arrays' internal data from the record batch.
155
+ virtual const ArrayDataVector& column_data() const = 0;
156
+
157
+ /// \brief Add column to the record batch, producing a new RecordBatch
158
+ ///
159
+ /// \param[in] i field index, which will be boundschecked
160
+ /// \param[in] field field to be added
161
+ /// \param[in] column column to be added
162
+ virtual Result<std::shared_ptr<RecordBatch>> AddColumn(
163
+ int i, const std::shared_ptr<Field>& field,
164
+ const std::shared_ptr<Array>& column) const = 0;
165
+
166
+ /// \brief Add new nullable column to the record batch, producing a new
167
+ /// RecordBatch.
168
+ ///
169
+ /// For non-nullable columns, use the Field-based version of this method.
170
+ ///
171
+ /// \param[in] i field index, which will be boundschecked
172
+ /// \param[in] field_name name of field to be added
173
+ /// \param[in] column column to be added
174
+ virtual Result<std::shared_ptr<RecordBatch>> AddColumn(
175
+ int i, std::string field_name, const std::shared_ptr<Array>& column) const;
176
+
177
+ /// \brief Replace a column in the record batch, producing a new RecordBatch
178
+ ///
179
+ /// \param[in] i field index, does boundscheck
180
+ /// \param[in] field field to be replaced
181
+ /// \param[in] column column to be replaced
182
+ virtual Result<std::shared_ptr<RecordBatch>> SetColumn(
183
+ int i, const std::shared_ptr<Field>& field,
184
+ const std::shared_ptr<Array>& column) const = 0;
185
+
186
+ /// \brief Remove column from the record batch, producing a new RecordBatch
187
+ ///
188
+ /// \param[in] i field index, does boundscheck
189
+ virtual Result<std::shared_ptr<RecordBatch>> RemoveColumn(int i) const = 0;
190
+
191
+ virtual std::shared_ptr<RecordBatch> ReplaceSchemaMetadata(
192
+ const std::shared_ptr<const KeyValueMetadata>& metadata) const = 0;
193
+
194
+ /// \brief Name in i-th column
195
+ const std::string& column_name(int i) const;
196
+
197
+ /// \return the number of columns in the table
198
+ int num_columns() const;
199
+
200
+ /// \return the number of rows (the corresponding length of each column)
201
+ int64_t num_rows() const { return num_rows_; }
202
+
203
+ /// \brief Copy the entire RecordBatch to destination MemoryManager
204
+ ///
205
+ /// This uses Array::CopyTo on each column of the record batch to create
206
+ /// a new record batch where all underlying buffers for the columns have
207
+ /// been copied to the destination MemoryManager. This uses
208
+ /// MemoryManager::CopyBuffer under the hood.
209
+ Result<std::shared_ptr<RecordBatch>> CopyTo(
210
+ const std::shared_ptr<MemoryManager>& to) const;
211
+
212
+ /// \brief View or Copy the entire RecordBatch to destination MemoryManager
213
+ ///
214
+ /// This uses Array::ViewOrCopyTo on each column of the record batch to create
215
+ /// a new record batch where all underlying buffers for the columns have
216
+ /// been zero-copy viewed on the destination MemoryManager, falling back
217
+ /// to performing a copy if it can't be viewed as a zero-copy buffer. This uses
218
+ /// Buffer::ViewOrCopy under the hood.
219
+ Result<std::shared_ptr<RecordBatch>> ViewOrCopyTo(
220
+ const std::shared_ptr<MemoryManager>& to) const;
221
+
222
+ /// \brief Slice each of the arrays in the record batch
223
+ /// \param[in] offset the starting offset to slice, through end of batch
224
+ /// \return new record batch
225
+ virtual std::shared_ptr<RecordBatch> Slice(int64_t offset) const;
226
+
227
+ /// \brief Slice each of the arrays in the record batch
228
+ /// \param[in] offset the starting offset to slice
229
+ /// \param[in] length the number of elements to slice from offset
230
+ /// \return new record batch
231
+ virtual std::shared_ptr<RecordBatch> Slice(int64_t offset, int64_t length) const = 0;
232
+
233
+ /// \return PrettyPrint representation suitable for debugging
234
+ std::string ToString() const;
235
+
236
+ /// \brief Return names of all columns
237
+ std::vector<std::string> ColumnNames() const;
238
+
239
+ /// \brief Rename columns with provided names
240
+ Result<std::shared_ptr<RecordBatch>> RenameColumns(
241
+ const std::vector<std::string>& names) const;
242
+
243
+ /// \brief Return new record batch with specified columns
244
+ Result<std::shared_ptr<RecordBatch>> SelectColumns(
245
+ const std::vector<int>& indices) const;
246
+
247
+ /// \brief Perform cheap validation checks to determine obvious inconsistencies
248
+ /// within the record batch's schema and internal data.
249
+ ///
250
+ /// This is O(k) where k is the total number of fields and array descendents.
251
+ ///
252
+ /// \return Status
253
+ virtual Status Validate() const;
254
+
255
+ /// \brief Perform extensive validation checks to determine inconsistencies
256
+ /// within the record batch's schema and internal data.
257
+ ///
258
+ /// This is potentially O(k*n) where n is the number of rows.
259
+ ///
260
+ /// \return Status
261
+ virtual Status ValidateFull() const;
262
+
263
+ protected:
264
+ RecordBatch(const std::shared_ptr<Schema>& schema, int64_t num_rows);
265
+
266
+ std::shared_ptr<Schema> schema_;
267
+ int64_t num_rows_;
268
+
269
+ private:
270
+ ARROW_DISALLOW_COPY_AND_ASSIGN(RecordBatch);
271
+ };
272
+
273
+ struct ARROW_EXPORT RecordBatchWithMetadata {
274
+ std::shared_ptr<RecordBatch> batch;
275
+ std::shared_ptr<KeyValueMetadata> custom_metadata;
276
+ };
277
+
278
+ /// \brief Abstract interface for reading stream of record batches
279
+ class ARROW_EXPORT RecordBatchReader {
280
+ public:
281
+ using ValueType = std::shared_ptr<RecordBatch>;
282
+
283
+ virtual ~RecordBatchReader();
284
+
285
+ /// \return the shared schema of the record batches in the stream
286
+ virtual std::shared_ptr<Schema> schema() const = 0;
287
+
288
+ /// \brief Read the next record batch in the stream. Return null for batch
289
+ /// when reaching end of stream
290
+ ///
291
+ /// \param[out] batch the next loaded batch, null at end of stream
292
+ /// \return Status
293
+ virtual Status ReadNext(std::shared_ptr<RecordBatch>* batch) = 0;
294
+
295
+ virtual Result<RecordBatchWithMetadata> ReadNext() {
296
+ return Status::NotImplemented("ReadNext with custom metadata");
297
+ }
298
+
299
+ /// \brief Iterator interface
300
+ Result<std::shared_ptr<RecordBatch>> Next() {
301
+ std::shared_ptr<RecordBatch> batch;
302
+ ARROW_RETURN_NOT_OK(ReadNext(&batch));
303
+ return batch;
304
+ }
305
+
306
+ /// \brief finalize reader
307
+ virtual Status Close() { return Status::OK(); }
308
+
309
+ class RecordBatchReaderIterator {
310
+ public:
311
+ using iterator_category = std::input_iterator_tag;
312
+ using difference_type = std::ptrdiff_t;
313
+ using value_type = std::shared_ptr<RecordBatch>;
314
+ using pointer = value_type const*;
315
+ using reference = value_type const&;
316
+
317
+ RecordBatchReaderIterator() : batch_(RecordBatchEnd()), reader_(NULLPTR) {}
318
+
319
+ explicit RecordBatchReaderIterator(RecordBatchReader* reader)
320
+ : batch_(RecordBatchEnd()), reader_(reader) {
321
+ Next();
322
+ }
323
+
324
+ bool operator==(const RecordBatchReaderIterator& other) const {
325
+ return batch_ == other.batch_;
326
+ }
327
+
328
+ bool operator!=(const RecordBatchReaderIterator& other) const {
329
+ return !(*this == other);
330
+ }
331
+
332
+ Result<std::shared_ptr<RecordBatch>> operator*() {
333
+ ARROW_RETURN_NOT_OK(batch_.status());
334
+
335
+ return batch_;
336
+ }
337
+
338
+ RecordBatchReaderIterator& operator++() {
339
+ Next();
340
+ return *this;
341
+ }
342
+
343
+ RecordBatchReaderIterator operator++(int) {
344
+ RecordBatchReaderIterator tmp(*this);
345
+ Next();
346
+ return tmp;
347
+ }
348
+
349
+ private:
350
+ std::shared_ptr<RecordBatch> RecordBatchEnd() {
351
+ return std::shared_ptr<RecordBatch>(NULLPTR);
352
+ }
353
+
354
+ void Next() {
355
+ if (reader_ == NULLPTR) {
356
+ batch_ = RecordBatchEnd();
357
+ return;
358
+ }
359
+ batch_ = reader_->Next();
360
+ }
361
+
362
+ Result<std::shared_ptr<RecordBatch>> batch_;
363
+ RecordBatchReader* reader_;
364
+ };
365
+ /// \brief Return an iterator to the first record batch in the stream
366
+ RecordBatchReaderIterator begin() { return RecordBatchReaderIterator(this); }
367
+
368
+ /// \brief Return an iterator to the end of the stream
369
+ RecordBatchReaderIterator end() { return RecordBatchReaderIterator(); }
370
+
371
+ /// \brief Consume entire stream as a vector of record batches
372
+ Result<RecordBatchVector> ToRecordBatches();
373
+
374
+ /// \brief Read all batches and concatenate as arrow::Table
375
+ Result<std::shared_ptr<Table>> ToTable();
376
+
377
+ /// \brief Create a RecordBatchReader from a vector of RecordBatch.
378
+ ///
379
+ /// \param[in] batches the vector of RecordBatch to read from
380
+ /// \param[in] schema schema to conform to. Will be inferred from the first
381
+ /// element if not provided.
382
+ static Result<std::shared_ptr<RecordBatchReader>> Make(
383
+ RecordBatchVector batches, std::shared_ptr<Schema> schema = NULLPTR);
384
+
385
+ /// \brief Create a RecordBatchReader from an Iterator of RecordBatch.
386
+ ///
387
+ /// \param[in] batches an iterator of RecordBatch to read from.
388
+ /// \param[in] schema schema that each record batch in iterator will conform to.
389
+ static Result<std::shared_ptr<RecordBatchReader>> MakeFromIterator(
390
+ Iterator<std::shared_ptr<RecordBatch>> batches, std::shared_ptr<Schema> schema);
391
+ };
392
+
393
+ /// \brief Concatenate record batches
394
+ ///
395
+ /// The columns of the new batch are formed by concatenate the same columns of each input
396
+ /// batch. Concatenate multiple batches into a new batch requires that the schema must be
397
+ /// consistent. It supports merging batches without columns (only length, scenarios such
398
+ /// as count(*)).
399
+ ///
400
+ /// \param[in] batches a vector of record batches to be concatenated
401
+ /// \param[in] pool memory to store the result will be allocated from this memory pool
402
+ /// \return the concatenated record batch
403
+ ARROW_EXPORT
404
+ Result<std::shared_ptr<RecordBatch>> ConcatenateRecordBatches(
405
+ const RecordBatchVector& batches, MemoryPool* pool = default_memory_pool());
406
+
407
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/result.h ADDED
@@ -0,0 +1,508 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // Copyright 2017 Asylo authors
3
+ //
4
+ // Licensed under the Apache License, Version 2.0 (the "License");
5
+ // you may not use this file except in compliance with the License.
6
+ // You may obtain a copy of the License at
7
+ //
8
+ // http://www.apache.org/licenses/LICENSE-2.0
9
+ //
10
+ // Unless required by applicable law or agreed to in writing, software
11
+ // distributed under the License is distributed on an "AS IS" BASIS,
12
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ // See the License for the specific language governing permissions and
14
+ // limitations under the License.
15
+ //
16
+
17
+ // Adapted from Asylo
18
+
19
+ #pragma once
20
+
21
+ #include <cstddef>
22
+ #include <new>
23
+ #include <string>
24
+ #include <type_traits>
25
+ #include <utility>
26
+
27
+ #include "arrow/status.h"
28
+ #include "arrow/util/aligned_storage.h"
29
+ #include "arrow/util/compare.h"
30
+
31
+ namespace arrow {
32
+
33
+ template <typename>
34
+ struct EnsureResult;
35
+
36
+ namespace internal {
37
+
38
+ ARROW_EXPORT void DieWithMessage(const std::string& msg);
39
+
40
+ ARROW_EXPORT void InvalidValueOrDie(const Status& st);
41
+
42
+ } // namespace internal
43
+
44
+ /// A class for representing either a usable value, or an error.
45
+ ///
46
+ /// A Result object either contains a value of type `T` or a Status object
47
+ /// explaining why such a value is not present. The type `T` must be
48
+ /// copy-constructible and/or move-constructible.
49
+ ///
50
+ /// The state of a Result object may be determined by calling ok() or
51
+ /// status(). The ok() method returns true if the object contains a valid value.
52
+ /// The status() method returns the internal Status object. A Result object
53
+ /// that contains a valid value will return an OK Status for a call to status().
54
+ ///
55
+ /// A value of type `T` may be extracted from a Result object through a call
56
+ /// to ValueOrDie(). This function should only be called if a call to ok()
57
+ /// returns true. Sample usage:
58
+ ///
59
+ /// ```
60
+ /// arrow::Result<Foo> result = CalculateFoo();
61
+ /// if (result.ok()) {
62
+ /// Foo foo = result.ValueOrDie();
63
+ /// foo.DoSomethingCool();
64
+ /// } else {
65
+ /// ARROW_LOG(ERROR) << result.status();
66
+ /// }
67
+ /// ```
68
+ ///
69
+ /// If `T` is a move-only type, like `std::unique_ptr<>`, then the value should
70
+ /// only be extracted after invoking `std::move()` on the Result object.
71
+ /// Sample usage:
72
+ ///
73
+ /// ```
74
+ /// arrow::Result<std::unique_ptr<Foo>> result = CalculateFoo();
75
+ /// if (result.ok()) {
76
+ /// std::unique_ptr<Foo> foo = std::move(result).ValueOrDie();
77
+ /// foo->DoSomethingCool();
78
+ /// } else {
79
+ /// ARROW_LOG(ERROR) << result.status();
80
+ /// }
81
+ /// ```
82
+ ///
83
+ /// Result is provided for the convenience of implementing functions that
84
+ /// return some value but may fail during execution. For instance, consider a
85
+ /// function with the following signature:
86
+ ///
87
+ /// ```
88
+ /// arrow::Status CalculateFoo(int *output);
89
+ /// ```
90
+ ///
91
+ /// This function may instead be written as:
92
+ ///
93
+ /// ```
94
+ /// arrow::Result<int> CalculateFoo();
95
+ /// ```
96
template <class T>
class [[nodiscard]] Result : public util::EqualityComparable<Result<T>> {
  // All Result instantiations are friends so the converting constructors /
  // assignment operators below can reach another Result<U>'s private state.
  template <typename U>
  friend class Result;

  static_assert(!std::is_same<T, Status>::value,
                "this assert indicates you have probably made a metaprogramming error");

 public:
  using ValueType = T;

  /// Constructs a Result object that contains a non-OK status.
  ///
  /// This constructor is marked `explicit` to prevent attempts to `return {}`
  /// from a function with a return type of, for example,
  /// `Result<std::vector<int>>`. While `return {}` seems like it would return
  /// an empty vector, it will actually invoke the default constructor of
  /// Result.
  explicit Result() noexcept  // NOLINT(runtime/explicit)
      : status_(Status::UnknownError("Uninitialized Result<T>")) {}

  // Destroys the wrapped value only if one was constructed (see Destroy()).
  ~Result() noexcept { Destroy(); }

  /// Constructs a Result object with the given non-OK Status object. All
  /// calls to ValueOrDie() on this object will abort. The given `status` must
  /// not be an OK status, otherwise this constructor will abort.
  ///
  /// This constructor is not declared explicit so that a function with a return
  /// type of `Result<T>` can return a Status object, and the status will be
  /// implicitly converted to the appropriate return type as a matter of
  /// convenience.
  ///
  /// \param status The non-OK Status object to initialize to.
  Result(const Status& status) noexcept  // NOLINT(runtime/explicit)
      : status_(status) {
    if (ARROW_PREDICT_FALSE(status.ok())) {
      internal::DieWithMessage(std::string("Constructed with a non-error status: ") +
                               status.ToString());
    }
  }

  /// Constructs a Result object that contains `value`. The resulting object
  /// is considered to have an OK status. The wrapped element can be accessed
  /// with ValueOrDie().
  ///
  /// This constructor is made implicit so that a function with a return type of
  /// `Result<T>` can return an object of type `U &&`, implicitly converting
  /// it to a `Result<T>` object.
  ///
  /// Note that `T` must be implicitly constructible from `U`, and `U` must not
  /// be a (cv-qualified) Status or Status-reference type. Due to C++
  /// reference-collapsing rules and perfect-forwarding semantics, this
  /// constructor matches invocations that pass `value` either as a const
  /// reference or as an rvalue reference. Since Result needs to work for both
  /// reference and rvalue-reference types, the constructor uses perfect
  /// forwarding to avoid invalidating arguments that were passed by reference.
  /// See http://thbecker.net/articles/rvalue_references/section_08.html for
  /// additional details.
  ///
  /// \param value The value to initialize to.
  template <typename U,
            typename E = typename std::enable_if<
                std::is_constructible<T, U>::value && std::is_convertible<U, T>::value &&
                !std::is_same<typename std::remove_reference<
                                  typename std::remove_cv<U>::type>::type,
                              Status>::value>::type>
  Result(U&& value) noexcept {  // NOLINT(runtime/explicit)
    ConstructValue(std::forward<U>(value));
  }

  /// Constructs a Result object that contains `value`. The resulting object
  /// is considered to have an OK status. The wrapped element can be accessed
  /// with ValueOrDie().
  ///
  /// This constructor is made implicit so that a function with a return type of
  /// `Result<T>` can return an object of type `T`, implicitly converting
  /// it to a `Result<T>` object.
  ///
  /// \param value The value to initialize to.
  // NOTE `Result(U&& value)` above should be sufficient, but some compilers
  // fail matching it.
  Result(T&& value) noexcept {  // NOLINT(runtime/explicit)
    ConstructValue(std::move(value));
  }

  /// Copy constructor.
  ///
  /// This constructor needs to be explicitly defined because the presence of
  /// the move-assignment operator deletes the default copy constructor. In such
  /// a scenario, since the deleted copy constructor has stricter binding rules
  /// than the templated copy constructor, the templated constructor cannot act
  /// as a copy constructor, and any attempt to copy-construct a `Result`
  /// object results in a compilation error.
  ///
  /// \param other The value to copy from.
  Result(const Result& other) noexcept : status_(other.status_) {
    if (ARROW_PREDICT_TRUE(status_.ok())) {
      ConstructValue(other.ValueUnsafe());
    }
  }

  /// Templatized constructor that constructs a `Result<T>` from a const
  /// reference to a `Result<U>`.
  ///
  /// `T` must be implicitly constructible from `const U &`.
  ///
  /// \param other The value to copy from.
  template <typename U, typename E = typename std::enable_if<
                            std::is_constructible<T, const U&>::value &&
                            std::is_convertible<U, T>::value>::type>
  Result(const Result<U>& other) noexcept : status_(other.status_) {
    if (ARROW_PREDICT_TRUE(status_.ok())) {
      ConstructValue(other.ValueUnsafe());
    }
  }

  /// Copy-assignment operator.
  ///
  /// \param other The Result object to copy.
  Result& operator=(const Result& other) noexcept {
    // Check for self-assignment.
    if (ARROW_PREDICT_FALSE(this == &other)) {
      return *this;
    }
    // Destroy any currently-held value before overwriting the status, then
    // copy-construct the new value only if `other` holds one.
    Destroy();
    status_ = other.status_;
    if (ARROW_PREDICT_TRUE(status_.ok())) {
      ConstructValue(other.ValueUnsafe());
    }
    return *this;
  }

  /// Templatized constructor which constructs a `Result<T>` by moving the
  /// contents of a `Result<U>`. `T` must be implicitly constructible from `U
  /// &&`.
  ///
  /// Sets `other` to contain a non-OK status with a`StatusError::Invalid`
  /// error code.
  ///
  /// \param other The Result object to move from and set to a non-OK status.
  template <typename U,
            typename E = typename std::enable_if<std::is_constructible<T, U&&>::value &&
                                                 std::is_convertible<U, T>::value>::type>
  Result(Result<U>&& other) noexcept {
    if (ARROW_PREDICT_TRUE(other.status_.ok())) {
      status_ = std::move(other.status_);
      ConstructValue(other.MoveValueUnsafe());
    } else {
      // If we moved the status, the other status may become ok but the other
      // value hasn't been constructed => crash on other destructor.
      status_ = other.status_;
    }
  }

  /// Move-assignment operator.
  ///
  /// Sets `other` to an invalid state..
  ///
  /// \param other The Result object to assign from and set to a non-OK
  /// status.
  Result& operator=(Result&& other) noexcept {
    // Check for self-assignment.
    if (ARROW_PREDICT_FALSE(this == &other)) {
      return *this;
    }
    Destroy();
    if (ARROW_PREDICT_TRUE(other.status_.ok())) {
      status_ = std::move(other.status_);
      ConstructValue(other.MoveValueUnsafe());
    } else {
      // If we moved the status, the other status may become ok but the other
      // value hasn't been constructed => crash on other destructor.
      status_ = other.status_;
    }
    return *this;
  }

  /// Compare to another Result.
  ///
  /// Two OK Results compare equal iff their values compare equal; otherwise
  /// equality is decided by comparing the two Status objects.
  bool Equals(const Result& other) const {
    if (ARROW_PREDICT_TRUE(status_.ok())) {
      return other.status_.ok() && ValueUnsafe() == other.ValueUnsafe();
    }
    return status_ == other.status_;
  }

  /// Indicates whether the object contains a `T` value. Generally instead
  /// of accessing this directly you will want to use ASSIGN_OR_RAISE defined
  /// below.
  ///
  /// \return True if this Result object's status is OK (i.e. a call to ok()
  /// returns true). If this function returns true, then it is safe to access
  /// the wrapped element through a call to ValueOrDie().
  constexpr bool ok() const { return status_.ok(); }

  /// \brief Equivalent to ok().
  // operator bool() const { return ok(); }

  /// Gets the stored status object, or an OK status if a `T` value is stored.
  ///
  /// \return The stored non-OK status object, or an OK status if this object
  /// has a value.
  constexpr const Status& status() const { return status_; }

  /// Gets the stored `T` value.
  ///
  /// This method should only be called if this Result object's status is OK
  /// (i.e. a call to ok() returns true), otherwise this call will abort.
  ///
  /// \return The stored `T` value.
  const T& ValueOrDie() const& {
    if (ARROW_PREDICT_FALSE(!ok())) {
      internal::InvalidValueOrDie(status_);
    }
    return ValueUnsafe();
  }
  const T& operator*() const& { return ValueOrDie(); }
  const T* operator->() const { return &ValueOrDie(); }

  /// Gets a mutable reference to the stored `T` value.
  ///
  /// This method should only be called if this Result object's status is OK
  /// (i.e. a call to ok() returns true), otherwise this call will abort.
  ///
  /// \return The stored `T` value.
  T& ValueOrDie() & {
    if (ARROW_PREDICT_FALSE(!ok())) {
      internal::InvalidValueOrDie(status_);
    }
    return ValueUnsafe();
  }
  T& operator*() & { return ValueOrDie(); }
  T* operator->() { return &ValueOrDie(); }

  /// Moves and returns the internally-stored `T` value.
  ///
  /// This method should only be called if this Result object's status is OK
  /// (i.e. a call to ok() returns true), otherwise this call will abort. The
  /// Result object is invalidated after this call and will be updated to
  /// contain a non-OK status.
  ///
  /// \return The stored `T` value.
  T ValueOrDie() && {
    if (ARROW_PREDICT_FALSE(!ok())) {
      internal::InvalidValueOrDie(status_);
    }
    return MoveValueUnsafe();
  }
  T operator*() && { return std::move(*this).ValueOrDie(); }

  /// Helper method for implementing Status returning functions in terms of semantically
  /// equivalent Result returning functions. For example:
  ///
  /// Status GetInt(int *out) { return GetInt().Value(out); }
  template <typename U, typename E = typename std::enable_if<
                            std::is_constructible<U, T>::value>::type>
  Status Value(U* out) && {
    if (!ok()) {
      return status();
    }
    *out = U(MoveValueUnsafe());
    return Status::OK();
  }

  /// Move and return the internally stored value or alternative if an error is stored.
  T ValueOr(T alternative) && {
    if (!ok()) {
      return alternative;
    }
    return MoveValueUnsafe();
  }

  /// Retrieve the value if ok(), falling back to an alternative generated by the provided
  /// factory
  template <typename G>
  T ValueOrElse(G&& generate_alternative) && {
    if (ok()) {
      return MoveValueUnsafe();
    }
    return std::forward<G>(generate_alternative)();
  }

  /// Apply a function to the internally stored value to produce a new result or propagate
  /// the stored error.
  template <typename M>
  typename EnsureResult<decltype(std::declval<M&&>()(std::declval<T&&>()))>::type Map(
      M&& m) && {
    if (!ok()) {
      return status();
    }
    return std::forward<M>(m)(MoveValueUnsafe());
  }

  /// Apply a function to the internally stored value to produce a new result or propagate
  /// the stored error.
  template <typename M>
  typename EnsureResult<decltype(std::declval<M&&>()(std::declval<const T&>()))>::type
  Map(M&& m) const& {
    if (!ok()) {
      return status();
    }
    return std::forward<M>(m)(ValueUnsafe());
  }

  /// Cast the internally stored value to produce a new result or propagate the stored
  /// error.
  template <typename U, typename E = typename std::enable_if<
                            std::is_constructible<U, T>::value>::type>
  Result<U> As() && {
    if (!ok()) {
      return status();
    }
    return U(MoveValueUnsafe());
  }

  /// Cast the internally stored value to produce a new result or propagate the stored
  /// error.
  template <typename U, typename E = typename std::enable_if<
                            std::is_constructible<U, const T&>::value>::type>
  Result<U> As() const& {
    if (!ok()) {
      return status();
    }
    return U(ValueUnsafe());
  }

  /// Access the stored value without checking ok() first. Undefined behavior
  /// if no value is stored (i.e. if ok() is false).
  constexpr const T& ValueUnsafe() const& { return *storage_.get(); }

  /// Mutable unchecked access to the stored value; same precondition as above.
  constexpr T& ValueUnsafe() & { return *storage_.get(); }

  /// Unchecked move-out of the stored value from an rvalue Result.
  T ValueUnsafe() && { return MoveValueUnsafe(); }

  /// Move the stored value out, leaving it in a moved-from state. Unchecked:
  /// callers must have verified ok() beforehand.
  T MoveValueUnsafe() { return std::move(*storage_.get()); }

 private:
  Status status_;  // pointer-sized
  // Raw aligned storage for the value; a T is placement-constructed here only
  // when status_ is OK, and destroyed in Destroy().
  internal::AlignedStorage<T> storage_;

  // Placement-construct the value from `u`; assumes no value is currently held.
  template <typename U>
  void ConstructValue(U&& u) noexcept {
    storage_.construct(std::forward<U>(u));
  }

  // Destroy the held value iff one was constructed (status_ is OK).
  void Destroy() noexcept {
    if (ARROW_PREDICT_TRUE(status_.ok())) {
      static_assert(offsetof(Result<T>, status_) == 0,
                    "Status is guaranteed to be at the start of Result<>");
      storage_.destroy();
    }
  }
};
446
+
447
// Implementation detail of ARROW_ASSIGN_OR_RAISE: evaluates `rexpr` exactly
// once into a uniquely-named temporary, returns the temporary's Status from
// the enclosing function on error, otherwise moves the value into `lhs`.
#define ARROW_ASSIGN_OR_RAISE_IMPL(result_name, lhs, rexpr)                              \
  auto&& result_name = (rexpr);                                                          \
  ARROW_RETURN_IF_(!(result_name).ok(), (result_name).status(), ARROW_STRINGIFY(rexpr)); \
  lhs = std::move(result_name).ValueUnsafe();

// Token-pasting helper used to build a unique temporary name per expansion
// (combined with __COUNTER__ below).
#define ARROW_ASSIGN_OR_RAISE_NAME(x, y) ARROW_CONCAT(x, y)

/// \brief Execute an expression that returns a Result, extracting its value
/// into the variable defined by `lhs` (or returning a Status on error).
///
/// Example: Assigning to a new value:
///   ARROW_ASSIGN_OR_RAISE(auto value, MaybeGetValue(arg));
///
/// Example: Assigning to an existing value:
///   ValueType value;
///   ARROW_ASSIGN_OR_RAISE(value, MaybeGetValue(arg));
///
/// WARNING: ARROW_ASSIGN_OR_RAISE expands into multiple statements;
/// it cannot be used in a single statement (e.g. as the body of an if
/// statement without {})!
///
/// WARNING: ARROW_ASSIGN_OR_RAISE `std::move`s its right operand. If you have
/// an lvalue Result which you *don't* want to move out of cast appropriately.
///
/// WARNING: ARROW_ASSIGN_OR_RAISE is not a single expression; it will not
/// maintain lifetimes of all temporaries in `rexpr` (e.g.
/// `ARROW_ASSIGN_OR_RAISE(auto x, MakeTemp().GetResultRef());`
/// will most likely segfault)!
#define ARROW_ASSIGN_OR_RAISE(lhs, rexpr)                                              \
  ARROW_ASSIGN_OR_RAISE_IMPL(ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), \
                             lhs, rexpr);
478
+
479
namespace internal {

// Extract the Status from a Result, for use by generic error-propagation
// macros that accept either a Status or a Result expression.
template <typename T>
inline const Status& GenericToStatus(const Result<T>& res) {
  return res.status();
}

// Rvalue overload: returns the Status by value since the Result temporary
// will not outlive the call.
template <typename T>
inline Status GenericToStatus(Result<T>&& res) {
  return std::move(res).status();
}

}  // namespace internal
492
+
493
/// Wrap a value in a Result. If `t` is already a Result, it is returned
/// unchanged (see the EnsureResult specialization below), so nesting like
/// Result<Result<T>> is never produced.
template <typename T, typename R = typename EnsureResult<T>::type>
R ToResult(T t) {
  return R(std::move(t));
}

/// Metafunction mapping an arbitrary type T to Result<T>.
template <typename T>
struct EnsureResult {
  using type = Result<T>;
};

/// Identity specialization: a Result<T> maps to itself.
template <typename T>
struct EnsureResult<Result<T>> {
  using type = Result<T>;
};
508
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/scalar.h ADDED
@@ -0,0 +1,816 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Object model for scalar (non-Array) values. Not intended for use with large
19
+ // amounts of data
20
+
21
+ #pragma once
22
+
23
+ #include <iosfwd>
24
+ #include <memory>
25
+ #include <ratio>
26
+ #include <string>
27
+ #include <string_view>
28
+ #include <utility>
29
+ #include <vector>
30
+
31
+ #include "arrow/compare.h"
32
+ #include "arrow/extension_type.h"
33
+ #include "arrow/result.h"
34
+ #include "arrow/status.h"
35
+ #include "arrow/type.h"
36
+ #include "arrow/type_fwd.h"
37
+ #include "arrow/type_traits.h"
38
+ #include "arrow/util/compare.h"
39
+ #include "arrow/util/decimal.h"
40
+ #include "arrow/util/visibility.h"
41
+ #include "arrow/visit_type_inline.h"
42
+
43
+ namespace arrow {
44
+
45
+ class Array;
46
+
47
/// \brief Base class for scalar values
///
/// A Scalar represents a single value with a specific DataType.
/// Scalars are useful for passing single value inputs to compute functions,
/// or for representing individual array elements (with a non-trivial
/// wrapping cost, though).
struct ARROW_EXPORT Scalar : public std::enable_shared_from_this<Scalar>,
                             public util::EqualityComparable<Scalar> {
  virtual ~Scalar() = default;

  /// \brief The type of the scalar value
  std::shared_ptr<DataType> type;

  /// \brief Whether the value is valid (not null) or not
  bool is_valid = false;

  /// \brief Exact equality comparison with another scalar
  bool Equals(const Scalar& other,
              const EqualOptions& options = EqualOptions::Defaults()) const;

  /// \brief Approximate equality comparison (tolerance governed by `options`)
  bool ApproxEquals(const Scalar& other,
                    const EqualOptions& options = EqualOptions::Defaults()) const;

  /// \brief Functor for use with hash-based containers keyed on Scalar values
  struct ARROW_EXPORT Hash {
    size_t operator()(const Scalar& scalar) const { return scalar.hash(); }

    size_t operator()(const std::shared_ptr<Scalar>& scalar) const {
      return scalar->hash();
    }
  };

  /// \brief Compute a hash of this scalar's value
  size_t hash() const;

  /// \brief Render a human-readable representation of this scalar
  std::string ToString() const;

  /// \brief Perform cheap validation checks
  ///
  /// This is O(k) where k is the number of descendents.
  ///
  /// \return Status
  Status Validate() const;

  /// \brief Perform extensive data validation checks
  ///
  /// This is potentially O(k*n) where k is the number of descendents and n
  /// is the length of descendents (if list scalars are involved).
  ///
  /// \return Status
  Status ValidateFull() const;

  /// \brief Parse a scalar of the given type from its string representation
  static Result<std::shared_ptr<Scalar>> Parse(const std::shared_ptr<DataType>& type,
                                               std::string_view repr);

  // TODO(bkietz) add compute::CastOptions
  /// \brief Cast this scalar to the given type
  Result<std::shared_ptr<Scalar>> CastTo(std::shared_ptr<DataType> to) const;

  /// \brief Apply the ScalarVisitor::Visit() method specialized to the scalar type
  Status Accept(ScalarVisitor* visitor) const;

  /// \brief EXPERIMENTAL Enable obtaining shared_ptr<Scalar> from a const
  /// Scalar& context.
  std::shared_ptr<Scalar> GetSharedPtr() const {
    return const_cast<Scalar*>(this)->shared_from_this();
  }

 protected:
  // Only constructible by subclasses: a Scalar is always created with a
  // concrete type and an explicit validity flag.
  Scalar(std::shared_ptr<DataType> type, bool is_valid)
      : type(std::move(type)), is_valid(is_valid) {}
};
115
+
116
+ ARROW_EXPORT void PrintTo(const Scalar& scalar, std::ostream* os);
117
+
118
+ /// \defgroup concrete-scalar-classes Concrete Scalar subclasses
119
+ ///
120
+ /// @{
121
+
122
/// \brief A scalar value for NullType. Never valid
struct ARROW_EXPORT NullScalar : public Scalar {
 public:
  using TypeClass = NullType;

  // Always constructed invalid (null) with the singleton null() type.
  NullScalar() : Scalar{null(), false} {}
};
129
+
130
+ /// @}
131
+
132
+ namespace internal {
133
+
134
struct ARROW_EXPORT ArraySpanFillFromScalarScratchSpace {
  // 16 bytes of scratch space to enable ArraySpan to be a view onto any
  // Scalar- including binary scalars where we need to create a buffer
  // that looks like two 32-bit or 64-bit offsets.
  // `mutable` so a const Scalar can still be viewed as an ArraySpan.
  alignas(int64_t) mutable uint8_t scratch_space_[sizeof(int64_t) * 2];
};
140
+
141
// Type-erased base for scalars whose value is accessible as a raw byte
// region (numeric, binary, fixed-size types).
struct ARROW_EXPORT PrimitiveScalarBase : public Scalar {
  // Constructs an invalid (null) scalar of the given type.
  explicit PrimitiveScalarBase(std::shared_ptr<DataType> type)
      : Scalar(std::move(type), false) {}

  using Scalar::Scalar;
  /// \brief Get a const pointer to the value of this scalar. May be null.
  virtual const void* data() const = 0;
  /// \brief Get a mutable pointer to the value of this scalar. May be null.
  virtual void* mutable_data() = 0;
  /// \brief Get an immutable view of the value of this scalar as bytes.
  virtual std::string_view view() const = 0;
};
153
+
154
// Generic scalar holding a single C value of type `CType` for Arrow type `T`.
template <typename T, typename CType = typename T::c_type>
struct ARROW_EXPORT PrimitiveScalar : public PrimitiveScalarBase {
  using PrimitiveScalarBase::PrimitiveScalarBase;
  using TypeClass = T;
  using ValueType = CType;

  // Non-null constructor.
  PrimitiveScalar(ValueType value, std::shared_ptr<DataType> type)
      : PrimitiveScalarBase(std::move(type), true), value(value) {}

  // Null (invalid) constructor: value is left value-initialized.
  explicit PrimitiveScalar(std::shared_ptr<DataType> type)
      : PrimitiveScalarBase(std::move(type), false) {}

  ValueType value{};

  const void* data() const override { return &value; }
  void* mutable_data() override { return &value; }
  // View the in-place value as raw bytes.
  std::string_view view() const override {
    return std::string_view(reinterpret_cast<const char*>(&value), sizeof(ValueType));
  };
};
175
+
176
+ } // namespace internal
177
+
178
+ /// \addtogroup concrete-scalar-classes Concrete Scalar subclasses
179
+ ///
180
+ /// @{
181
+
182
// Scalar for BooleanType; stores the value as a plain bool.
struct ARROW_EXPORT BooleanScalar : public internal::PrimitiveScalar<BooleanType, bool> {
  using Base = internal::PrimitiveScalar<BooleanType, bool>;
  using Base::Base;

  // Non-null constructor using the boolean() type singleton.
  explicit BooleanScalar(bool value) : Base(value, boolean()) {}

  // Null (invalid) constructor.
  BooleanScalar() : Base(boolean()) {}
};
190
+
191
// Common base for all numeric scalars; supplies convenience constructors
// that obtain the DataType from the type traits singleton.
template <typename T>
struct NumericScalar : public internal::PrimitiveScalar<T> {
  using Base = typename internal::PrimitiveScalar<T>;
  using Base::Base;
  using TypeClass = typename Base::TypeClass;
  using ValueType = typename Base::ValueType;

  // Non-null constructor.
  explicit NumericScalar(ValueType value)
      : Base(value, TypeTraits<T>::type_singleton()) {}

  // Null (invalid) constructor.
  NumericScalar() : Base(TypeTraits<T>::type_singleton()) {}
};
203
+
204
// Concrete numeric scalar subclasses. Each simply fixes the Arrow type and
// re-exports the NumericScalar constructors (value and null forms).

struct ARROW_EXPORT Int8Scalar : public NumericScalar<Int8Type> {
  using NumericScalar<Int8Type>::NumericScalar;
};

struct ARROW_EXPORT Int16Scalar : public NumericScalar<Int16Type> {
  using NumericScalar<Int16Type>::NumericScalar;
};

struct ARROW_EXPORT Int32Scalar : public NumericScalar<Int32Type> {
  using NumericScalar<Int32Type>::NumericScalar;
};

struct ARROW_EXPORT Int64Scalar : public NumericScalar<Int64Type> {
  using NumericScalar<Int64Type>::NumericScalar;
};

struct ARROW_EXPORT UInt8Scalar : public NumericScalar<UInt8Type> {
  using NumericScalar<UInt8Type>::NumericScalar;
};

struct ARROW_EXPORT UInt16Scalar : public NumericScalar<UInt16Type> {
  using NumericScalar<UInt16Type>::NumericScalar;
};

struct ARROW_EXPORT UInt32Scalar : public NumericScalar<UInt32Type> {
  using NumericScalar<UInt32Type>::NumericScalar;
};

struct ARROW_EXPORT UInt64Scalar : public NumericScalar<UInt64Type> {
  using NumericScalar<UInt64Type>::NumericScalar;
};

struct ARROW_EXPORT HalfFloatScalar : public NumericScalar<HalfFloatType> {
  using NumericScalar<HalfFloatType>::NumericScalar;
};

struct ARROW_EXPORT FloatScalar : public NumericScalar<FloatType> {
  using NumericScalar<FloatType>::NumericScalar;
};

struct ARROW_EXPORT DoubleScalar : public NumericScalar<DoubleType> {
  using NumericScalar<DoubleType>::NumericScalar;
};
247
+
248
// Common base for binary-like scalars; the value is held in a Buffer.
// Privately inherits the scratch space used when viewing the scalar as an
// ArraySpan (hence the `friend ArraySpan` below).
struct ARROW_EXPORT BaseBinaryScalar
    : public internal::PrimitiveScalarBase,
      private internal::ArraySpanFillFromScalarScratchSpace {
  using internal::PrimitiveScalarBase::PrimitiveScalarBase;
  using ValueType = std::shared_ptr<Buffer>;

  // Value buffer; may be null for an invalid (null) scalar.
  std::shared_ptr<Buffer> value;

  const void* data() const override {
    return value ? reinterpret_cast<const void*>(value->data()) : NULLPTR;
  }
  void* mutable_data() override {
    return value ? reinterpret_cast<void*>(value->mutable_data()) : NULLPTR;
  }
  std::string_view view() const override {
    return value ? std::string_view(*value) : std::string_view();
  }

  // Non-null constructor from an existing buffer.
  BaseBinaryScalar(std::shared_ptr<Buffer> value, std::shared_ptr<DataType> type)
      : internal::PrimitiveScalarBase{std::move(type), true}, value(std::move(value)) {}

  friend ArraySpan;
  // Non-null constructor copying a std::string into a buffer (defined
  // out of line).
  BaseBinaryScalar(std::string s, std::shared_ptr<DataType> type);
};
272
+
273
+ struct ARROW_EXPORT BinaryScalar : public BaseBinaryScalar {
274
+ using BaseBinaryScalar::BaseBinaryScalar;
275
+ using TypeClass = BinaryType;
276
+
277
+ explicit BinaryScalar(std::shared_ptr<Buffer> value)
278
+ : BinaryScalar(std::move(value), binary()) {}
279
+
280
+ explicit BinaryScalar(std::string s) : BaseBinaryScalar(std::move(s), binary()) {}
281
+
282
+ BinaryScalar() : BinaryScalar(binary()) {}
283
+ };
284
+
285
+ struct ARROW_EXPORT StringScalar : public BinaryScalar {
286
+ using BinaryScalar::BinaryScalar;
287
+ using TypeClass = StringType;
288
+
289
+ explicit StringScalar(std::shared_ptr<Buffer> value)
290
+ : StringScalar(std::move(value), utf8()) {}
291
+
292
+ explicit StringScalar(std::string s) : BinaryScalar(std::move(s), utf8()) {}
293
+
294
+ StringScalar() : StringScalar(utf8()) {}
295
+ };
296
+
297
/// \brief A Scalar value for BinaryViewType
struct ARROW_EXPORT BinaryViewScalar : public BaseBinaryScalar {
  using BaseBinaryScalar::BaseBinaryScalar;
  using TypeClass = BinaryViewType;

  /// \brief Construct a valid scalar holding the given buffer, with type binary_view()
  explicit BinaryViewScalar(std::shared_ptr<Buffer> value)
      : BinaryViewScalar(std::move(value), binary_view()) {}

  /// \brief Construct from a std::string with type binary_view()
  explicit BinaryViewScalar(std::string s)
      : BaseBinaryScalar(std::move(s), binary_view()) {}

  /// \brief Construct a scalar of type binary_view() with no value buffer
  BinaryViewScalar() : BinaryViewScalar(binary_view()) {}

  // NOTE(review): unlike the base class override, this dereferences `value`
  // without a null check — presumably callers only invoke view() on valid
  // scalars; confirm against scalar.cc before relying on it for null scalars.
  std::string_view view() const override { return std::string_view(*this->value); }
};

/// \brief A Scalar value for StringViewType
///
/// Inherits all behavior from BinaryViewScalar; only the TypeClass and the
/// default DataType (utf8_view()) differ.
struct ARROW_EXPORT StringViewScalar : public BinaryViewScalar {
  using BinaryViewScalar::BinaryViewScalar;
  using TypeClass = StringViewType;

  /// \brief Construct a valid scalar holding the given buffer, with type utf8_view()
  explicit StringViewScalar(std::shared_ptr<Buffer> value)
      : StringViewScalar(std::move(value), utf8_view()) {}

  /// \brief Construct from a std::string with type utf8_view()
  explicit StringViewScalar(std::string s)
      : BinaryViewScalar(std::move(s), utf8_view()) {}

  /// \brief Construct a scalar of type utf8_view() with no value buffer
  StringViewScalar() : StringViewScalar(utf8_view()) {}
};
324
+
325
/// \brief A Scalar value for LargeBinaryType (64-bit offsets)
struct ARROW_EXPORT LargeBinaryScalar : public BaseBinaryScalar {
  using BaseBinaryScalar::BaseBinaryScalar;
  using TypeClass = LargeBinaryType;

  /// \brief Construct a valid scalar with an explicit type
  LargeBinaryScalar(std::shared_ptr<Buffer> value, std::shared_ptr<DataType> type)
      : BaseBinaryScalar(std::move(value), std::move(type)) {}

  /// \brief Construct a valid scalar holding the given buffer, with type large_binary()
  explicit LargeBinaryScalar(std::shared_ptr<Buffer> value)
      : LargeBinaryScalar(std::move(value), large_binary()) {}

  /// \brief Construct from a std::string with type large_binary()
  explicit LargeBinaryScalar(std::string s)
      : BaseBinaryScalar(std::move(s), large_binary()) {}

  /// \brief Construct a scalar of type large_binary() with no value buffer
  LargeBinaryScalar() : LargeBinaryScalar(large_binary()) {}
};

/// \brief A Scalar value for LargeStringType
struct ARROW_EXPORT LargeStringScalar : public LargeBinaryScalar {
  using LargeBinaryScalar::LargeBinaryScalar;
  using TypeClass = LargeStringType;

  /// \brief Construct a valid scalar holding the given buffer, with type large_utf8()
  explicit LargeStringScalar(std::shared_ptr<Buffer> value)
      : LargeStringScalar(std::move(value), large_utf8()) {}

  /// \brief Construct from a std::string with type large_utf8()
  explicit LargeStringScalar(std::string s)
      : LargeBinaryScalar(std::move(s), large_utf8()) {}

  /// \brief Construct a scalar of type large_utf8() with no value buffer
  LargeStringScalar() : LargeStringScalar(large_utf8()) {}
};
353
+
354
/// \brief A Scalar value for FixedSizeBinaryType
///
/// Constructors are declared only; definitions live out-of-line (presumably
/// validating the buffer length against the type's byte width — confirm in
/// scalar.cc).
struct ARROW_EXPORT FixedSizeBinaryScalar : public BinaryScalar {
  using TypeClass = FixedSizeBinaryType;

  /// \brief Construct from a buffer and an explicit fixed_size_binary type
  FixedSizeBinaryScalar(std::shared_ptr<Buffer> value, std::shared_ptr<DataType> type,
                        bool is_valid = true);

  /// \brief Construct from a buffer; the type is derived from the buffer size
  explicit FixedSizeBinaryScalar(const std::shared_ptr<Buffer>& value,
                                 bool is_valid = true);

  /// \brief Construct from a std::string; the type is derived from the string size
  explicit FixedSizeBinaryScalar(std::string s, bool is_valid = true);
};
365
+
366
+ template <typename T>
367
+ struct TemporalScalar : internal::PrimitiveScalar<T> {
368
+ using internal::PrimitiveScalar<T>::PrimitiveScalar;
369
+ using ValueType = typename internal::PrimitiveScalar<T>::ValueType;
370
+
371
+ TemporalScalar(ValueType value, std::shared_ptr<DataType> type)
372
+ : internal::PrimitiveScalar<T>(std::move(value), type) {}
373
+ };
374
+
375
/// \brief Base scalar class for date types
///
/// \tparam T Date32Type or Date64Type
template <typename T>
struct DateScalar : public TemporalScalar<T> {
  using TemporalScalar<T>::TemporalScalar;
  using ValueType = typename TemporalScalar<T>::ValueType;

  /// \brief Construct a valid scalar using the type's singleton instance
  explicit DateScalar(ValueType value)
      : TemporalScalar<T>(std::move(value), TypeTraits<T>::type_singleton()) {}
  /// \brief Construct a scalar with no value (type-only base constructor)
  DateScalar() : TemporalScalar<T>(TypeTraits<T>::type_singleton()) {}
};

/// \brief A Scalar value for Date32Type (days since the UNIX epoch)
struct ARROW_EXPORT Date32Scalar : public DateScalar<Date32Type> {
  using DateScalar<Date32Type>::DateScalar;
};

/// \brief A Scalar value for Date64Type (milliseconds since the UNIX epoch)
struct ARROW_EXPORT Date64Scalar : public DateScalar<Date64Type> {
  using DateScalar<Date64Type>::DateScalar;
};
392
+
393
/// \brief Base scalar class for time-of-day types
///
/// \tparam T Time32Type or Time64Type
template <typename T>
struct ARROW_EXPORT TimeScalar : public TemporalScalar<T> {
  using TemporalScalar<T>::TemporalScalar;

  /// \brief Construct a valid scalar with the given time unit
  TimeScalar(typename TemporalScalar<T>::ValueType value, TimeUnit::type unit)
      : TimeScalar(std::move(value), std::make_shared<T>(unit)) {}
};

/// \brief A Scalar value for Time32Type
struct ARROW_EXPORT Time32Scalar : public TimeScalar<Time32Type> {
  using TimeScalar<Time32Type>::TimeScalar;
};

/// \brief A Scalar value for Time64Type
struct ARROW_EXPORT Time64Scalar : public TimeScalar<Time64Type> {
  using TimeScalar<Time64Type>::TimeScalar;
};

/// \brief A Scalar value for TimestampType
struct ARROW_EXPORT TimestampScalar : public TemporalScalar<TimestampType> {
  using TemporalScalar<TimestampType>::TemporalScalar;

  /// \brief Construct a valid scalar from a raw count, a time unit and an
  /// optional timezone string (empty means timezone-naive)
  TimestampScalar(typename TemporalScalar<TimestampType>::ValueType value,
                  TimeUnit::type unit, std::string tz = "")
      : TimestampScalar(std::move(value), timestamp(unit, std::move(tz))) {}

  /// \brief Parse an ISO-8601 string into a timestamp of the given unit
  static Result<TimestampScalar> FromISO8601(std::string_view iso8601,
                                             TimeUnit::type unit);
};
419
+
420
/// \brief Base scalar class for interval types
///
/// \tparam T MonthIntervalType, DayTimeIntervalType or MonthDayNanoIntervalType
template <typename T>
struct IntervalScalar : public TemporalScalar<T> {
  using TemporalScalar<T>::TemporalScalar;
  using ValueType = typename TemporalScalar<T>::ValueType;

  /// \brief Construct a valid scalar using the type's singleton instance
  explicit IntervalScalar(ValueType value)
      : TemporalScalar<T>(value, TypeTraits<T>::type_singleton()) {}
  /// \brief Construct a scalar with no value (type-only base constructor)
  IntervalScalar() : TemporalScalar<T>(TypeTraits<T>::type_singleton()) {}
};

/// \brief A Scalar value for MonthIntervalType
struct ARROW_EXPORT MonthIntervalScalar : public IntervalScalar<MonthIntervalType> {
  using IntervalScalar<MonthIntervalType>::IntervalScalar;
};

/// \brief A Scalar value for DayTimeIntervalType
struct ARROW_EXPORT DayTimeIntervalScalar : public IntervalScalar<DayTimeIntervalType> {
  using IntervalScalar<DayTimeIntervalType>::IntervalScalar;
};

/// \brief A Scalar value for MonthDayNanoIntervalType
struct ARROW_EXPORT MonthDayNanoIntervalScalar
    : public IntervalScalar<MonthDayNanoIntervalType> {
  using IntervalScalar<MonthDayNanoIntervalType>::IntervalScalar;
};
442
+
443
/// \brief A Scalar value for DurationType
struct ARROW_EXPORT DurationScalar : public TemporalScalar<DurationType> {
  using TemporalScalar<DurationType>::TemporalScalar;

  /// \brief Construct a valid scalar from a raw count and a time unit
  DurationScalar(typename TemporalScalar<DurationType>::ValueType value,
                 TimeUnit::type unit)
      : DurationScalar(std::move(value), duration(unit)) {}

  // Convenience constructors for a DurationScalar from std::chrono::nanoseconds
  template <template <typename, typename> class StdDuration, typename Rep>
  explicit DurationScalar(StdDuration<Rep, std::nano> d)
      : DurationScalar{DurationScalar(d.count(), duration(TimeUnit::NANO))} {}

  // Convenience constructors for a DurationScalar from std::chrono::microseconds
  template <template <typename, typename> class StdDuration, typename Rep>
  explicit DurationScalar(StdDuration<Rep, std::micro> d)
      : DurationScalar{DurationScalar(d.count(), duration(TimeUnit::MICRO))} {}

  // Convenience constructors for a DurationScalar from std::chrono::milliseconds
  template <template <typename, typename> class StdDuration, typename Rep>
  explicit DurationScalar(StdDuration<Rep, std::milli> d)
      : DurationScalar{DurationScalar(d.count(), duration(TimeUnit::MILLI))} {}

  // Convenience constructors for a DurationScalar from std::chrono::seconds
  // or from units which are whole numbers of seconds
  // (the tick count is scaled by Num so the stored value is always in seconds)
  template <template <typename, typename> class StdDuration, typename Rep, intmax_t Num>
  explicit DurationScalar(StdDuration<Rep, std::ratio<Num, 1>> d)
      : DurationScalar{DurationScalar(d.count() * Num, duration(TimeUnit::SECOND))} {}
};
471
+
472
/// \brief Base scalar class for decimal types
///
/// \tparam TYPE_CLASS the decimal DataType subclass
/// \tparam VALUE_TYPE the decimal value representation (e.g. Decimal128)
template <typename TYPE_CLASS, typename VALUE_TYPE>
struct ARROW_EXPORT DecimalScalar : public internal::PrimitiveScalarBase {
  using internal::PrimitiveScalarBase::PrimitiveScalarBase;
  using TypeClass = TYPE_CLASS;
  using ValueType = VALUE_TYPE;

  /// \brief Construct a valid decimal scalar
  DecimalScalar(ValueType value, std::shared_ptr<DataType> type)
      : internal::PrimitiveScalarBase(std::move(type), true), value(value) {}

  /// \brief Pointer to the value's native-endian byte representation
  const void* data() const override {
    return reinterpret_cast<const void*>(value.native_endian_bytes());
  }

  void* mutable_data() override {
    return reinterpret_cast<void*>(value.mutable_native_endian_bytes());
  }

  /// \brief View over the fixed-width native-endian bytes of the value
  std::string_view view() const override {
    return std::string_view(reinterpret_cast<const char*>(value.native_endian_bytes()),
                            ValueType::kByteWidth);
  }

  ValueType value;
};

/// \brief A Scalar value for Decimal128Type
struct ARROW_EXPORT Decimal128Scalar : public DecimalScalar<Decimal128Type, Decimal128> {
  using DecimalScalar::DecimalScalar;
};

/// \brief A Scalar value for Decimal256Type
struct ARROW_EXPORT Decimal256Scalar : public DecimalScalar<Decimal256Type, Decimal256> {
  using DecimalScalar::DecimalScalar;
};
504
+
505
/// \brief Base scalar class for list-like types
///
/// The scalar's value is a single Array holding the list's elements.
struct ARROW_EXPORT BaseListScalar
    : public Scalar,
      private internal::ArraySpanFillFromScalarScratchSpace {
  using Scalar::Scalar;
  using ValueType = std::shared_ptr<Array>;

  BaseListScalar(std::shared_ptr<Array> value, std::shared_ptr<DataType> type,
                 bool is_valid = true);

  /// The elements of this single list
  std::shared_ptr<Array> value;

 private:
  // ArraySpan uses the private scratch space to materialize a length-1 span
  friend struct ArraySpan;
};

/// \brief A Scalar value for ListType
struct ARROW_EXPORT ListScalar : public BaseListScalar {
  using TypeClass = ListType;
  using BaseListScalar::BaseListScalar;

  /// \brief Construct from an array of elements; the type is derived from it
  explicit ListScalar(std::shared_ptr<Array> value, bool is_valid = true);
};

/// \brief A Scalar value for LargeListType
struct ARROW_EXPORT LargeListScalar : public BaseListScalar {
  using TypeClass = LargeListType;
  using BaseListScalar::BaseListScalar;

  explicit LargeListScalar(std::shared_ptr<Array> value, bool is_valid = true);
};

/// \brief A Scalar value for ListViewType
struct ARROW_EXPORT ListViewScalar : public BaseListScalar {
  using TypeClass = ListViewType;
  using BaseListScalar::BaseListScalar;

  explicit ListViewScalar(std::shared_ptr<Array> value, bool is_valid = true);
};

/// \brief A Scalar value for LargeListViewType
struct ARROW_EXPORT LargeListViewScalar : public BaseListScalar {
  using TypeClass = LargeListViewType;
  using BaseListScalar::BaseListScalar;

  explicit LargeListViewScalar(std::shared_ptr<Array> value, bool is_valid = true);
};

/// \brief A Scalar value for MapType
struct ARROW_EXPORT MapScalar : public BaseListScalar {
  using TypeClass = MapType;
  using BaseListScalar::BaseListScalar;

  explicit MapScalar(std::shared_ptr<Array> value, bool is_valid = true);
};

/// \brief A Scalar value for FixedSizeListType
struct ARROW_EXPORT FixedSizeListScalar : public BaseListScalar {
  using TypeClass = FixedSizeListType;

  // NOTE(review): declarations only; presumably the out-of-line definitions
  // check the value length against the type's list_size — confirm in scalar.cc
  FixedSizeListScalar(std::shared_ptr<Array> value, std::shared_ptr<DataType> type,
                      bool is_valid = true);

  explicit FixedSizeListScalar(std::shared_ptr<Array> value, bool is_valid = true);
};
563
+
564
/// \brief A Scalar value for StructType
///
/// Holds one child scalar per field of the struct type.
struct ARROW_EXPORT StructScalar : public Scalar {
  using TypeClass = StructType;
  using ValueType = std::vector<std::shared_ptr<Scalar>>;

  /// The child scalars, one per struct field
  ScalarVector value;

  /// \brief Retrieve the child scalar addressed by a FieldRef
  Result<std::shared_ptr<Scalar>> field(FieldRef ref) const;

  StructScalar(ValueType value, std::shared_ptr<DataType> type, bool is_valid = true)
      : Scalar(std::move(type), is_valid), value(std::move(value)) {}

  /// \brief Construct a StructScalar from child values and field names,
  /// deriving the struct type from them
  static Result<std::shared_ptr<StructScalar>> Make(ValueType value,
                                                    std::vector<std::string> field_names);
};
578
+
579
/// \brief Base scalar class for union types
///
/// `type_code` identifies which union member is active.
struct ARROW_EXPORT UnionScalar : public Scalar,
                                  private internal::ArraySpanFillFromScalarScratchSpace {
  /// The type code of the active union member
  int8_t type_code;

  /// \brief Return the scalar of the active union member
  virtual const std::shared_ptr<Scalar>& child_value() const = 0;

 protected:
  UnionScalar(std::shared_ptr<DataType> type, int8_t type_code, bool is_valid)
      : Scalar(std::move(type), is_valid), type_code(type_code) {}

  // ArraySpan uses the private scratch space to materialize a length-1 span
  friend struct ArraySpan;
};

/// \brief A Scalar value for SparseUnionType
struct ARROW_EXPORT SparseUnionScalar : public UnionScalar {
  using TypeClass = SparseUnionType;

  // Even though only one of the union values is relevant for this scalar, we
  // nonetheless construct a vector of scalars, one per union value, to have
  // enough data to reconstruct a valid ArraySpan of length 1 from this scalar
  using ValueType = std::vector<std::shared_ptr<Scalar>>;
  ValueType value;

  // The value index corresponding to the active type code
  int child_id;

  SparseUnionScalar(ValueType value, int8_t type_code, std::shared_ptr<DataType> type);

  const std::shared_ptr<Scalar>& child_value() const override {
    return this->value[this->child_id];
  }

  /// \brief Construct a SparseUnionScalar from a single value, versus having
  /// to construct a vector of scalars
  static std::shared_ptr<Scalar> FromValue(std::shared_ptr<Scalar> value, int field_index,
                                           std::shared_ptr<DataType> type);
};

/// \brief A Scalar value for DenseUnionType
struct ARROW_EXPORT DenseUnionScalar : public UnionScalar {
  using TypeClass = DenseUnionType;

  // For DenseUnionScalar, we can make a valid ArraySpan of length 1 from this
  // scalar
  using ValueType = std::shared_ptr<Scalar>;
  ValueType value;

  const std::shared_ptr<Scalar>& child_value() const override { return this->value; }

  // The scalar's validity mirrors the child value's validity
  DenseUnionScalar(ValueType value, int8_t type_code, std::shared_ptr<DataType> type)
      : UnionScalar(std::move(type), type_code, value->is_valid),
        value(std::move(value)) {}
};
630
+
631
/// \brief A Scalar value for RunEndEncodedType
///
/// The value is the scalar of the encoded values (run ends are implicit for a
/// single logical element).
struct ARROW_EXPORT RunEndEncodedScalar
    : public Scalar,
      private internal::ArraySpanFillFromScalarScratchSpace {
  using TypeClass = RunEndEncodedType;
  using ValueType = std::shared_ptr<Scalar>;

  ValueType value;

  RunEndEncodedScalar(std::shared_ptr<Scalar> value, std::shared_ptr<DataType> type);

  /// \brief Constructs a NULL RunEndEncodedScalar
  explicit RunEndEncodedScalar(const std::shared_ptr<DataType>& type);

  ~RunEndEncodedScalar() override;

  /// \brief The type used for the run-end values (e.g. int32)
  const std::shared_ptr<DataType>& run_end_type() const {
    return ree_type().run_end_type();
  }

  /// \brief The type of the encoded values
  const std::shared_ptr<DataType>& value_type() const { return ree_type().value_type(); }

 private:
  // Downcast helper: `type` is known to be a RunEndEncodedType
  const TypeClass& ree_type() const { return internal::checked_cast<TypeClass&>(*type); }

  friend ArraySpan;
};
657
+
658
/// \brief A Scalar value for DictionaryType
///
/// `is_valid` denotes the validity of the `index`, regardless of
/// the corresponding value in the `dictionary`.
struct ARROW_EXPORT DictionaryScalar : public internal::PrimitiveScalarBase {
  using TypeClass = DictionaryType;
  /// An (index scalar, dictionary array) pair
  struct ValueType {
    std::shared_ptr<Scalar> index;
    std::shared_ptr<Array> dictionary;
  } value;

  explicit DictionaryScalar(std::shared_ptr<DataType> type);

  DictionaryScalar(ValueType value, std::shared_ptr<DataType> type, bool is_valid = true)
      : internal::PrimitiveScalarBase(std::move(type), is_valid),
        value(std::move(value)) {}

  static std::shared_ptr<DictionaryScalar> Make(std::shared_ptr<Scalar> index,
                                                std::shared_ptr<Array> dict);

  /// \brief Resolve the index against the dictionary, yielding the
  /// dictionary-decoded value
  Result<std::shared_ptr<Scalar>> GetEncodedValue() const;

  // data()/mutable_data()/view() all delegate to the underlying index scalar
  const void* data() const override {
    return internal::checked_cast<internal::PrimitiveScalarBase&>(*value.index).data();
  }
  void* mutable_data() override {
    return internal::checked_cast<internal::PrimitiveScalarBase&>(*value.index)
        .mutable_data();
  }
  std::string_view view() const override {
    return internal::checked_cast<const internal::PrimitiveScalarBase&>(*value.index)
        .view();
  }
};
692
+
693
/// \brief A Scalar value for ExtensionType
///
/// The value is the underlying storage scalar.
/// `is_valid` must only be true if `value` is non-null and `value->is_valid` is true
struct ARROW_EXPORT ExtensionScalar : public Scalar {
  using TypeClass = ExtensionType;
  using ValueType = std::shared_ptr<Scalar>;

  ExtensionScalar(std::shared_ptr<Scalar> storage, std::shared_ptr<DataType> type,
                  bool is_valid = true)
      : Scalar(std::move(type), is_valid), value(std::move(storage)) {}

  // Convenience overload: wraps a concrete Scalar subclass (passed by value)
  // into a shared_ptr before delegating to the main constructor
  template <typename Storage,
            typename = enable_if_t<std::is_base_of<Scalar, Storage>::value>>
  ExtensionScalar(Storage&& storage, std::shared_ptr<DataType> type, bool is_valid = true)
      : ExtensionScalar(std::make_shared<Storage>(std::move(storage)), std::move(type),
                        is_valid) {}

  /// The underlying storage scalar
  std::shared_ptr<Scalar> value;
};
713
+
714
+ /// @}
715
+
716
namespace internal {

// Fallback overload: no buffer length constraint applies for other types.
inline Status CheckBufferLength(...) { return Status::OK(); }

// Declaration only; presumably validates that the buffer's size matches the
// fixed-size binary type's byte width (see the definition in scalar.cc).
ARROW_EXPORT Status CheckBufferLength(const FixedSizeBinaryType* t,
                                      const std::shared_ptr<Buffer>* b);

}  // namespace internal
724
+
725
// Forward declaration; defined below after the factory functions that use it.
template <typename ValueRef>
struct MakeScalarImpl;

/// \defgroup scalar-factories Scalar factory functions
///
/// @{

/// \brief Scalar factory for null scalars
ARROW_EXPORT
std::shared_ptr<Scalar> MakeNullScalar(std::shared_ptr<DataType> type);

/// \brief Scalar factory for non-null scalars
template <typename Value>
Result<std::shared_ptr<Scalar>> MakeScalar(std::shared_ptr<DataType> type,
                                           Value&& value) {
  return MakeScalarImpl<Value&&>{type, std::forward<Value>(value), NULLPTR}.Finish();
}

/// \brief Type-inferring scalar factory for non-null scalars
///
/// Construct a Scalar instance with a DataType determined by the input C++ type.
/// (for example Int8Scalar for a int8_t input).
/// Only non-parametric primitive types and String are supported.
template <typename Value, typename Traits = CTypeTraits<typename std::decay<Value>::type>,
          typename ScalarType = typename Traits::ScalarType,
          typename Enable = decltype(ScalarType(std::declval<Value>(),
                                                Traits::type_singleton()))>
std::shared_ptr<Scalar> MakeScalar(Value value) {
  return std::make_shared<ScalarType>(std::move(value), Traits::type_singleton());
}

/// \brief Overload for std::string inputs: produces a StringScalar
inline std::shared_ptr<Scalar> MakeScalar(std::string value) {
  return std::make_shared<StringScalar>(std::move(value));
}

/// \brief Identity overload: an existing Scalar is passed through unchanged
inline std::shared_ptr<Scalar> MakeScalar(const std::shared_ptr<Scalar>& scalar) {
  return scalar;
}
/// @}
764
+
765
/// \brief Implementation detail behind MakeScalar(type, value)
///
/// Finish() dispatches on the runtime DataType via VisitTypeInline; the Visit
/// overload selected by SFINAE constructs the matching Scalar subclass.
template <typename ValueRef>
struct MakeScalarImpl {
  // Primary case: the type's ScalarType is constructible from
  // (ValueType, shared_ptr<DataType>) and ValueRef converts to ValueType.
  template <typename T, typename ScalarType = typename TypeTraits<T>::ScalarType,
            typename ValueType = typename ScalarType::ValueType,
            typename Enable = typename std::enable_if<
                std::is_constructible<ScalarType, ValueType,
                                      std::shared_ptr<DataType>>::value &&
                std::is_convertible<ValueRef, ValueType>::value>::type>
  Status Visit(const T& t) {
    // Only constrains fixed-size binary; a no-op for every other type
    ARROW_RETURN_NOT_OK(internal::CheckBufferLength(&t, &value_));
    // `static_cast<ValueRef>` makes a rvalue if ValueRef is `ValueType&&`
    out_ = std::make_shared<ScalarType>(
        static_cast<ValueType>(static_cast<ValueRef>(value_)), std::move(type_));
    return Status::OK();
  }

  // Extension types: build the storage scalar, then wrap it.
  Status Visit(const ExtensionType& t) {
    ARROW_ASSIGN_OR_RAISE(auto storage,
                          MakeScalar(t.storage_type(), static_cast<ValueRef>(value_)));
    out_ = std::make_shared<ExtensionScalar>(std::move(storage), type_);
    return Status::OK();
  }

  // Enable constructing string/binary scalars (but not decimal, etc) from std::string
  template <typename T>
  enable_if_t<
      std::is_same<typename std::remove_reference<ValueRef>::type, std::string>::value &&
          (is_base_binary_type<T>::value || std::is_same<T, FixedSizeBinaryType>::value),
      Status>
  Visit(const T& t) {
    using ScalarType = typename TypeTraits<T>::ScalarType;
    out_ = std::make_shared<ScalarType>(Buffer::FromString(std::move(value_)),
                                        std::move(type_));
    return Status::OK();
  }

  // Fallback for types with no usable construction path.
  Status Visit(const DataType& t) {
    return Status::NotImplemented("constructing scalars of type ", t,
                                  " from unboxed values");
  }

  // rvalue-qualified: consumes type_/value_ exactly once.
  Result<std::shared_ptr<Scalar>> Finish() && {
    ARROW_RETURN_NOT_OK(VisitTypeInline(*type_, this));
    return std::move(out_);
  }

  std::shared_ptr<DataType> type_;
  ValueRef value_;
  std::shared_ptr<Scalar> out_;
};
815
+
816
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/sparse_tensor.h ADDED
@@ -0,0 +1,617 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstddef>
21
+ #include <cstdint>
22
+ #include <memory>
23
+ #include <string>
24
+ #include <utility>
25
+ #include <vector>
26
+
27
+ #include "arrow/buffer.h"
28
+ #include "arrow/compare.h"
29
+ #include "arrow/result.h"
30
+ #include "arrow/status.h"
31
+ #include "arrow/tensor.h" // IWYU pragma: export
32
+ #include "arrow/type.h"
33
+ #include "arrow/util/checked_cast.h"
34
+ #include "arrow/util/macros.h"
35
+ #include "arrow/util/visibility.h"
36
+
37
+ namespace arrow {
38
+
39
+ class MemoryPool;
40
+
41
namespace internal {

// Declaration only; presumably verifies that every extent in `shape` is
// representable in `index_value_type` (see the definition in sparse_tensor.cc).
ARROW_EXPORT
Status CheckSparseIndexMaximumValue(const std::shared_ptr<DataType>& index_value_type,
                                    const std::vector<int64_t>& shape);

}  // namespace internal
48
+
49
+ // ----------------------------------------------------------------------
50
+ // SparseIndex class
51
+
52
/// \brief Namespace-like holder for the sparse tensor format enumeration
struct SparseTensorFormat {
  /// EXPERIMENTAL: The index format type of SparseTensor
  enum type {
    /// Coordinate list (COO) format.
    COO,
    /// Compressed sparse row (CSR) format.
    CSR,
    /// Compressed sparse column (CSC) format.
    CSC,
    /// Compressed sparse fiber (CSF) format.
    CSF
  };
};
65
+
66
/// \brief EXPERIMENTAL: The base class for the index of a sparse tensor
///
/// SparseIndex describes where the non-zero elements are within a SparseTensor.
///
/// There are several ways to represent this. The format_id is used to
/// distinguish what kind of representation is used. Each possible value of
/// format_id must have only one corresponding concrete subclass of SparseIndex.
class ARROW_EXPORT SparseIndex {
 public:
  explicit SparseIndex(SparseTensorFormat::type format_id) : format_id_(format_id) {}

  virtual ~SparseIndex() = default;

  /// \brief Return the identifier of the format type
  SparseTensorFormat::type format_id() const { return format_id_; }

  /// \brief Return the number of non zero values in the sparse tensor related
  /// to this sparse index
  virtual int64_t non_zero_length() const = 0;

  /// \brief Return the string representation of the sparse index
  virtual std::string ToString() const = 0;

  /// \brief Validate that this index is consistent with the given tensor shape
  virtual Status ValidateShape(const std::vector<int64_t>& shape) const;

 protected:
  // Immutable after construction
  const SparseTensorFormat::type format_id_;
};
94
+
95
namespace internal {
// CRTP helper: forwards the subclass's static `format_id` constant to the
// SparseIndex base constructor.
template <typename SparseIndexType>
class SparseIndexBase : public SparseIndex {
 public:
  SparseIndexBase() : SparseIndex(SparseIndexType::format_id) {}
};
}  // namespace internal
102
+
103
+ // ----------------------------------------------------------------------
104
+ // SparseCOOIndex class
105
+
106
/// \brief EXPERIMENTAL: The index data for a COO sparse tensor
///
/// A COO sparse index manages the location of its non-zero values by their
/// coordinates.
class ARROW_EXPORT SparseCOOIndex : public internal::SparseIndexBase<SparseCOOIndex> {
 public:
  static constexpr SparseTensorFormat::type format_id = SparseTensorFormat::COO;

  /// \brief Make SparseCOOIndex from a coords tensor and canonicality
  static Result<std::shared_ptr<SparseCOOIndex>> Make(
      const std::shared_ptr<Tensor>& coords, bool is_canonical);

  /// \brief Make SparseCOOIndex from a coords tensor with canonicality auto-detection
  static Result<std::shared_ptr<SparseCOOIndex>> Make(
      const std::shared_ptr<Tensor>& coords);

  /// \brief Make SparseCOOIndex from raw properties with canonicality auto-detection
  static Result<std::shared_ptr<SparseCOOIndex>> Make(
      const std::shared_ptr<DataType>& indices_type,
      const std::vector<int64_t>& indices_shape,
      const std::vector<int64_t>& indices_strides, std::shared_ptr<Buffer> indices_data);

  /// \brief Make SparseCOOIndex from raw properties
  static Result<std::shared_ptr<SparseCOOIndex>> Make(
      const std::shared_ptr<DataType>& indices_type,
      const std::vector<int64_t>& indices_shape,
      const std::vector<int64_t>& indices_strides, std::shared_ptr<Buffer> indices_data,
      bool is_canonical);

  /// \brief Make SparseCOOIndex from sparse tensor's shape properties and data
  /// with canonicality auto-detection
  ///
  /// The indices_data should be in row-major (C-like) order. If not,
  /// use the raw properties constructor.
  static Result<std::shared_ptr<SparseCOOIndex>> Make(
      const std::shared_ptr<DataType>& indices_type, const std::vector<int64_t>& shape,
      int64_t non_zero_length, std::shared_ptr<Buffer> indices_data);

  /// \brief Make SparseCOOIndex from sparse tensor's shape properties and data
  ///
  /// The indices_data should be in row-major (C-like) order. If not,
  /// use the raw properties constructor.
  static Result<std::shared_ptr<SparseCOOIndex>> Make(
      const std::shared_ptr<DataType>& indices_type, const std::vector<int64_t>& shape,
      int64_t non_zero_length, std::shared_ptr<Buffer> indices_data, bool is_canonical);

  /// \brief Construct SparseCOOIndex from column-major NumericTensor
  explicit SparseCOOIndex(const std::shared_ptr<Tensor>& coords, bool is_canonical);

  /// \brief Return a tensor that has the coordinates of the non-zero values
  ///
  /// The returned tensor is a N x D tensor where N is the number of non-zero
  /// values and D is the number of dimensions in the logical data.
  /// The column at index `i` is a D-tuple of coordinates indicating that the
  /// logical value at those coordinates should be found at physical index `i`.
  const std::shared_ptr<Tensor>& indices() const { return coords_; }

  /// \brief Return the number of non zero values in the sparse tensor related
  /// to this sparse index
  int64_t non_zero_length() const override { return coords_->shape()[0]; }

  /// \brief Return whether a sparse tensor index is canonical, or not.
  /// If a sparse tensor index is canonical, it is sorted in the lexicographical order,
  /// and the corresponding sparse tensor doesn't have duplicated entries.
  bool is_canonical() const { return is_canonical_; }

  /// \brief Return a string representation of the sparse index
  std::string ToString() const override;

  /// \brief Return whether the COO indices are equal
  bool Equals(const SparseCOOIndex& other) const {
    return indices()->Equals(*other.indices());
  }

  /// \brief Check that the shape's dimensionality matches the second extent
  /// (D) of the N x D coords tensor
  inline Status ValidateShape(const std::vector<int64_t>& shape) const override {
    ARROW_RETURN_NOT_OK(SparseIndex::ValidateShape(shape));

    if (static_cast<size_t>(coords_->shape()[1]) == shape.size()) {
      return Status::OK();
    }

    return Status::Invalid(
        "shape length is inconsistent with the coords matrix in COO index");
  }

 protected:
  std::shared_ptr<Tensor> coords_;
  bool is_canonical_;
};
195
+
196
+ namespace internal {
197
+
198
/// EXPERIMENTAL: The axis to be compressed
enum class SparseMatrixCompressedAxis : char {
  /// The value for CSR matrix
  ROW,
  /// The value for CSC matrix
  COLUMN
};

// Returns an error Status when the indptr/indices types or shapes are
// inconsistent for a CSR/CSC index; `type_name` is used in the error message.
ARROW_EXPORT
Status ValidateSparseCSXIndex(const std::shared_ptr<DataType>& indptr_type,
                              const std::shared_ptr<DataType>& indices_type,
                              const std::vector<int64_t>& indptr_shape,
                              const std::vector<int64_t>& indices_shape,
                              char const* type_name);

// Non-Status variant used from constructors (see SparseCSXIndex's ctor);
// declaration only — presumably aborts/throws on invalid input.
ARROW_EXPORT
void CheckSparseCSXIndexValidity(const std::shared_ptr<DataType>& indptr_type,
                                 const std::shared_ptr<DataType>& indices_type,
                                 const std::vector<int64_t>& indptr_shape,
                                 const std::vector<int64_t>& indices_shape,
                                 char const* type_name);
219
+
220
+ template <typename SparseIndexType, SparseMatrixCompressedAxis COMPRESSED_AXIS>
221
+ class SparseCSXIndex : public SparseIndexBase<SparseIndexType> {
222
+ public:
223
+ static constexpr SparseMatrixCompressedAxis kCompressedAxis = COMPRESSED_AXIS;
224
+
225
+ /// \brief Make a subclass of SparseCSXIndex from raw properties
226
+ static Result<std::shared_ptr<SparseIndexType>> Make(
227
+ const std::shared_ptr<DataType>& indptr_type,
228
+ const std::shared_ptr<DataType>& indices_type,
229
+ const std::vector<int64_t>& indptr_shape, const std::vector<int64_t>& indices_shape,
230
+ std::shared_ptr<Buffer> indptr_data, std::shared_ptr<Buffer> indices_data) {
231
+ ARROW_RETURN_NOT_OK(ValidateSparseCSXIndex(indptr_type, indices_type, indptr_shape,
232
+ indices_shape,
233
+ SparseIndexType::kTypeName));
234
+ return std::make_shared<SparseIndexType>(
235
+ std::make_shared<Tensor>(indptr_type, indptr_data, indptr_shape),
236
+ std::make_shared<Tensor>(indices_type, indices_data, indices_shape));
237
+ }
238
+
239
+ /// \brief Make a subclass of SparseCSXIndex from raw properties
240
+ static Result<std::shared_ptr<SparseIndexType>> Make(
241
+ const std::shared_ptr<DataType>& indices_type,
242
+ const std::vector<int64_t>& indptr_shape, const std::vector<int64_t>& indices_shape,
243
+ std::shared_ptr<Buffer> indptr_data, std::shared_ptr<Buffer> indices_data) {
244
+ return Make(indices_type, indices_type, indptr_shape, indices_shape, indptr_data,
245
+ indices_data);
246
+ }
247
+
248
+ /// \brief Make a subclass of SparseCSXIndex from sparse tensor's shape properties and
249
+ /// data
250
+ static Result<std::shared_ptr<SparseIndexType>> Make(
251
+ const std::shared_ptr<DataType>& indptr_type,
252
+ const std::shared_ptr<DataType>& indices_type, const std::vector<int64_t>& shape,
253
+ int64_t non_zero_length, std::shared_ptr<Buffer> indptr_data,
254
+ std::shared_ptr<Buffer> indices_data) {
255
+ std::vector<int64_t> indptr_shape({shape[0] + 1});
256
+ std::vector<int64_t> indices_shape({non_zero_length});
257
+ return Make(indptr_type, indices_type, indptr_shape, indices_shape, indptr_data,
258
+ indices_data);
259
+ }
260
+
261
+ /// \brief Make a subclass of SparseCSXIndex from sparse tensor's shape properties and
262
+ /// data
263
+ static Result<std::shared_ptr<SparseIndexType>> Make(
264
+ const std::shared_ptr<DataType>& indices_type, const std::vector<int64_t>& shape,
265
+ int64_t non_zero_length, std::shared_ptr<Buffer> indptr_data,
266
+ std::shared_ptr<Buffer> indices_data) {
267
+ return Make(indices_type, indices_type, shape, non_zero_length, indptr_data,
268
+ indices_data);
269
+ }
270
+
271
+ /// \brief Construct SparseCSXIndex from two index vectors
272
+ explicit SparseCSXIndex(const std::shared_ptr<Tensor>& indptr,
273
+ const std::shared_ptr<Tensor>& indices)
274
+ : SparseIndexBase<SparseIndexType>(), indptr_(indptr), indices_(indices) {
275
+ CheckSparseCSXIndexValidity(indptr_->type(), indices_->type(), indptr_->shape(),
276
+ indices_->shape(), SparseIndexType::kTypeName);
277
+ }
278
+
279
+ /// \brief Return a 1D tensor of indptr vector
280
+ const std::shared_ptr<Tensor>& indptr() const { return indptr_; }
281
+
282
+ /// \brief Return a 1D tensor of indices vector
283
+ const std::shared_ptr<Tensor>& indices() const { return indices_; }
284
+
285
+ /// \brief Return the number of non zero values in the sparse tensor related
286
+ /// to this sparse index
287
+ int64_t non_zero_length() const override { return indices_->shape()[0]; }
288
+
289
+ /// \brief Return a string representation of the sparse index
290
+ std::string ToString() const override {
291
+ return std::string(SparseIndexType::kTypeName);
292
+ }
293
+
294
+ /// \brief Return whether the CSR indices are equal
295
+ bool Equals(const SparseIndexType& other) const {
296
+ return indptr()->Equals(*other.indptr()) && indices()->Equals(*other.indices());
297
+ }
298
+
299
+ inline Status ValidateShape(const std::vector<int64_t>& shape) const override {
300
+ ARROW_RETURN_NOT_OK(SparseIndex::ValidateShape(shape));
301
+
302
+ if (shape.size() < 2) {
303
+ return Status::Invalid("shape length is too short");
304
+ }
305
+
306
+ if (shape.size() > 2) {
307
+ return Status::Invalid("shape length is too long");
308
+ }
309
+
310
+ if (indptr_->shape()[0] == shape[static_cast<int64_t>(kCompressedAxis)] + 1) {
311
+ return Status::OK();
312
+ }
313
+
314
+ return Status::Invalid("shape length is inconsistent with the ", ToString());
315
+ }
316
+
317
+ protected:
318
+ std::shared_ptr<Tensor> indptr_;
319
+ std::shared_ptr<Tensor> indices_;
320
+ };
321
+
322
+ } // namespace internal
323
+
324
+ // ----------------------------------------------------------------------
325
+ // SparseCSRIndex class
326
+
327
+ /// \brief EXPERIMENTAL: The index data for a CSR sparse matrix
328
+ ///
329
+ /// A CSR sparse index manages the location of its non-zero values by two
330
+ /// vectors.
331
+ ///
332
+ /// The first vector, called indptr, represents the range of the rows; the i-th
333
+ /// row spans from indptr[i] to indptr[i+1] in the corresponding value vector.
334
+ /// So the length of an indptr vector is the number of rows + 1.
335
+ ///
336
+ /// The other vector, called indices, represents the column indices of the
337
+ /// corresponding non-zero values. So the length of an indices vector is same
338
+ /// as the number of non-zero-values.
339
+ class ARROW_EXPORT SparseCSRIndex
340
+ : public internal::SparseCSXIndex<SparseCSRIndex,
341
+ internal::SparseMatrixCompressedAxis::ROW> {
342
+ public:
343
+ using BaseClass =
344
+ internal::SparseCSXIndex<SparseCSRIndex, internal::SparseMatrixCompressedAxis::ROW>;
345
+
346
+ static constexpr SparseTensorFormat::type format_id = SparseTensorFormat::CSR;
347
+ static constexpr char const* kTypeName = "SparseCSRIndex";
348
+
349
+ using SparseCSXIndex::kCompressedAxis;
350
+ using SparseCSXIndex::Make;
351
+ using SparseCSXIndex::SparseCSXIndex;
352
+ };
353
+
354
+ // ----------------------------------------------------------------------
355
+ // SparseCSCIndex class
356
+
357
+ /// \brief EXPERIMENTAL: The index data for a CSC sparse matrix
358
+ ///
359
+ /// A CSC sparse index manages the location of its non-zero values by two
360
+ /// vectors.
361
+ ///
362
+ /// The first vector, called indptr, represents the range of the column; the i-th
363
+ /// column spans from indptr[i] to indptr[i+1] in the corresponding value vector.
364
+ /// So the length of an indptr vector is the number of columns + 1.
365
+ ///
366
+ /// The other vector, called indices, represents the row indices of the
367
+ /// corresponding non-zero values. So the length of an indices vector is same
368
+ /// as the number of non-zero-values.
369
+ class ARROW_EXPORT SparseCSCIndex
370
+ : public internal::SparseCSXIndex<SparseCSCIndex,
371
+ internal::SparseMatrixCompressedAxis::COLUMN> {
372
+ public:
373
+ using BaseClass =
374
+ internal::SparseCSXIndex<SparseCSCIndex,
375
+ internal::SparseMatrixCompressedAxis::COLUMN>;
376
+
377
+ static constexpr SparseTensorFormat::type format_id = SparseTensorFormat::CSC;
378
+ static constexpr char const* kTypeName = "SparseCSCIndex";
379
+
380
+ using SparseCSXIndex::kCompressedAxis;
381
+ using SparseCSXIndex::Make;
382
+ using SparseCSXIndex::SparseCSXIndex;
383
+ };
384
+
385
+ // ----------------------------------------------------------------------
386
+ // SparseCSFIndex class
387
+
388
+ /// \brief EXPERIMENTAL: The index data for a CSF sparse tensor
389
+ ///
390
+ /// A CSF sparse index manages the location of its non-zero values by set of
391
+ /// prefix trees. Each path from a root to leaf forms one tensor non-zero index.
392
+ /// CSF is implemented with three vectors.
393
+ ///
394
+ /// Vectors inptr and indices contain N-1 and N buffers respectively, where N is the
395
+ /// number of dimensions. Axis_order is a vector of integers of length N. Indptr and
396
+ /// indices describe the set of prefix trees. Trees traverse dimensions in order given by
397
+ /// axis_order.
398
+ class ARROW_EXPORT SparseCSFIndex : public internal::SparseIndexBase<SparseCSFIndex> {
399
+ public:
400
+ static constexpr SparseTensorFormat::type format_id = SparseTensorFormat::CSF;
401
+ static constexpr char const* kTypeName = "SparseCSFIndex";
402
+
403
+ /// \brief Make SparseCSFIndex from raw properties
404
+ static Result<std::shared_ptr<SparseCSFIndex>> Make(
405
+ const std::shared_ptr<DataType>& indptr_type,
406
+ const std::shared_ptr<DataType>& indices_type,
407
+ const std::vector<int64_t>& indices_shapes, const std::vector<int64_t>& axis_order,
408
+ const std::vector<std::shared_ptr<Buffer>>& indptr_data,
409
+ const std::vector<std::shared_ptr<Buffer>>& indices_data);
410
+
411
+ /// \brief Make SparseCSFIndex from raw properties
412
+ static Result<std::shared_ptr<SparseCSFIndex>> Make(
413
+ const std::shared_ptr<DataType>& indices_type,
414
+ const std::vector<int64_t>& indices_shapes, const std::vector<int64_t>& axis_order,
415
+ const std::vector<std::shared_ptr<Buffer>>& indptr_data,
416
+ const std::vector<std::shared_ptr<Buffer>>& indices_data) {
417
+ return Make(indices_type, indices_type, indices_shapes, axis_order, indptr_data,
418
+ indices_data);
419
+ }
420
+
421
+ /// \brief Construct SparseCSFIndex from two index vectors
422
+ explicit SparseCSFIndex(const std::vector<std::shared_ptr<Tensor>>& indptr,
423
+ const std::vector<std::shared_ptr<Tensor>>& indices,
424
+ const std::vector<int64_t>& axis_order);
425
+
426
+ /// \brief Return a 1D vector of indptr tensors
427
+ const std::vector<std::shared_ptr<Tensor>>& indptr() const { return indptr_; }
428
+
429
+ /// \brief Return a 1D vector of indices tensors
430
+ const std::vector<std::shared_ptr<Tensor>>& indices() const { return indices_; }
431
+
432
+ /// \brief Return a 1D vector specifying the order of axes
433
+ const std::vector<int64_t>& axis_order() const { return axis_order_; }
434
+
435
+ /// \brief Return the number of non zero values in the sparse tensor related
436
+ /// to this sparse index
437
+ int64_t non_zero_length() const override { return indices_.back()->shape()[0]; }
438
+
439
+ /// \brief Return a string representation of the sparse index
440
+ std::string ToString() const override;
441
+
442
+ /// \brief Return whether the CSF indices are equal
443
+ bool Equals(const SparseCSFIndex& other) const;
444
+
445
+ protected:
446
+ std::vector<std::shared_ptr<Tensor>> indptr_;
447
+ std::vector<std::shared_ptr<Tensor>> indices_;
448
+ std::vector<int64_t> axis_order_;
449
+ };
450
+
451
+ // ----------------------------------------------------------------------
452
+ // SparseTensor class
453
+
454
+ /// \brief EXPERIMENTAL: The base class of sparse tensor container
455
+ class ARROW_EXPORT SparseTensor {
456
+ public:
457
+ virtual ~SparseTensor() = default;
458
+
459
+ SparseTensorFormat::type format_id() const { return sparse_index_->format_id(); }
460
+
461
+ /// \brief Return a value type of the sparse tensor
462
+ std::shared_ptr<DataType> type() const { return type_; }
463
+
464
+ /// \brief Return a buffer that contains the value vector of the sparse tensor
465
+ std::shared_ptr<Buffer> data() const { return data_; }
466
+
467
+ /// \brief Return an immutable raw data pointer
468
+ const uint8_t* raw_data() const { return data_->data(); }
469
+
470
+ /// \brief Return a mutable raw data pointer
471
+ uint8_t* raw_mutable_data() const { return data_->mutable_data(); }
472
+
473
+ /// \brief Return a shape vector of the sparse tensor
474
+ const std::vector<int64_t>& shape() const { return shape_; }
475
+
476
+ /// \brief Return a sparse index of the sparse tensor
477
+ const std::shared_ptr<SparseIndex>& sparse_index() const { return sparse_index_; }
478
+
479
+ /// \brief Return a number of dimensions of the sparse tensor
480
+ int ndim() const { return static_cast<int>(shape_.size()); }
481
+
482
+ /// \brief Return a vector of dimension names
483
+ const std::vector<std::string>& dim_names() const { return dim_names_; }
484
+
485
+ /// \brief Return the name of the i-th dimension
486
+ const std::string& dim_name(int i) const;
487
+
488
+ /// \brief Total number of value cells in the sparse tensor
489
+ int64_t size() const;
490
+
491
+ /// \brief Return true if the underlying data buffer is mutable
492
+ bool is_mutable() const { return data_->is_mutable(); }
493
+
494
+ /// \brief Total number of non-zero cells in the sparse tensor
495
+ int64_t non_zero_length() const {
496
+ return sparse_index_ ? sparse_index_->non_zero_length() : 0;
497
+ }
498
+
499
+ /// \brief Return whether sparse tensors are equal
500
+ bool Equals(const SparseTensor& other,
501
+ const EqualOptions& = EqualOptions::Defaults()) const;
502
+
503
+ /// \brief Return dense representation of sparse tensor as tensor
504
+ ///
505
+ /// The returned Tensor has row-major order (C-like).
506
+ Result<std::shared_ptr<Tensor>> ToTensor(MemoryPool* pool) const;
507
+ Result<std::shared_ptr<Tensor>> ToTensor() const {
508
+ return ToTensor(default_memory_pool());
509
+ }
510
+
511
+ protected:
512
+ // Constructor with all attributes
513
+ SparseTensor(const std::shared_ptr<DataType>& type, const std::shared_ptr<Buffer>& data,
514
+ const std::vector<int64_t>& shape,
515
+ const std::shared_ptr<SparseIndex>& sparse_index,
516
+ const std::vector<std::string>& dim_names);
517
+
518
+ std::shared_ptr<DataType> type_;
519
+ std::shared_ptr<Buffer> data_;
520
+ std::vector<int64_t> shape_;
521
+ std::shared_ptr<SparseIndex> sparse_index_;
522
+
523
+ // These names are optional
524
+ std::vector<std::string> dim_names_;
525
+ };
526
+
527
+ // ----------------------------------------------------------------------
528
+ // SparseTensorImpl class
529
+
530
+ namespace internal {
531
+
532
+ ARROW_EXPORT
533
+ Status MakeSparseTensorFromTensor(const Tensor& tensor,
534
+ SparseTensorFormat::type sparse_format_id,
535
+ const std::shared_ptr<DataType>& index_value_type,
536
+ MemoryPool* pool,
537
+ std::shared_ptr<SparseIndex>* out_sparse_index,
538
+ std::shared_ptr<Buffer>* out_data);
539
+
540
+ } // namespace internal
541
+
542
+ /// \brief EXPERIMENTAL: Concrete sparse tensor implementation classes with sparse index
543
+ /// type
544
+ template <typename SparseIndexType>
545
+ class SparseTensorImpl : public SparseTensor {
546
+ public:
547
+ virtual ~SparseTensorImpl() = default;
548
+
549
+ /// \brief Construct a sparse tensor from physical data buffer and logical index
550
+ SparseTensorImpl(const std::shared_ptr<SparseIndexType>& sparse_index,
551
+ const std::shared_ptr<DataType>& type,
552
+ const std::shared_ptr<Buffer>& data, const std::vector<int64_t>& shape,
553
+ const std::vector<std::string>& dim_names)
554
+ : SparseTensor(type, data, shape, sparse_index, dim_names) {}
555
+
556
+ /// \brief Construct an empty sparse tensor
557
+ SparseTensorImpl(const std::shared_ptr<DataType>& type,
558
+ const std::vector<int64_t>& shape,
559
+ const std::vector<std::string>& dim_names = {})
560
+ : SparseTensorImpl(NULLPTR, type, NULLPTR, shape, dim_names) {}
561
+
562
+ /// \brief Create a SparseTensor with full parameters
563
+ static inline Result<std::shared_ptr<SparseTensorImpl<SparseIndexType>>> Make(
564
+ const std::shared_ptr<SparseIndexType>& sparse_index,
565
+ const std::shared_ptr<DataType>& type, const std::shared_ptr<Buffer>& data,
566
+ const std::vector<int64_t>& shape, const std::vector<std::string>& dim_names) {
567
+ if (!is_tensor_supported(type->id())) {
568
+ return Status::Invalid(type->ToString(),
569
+ " is not valid data type for a sparse tensor");
570
+ }
571
+ ARROW_RETURN_NOT_OK(sparse_index->ValidateShape(shape));
572
+ if (dim_names.size() > 0 && dim_names.size() != shape.size()) {
573
+ return Status::Invalid("dim_names length is inconsistent with shape");
574
+ }
575
+ return std::make_shared<SparseTensorImpl<SparseIndexType>>(sparse_index, type, data,
576
+ shape, dim_names);
577
+ }
578
+
579
+ /// \brief Create a sparse tensor from a dense tensor
580
+ ///
581
+ /// The dense tensor is re-encoded as a sparse index and a physical
582
+ /// data buffer for the non-zero value.
583
+ static inline Result<std::shared_ptr<SparseTensorImpl<SparseIndexType>>> Make(
584
+ const Tensor& tensor, const std::shared_ptr<DataType>& index_value_type,
585
+ MemoryPool* pool = default_memory_pool()) {
586
+ std::shared_ptr<SparseIndex> sparse_index;
587
+ std::shared_ptr<Buffer> data;
588
+ ARROW_RETURN_NOT_OK(internal::MakeSparseTensorFromTensor(
589
+ tensor, SparseIndexType::format_id, index_value_type, pool, &sparse_index,
590
+ &data));
591
+ return std::make_shared<SparseTensorImpl<SparseIndexType>>(
592
+ internal::checked_pointer_cast<SparseIndexType>(sparse_index), tensor.type(),
593
+ data, tensor.shape(), tensor.dim_names_);
594
+ }
595
+
596
+ static inline Result<std::shared_ptr<SparseTensorImpl<SparseIndexType>>> Make(
597
+ const Tensor& tensor, MemoryPool* pool = default_memory_pool()) {
598
+ return Make(tensor, int64(), pool);
599
+ }
600
+
601
+ private:
602
+ ARROW_DISALLOW_COPY_AND_ASSIGN(SparseTensorImpl);
603
+ };
604
+
605
+ /// \brief EXPERIMENTAL: Type alias for COO sparse tensor
606
+ using SparseCOOTensor = SparseTensorImpl<SparseCOOIndex>;
607
+
608
+ /// \brief EXPERIMENTAL: Type alias for CSR sparse matrix
609
+ using SparseCSRMatrix = SparseTensorImpl<SparseCSRIndex>;
610
+
611
+ /// \brief EXPERIMENTAL: Type alias for CSC sparse matrix
612
+ using SparseCSCMatrix = SparseTensorImpl<SparseCSCIndex>;
613
+
614
+ /// \brief EXPERIMENTAL: Type alias for CSF sparse matrix
615
+ using SparseCSFTensor = SparseTensorImpl<SparseCSFIndex>;
616
+
617
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/status.h ADDED
@@ -0,0 +1,471 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style license that can be
3
+ // found in the LICENSE file. See the AUTHORS file for names of contributors.
4
+ //
5
+ // A Status encapsulates the result of an operation. It may indicate success,
6
+ // or it may indicate an error with an associated error message.
7
+ //
8
+ // Multiple threads can invoke const methods on a Status without
9
+ // external synchronization, but if any of the threads may call a
10
+ // non-const method, all threads accessing the same Status must use
11
+ // external synchronization.
12
+
13
+ // Adapted from Apache Kudu, TensorFlow
14
+
15
+ #pragma once
16
+
17
+ #include <cstring>
18
+ #include <iosfwd>
19
+ #include <memory>
20
+ #include <string>
21
+ #include <utility>
22
+
23
+ #include "arrow/util/compare.h"
24
+ #include "arrow/util/macros.h"
25
+ #include "arrow/util/string_builder.h"
26
+ #include "arrow/util/visibility.h"
27
+
28
+ #ifdef ARROW_EXTRA_ERROR_CONTEXT
29
+
30
+ /// \brief Return with given status if condition is met.
31
+ #define ARROW_RETURN_IF_(condition, status, expr) \
32
+ do { \
33
+ if (ARROW_PREDICT_FALSE(condition)) { \
34
+ ::arrow::Status _st = (status); \
35
+ _st.AddContextLine(__FILE__, __LINE__, expr); \
36
+ return _st; \
37
+ } \
38
+ } while (0)
39
+
40
+ #else
41
+
42
+ #define ARROW_RETURN_IF_(condition, status, _) \
43
+ do { \
44
+ if (ARROW_PREDICT_FALSE(condition)) { \
45
+ return (status); \
46
+ } \
47
+ } while (0)
48
+
49
+ #endif // ARROW_EXTRA_ERROR_CONTEXT
50
+
51
+ #define ARROW_RETURN_IF(condition, status) \
52
+ ARROW_RETURN_IF_(condition, status, ARROW_STRINGIFY(status))
53
+
54
+ /// \brief Propagate any non-successful Status to the caller
55
+ #define ARROW_RETURN_NOT_OK(status) \
56
+ do { \
57
+ ::arrow::Status __s = ::arrow::internal::GenericToStatus(status); \
58
+ ARROW_RETURN_IF_(!__s.ok(), __s, ARROW_STRINGIFY(status)); \
59
+ } while (false)
60
+
61
+ /// \brief Given `expr` and `warn_msg`; log `warn_msg` if `expr` is a non-ok status
62
+ #define ARROW_WARN_NOT_OK(expr, warn_msg) \
63
+ do { \
64
+ ::arrow::Status _s = (expr); \
65
+ if (ARROW_PREDICT_FALSE(!_s.ok())) { \
66
+ _s.Warn(warn_msg); \
67
+ } \
68
+ } while (false)
69
+
70
+ #define RETURN_NOT_OK_ELSE(s, else_) \
71
+ do { \
72
+ ::arrow::Status _s = ::arrow::internal::GenericToStatus(s); \
73
+ if (!_s.ok()) { \
74
+ else_; \
75
+ return _s; \
76
+ } \
77
+ } while (false)
78
+
79
+ // This is an internal-use macro and should not be used in public headers.
80
+ #ifndef RETURN_NOT_OK
81
+ #define RETURN_NOT_OK(s) ARROW_RETURN_NOT_OK(s)
82
+ #endif
83
+
84
+ namespace arrow {
85
+
86
+ enum class StatusCode : char {
87
+ OK = 0,
88
+ OutOfMemory = 1,
89
+ KeyError = 2,
90
+ TypeError = 3,
91
+ Invalid = 4,
92
+ IOError = 5,
93
+ CapacityError = 6,
94
+ IndexError = 7,
95
+ Cancelled = 8,
96
+ UnknownError = 9,
97
+ NotImplemented = 10,
98
+ SerializationError = 11,
99
+ RError = 13,
100
+ // Gandiva range of errors
101
+ CodeGenError = 40,
102
+ ExpressionValidationError = 41,
103
+ ExecutionError = 42,
104
+ // Continue generic codes.
105
+ AlreadyExists = 45
106
+ };
107
+
108
+ /// \brief An opaque class that allows subsystems to retain
109
+ /// additional information inside the Status.
110
+ class ARROW_EXPORT StatusDetail {
111
+ public:
112
+ virtual ~StatusDetail() = default;
113
+ /// \brief Return a unique id for the type of the StatusDetail
114
+ /// (effectively a poor man's substitute for RTTI).
115
+ virtual const char* type_id() const = 0;
116
+ /// \brief Produce a human-readable description of this status.
117
+ virtual std::string ToString() const = 0;
118
+
119
+ bool operator==(const StatusDetail& other) const noexcept {
120
+ return std::string(type_id()) == other.type_id() && ToString() == other.ToString();
121
+ }
122
+ };
123
+
124
+ /// \brief Status outcome object (success or error)
125
+ ///
126
+ /// The Status object is an object holding the outcome of an operation.
127
+ /// The outcome is represented as a StatusCode, either success
128
+ /// (StatusCode::OK) or an error (any other of the StatusCode enumeration values).
129
+ ///
130
+ /// Additionally, if an error occurred, a specific error message is generally
131
+ /// attached.
132
+ class ARROW_EXPORT [[nodiscard]] Status : public util::EqualityComparable<Status>,
133
+ public util::ToStringOstreamable<Status> {
134
+ public:
135
+ // Create a success status.
136
+ constexpr Status() noexcept : state_(NULLPTR) {}
137
+ ~Status() noexcept {
138
+ // ARROW-2400: On certain compilers, splitting off the slow path improves
139
+ // performance significantly.
140
+ if (ARROW_PREDICT_FALSE(state_ != NULL)) {
141
+ DeleteState();
142
+ }
143
+ }
144
+
145
+ Status(StatusCode code, const std::string& msg);
146
+ /// \brief Pluggable constructor for use by sub-systems. detail cannot be null.
147
+ Status(StatusCode code, std::string msg, std::shared_ptr<StatusDetail> detail);
148
+
149
+ // Copy the specified status.
150
+ inline Status(const Status& s);
151
+ inline Status& operator=(const Status& s);
152
+
153
+ // Move the specified status.
154
+ inline Status(Status&& s) noexcept;
155
+ inline Status& operator=(Status&& s) noexcept;
156
+
157
+ inline bool Equals(const Status& s) const;
158
+
159
+ // AND the statuses.
160
+ inline Status operator&(const Status& s) const noexcept;
161
+ inline Status operator&(Status&& s) const noexcept;
162
+ inline Status& operator&=(const Status& s) noexcept;
163
+ inline Status& operator&=(Status&& s) noexcept;
164
+
165
+ /// Return a success status
166
+ static Status OK() { return Status(); }
167
+
168
+ template <typename... Args>
169
+ static Status FromArgs(StatusCode code, Args&&... args) {
170
+ return Status(code, util::StringBuilder(std::forward<Args>(args)...));
171
+ }
172
+
173
+ template <typename... Args>
174
+ static Status FromDetailAndArgs(StatusCode code, std::shared_ptr<StatusDetail> detail,
175
+ Args&&... args) {
176
+ return Status(code, util::StringBuilder(std::forward<Args>(args)...),
177
+ std::move(detail));
178
+ }
179
+
180
+ /// Return an error status for out-of-memory conditions
181
+ template <typename... Args>
182
+ static Status OutOfMemory(Args&&... args) {
183
+ return Status::FromArgs(StatusCode::OutOfMemory, std::forward<Args>(args)...);
184
+ }
185
+
186
+ /// Return an error status for failed key lookups (e.g. column name in a table)
187
+ template <typename... Args>
188
+ static Status KeyError(Args&&... args) {
189
+ return Status::FromArgs(StatusCode::KeyError, std::forward<Args>(args)...);
190
+ }
191
+
192
+ /// Return an error status for type errors (such as mismatching data types)
193
+ template <typename... Args>
194
+ static Status TypeError(Args&&... args) {
195
+ return Status::FromArgs(StatusCode::TypeError, std::forward<Args>(args)...);
196
+ }
197
+
198
+ /// Return an error status for unknown errors
199
+ template <typename... Args>
200
+ static Status UnknownError(Args&&... args) {
201
+ return Status::FromArgs(StatusCode::UnknownError, std::forward<Args>(args)...);
202
+ }
203
+
204
+ /// Return an error status when an operation or a combination of operation and
205
+ /// data types is unimplemented
206
+ template <typename... Args>
207
+ static Status NotImplemented(Args&&... args) {
208
+ return Status::FromArgs(StatusCode::NotImplemented, std::forward<Args>(args)...);
209
+ }
210
+
211
+ /// Return an error status for invalid data (for example a string that fails parsing)
212
+ template <typename... Args>
213
+ static Status Invalid(Args&&... args) {
214
+ return Status::FromArgs(StatusCode::Invalid, std::forward<Args>(args)...);
215
+ }
216
+
217
+ /// Return an error status for cancelled operation
218
+ template <typename... Args>
219
+ static Status Cancelled(Args&&... args) {
220
+ return Status::FromArgs(StatusCode::Cancelled, std::forward<Args>(args)...);
221
+ }
222
+
223
+ /// Return an error status when an index is out of bounds
224
+ template <typename... Args>
225
+ static Status IndexError(Args&&... args) {
226
+ return Status::FromArgs(StatusCode::IndexError, std::forward<Args>(args)...);
227
+ }
228
+
229
+ /// Return an error status when a container's capacity would exceed its limits
230
+ template <typename... Args>
231
+ static Status CapacityError(Args&&... args) {
232
+ return Status::FromArgs(StatusCode::CapacityError, std::forward<Args>(args)...);
233
+ }
234
+
235
+ /// Return an error status when some IO-related operation failed
236
+ template <typename... Args>
237
+ static Status IOError(Args&&... args) {
238
+ return Status::FromArgs(StatusCode::IOError, std::forward<Args>(args)...);
239
+ }
240
+
241
+ /// Return an error status when some (de)serialization operation failed
242
+ template <typename... Args>
243
+ static Status SerializationError(Args&&... args) {
244
+ return Status::FromArgs(StatusCode::SerializationError, std::forward<Args>(args)...);
245
+ }
246
+
247
+ template <typename... Args>
248
+ static Status RError(Args&&... args) {
249
+ return Status::FromArgs(StatusCode::RError, std::forward<Args>(args)...);
250
+ }
251
+
252
+ template <typename... Args>
253
+ static Status CodeGenError(Args&&... args) {
254
+ return Status::FromArgs(StatusCode::CodeGenError, std::forward<Args>(args)...);
255
+ }
256
+
257
+ template <typename... Args>
258
+ static Status ExpressionValidationError(Args&&... args) {
259
+ return Status::FromArgs(StatusCode::ExpressionValidationError,
260
+ std::forward<Args>(args)...);
261
+ }
262
+
263
+ template <typename... Args>
264
+ static Status ExecutionError(Args&&... args) {
265
+ return Status::FromArgs(StatusCode::ExecutionError, std::forward<Args>(args)...);
266
+ }
267
+
268
+ template <typename... Args>
269
+ static Status AlreadyExists(Args&&... args) {
270
+ return Status::FromArgs(StatusCode::AlreadyExists, std::forward<Args>(args)...);
271
+ }
272
+
273
+ /// Return true iff the status indicates success.
274
+ constexpr bool ok() const { return (state_ == NULLPTR); }
275
+
276
+ /// Return true iff the status indicates an out-of-memory error.
277
+ constexpr bool IsOutOfMemory() const { return code() == StatusCode::OutOfMemory; }
278
+ /// Return true iff the status indicates a key lookup error.
279
+ constexpr bool IsKeyError() const { return code() == StatusCode::KeyError; }
280
+ /// Return true iff the status indicates invalid data.
281
+ constexpr bool IsInvalid() const { return code() == StatusCode::Invalid; }
282
+ /// Return true iff the status indicates a cancelled operation.
283
+ constexpr bool IsCancelled() const { return code() == StatusCode::Cancelled; }
284
+ /// Return true iff the status indicates an IO-related failure.
285
+ constexpr bool IsIOError() const { return code() == StatusCode::IOError; }
286
+ /// Return true iff the status indicates a container reaching capacity limits.
287
+ constexpr bool IsCapacityError() const { return code() == StatusCode::CapacityError; }
288
+ /// Return true iff the status indicates an out of bounds index.
289
+ constexpr bool IsIndexError() const { return code() == StatusCode::IndexError; }
290
+ /// Return true iff the status indicates a type error.
291
+ constexpr bool IsTypeError() const { return code() == StatusCode::TypeError; }
292
+ /// Return true iff the status indicates an unknown error.
293
+ constexpr bool IsUnknownError() const { return code() == StatusCode::UnknownError; }
294
+ /// Return true iff the status indicates an unimplemented operation.
295
+ constexpr bool IsNotImplemented() const { return code() == StatusCode::NotImplemented; }
296
+ /// Return true iff the status indicates a (de)serialization failure
297
+ constexpr bool IsSerializationError() const {
298
+ return code() == StatusCode::SerializationError;
299
+ }
300
+ /// Return true iff the status indicates a R-originated error.
301
+ constexpr bool IsRError() const { return code() == StatusCode::RError; }
302
+
303
+ constexpr bool IsCodeGenError() const { return code() == StatusCode::CodeGenError; }
304
+
305
+ constexpr bool IsExpressionValidationError() const {
306
+ return code() == StatusCode::ExpressionValidationError;
307
+ }
308
+
309
+ constexpr bool IsExecutionError() const { return code() == StatusCode::ExecutionError; }
310
+ constexpr bool IsAlreadyExists() const { return code() == StatusCode::AlreadyExists; }
311
+
312
+ /// \brief Return a string representation of this status suitable for printing.
313
+ ///
314
+ /// The string "OK" is returned for success.
315
+ std::string ToString() const;
316
+
317
+ /// \brief Return a string representation of this status without
318
+ /// context lines suitable for printing.
319
+ ///
320
+ /// The string "OK" is returned for success.
321
+ std::string ToStringWithoutContextLines() const;
322
+
323
+ /// \brief Return a string representation of the status code, without the message
324
+ /// text or POSIX code information.
325
+ std::string CodeAsString() const;
326
+ static std::string CodeAsString(StatusCode);
327
+
328
+ /// \brief Return the StatusCode value attached to this status.
329
+ constexpr StatusCode code() const { return ok() ? StatusCode::OK : state_->code; }
330
+
331
+ /// \brief Return the specific error message attached to this status.
332
+ const std::string& message() const {
333
+ static const std::string no_message = "";
334
+ return ok() ? no_message : state_->msg;
335
+ }
336
+
337
+ /// \brief Return the status detail attached to this message.
338
+ const std::shared_ptr<StatusDetail>& detail() const {
339
+ static std::shared_ptr<StatusDetail> no_detail = NULLPTR;
340
+ return state_ ? state_->detail : no_detail;
341
+ }
342
+
343
+ /// \brief Return a new Status copying the existing status, but
344
+ /// updating with the existing detail.
345
+ Status WithDetail(std::shared_ptr<StatusDetail> new_detail) const {
346
+ return Status(code(), message(), std::move(new_detail));
347
+ }
348
+
349
+ /// \brief Return a new Status with changed message, copying the
350
+ /// existing status code and detail.
351
+ template <typename... Args>
352
+ Status WithMessage(Args&&... args) const {
353
+ return FromArgs(code(), std::forward<Args>(args)...).WithDetail(detail());
354
+ }
355
+
356
+ void Warn() const;
357
+ void Warn(const std::string& message) const;
358
+
359
+ [[noreturn]] void Abort() const;
360
+ [[noreturn]] void Abort(const std::string& message) const;
361
+
362
+ #ifdef ARROW_EXTRA_ERROR_CONTEXT
363
+ void AddContextLine(const char* filename, int line, const char* expr);
364
+ #endif
365
+
366
+ private:
367
+ struct State {
368
+ StatusCode code;
369
+ std::string msg;
370
+ std::shared_ptr<StatusDetail> detail;
371
+ };
372
+ // OK status has a `NULL` state_. Otherwise, `state_` points to
373
+ // a `State` structure containing the error code and message(s)
374
+ State* state_;
375
+
376
+ void DeleteState() {
377
+ delete state_;
378
+ state_ = NULLPTR;
379
+ }
380
+ void CopyFrom(const Status& s);
381
+ inline void MoveFrom(Status& s);
382
+ };
383
+
384
+ void Status::MoveFrom(Status& s) {
385
+ delete state_;
386
+ state_ = s.state_;
387
+ s.state_ = NULLPTR;
388
+ }
389
+
390
+ Status::Status(const Status& s)
391
+ : state_((s.state_ == NULLPTR) ? NULLPTR : new State(*s.state_)) {}
392
+
393
+ Status& Status::operator=(const Status& s) {
394
+ // The following condition catches both aliasing (when this == &s),
395
+ // and the common case where both s and *this are ok.
396
+ if (state_ != s.state_) {
397
+ CopyFrom(s);
398
+ }
399
+ return *this;
400
+ }
401
+
402
+ Status::Status(Status&& s) noexcept : state_(s.state_) { s.state_ = NULLPTR; }
403
+
404
+ Status& Status::operator=(Status&& s) noexcept {
405
+ MoveFrom(s);
406
+ return *this;
407
+ }
408
+
409
+ bool Status::Equals(const Status& s) const {
410
+ if (state_ == s.state_) {
411
+ return true;
412
+ }
413
+
414
+ if (ok() || s.ok()) {
415
+ return false;
416
+ }
417
+
418
+ if (detail() != s.detail()) {
419
+ if ((detail() && !s.detail()) || (!detail() && s.detail())) {
420
+ return false;
421
+ }
422
+ return *detail() == *s.detail();
423
+ }
424
+
425
+ return code() == s.code() && message() == s.message();
426
+ }
427
+
428
+ /// \cond FALSE
429
+ // (note: emits warnings on Doxygen < 1.8.15,
430
+ // see https://github.com/doxygen/doxygen/issues/6295)
431
+ Status Status::operator&(const Status& s) const noexcept {
432
+ if (ok()) {
433
+ return s;
434
+ } else {
435
+ return *this;
436
+ }
437
+ }
438
+
439
+ Status Status::operator&(Status&& s) const noexcept {
440
+ if (ok()) {
441
+ return std::move(s);
442
+ } else {
443
+ return *this;
444
+ }
445
+ }
446
+
447
+ Status& Status::operator&=(const Status& s) noexcept {
448
+ if (ok() && !s.ok()) {
449
+ CopyFrom(s);
450
+ }
451
+ return *this;
452
+ }
453
+
454
+ Status& Status::operator&=(Status&& s) noexcept {
455
+ if (ok() && !s.ok()) {
456
+ MoveFrom(s);
457
+ }
458
+ return *this;
459
+ }
460
+ /// \endcond
461
+
462
+ namespace internal {
463
+
464
+ // Extract Status from Status or Result<T>
465
+ // Useful for the status check macros such as RETURN_NOT_OK.
466
+ inline const Status& GenericToStatus(const Status& st) { return st; }
467
+ inline Status GenericToStatus(Status&& st) { return std::move(st); }
468
+
469
+ } // namespace internal
470
+
471
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/arrow/visit_array_inline.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/array.h"
21
+ #include "arrow/extension_type.h"
22
+ #include "arrow/visitor_generate.h"
23
+
24
+ namespace arrow {
25
+
26
// Expands to one `switch` case per concrete type id: downcasts `array` to the
// matching ArrayType and forwards it (plus any extra arguments) to the
// visitor's corresponding Visit() overload.
#define ARRAY_VISIT_INLINE(TYPE_CLASS)                                                   \
  case TYPE_CLASS##Type::type_id:                                                        \
    return visitor->Visit(                                                               \
        internal::checked_cast<const typename TypeTraits<TYPE_CLASS##Type>::ArrayType&>( \
            array),                                                                      \
        std::forward<ARGS>(args)...);

/// \brief Apply the visitors Visit() method specialized to the array type
///
/// \tparam VISITOR Visitor type that implements Visit() for all array types.
/// \tparam ARGS Additional arguments, if any, will be passed to the Visit function after
/// the `arr` argument
/// \return Status
///
/// A visitor is a type that implements specialized logic for each Arrow type.
/// Example usage:
///
/// ```
/// class ExampleVisitor {
///   arrow::Status Visit(arrow::NumericArray<Int32Type> arr) { ... }
///   arrow::Status Visit(arrow::NumericArray<Int64Type> arr) { ... }
///   ...
/// }
/// ExampleVisitor visitor;
/// VisitArrayInline(some_array, &visitor);
/// ```
template <typename VISITOR, typename... ARGS>
inline Status VisitArrayInline(const Array& array, VISITOR* visitor, ARGS&&... args) {
  switch (array.type_id()) {
    // One case per type, generated by the macro above.
    ARROW_GENERATE_FOR_ALL_TYPES(ARRAY_VISIT_INLINE);
    default:
      break;
  }
  // Reached only when the array's type id has no generated case.
  return Status::NotImplemented("Type not implemented");
}

#undef ARRAY_VISIT_INLINE
63
+
64
+ } // namespace arrow
venv/lib/python3.10/site-packages/pyarrow/include/parquet/api/io.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "parquet/exception.h"
venv/lib/python3.10/site-packages/pyarrow/include/parquet/api/reader.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ // Column reader API
21
+ #include "parquet/column_reader.h"
22
+ #include "parquet/column_scanner.h"
23
+ #include "parquet/exception.h"
24
+ #include "parquet/file_reader.h"
25
+ #include "parquet/metadata.h"
26
+ #include "parquet/platform.h"
27
+ #include "parquet/printer.h"
28
+ #include "parquet/properties.h"
29
+ #include "parquet/statistics.h"
30
+
31
+ // Schemas
32
+ #include "parquet/api/schema.h"
33
+
34
+ // IO
35
+ #include "parquet/api/io.h"
venv/lib/python3.10/site-packages/pyarrow/include/parquet/api/schema.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ // Schemas
21
+ #include "parquet/schema.h"
venv/lib/python3.10/site-packages/pyarrow/include/parquet/api/writer.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "parquet/api/io.h"
21
+ #include "parquet/api/schema.h"
22
+ #include "parquet/column_writer.h"
23
+ #include "parquet/exception.h"
24
+ #include "parquet/file_writer.h"
25
+ #include "parquet/statistics.h"
venv/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/reader.h ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ // N.B. we don't include async_generator.h as it's relatively heavy
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <vector>
25
+
26
+ #include "parquet/file_reader.h"
27
+ #include "parquet/platform.h"
28
+ #include "parquet/properties.h"
29
+
30
+ namespace arrow {
31
+
32
+ class ChunkedArray;
33
+ class KeyValueMetadata;
34
+ class RecordBatchReader;
35
+ struct Scalar;
36
+ class Schema;
37
+ class Table;
38
+ class RecordBatch;
39
+
40
+ } // namespace arrow
41
+
42
+ namespace parquet {
43
+
44
+ class FileMetaData;
45
+ class SchemaDescriptor;
46
+
47
+ namespace arrow {
48
+
49
+ class ColumnChunkReader;
50
+ class ColumnReader;
51
+ struct SchemaManifest;
52
+ class RowGroupReader;
53
+
54
+ /// \brief Arrow read adapter class for deserializing Parquet files as Arrow row batches.
55
+ ///
56
+ /// This interfaces caters for different use cases and thus provides different
57
+ /// interfaces. In its most simplistic form, we cater for a user that wants to
58
+ /// read the whole Parquet at once with the `FileReader::ReadTable` method.
59
+ ///
60
+ /// More advanced users that also want to implement parallelism on top of each
61
+ /// single Parquet files should do this on the RowGroup level. For this, they can
62
+ /// call `FileReader::RowGroup(i)->ReadTable` to receive only the specified
63
+ /// RowGroup as a table.
64
+ ///
65
+ /// In the most advanced situation, where a consumer wants to independently read
66
+ /// RowGroups in parallel and consume each column individually, they can call
67
+ /// `FileReader::RowGroup(i)->Column(j)->Read` and receive an `arrow::Column`
68
+ /// instance.
69
+ ///
70
+ /// Finally, one can also get a stream of record batches using
71
+ /// `FileReader::GetRecordBatchReader()`. This can internally decode columns
72
+ /// in parallel if use_threads was enabled in the ArrowReaderProperties.
73
+ ///
74
+ /// The parquet format supports an optional integer field_id which can be assigned
75
+ /// to a field. Arrow will convert these field IDs to a metadata key named
76
+ /// PARQUET:field_id on the appropriate field.
77
+ // TODO(wesm): nested data does not always make sense with this user
78
+ // interface unless you are only reading a single leaf node from a branch of
79
+ // a table. For example:
80
+ //
81
+ // repeated group data {
82
+ // optional group record {
83
+ // optional int32 val1;
84
+ // optional byte_array val2;
85
+ // optional bool val3;
86
+ // }
87
+ // optional int32 val4;
88
+ // }
89
+ //
90
+ // In the Parquet file, there are 4 leaf nodes:
91
+ //
92
+ // * data.record.val1
93
+ // * data.record.val2
94
+ // * data.record.val3
95
+ // * data.val4
96
+ //
97
+ // When materializing this data in an Arrow array, we would have:
98
+ //
99
+ // data: list<struct<
100
+ // record: struct<
101
+ // val1: int32,
102
+ // val2: string (= list<uint8>),
103
+ // val3: bool,
104
+ // >,
105
+ // val4: int32
106
+ // >>
107
+ //
108
+ // However, in the Parquet format, each leaf node has its own repetition and
109
+ // definition levels describing the structure of the intermediate nodes in
110
+ // this array structure. Thus, we will need to scan the leaf data for a group
111
+ // of leaf nodes part of the same type tree to create a single result Arrow
112
+ // nested array structure.
113
+ //
114
+ // This is additionally complicated by "chunky" repeated fields or very large byte
115
+ // arrays
116
class PARQUET_EXPORT FileReader {
 public:
  /// Factory function to create a FileReader from a ParquetFileReader and properties
  static ::arrow::Status Make(::arrow::MemoryPool* pool,
                              std::unique_ptr<ParquetFileReader> reader,
                              const ArrowReaderProperties& properties,
                              std::unique_ptr<FileReader>* out);

  /// Factory function to create a FileReader from a ParquetFileReader
  /// (uses default ArrowReaderProperties).
  static ::arrow::Status Make(::arrow::MemoryPool* pool,
                              std::unique_ptr<ParquetFileReader> reader,
                              std::unique_ptr<FileReader>* out);

  // Since the distribution of columns amongst a Parquet file's row groups may
  // be uneven (the number of values in each column chunk can be different), we
  // provide a column-oriented read interface. The ColumnReader hides the
  // details of paging through the file's row groups and yielding
  // fully-materialized arrow::Array instances
  //
  // Returns error status if the column of interest is not flat.
  // The indicated column index is relative to the schema
  virtual ::arrow::Status GetColumn(int i, std::unique_ptr<ColumnReader>* out) = 0;

  /// \brief Return arrow schema for all the columns.
  virtual ::arrow::Status GetSchema(std::shared_ptr<::arrow::Schema>* out) = 0;

  /// \brief Read column as a whole into a chunked array.
  ///
  /// The index i refers to the index of the top level schema field, which may
  /// be nested or flat - e.g.
  ///
  /// 0 foo.bar
  ///   foo.bar.baz
  ///   foo.qux
  /// 1 foo2
  /// 2 foo3
  ///
  /// i=0 will read the entire foo struct, i=1 the foo2 primitive column etc
  virtual ::arrow::Status ReadColumn(int i,
                                     std::shared_ptr<::arrow::ChunkedArray>* out) = 0;

  /// \brief Return a RecordBatchReader of all row groups and columns.
  virtual ::arrow::Status GetRecordBatchReader(
      std::unique_ptr<::arrow::RecordBatchReader>* out) = 0;

  /// \brief Return a RecordBatchReader of row groups selected from row_group_indices.
  ///
  /// Note that the ordering in row_group_indices matters. FileReaders must outlive
  /// their RecordBatchReaders.
  ///
  /// \returns error Status if row_group_indices contains an invalid index
  virtual ::arrow::Status GetRecordBatchReader(
      const std::vector<int>& row_group_indices,
      std::unique_ptr<::arrow::RecordBatchReader>* out) = 0;

  /// \brief Return a RecordBatchReader of row groups selected from
  /// row_group_indices, whose columns are selected by column_indices.
  ///
  /// Note that the ordering in row_group_indices and column_indices
  /// matter. FileReaders must outlive their RecordBatchReaders.
  ///
  /// \returns error Status if either row_group_indices or column_indices
  /// contains an invalid index
  virtual ::arrow::Status GetRecordBatchReader(
      const std::vector<int>& row_group_indices, const std::vector<int>& column_indices,
      std::unique_ptr<::arrow::RecordBatchReader>* out) = 0;

  /// \brief Return a RecordBatchReader of row groups selected from
  /// row_group_indices, whose columns are selected by column_indices.
  ///
  /// Note that the ordering in row_group_indices and column_indices
  /// matter. FileReaders must outlive their RecordBatchReaders.
  ///
  /// \param row_group_indices which row groups to read (order determines read order).
  /// \param column_indices which columns to read (order determines output schema).
  /// \param[out] out record batch stream from parquet data.
  ///
  /// \returns error Status if either row_group_indices or column_indices
  /// contains an invalid index
  // Non-virtual shared_ptr convenience overloads of the unique_ptr variants above.
  ::arrow::Status GetRecordBatchReader(const std::vector<int>& row_group_indices,
                                       const std::vector<int>& column_indices,
                                       std::shared_ptr<::arrow::RecordBatchReader>* out);
  ::arrow::Status GetRecordBatchReader(const std::vector<int>& row_group_indices,
                                       std::shared_ptr<::arrow::RecordBatchReader>* out);
  ::arrow::Status GetRecordBatchReader(std::shared_ptr<::arrow::RecordBatchReader>* out);

  /// \brief Return a generator of record batches.
  ///
  /// The FileReader must outlive the generator, so this requires that you pass in a
  /// shared_ptr.
  ///
  /// \returns error Result if either row_group_indices or column_indices contains an
  /// invalid index
  virtual ::arrow::Result<
      std::function<::arrow::Future<std::shared_ptr<::arrow::RecordBatch>>()>>
  GetRecordBatchGenerator(std::shared_ptr<FileReader> reader,
                          const std::vector<int> row_group_indices,
                          const std::vector<int> column_indices,
                          ::arrow::internal::Executor* cpu_executor = NULLPTR,
                          int64_t rows_to_readahead = 0) = 0;

  /// Read all columns into a Table
  virtual ::arrow::Status ReadTable(std::shared_ptr<::arrow::Table>* out) = 0;

  /// \brief Read the given columns into a Table
  ///
  /// The indicated column indices are relative to the internal representation
  /// of the parquet table. For instance :
  /// 0 foo.bar
  ///   foo.bar.baz           0
  ///   foo.bar.baz2          1
  ///   foo.qux               2
  /// 1 foo2                  3
  /// 2 foo3                  4
  ///
  /// i=0 will read foo.bar.baz, i=1 will read only foo.bar.baz2 and so on.
  /// Only leaf fields have indices; foo itself doesn't have an index.
  /// To get the index for a particular leaf field, one can use
  /// manifest().schema_fields to get the top level fields, and then walk the
  /// tree to identify the relevant leaf fields and access its column_index.
  /// To get the total number of leaf fields, use FileMetadata.num_columns().
  virtual ::arrow::Status ReadTable(const std::vector<int>& column_indices,
                                    std::shared_ptr<::arrow::Table>* out) = 0;

  /// Read the given columns of row group i into a Table.
  virtual ::arrow::Status ReadRowGroup(int i, const std::vector<int>& column_indices,
                                       std::shared_ptr<::arrow::Table>* out) = 0;

  /// Read all columns of row group i into a Table.
  virtual ::arrow::Status ReadRowGroup(int i, std::shared_ptr<::arrow::Table>* out) = 0;

  /// Read the given columns of the given row groups into a Table.
  virtual ::arrow::Status ReadRowGroups(const std::vector<int>& row_groups,
                                        const std::vector<int>& column_indices,
                                        std::shared_ptr<::arrow::Table>* out) = 0;

  /// Read all columns of the given row groups into a Table.
  virtual ::arrow::Status ReadRowGroups(const std::vector<int>& row_groups,
                                        std::shared_ptr<::arrow::Table>* out) = 0;

  /// \brief Scan file contents with one thread, return number of rows
  virtual ::arrow::Status ScanContents(std::vector<int> columns,
                                       const int32_t column_batch_size,
                                       int64_t* num_rows) = 0;

  /// \brief Return a reader for the RowGroup, this object must not outlive the
  /// FileReader.
  virtual std::shared_ptr<RowGroupReader> RowGroup(int row_group_index) = 0;

  /// \brief The number of row groups in the file
  virtual int num_row_groups() const = 0;

  /// Access the underlying ParquetFileReader (non-owning).
  virtual ParquetFileReader* parquet_reader() const = 0;

  /// Set whether to use multiple threads during reads of multiple columns.
  /// By default only one thread is used.
  virtual void set_use_threads(bool use_threads) = 0;

  /// Set number of records to read per batch for the RecordBatchReader.
  virtual void set_batch_size(int64_t batch_size) = 0;

  /// The ArrowReaderProperties this reader was created with.
  virtual const ArrowReaderProperties& properties() const = 0;

  /// The SchemaManifest mapping parquet columns to arrow fields.
  virtual const SchemaManifest& manifest() const = 0;

  virtual ~FileReader() = default;
};
279
+
280
// Reader scoped to a single row group, obtained via FileReader::RowGroup(i).
// Per that method's contract, it must not outlive the owning FileReader.
class RowGroupReader {
 public:
  virtual ~RowGroupReader() = default;
  // Return a reader for one column chunk of this row group.
  virtual std::shared_ptr<ColumnChunkReader> Column(int column_index) = 0;
  // Read only the indicated columns of this row group into a Table.
  virtual ::arrow::Status ReadTable(const std::vector<int>& column_indices,
                                    std::shared_ptr<::arrow::Table>* out) = 0;
  // Read all columns of this row group into a Table.
  virtual ::arrow::Status ReadTable(std::shared_ptr<::arrow::Table>* out) = 0;

 private:
  // Opaque iterator type; defined by the implementation.
  struct Iterator;
};
291
+
292
// Reader for a single column chunk (one column within one row group),
// obtained via RowGroupReader::Column().
class ColumnChunkReader {
 public:
  virtual ~ColumnChunkReader() = default;
  // Read the entire chunk into a ChunkedArray.
  virtual ::arrow::Status Read(std::shared_ptr<::arrow::ChunkedArray>* out) = 0;
};
297
+
298
+ // At this point, the column reader is a stream iterator. It only knows how to
299
+ // read the next batch of values for a particular column from the file until it
300
+ // runs out.
301
+ //
302
+ // We also do not expose any internal Parquet details, such as row groups. This
303
+ // might change in the future.
304
// Stream-style reader yielding successive value batches for one column
// (see the iterator discussion in the comment block above).
class PARQUET_EXPORT ColumnReader {
 public:
  virtual ~ColumnReader() = default;

  // Scan the next array of the indicated size. The actual size of the
  // returned array may be less than the passed size depending how much data is
  // available in the file.
  //
  // When all the data in the file has been exhausted, the result is set to
  // nullptr.
  //
  // Returns Status::OK on a successful read, including if you have exhausted
  // the data available in the file.
  virtual ::arrow::Status NextBatch(int64_t batch_size,
                                    std::shared_ptr<::arrow::ChunkedArray>* out) = 0;
};
320
+
321
+ /// \brief Experimental helper class for bindings (like Python) that struggle
322
+ /// either with std::move or C++ exceptions
323
class PARQUET_EXPORT FileReaderBuilder {
 public:
  FileReaderBuilder();

  /// Create FileReaderBuilder from Arrow file and optional properties / metadata
  ::arrow::Status Open(std::shared_ptr<::arrow::io::RandomAccessFile> file,
                       const ReaderProperties& properties = default_reader_properties(),
                       std::shared_ptr<FileMetaData> metadata = NULLPTR);

  /// Create FileReaderBuilder from file path and optional properties / metadata
  ::arrow::Status OpenFile(const std::string& path, bool memory_map = false,
                           const ReaderProperties& props = default_reader_properties(),
                           std::shared_ptr<FileMetaData> metadata = NULLPTR);

  /// Non-owning access to the wrapped ParquetFileReader.
  ParquetFileReader* raw_reader() { return raw_reader_.get(); }

  /// Set Arrow MemoryPool for memory allocation
  FileReaderBuilder* memory_pool(::arrow::MemoryPool* pool);
  /// Set Arrow reader properties
  FileReaderBuilder* properties(const ArrowReaderProperties& arg_properties);
  /// Build FileReader instance
  ::arrow::Status Build(std::unique_ptr<FileReader>* out);
  /// Build FileReader instance, returning it as a Result.
  ::arrow::Result<std::unique_ptr<FileReader>> Build();

 private:
  ::arrow::MemoryPool* pool_;
  ArrowReaderProperties properties_;
  std::unique_ptr<ParquetFileReader> raw_reader_;
};
352
+
353
+ /// \defgroup parquet-arrow-reader-factories Factory functions for Parquet Arrow readers
354
+ ///
355
+ /// @{
356
+
357
+ /// \brief Build FileReader from Arrow file and MemoryPool
358
+ ///
359
+ /// Advanced settings are supported through the FileReaderBuilder class.
360
+ PARQUET_EXPORT
361
+ ::arrow::Status OpenFile(std::shared_ptr<::arrow::io::RandomAccessFile>,
362
+ ::arrow::MemoryPool* allocator,
363
+ std::unique_ptr<FileReader>* reader);
364
+
365
+ /// @}
366
+
367
+ PARQUET_EXPORT
368
+ ::arrow::Status StatisticsAsScalars(const Statistics& Statistics,
369
+ std::shared_ptr<::arrow::Scalar>* min,
370
+ std::shared_ptr<::arrow::Scalar>* max);
371
+
372
+ namespace internal {
373
+
374
+ PARQUET_EXPORT
375
+ ::arrow::Status FuzzReader(const uint8_t* data, int64_t size);
376
+
377
+ } // namespace internal
378
+ } // namespace arrow
379
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/schema.h ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cassert>
21
+ #include <memory>
22
+ #include <unordered_map>
23
+ #include <unordered_set>
24
+ #include <vector>
25
+
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type.h"
29
+ #include "arrow/type_fwd.h"
30
+
31
+ #include "parquet/level_conversion.h"
32
+ #include "parquet/platform.h"
33
+ #include "parquet/schema.h"
34
+
35
+ namespace parquet {
36
+
37
+ class ArrowReaderProperties;
38
+ class ArrowWriterProperties;
39
+ class WriterProperties;
40
+
41
+ namespace arrow {
42
+
43
+ /// \defgroup arrow-to-parquet-schema-conversion Functions to convert an Arrow
44
+ /// schema into a Parquet schema.
45
+ ///
46
+ /// @{
47
+
48
+ PARQUET_EXPORT
49
+ ::arrow::Status FieldToNode(const std::shared_ptr<::arrow::Field>& field,
50
+ const WriterProperties& properties,
51
+ const ArrowWriterProperties& arrow_properties,
52
+ schema::NodePtr* out);
53
+
54
+ PARQUET_EXPORT
55
+ ::arrow::Status ToParquetSchema(const ::arrow::Schema* arrow_schema,
56
+ const WriterProperties& properties,
57
+ const ArrowWriterProperties& arrow_properties,
58
+ std::shared_ptr<SchemaDescriptor>* out);
59
+
60
+ PARQUET_EXPORT
61
+ ::arrow::Status ToParquetSchema(const ::arrow::Schema* arrow_schema,
62
+ const WriterProperties& properties,
63
+ std::shared_ptr<SchemaDescriptor>* out);
64
+
65
+ /// @}
66
+
67
+ /// \defgroup parquet-to-arrow-schema-conversion Functions to convert a Parquet
68
+ /// schema into an Arrow schema.
69
+ ///
70
+ /// @{
71
+
72
+ PARQUET_EXPORT
73
+ ::arrow::Status FromParquetSchema(
74
+ const SchemaDescriptor* parquet_schema, const ArrowReaderProperties& properties,
75
+ const std::shared_ptr<const ::arrow::KeyValueMetadata>& key_value_metadata,
76
+ std::shared_ptr<::arrow::Schema>* out);
77
+
78
+ PARQUET_EXPORT
79
+ ::arrow::Status FromParquetSchema(const SchemaDescriptor* parquet_schema,
80
+ const ArrowReaderProperties& properties,
81
+ std::shared_ptr<::arrow::Schema>* out);
82
+
83
+ PARQUET_EXPORT
84
+ ::arrow::Status FromParquetSchema(const SchemaDescriptor* parquet_schema,
85
+ std::shared_ptr<::arrow::Schema>* out);
86
+
87
+ /// @}
88
+
89
+ /// \brief Bridge between an arrow::Field and parquet column indices.
90
struct PARQUET_EXPORT SchemaField {
  // The arrow field for this node of the schema tree.
  std::shared_ptr<::arrow::Field> field;
  // Nested children, if any.
  std::vector<SchemaField> children;

  // Only set for leaf nodes
  int column_index = -1;

  // Parquet level information for this node (type from parquet/level_conversion.h).
  parquet::internal::LevelInfo level_info;

  // A field is a leaf iff it carries a physical parquet column index.
  bool is_leaf() const { return column_index != -1; }
};
101
+
102
+ /// \brief Bridge between a parquet Schema and an arrow Schema.
103
+ ///
104
+ /// Expose parquet columns as a tree structure. Useful to traverse and link
105
+ /// between arrow's Schema and parquet's Schema.
106
+ struct PARQUET_EXPORT SchemaManifest {
107
+ static ::arrow::Status Make(
108
+ const SchemaDescriptor* schema,
109
+ const std::shared_ptr<const ::arrow::KeyValueMetadata>& metadata,
110
+ const ArrowReaderProperties& properties, SchemaManifest* manifest);
111
+
112
+ const SchemaDescriptor* descr;
113
+ std::shared_ptr<::arrow::Schema> origin_schema;
114
+ std::shared_ptr<const ::arrow::KeyValueMetadata> schema_metadata;
115
+ std::vector<SchemaField> schema_fields;
116
+
117
+ std::unordered_map<int, const SchemaField*> column_index_to_field;
118
+ std::unordered_map<const SchemaField*, const SchemaField*> child_to_parent;
119
+
120
+ ::arrow::Status GetColumnField(int column_index, const SchemaField** out) const {
121
+ auto it = column_index_to_field.find(column_index);
122
+ if (it == column_index_to_field.end()) {
123
+ return ::arrow::Status::KeyError("Column index ", column_index,
124
+ " not found in schema manifest, may be malformed");
125
+ }
126
+ *out = it->second;
127
+ return ::arrow::Status::OK();
128
+ }
129
+
130
+ const SchemaField* GetParent(const SchemaField* field) const {
131
+ // Returns nullptr also if not found
132
+ auto it = child_to_parent.find(field);
133
+ if (it == child_to_parent.end()) {
134
+ return NULLPTR;
135
+ }
136
+ return it->second;
137
+ }
138
+
139
+ /// Coalesce a list of field indices (relative to the equivalent arrow::Schema) which
140
+ /// correspond to the column root (first node below the parquet schema's root group) of
141
+ /// each leaf referenced in column_indices.
142
+ ///
143
+ /// For example, for leaves `a.b.c`, `a.b.d.e`, and `i.j.k` (column_indices=[0,1,3])
144
+ /// the roots are `a` and `i` (return=[0,2]).
145
+ ///
146
+ /// root
147
+ /// -- a <------
148
+ /// -- -- b | |
149
+ /// -- -- -- c |
150
+ /// -- -- -- d |
151
+ /// -- -- -- -- e
152
+ /// -- f
153
+ /// -- -- g
154
+ /// -- -- -- h
155
+ /// -- i <---
156
+ /// -- -- j |
157
+ /// -- -- -- k
158
+ ::arrow::Result<std::vector<int>> GetFieldIndices(
159
+ const std::vector<int>& column_indices) const {
160
+ const schema::GroupNode* group = descr->group_node();
161
+ std::unordered_set<int> already_added;
162
+
163
+ std::vector<int> out;
164
+ for (int column_idx : column_indices) {
165
+ if (column_idx < 0 || column_idx >= descr->num_columns()) {
166
+ return ::arrow::Status::IndexError("Column index ", column_idx, " is not valid");
167
+ }
168
+
169
+ auto field_node = descr->GetColumnRoot(column_idx);
170
+ auto field_idx = group->FieldIndex(*field_node);
171
+ if (field_idx == -1) {
172
+ return ::arrow::Status::IndexError("Column index ", column_idx, " is not valid");
173
+ }
174
+
175
+ if (already_added.insert(field_idx).second) {
176
+ out.push_back(field_idx);
177
+ }
178
+ }
179
+ return out;
180
+ }
181
+ };
182
+
183
+ } // namespace arrow
184
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/test_util.h ADDED
@@ -0,0 +1,524 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <limits>
21
+ #include <memory>
22
+ #include <random>
23
+ #include <string>
24
+ #include <utility>
25
+ #include <vector>
26
+
27
+ #include "arrow/array.h"
28
+ #include "arrow/array/builder_binary.h"
29
+ #include "arrow/array/builder_decimal.h"
30
+ #include "arrow/array/builder_primitive.h"
31
+ #include "arrow/testing/gtest_util.h"
32
+ #include "arrow/testing/random.h"
33
+ #include "arrow/type_fwd.h"
34
+ #include "arrow/type_traits.h"
35
+ #include "arrow/util/decimal.h"
36
+ #include "arrow/util/float16.h"
37
+ #include "parquet/column_reader.h"
38
+ #include "parquet/test_util.h"
39
+
40
+ namespace parquet {
41
+
42
+ using internal::RecordReader;
43
+
44
+ namespace arrow {
45
+
46
+ using ::arrow::Array;
47
+ using ::arrow::ChunkedArray;
48
+ using ::arrow::Status;
49
+
50
// Compile-time traits describing a Decimal128 logical type for test
// parameterization: precision is the template argument and the scale is
// always precision - 1.
template <int32_t PRECISION>
struct DecimalWithPrecisionAndScale {
  // Decimal128 supports at most 38 decimal digits.
  static_assert(PRECISION >= 1 && PRECISION <= 38, "Invalid precision value");

  using type = ::arrow::Decimal128Type;
  static constexpr ::arrow::Type::type type_id = ::arrow::Decimal128Type::type_id;
  static constexpr int32_t precision = PRECISION;
  static constexpr int32_t scale = PRECISION - 1;
};
59
+
60
// Compile-time traits describing a Decimal256 logical type for test
// parameterization: precision is the template argument and the scale is
// always precision - 1.
template <int32_t PRECISION>
struct Decimal256WithPrecisionAndScale {
  // Decimal256 supports at most 76 decimal digits.
  static_assert(PRECISION >= 1 && PRECISION <= 76, "Invalid precision value");

  using type = ::arrow::Decimal256Type;
  static constexpr ::arrow::Type::type type_id = ::arrow::Decimal256Type::type_id;
  static constexpr int32_t precision = PRECISION;
  static constexpr int32_t scale = PRECISION - 1;
};
69
+
70
+ template <class ArrowType>
71
+ ::arrow::enable_if_floating_point<ArrowType, Status> NonNullArray(
72
+ size_t size, std::shared_ptr<Array>* out) {
73
+ using c_type = typename ArrowType::c_type;
74
+ std::vector<c_type> values;
75
+ if constexpr (::arrow::is_half_float_type<ArrowType>::value) {
76
+ values.resize(size);
77
+ test::random_float16_numbers(static_cast<int>(size), 0, ::arrow::util::Float16(0.0f),
78
+ ::arrow::util::Float16(1.0f), values.data());
79
+ } else {
80
+ ::arrow::random_real(size, 0, static_cast<c_type>(0), static_cast<c_type>(1),
81
+ &values);
82
+ }
83
+ ::arrow::NumericBuilder<ArrowType> builder;
84
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size()));
85
+ return builder.Finish(out);
86
+ }
87
+
88
+ template <class ArrowType>
89
+ ::arrow::enable_if_integer<ArrowType, Status> NonNullArray(size_t size,
90
+ std::shared_ptr<Array>* out) {
91
+ std::vector<typename ArrowType::c_type> values;
92
+ ::arrow::randint(size, 0, 64, &values);
93
+
94
+ // Passing data type so this will work with TimestampType too
95
+ ::arrow::NumericBuilder<ArrowType> builder(std::make_shared<ArrowType>(),
96
+ ::arrow::default_memory_pool());
97
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size()));
98
+ return builder.Finish(out);
99
+ }
100
+
101
+ template <class ArrowType>
102
+ ::arrow::enable_if_date<ArrowType, Status> NonNullArray(size_t size,
103
+ std::shared_ptr<Array>* out) {
104
+ std::vector<typename ArrowType::c_type> values;
105
+ ::arrow::randint(size, 0, 24, &values);
106
+ for (size_t i = 0; i < size; i++) {
107
+ values[i] *= 86400000;
108
+ }
109
+
110
+ // Passing data type so this will work with TimestampType too
111
+ ::arrow::NumericBuilder<ArrowType> builder(std::make_shared<ArrowType>(),
112
+ ::arrow::default_memory_pool());
113
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size()));
114
+ return builder.Finish(out);
115
+ }
116
+
117
+ template <class ArrowType>
118
+ ::arrow::enable_if_base_binary<ArrowType, Status> NonNullArray(
119
+ size_t size, std::shared_ptr<Array>* out) {
120
+ using BuilderType = typename ::arrow::TypeTraits<ArrowType>::BuilderType;
121
+ BuilderType builder;
122
+ for (size_t i = 0; i < size; i++) {
123
+ RETURN_NOT_OK(builder.Append("test-string"));
124
+ }
125
+ return builder.Finish(out);
126
+ }
127
+
128
+ template <typename ArrowType>
129
+ ::arrow::enable_if_fixed_size_binary<ArrowType, Status> NonNullArray(
130
+ size_t size, std::shared_ptr<Array>* out) {
131
+ using BuilderType = typename ::arrow::TypeTraits<ArrowType>::BuilderType;
132
+ // set byte_width to the length of "fixed": 5
133
+ // todo: find a way to generate test data with more diversity.
134
+ BuilderType builder(::arrow::fixed_size_binary(5));
135
+ for (size_t i = 0; i < size; i++) {
136
+ RETURN_NOT_OK(builder.Append("fixed"));
137
+ }
138
+ return builder.Finish(out);
139
+ }
140
+
141
// Fill `out` with the raw bytes of `n` random decimal values of the given
// precision (scale 0). byte_width selects the physical width: 16 ->
// Decimal128, otherwise Decimal256.
template <int32_t byte_width>
static void random_decimals(int64_t n, uint32_t seed, int32_t precision, uint8_t* out) {
  auto gen = ::arrow::random::RandomArrayGenerator(seed);
  std::shared_ptr<Array> decimals;
  if constexpr (byte_width == 16) {
    decimals = gen.Decimal128(::arrow::decimal128(precision, 0), n);
  } else {
    decimals = gen.Decimal256(::arrow::decimal256(precision, 0), n);
  }
  // Copy the fixed-width decimal representations straight out of the values
  // buffer (buffer index 1 of the ArrayData).
  std::memcpy(out, decimals->data()->GetValues<uint8_t>(1, 0), byte_width * n);
}
152
+
153
// Build a fully non-null Decimal128 array of `size` random values using the
// precision/scale encoded in the DecimalWithPrecisionAndScale traits type.
template <typename ArrowType, int32_t precision = ArrowType::precision>
::arrow::enable_if_t<
    std::is_same<ArrowType, DecimalWithPrecisionAndScale<precision>>::value, Status>
NonNullArray(size_t size, std::shared_ptr<Array>* out) {
  constexpr int32_t kDecimalPrecision = precision;
  constexpr int32_t kDecimalScale = DecimalWithPrecisionAndScale<precision>::scale;

  const auto type = ::arrow::decimal(kDecimalPrecision, kDecimalScale);
  ::arrow::Decimal128Builder builder(type);
  const int32_t byte_width =
      static_cast<const ::arrow::Decimal128Type&>(*type).byte_width();

  constexpr int32_t seed = 0;

  // Generate the raw decimal bytes into a scratch buffer, then append them in
  // one bulk call.
  ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width));
  random_decimals<::arrow::Decimal128Type::kByteWidth>(size, seed, kDecimalPrecision,
                                                       out_buf->mutable_data());

  RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size));
  return builder.Finish(out);
}
174
+
175
// Build a fully non-null Decimal256 array of `size` random values using the
// precision/scale encoded in the Decimal256WithPrecisionAndScale traits type.
template <typename ArrowType, int32_t precision = ArrowType::precision>
::arrow::enable_if_t<
    std::is_same<ArrowType, Decimal256WithPrecisionAndScale<precision>>::value, Status>
NonNullArray(size_t size, std::shared_ptr<Array>* out) {
  constexpr int32_t kDecimalPrecision = precision;
  constexpr int32_t kDecimalScale = Decimal256WithPrecisionAndScale<precision>::scale;

  const auto type = ::arrow::decimal256(kDecimalPrecision, kDecimalScale);
  ::arrow::Decimal256Builder builder(type);
  const int32_t byte_width =
      static_cast<const ::arrow::Decimal256Type&>(*type).byte_width();

  constexpr int32_t seed = 0;

  // Generate the raw decimal bytes into a scratch buffer, then append them in
  // one bulk call.
  ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width));
  random_decimals<::arrow::Decimal256Type::kByteWidth>(size, seed, kDecimalPrecision,
                                                       out_buf->mutable_data());

  RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size));
  return builder.Finish(out);
}
196
+
197
+ template <class ArrowType>
198
+ ::arrow::enable_if_boolean<ArrowType, Status> NonNullArray(size_t size,
199
+ std::shared_ptr<Array>* out) {
200
+ std::vector<uint8_t> values;
201
+ ::arrow::randint(size, 0, 1, &values);
202
+ ::arrow::BooleanBuilder builder;
203
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size()));
204
+ return builder.Finish(out);
205
+ }
206
+
207
// Build a floating-point array with nulls at the even indices
// 0, 2, ..., 2*(num_nulls-1). This helper function only supports (size/2)
// nulls — larger num_nulls would index past valid_bytes.
template <typename ArrowType>
::arrow::enable_if_floating_point<ArrowType, Status> NullableArray(
    size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr<Array>* out) {
  using c_type = typename ArrowType::c_type;
  std::vector<c_type> values;
  if constexpr (::arrow::is_half_float_type<ArrowType>::value) {
    values.resize(size);
    // NOTE: the half-float path ignores `seed` and always uses seed 0.
    test::random_float16_numbers(static_cast<int>(size), 0, ::arrow::util::Float16(-1e4f),
                                 ::arrow::util::Float16(1e4f), values.data());
  } else {
    ::arrow::random_real(size, seed, static_cast<c_type>(-1e10),
                         static_cast<c_type>(1e10), &values);
  }
  std::vector<uint8_t> valid_bytes(size, 1);

  // Mark every other slot (even indices) as null.
  for (size_t i = 0; i < num_nulls; i++) {
    valid_bytes[i * 2] = 0;
  }

  ::arrow::NumericBuilder<ArrowType> builder;
  RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data()));
  return builder.Finish(out);
}
231
+
232
// Build an integer array with nulls at the even indices
// 0, 2, ..., 2*(num_nulls-1). This helper function only supports (size/2)
// nulls.
template <typename ArrowType>
::arrow::enable_if_integer<ArrowType, Status> NullableArray(size_t size, size_t num_nulls,
                                                            uint32_t seed,
                                                            std::shared_ptr<Array>* out) {
  std::vector<typename ArrowType::c_type> values;

  // Seed is random in Arrow right now
  (void)seed;
  ::arrow::randint(size, 0, 64, &values);
  std::vector<uint8_t> valid_bytes(size, 1);

  // Mark every other slot (even indices) as null.
  for (size_t i = 0; i < num_nulls; i++) {
    valid_bytes[i * 2] = 0;
  }

  // Passing data type so this will work with TimestampType too
  ::arrow::NumericBuilder<ArrowType> builder(std::make_shared<ArrowType>(),
                                             ::arrow::default_memory_pool());
  RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data()));
  return builder.Finish(out);
}
254
+
255
// Build a date array (random day counts scaled to milliseconds) with nulls at
// the even indices 0, 2, ..., 2*(num_nulls-1). Supports at most (size/2)
// nulls.
template <typename ArrowType>
::arrow::enable_if_date<ArrowType, Status> NullableArray(size_t size, size_t num_nulls,
                                                         uint32_t seed,
                                                         std::shared_ptr<Array>* out) {
  std::vector<typename ArrowType::c_type> values;

  // Seed is random in Arrow right now
  (void)seed;
  ::arrow::randint(size, 0, 24, &values);
  // Scale day counts to milliseconds (86400000 ms per day).
  for (size_t i = 0; i < size; i++) {
    values[i] *= 86400000;
  }
  std::vector<uint8_t> valid_bytes(size, 1);

  // Mark every other slot (even indices) as null.
  for (size_t i = 0; i < num_nulls; i++) {
    valid_bytes[i * 2] = 0;
  }

  // Passing data type so this will work with TimestampType too
  ::arrow::NumericBuilder<ArrowType> builder(std::make_shared<ArrowType>(),
                                             ::arrow::default_memory_pool());
  RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data()));
  return builder.Finish(out);
}
279
+
280
// Build a binary/string array of 10-byte random values with nulls at the even
// indices 0, 2, ..., 2*(num_nulls-1). This helper function only supports
// (size/2) nulls yet.
template <typename ArrowType>
::arrow::enable_if_base_binary<ArrowType, Status> NullableArray(
    size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr<::arrow::Array>* out) {
  std::vector<uint8_t> valid_bytes(size, 1);

  // Mark every other slot (even indices) as null.
  for (size_t i = 0; i < num_nulls; i++) {
    valid_bytes[i * 2] = 0;
  }

  using BuilderType = typename ::arrow::TypeTraits<ArrowType>::BuilderType;
  BuilderType builder;

  const int kBufferSize = 10;
  uint8_t buffer[kBufferSize];
  for (size_t i = 0; i < size; i++) {
    if (!valid_bytes[i]) {
      RETURN_NOT_OK(builder.AppendNull());
    } else {
      // Derive a per-slot seed so each value is distinct but reproducible.
      ::arrow::random_bytes(kBufferSize, seed + static_cast<uint32_t>(i), buffer);
      if (ArrowType::is_utf8) {
        // Trivially force data to be valid UTF8 by making it all ASCII
        for (auto& byte : buffer) {
          byte &= 0x7f;
        }
      }
      RETURN_NOT_OK(builder.Append(buffer, kBufferSize));
    }
  }
  return builder.Finish(out);
}
311
+
312
// Build a fixed-size-binary array (byte width 10) of random values with nulls
// at the even indices 0, 2, ..., 2*(num_nulls-1). This helper function only
// supports (size/2) nulls yet, same as NullableArray<String|Binary>(..).
template <typename ArrowType>
::arrow::enable_if_fixed_size_binary<ArrowType, Status> NullableArray(
    size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr<::arrow::Array>* out) {
  std::vector<uint8_t> valid_bytes(size, 1);

  // Mark every other slot (even indices) as null.
  for (size_t i = 0; i < num_nulls; i++) {
    valid_bytes[i * 2] = 0;
  }

  using BuilderType = typename ::arrow::TypeTraits<ArrowType>::BuilderType;
  const int byte_width = 10;
  BuilderType builder(::arrow::fixed_size_binary(byte_width));

  const int kBufferSize = byte_width;
  uint8_t buffer[kBufferSize];
  for (size_t i = 0; i < size; i++) {
    if (!valid_bytes[i]) {
      RETURN_NOT_OK(builder.AppendNull());
    } else {
      // Derive a per-slot seed so each value is distinct but reproducible.
      ::arrow::random_bytes(kBufferSize, seed + static_cast<uint32_t>(i), buffer);
      RETURN_NOT_OK(builder.Append(buffer));
    }
  }
  return builder.Finish(out);
}
339
+
340
// Build a Decimal128 array of random values with nulls at the even indices
// 0, 2, ..., 2*(num_nulls-1). Supports at most (size/2) nulls.
template <typename ArrowType, int32_t precision = ArrowType::precision>
::arrow::enable_if_t<
    std::is_same<ArrowType, DecimalWithPrecisionAndScale<precision>>::value, Status>
NullableArray(size_t size, size_t num_nulls, uint32_t seed,
              std::shared_ptr<::arrow::Array>* out) {
  std::vector<uint8_t> valid_bytes(size, '\1');

  // Mark every other slot (even indices) as null.
  for (size_t i = 0; i < num_nulls; ++i) {
    valid_bytes[i * 2] = '\0';
  }

  constexpr int32_t kDecimalPrecision = precision;
  constexpr int32_t kDecimalScale = DecimalWithPrecisionAndScale<precision>::scale;
  const auto type = ::arrow::decimal(kDecimalPrecision, kDecimalScale);
  const int32_t byte_width =
      static_cast<const ::arrow::Decimal128Type&>(*type).byte_width();

  ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width));

  random_decimals<::arrow::Decimal128Type::kByteWidth>(size, seed, precision,
                                                       out_buf->mutable_data());

  ::arrow::Decimal128Builder builder(type);
  RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size, valid_bytes.data()));
  return builder.Finish(out);
}
366
+
367
// Build a Decimal256 array of random values with nulls at the even indices
// 0, 2, ..., 2*(num_nulls-1). Supports at most (size/2) nulls.
template <typename ArrowType, int32_t precision = ArrowType::precision>
::arrow::enable_if_t<
    std::is_same<ArrowType, Decimal256WithPrecisionAndScale<precision>>::value, Status>
NullableArray(size_t size, size_t num_nulls, uint32_t seed,
              std::shared_ptr<::arrow::Array>* out) {
  std::vector<uint8_t> valid_bytes(size, '\1');

  // Mark every other slot (even indices) as null.
  for (size_t i = 0; i < num_nulls; ++i) {
    valid_bytes[i * 2] = '\0';
  }

  constexpr int32_t kDecimalPrecision = precision;
  constexpr int32_t kDecimalScale = Decimal256WithPrecisionAndScale<precision>::scale;
  const auto type = ::arrow::decimal256(kDecimalPrecision, kDecimalScale);
  const int32_t byte_width =
      static_cast<const ::arrow::Decimal256Type&>(*type).byte_width();

  ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width));

  random_decimals<::arrow::Decimal256Type::kByteWidth>(size, seed, precision,
                                                       out_buf->mutable_data());

  ::arrow::Decimal256Builder builder(type);
  RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size, valid_bytes.data()));
  return builder.Finish(out);
}
393
+
394
// Build a boolean array of random values with nulls at the even indices
// 0, 2, ..., 2*(num_nulls-1). This helper function only supports (size/2)
// nulls yet.
template <class ArrowType>
::arrow::enable_if_boolean<ArrowType, Status> NullableArray(size_t size, size_t num_nulls,
                                                            uint32_t seed,
                                                            std::shared_ptr<Array>* out) {
  std::vector<uint8_t> values;

  // Seed is random in Arrow right now
  (void)seed;

  ::arrow::randint(size, 0, 1, &values);
  std::vector<uint8_t> valid_bytes(size, 1);

  // Mark every other slot (even indices) as null.
  for (size_t i = 0; i < num_nulls; i++) {
    valid_bytes[i * 2] = 0;
  }

  ::arrow::BooleanBuilder builder;
  RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data()));
  return builder.Finish(out);
}
415
+
416
/// Wrap an Array into a ListArray by splitting it up into size lists.
///
/// Null lists are placed at even indices 0, 2, ..., and the list at index 1
/// is always empty; the remaining values are split evenly across the non-null
/// lists. This helper function only supports (size/2) nulls.
///
/// NOTE(review): if size - null_count - 1 == 0 the division below is a
/// divide-by-zero — callers presumably never pass such inputs; confirm.
Status MakeListArray(const std::shared_ptr<Array>& values, int64_t size,
                     int64_t null_count, const std::string& item_name,
                     bool nullable_values, std::shared_ptr<::arrow::ListArray>* out) {
  // We always include an empty list
  int64_t non_null_entries = size - null_count - 1;
  int64_t length_per_entry = values->length() / non_null_entries;

  // Offsets buffer: size + 1 int32 entries (Arrow list layout).
  auto offsets = AllocateBuffer();
  RETURN_NOT_OK(offsets->Resize((size + 1) * sizeof(int32_t)));
  int32_t* offsets_ptr = reinterpret_cast<int32_t*>(offsets->mutable_data());

  // Validity bitmap, initially all-null; bits are set below for valid lists.
  auto null_bitmap = AllocateBuffer();
  int64_t bitmap_size = ::arrow::bit_util::BytesForBits(size);
  RETURN_NOT_OK(null_bitmap->Resize(bitmap_size));
  uint8_t* null_bitmap_ptr = null_bitmap->mutable_data();
  memset(null_bitmap_ptr, 0, bitmap_size);

  int32_t current_offset = 0;
  for (int64_t i = 0; i < size; i++) {
    offsets_ptr[i] = current_offset;
    // Even indices below 2 * null_count are the null lists.
    if (!(((i % 2) == 0) && ((i / 2) < null_count))) {
      // Non-null list (list with index 1 is always empty).
      ::arrow::bit_util::SetBit(null_bitmap_ptr, i);
      if (i != 1) {
        current_offset += static_cast<int32_t>(length_per_entry);
      }
    }
  }
  // Final offset closes the last list at the end of the values array.
  offsets_ptr[size] = static_cast<int32_t>(values->length());

  auto value_field = ::arrow::field(item_name, values->type(), nullable_values);
  *out = std::make_shared<::arrow::ListArray>(::arrow::list(value_field), size, offsets,
                                              values, null_bitmap, null_count);

  return Status::OK();
}
455
+
456
// Make an array containing only empty lists, with a null values array.
// All offsets are zero, so every list is empty, and the child (values) array
// has length 0 with no backing buffers.
Status MakeEmptyListsArray(int64_t size, std::shared_ptr<Array>* out_array) {
  // Allocate an offsets buffer containing only zeroes
  const int64_t offsets_nbytes = (size + 1) * sizeof(int32_t);
  ARROW_ASSIGN_OR_RAISE(auto offsets_buffer, ::arrow::AllocateBuffer(offsets_nbytes));
  memset(offsets_buffer->mutable_data(), 0, offsets_nbytes);

  auto value_field =
      ::arrow::field("item", ::arrow::float64(), false /* nullable_values */);
  auto list_type = ::arrow::list(value_field);

  // Zero-length child array: no validity bitmap, no values buffer.
  std::vector<std::shared_ptr<Buffer>> child_buffers = {nullptr /* null bitmap */,
                                                        nullptr /* values */};
  auto child_data =
      ::arrow::ArrayData::Make(value_field->type(), 0, std::move(child_buffers));

  // Top-level list array: no validity bitmap (no null lists), plus offsets.
  std::vector<std::shared_ptr<Buffer>> buffers = {nullptr /* bitmap */,
                                                  std::move(offsets_buffer)};
  auto array_data = ::arrow::ArrayData::Make(list_type, size, std::move(buffers));
  array_data->child_data.push_back(child_data);

  *out_array = ::arrow::MakeArray(array_data);
  return Status::OK();
}
480
+
481
+ std::shared_ptr<::arrow::Table> MakeSimpleTable(
482
+ const std::shared_ptr<ChunkedArray>& values, bool nullable) {
483
+ auto schema = ::arrow::schema({::arrow::field("col", values->type(), nullable)});
484
+ return ::arrow::Table::Make(schema, {values});
485
+ }
486
+
487
+ std::shared_ptr<::arrow::Table> MakeSimpleTable(const std::shared_ptr<Array>& values,
488
+ bool nullable) {
489
+ auto carr = std::make_shared<::arrow::ChunkedArray>(values);
490
+ return MakeSimpleTable(carr, nullable);
491
+ }
492
+
493
// Compare a primitive array element-wise against a raw buffer of expected
// values of type T.
// NOTE(review): reads the values buffer from byte 0 — presumably assumes the
// array has no offset and no nulls; confirm against callers.
template <typename T>
void ExpectArray(T* expected, Array* result) {
  auto p_array = static_cast<::arrow::PrimitiveArray*>(result);
  for (int i = 0; i < result->length(); i++) {
    EXPECT_EQ(expected[i], reinterpret_cast<const T*>(p_array->values()->data())[i]);
  }
}
500
+
501
// Compare a primitive array element-wise against a type-erased buffer of
// expected values, reinterpreted as ArrowType::c_type.
// NOTE(review): like ExpectArray, this assumes a zero-offset array without
// nulls — confirm against callers.
template <typename ArrowType>
void ExpectArrayT(void* expected, Array* result) {
  ::arrow::PrimitiveArray* p_array = static_cast<::arrow::PrimitiveArray*>(result);
  for (int64_t i = 0; i < result->length(); i++) {
    EXPECT_EQ(reinterpret_cast<typename ArrowType::c_type*>(expected)[i],
              reinterpret_cast<const typename ArrowType::c_type*>(
                  p_array->values()->data())[i]);
  }
}
510
+
511
// Boolean specialization: booleans are bit-packed, so instead of comparing
// raw buffer bytes, rebuild an expected BooleanArray from the byte-per-value
// input and compare arrays for equality.
template <>
void ExpectArrayT<::arrow::BooleanType>(void* expected, Array* result) {
  ::arrow::BooleanBuilder builder;
  ARROW_EXPECT_OK(
      builder.AppendValues(reinterpret_cast<uint8_t*>(expected), result->length()));

  std::shared_ptr<Array> expected_array;
  ARROW_EXPECT_OK(builder.Finish(&expected_array));
  EXPECT_TRUE(result->Equals(*expected_array));
}
521
+
522
+ } // namespace arrow
523
+
524
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/writer.h ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+
23
+ #include "parquet/platform.h"
24
+ #include "parquet/properties.h"
25
+
26
+ namespace arrow {
27
+
28
+ class Array;
29
+ class ChunkedArray;
30
+ class RecordBatch;
31
+ class Schema;
32
+ class Table;
33
+
34
+ } // namespace arrow
35
+
36
+ namespace parquet {
37
+
38
+ class FileMetaData;
39
+ class ParquetFileWriter;
40
+
41
+ namespace arrow {
42
+
43
/// \brief Iterative FileWriter class
///
/// For basic usage, can write a Table at a time, creating one or more row
/// groups per write call.
///
/// For advanced usage, can write column-by-column: Start a new RowGroup or
/// Chunk with NewRowGroup, then write column-by-column the whole column chunk.
///
/// If PARQUET:field_id is present as a metadata key on a field, and the corresponding
/// value is a nonnegative integer, then it will be used as the field_id in the parquet
/// file.
class PARQUET_EXPORT FileWriter {
 public:
  /// \brief Build a FileWriter around an already-constructed low-level
  /// ParquetFileWriter.
  static ::arrow::Status Make(MemoryPool* pool, std::unique_ptr<ParquetFileWriter> writer,
                              std::shared_ptr<::arrow::Schema> schema,
                              std::shared_ptr<ArrowWriterProperties> arrow_properties,
                              std::unique_ptr<FileWriter>* out);

  /// \brief Try to create an Arrow to Parquet file writer.
  ///
  /// \param schema schema of data that will be passed.
  /// \param pool memory pool to use.
  /// \param sink output stream to write Parquet data.
  /// \param properties general Parquet writer properties.
  /// \param arrow_properties Arrow-specific writer properties.
  ///
  /// \since 11.0.0
  static ::arrow::Result<std::unique_ptr<FileWriter>> Open(
      const ::arrow::Schema& schema, MemoryPool* pool,
      std::shared_ptr<::arrow::io::OutputStream> sink,
      std::shared_ptr<WriterProperties> properties = default_writer_properties(),
      std::shared_ptr<ArrowWriterProperties> arrow_properties =
          default_arrow_writer_properties());

  /// \brief Deprecated out-parameter variant of Open (default Arrow properties).
  ARROW_DEPRECATED("Deprecated in 11.0.0. Use Result-returning variants instead.")
  static ::arrow::Status Open(const ::arrow::Schema& schema, MemoryPool* pool,
                              std::shared_ptr<::arrow::io::OutputStream> sink,
                              std::shared_ptr<WriterProperties> properties,
                              std::unique_ptr<FileWriter>* writer);
  /// \brief Deprecated out-parameter variant of Open.
  ARROW_DEPRECATED("Deprecated in 11.0.0. Use Result-returning variants instead.")
  static ::arrow::Status Open(const ::arrow::Schema& schema, MemoryPool* pool,
                              std::shared_ptr<::arrow::io::OutputStream> sink,
                              std::shared_ptr<WriterProperties> properties,
                              std::shared_ptr<ArrowWriterProperties> arrow_properties,
                              std::unique_ptr<FileWriter>* writer);

  /// Return the Arrow schema to be written to.
  virtual std::shared_ptr<::arrow::Schema> schema() const = 0;

  /// \brief Write a Table to Parquet.
  ///
  /// \param table Arrow table to write.
  /// \param chunk_size maximum number of rows to write per row group.
  virtual ::arrow::Status WriteTable(
      const ::arrow::Table& table, int64_t chunk_size = DEFAULT_MAX_ROW_GROUP_LENGTH) = 0;

  /// \brief Start a new row group.
  ///
  /// Returns an error if not all columns have been written.
  ///
  /// \param chunk_size the number of rows in the next row group.
  virtual ::arrow::Status NewRowGroup(int64_t chunk_size) = 0;

  /// \brief Write ColumnChunk in row group using an array.
  virtual ::arrow::Status WriteColumnChunk(const ::arrow::Array& data) = 0;

  /// \brief Write ColumnChunk in row group using slice of a ChunkedArray
  virtual ::arrow::Status WriteColumnChunk(
      const std::shared_ptr<::arrow::ChunkedArray>& data, int64_t offset,
      int64_t size) = 0;

  /// \brief Write ColumnChunk in a row group using a ChunkedArray
  virtual ::arrow::Status WriteColumnChunk(
      const std::shared_ptr<::arrow::ChunkedArray>& data) = 0;

  /// \brief Start a new buffered row group.
  ///
  /// Returns an error if not all columns have been written.
  virtual ::arrow::Status NewBufferedRowGroup() = 0;

  /// \brief Write a RecordBatch into the buffered row group.
  ///
  /// Multiple RecordBatches can be written into the same row group
  /// through this method.
  ///
  /// WriterProperties.max_row_group_length() is respected and a new
  /// row group will be created if the current row group exceeds the
  /// limit.
  ///
  /// Batches get flushed to the output stream once NewBufferedRowGroup()
  /// or Close() is called.
  ///
  /// WARNING: If you are writing multiple files in parallel in the same
  /// executor, deadlock may occur if ArrowWriterProperties::use_threads
  /// is set to true to write columns in parallel. Please disable use_threads
  /// option in this case.
  virtual ::arrow::Status WriteRecordBatch(const ::arrow::RecordBatch& batch) = 0;

  /// \brief Write the footer and close the file.
  virtual ::arrow::Status Close() = 0;
  virtual ~FileWriter();

  /// \brief Return the memory pool used by this writer.
  virtual MemoryPool* memory_pool() const = 0;
  /// \brief Return the file metadata, only available after calling Close().
  virtual const std::shared_ptr<FileMetaData> metadata() const = 0;
};
149
+
150
+ /// \brief Write Parquet file metadata only to indicated Arrow OutputStream
151
+ PARQUET_EXPORT
152
+ ::arrow::Status WriteFileMetaData(const FileMetaData& file_metadata,
153
+ ::arrow::io::OutputStream* sink);
154
+
155
+ /// \brief Write metadata-only Parquet file to indicated Arrow OutputStream
156
+ PARQUET_EXPORT
157
+ ::arrow::Status WriteMetaDataFile(const FileMetaData& file_metadata,
158
+ ::arrow::io::OutputStream* sink);
159
+
160
+ /// \brief Write a Table to Parquet.
161
+ ///
162
+ /// This writes one table in a single shot. To write a Parquet file with
163
+ /// multiple tables iteratively, see parquet::arrow::FileWriter.
164
+ ///
165
+ /// \param table Table to write.
166
+ /// \param pool memory pool to use.
167
+ /// \param sink output stream to write Parquet data.
168
+ /// \param chunk_size maximum number of rows to write per row group.
169
+ /// \param properties general Parquet writer properties.
170
+ /// \param arrow_properties Arrow-specific writer properties.
171
+ ::arrow::Status PARQUET_EXPORT
172
+ WriteTable(const ::arrow::Table& table, MemoryPool* pool,
173
+ std::shared_ptr<::arrow::io::OutputStream> sink,
174
+ int64_t chunk_size = DEFAULT_MAX_ROW_GROUP_LENGTH,
175
+ std::shared_ptr<WriterProperties> properties = default_writer_properties(),
176
+ std::shared_ptr<ArrowWriterProperties> arrow_properties =
177
+ default_arrow_writer_properties());
178
+
179
+ } // namespace arrow
180
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/benchmark_util.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <random>
21
+ #include <string>
22
+ #include <vector>
23
+
24
+ #include "parquet/types.h"
25
+
26
+ namespace parquet::benchmark {
27
+
28
+ template <typename T>
29
+ void GenerateBenchmarkData(uint32_t size, uint32_t seed, T* data,
30
+ std::vector<uint8_t>* heap, uint32_t data_string_length);
31
+
32
+ #define _GENERATE_BENCHMARK_DATA_DECL(KLASS) \
33
+ template <> \
34
+ void GenerateBenchmarkData(uint32_t size, uint32_t seed, KLASS* data, \
35
+ std::vector<uint8_t>* heap, uint32_t data_string_length);
36
+
37
+ _GENERATE_BENCHMARK_DATA_DECL(int32_t)
38
+ _GENERATE_BENCHMARK_DATA_DECL(int64_t)
39
+ _GENERATE_BENCHMARK_DATA_DECL(float)
40
+ _GENERATE_BENCHMARK_DATA_DECL(double)
41
+ _GENERATE_BENCHMARK_DATA_DECL(ByteArray)
42
+ _GENERATE_BENCHMARK_DATA_DECL(FLBA)
43
+ _GENERATE_BENCHMARK_DATA_DECL(Int96)
44
+
45
+ #undef _GENERATE_BENCHMARK_DATA_DECL
46
+
47
+ } // namespace parquet::benchmark
venv/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter.h ADDED
@@ -0,0 +1,363 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
#include <cmath>
#include <cstdint>
#include <memory>
#include <optional>

#include "arrow/util/bit_util.h"
#include "arrow/util/logging.h"
#include "parquet/hasher.h"
#include "parquet/platform.h"
#include "parquet/types.h"
29
+
30
+ namespace parquet {
31
+
32
+ // A Bloom filter is a compact structure to indicate whether an item is not in a set or
33
+ // probably in a set. The Bloom filter usually consists of a bit set that represents a
34
+ // set of elements, a hash strategy and a Bloom filter algorithm.
35
class PARQUET_EXPORT BloomFilter {
 public:
  // Maximum Bloom filter size, it sets to HDFS default block size 128MB
  // This value will be reconsidered when implementing Bloom filter producer.
  static constexpr uint32_t kMaximumBloomFilterBytes = 128 * 1024 * 1024;

  /// Determine whether an element exists in the set or not.
  ///
  /// @param hash the hash of the element to probe for.
  /// @return false if the value is definitely not in the set; true means the
  /// value is PROBABLY in the set (false positives are possible, false
  /// negatives are not).
  virtual bool FindHash(uint64_t hash) const = 0;

  /// Insert an element into the set represented by the Bloom filter bitset.
  /// @param hash the hash of the value to insert into the Bloom filter.
  virtual void InsertHash(uint64_t hash) = 0;

  /// Insert elements into the set represented by the Bloom filter bitset.
  /// @param hashes the hash values to insert into the Bloom filter.
  /// @param num_values the number of hash values to insert.
  virtual void InsertHashes(const uint64_t* hashes, int num_values) = 0;

  /// Write this Bloom filter to an output stream. A Bloom filter structure should
  /// include bitset length, hash strategy, algorithm, and bitset.
  ///
  /// @param sink the output stream to write to.
  virtual void WriteTo(ArrowOutputStream* sink) const = 0;

  /// Get the number of bytes of the bitset.
  virtual uint32_t GetBitsetSize() const = 0;

  /// Compute hash for a 32-bit value by using its plain encoding result.
  ///
  /// @param value the value to hash.
  /// @return hash result.
  virtual uint64_t Hash(int32_t value) const = 0;

  /// Compute hash for a 64-bit value by using its plain encoding result.
  ///
  /// @param value the value to hash.
  /// @return hash result.
  virtual uint64_t Hash(int64_t value) const = 0;

  /// Compute hash for a float value by using its plain encoding result.
  ///
  /// @param value the value to hash.
  /// @return hash result.
  virtual uint64_t Hash(float value) const = 0;

  /// Compute hash for a double value by using its plain encoding result.
  ///
  /// @param value the value to hash.
  /// @return hash result.
  virtual uint64_t Hash(double value) const = 0;

  /// Compute hash for an Int96 value by using its plain encoding result.
  ///
  /// @param value the value to hash.
  /// @return hash result.
  virtual uint64_t Hash(const Int96* value) const = 0;

  /// Compute hash for a ByteArray value by using its plain encoding result.
  ///
  /// @param value the value to hash.
  /// @return hash result.
  virtual uint64_t Hash(const ByteArray* value) const = 0;

  /// Compute hash for a fixed-length byte array value by using its plain
  /// encoding result.
  ///
  /// @param value the value address.
  /// @param len the value length.
  /// @return hash result.
  virtual uint64_t Hash(const FLBA* value, uint32_t len) const = 0;

  /// Batch compute hashes for 32-bit values by using their plain encoding result.
  ///
  /// @param values a pointer to the values to hash.
  /// @param num_values the number of values to hash.
  /// @param hashes a pointer to the output hash values, its length should be equal to
  /// num_values.
  virtual void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const = 0;

  /// Batch compute hashes for 64-bit values by using their plain encoding result.
  ///
  /// @param values a pointer to the values to hash.
  /// @param num_values the number of values to hash.
  /// @param hashes a pointer to the output hash values, its length should be equal to
  /// num_values.
  virtual void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const = 0;

  /// Batch compute hashes for float values by using their plain encoding result.
  ///
  /// @param values a pointer to the values to hash.
  /// @param num_values the number of values to hash.
  /// @param hashes a pointer to the output hash values, its length should be equal to
  /// num_values.
  virtual void Hashes(const float* values, int num_values, uint64_t* hashes) const = 0;

  /// Batch compute hashes for double values by using their plain encoding result.
  ///
  /// @param values a pointer to the values to hash.
  /// @param num_values the number of values to hash.
  /// @param hashes a pointer to the output hash values, its length should be equal to
  /// num_values.
  virtual void Hashes(const double* values, int num_values, uint64_t* hashes) const = 0;

  /// Batch compute hashes for Int96 values by using their plain encoding result.
  ///
  /// @param values a pointer to the values to hash.
  /// @param num_values the number of values to hash.
  /// @param hashes a pointer to the output hash values, its length should be equal to
  /// num_values.
  virtual void Hashes(const Int96* values, int num_values, uint64_t* hashes) const = 0;

  /// Batch compute hashes for ByteArray values by using their plain encoding result.
  ///
  /// @param values a pointer to the values to hash.
  /// @param num_values the number of values to hash.
  /// @param hashes a pointer to the output hash values, its length should be equal to
  /// num_values.
  virtual void Hashes(const ByteArray* values, int num_values,
                      uint64_t* hashes) const = 0;

  /// Batch compute hashes for fixed-length byte array values by using their
  /// plain encoding result.
  ///
  /// @param values a pointer to the values to hash.
  /// @param type_len the value length.
  /// @param num_values the number of values to hash.
  /// @param hashes a pointer to the output hash values, its length should be equal to
  /// num_values.
  virtual void Hashes(const FLBA* values, uint32_t type_len, int num_values,
                      uint64_t* hashes) const = 0;

  virtual ~BloomFilter() = default;

 protected:
  // Hash strategy available for Bloom filter.
  enum class HashStrategy : uint32_t { XXHASH = 0 };

  // Bloom filter algorithm.
  enum class Algorithm : uint32_t { BLOCK = 0 };

  // Compression applied to the serialized bitset (only UNCOMPRESSED is defined).
  enum class CompressionStrategy : uint32_t { UNCOMPRESSED = 0 };
};
179
+
180
+ /// The BlockSplitBloomFilter is implemented using block-based Bloom filters from
181
+ /// Putze et al.'s "Cache-,Hash- and Space-Efficient Bloom filters". The basic idea is to
182
+ /// hash the item to a tiny Bloom filter which size fit a single cache line or smaller.
183
+ ///
184
+ /// This implementation sets 8 bits in each tiny Bloom filter. Each tiny Bloom
185
+ /// filter is 32 bytes to take advantage of 32-byte SIMD instructions.
186
+ class PARQUET_EXPORT BlockSplitBloomFilter : public BloomFilter {
187
+ public:
188
+ /// The constructor of BlockSplitBloomFilter. It uses XXH64 as hash function.
189
+ ///
190
+ /// \param pool memory pool to use.
191
+ explicit BlockSplitBloomFilter(
192
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
193
+
194
+ /// Initialize the BlockSplitBloomFilter. The range of num_bytes should be within
195
+ /// [kMinimumBloomFilterBytes, kMaximumBloomFilterBytes], it will be
196
+ /// rounded up/down to lower/upper bound if num_bytes is out of range and also
197
+ /// will be rounded up to a power of 2.
198
+ ///
199
+ /// @param num_bytes The number of bytes to store Bloom filter bitset.
200
+ void Init(uint32_t num_bytes);
201
+
202
+ /// Initialize the BlockSplitBloomFilter. It copies the bitset as underlying
203
+ /// bitset because the given bitset may not satisfy the 32-byte alignment requirement
204
+ /// which may lead to segfault when performing SIMD instructions. It is the caller's
205
+ /// responsibility to free the bitset passed in. This is used when reconstructing
206
+ /// a Bloom filter from a parquet file.
207
+ ///
208
+ /// @param bitset The given bitset to initialize the Bloom filter.
209
+ /// @param num_bytes The number of bytes of given bitset.
210
+ void Init(const uint8_t* bitset, uint32_t num_bytes);
211
+
212
+ /// Minimum Bloom filter size, it sets to 32 bytes to fit a tiny Bloom filter.
213
+ static constexpr uint32_t kMinimumBloomFilterBytes = 32;
214
+
215
+ /// Calculate optimal size according to the number of distinct values and false
216
+ /// positive probability.
217
+ ///
218
+ /// @param ndv The number of distinct values.
219
+ /// @param fpp The false positive probability.
220
+ /// @return it always return a value between kMinimumBloomFilterBytes and
221
+ /// kMaximumBloomFilterBytes, and the return value is always a power of 2
222
+ static uint32_t OptimalNumOfBytes(uint32_t ndv, double fpp) {
223
+ uint32_t optimal_num_of_bits = OptimalNumOfBits(ndv, fpp);
224
+ DCHECK(::arrow::bit_util::IsMultipleOf8(optimal_num_of_bits));
225
+ return optimal_num_of_bits >> 3;
226
+ }
227
+
228
+ /// Calculate optimal size according to the number of distinct values and false
229
+ /// positive probability.
230
+ ///
231
+ /// @param ndv The number of distinct values.
232
+ /// @param fpp The false positive probability.
233
+ /// @return it always return a value between kMinimumBloomFilterBytes * 8 and
234
+ /// kMaximumBloomFilterBytes * 8, and the return value is always a power of 16
235
+ static uint32_t OptimalNumOfBits(uint32_t ndv, double fpp) {
236
+ DCHECK(fpp > 0.0 && fpp < 1.0);
237
+ const double m = -8.0 * ndv / log(1 - pow(fpp, 1.0 / 8));
238
+ uint32_t num_bits;
239
+
240
+ // Handle overflow.
241
+ if (m < 0 || m > kMaximumBloomFilterBytes << 3) {
242
+ num_bits = static_cast<uint32_t>(kMaximumBloomFilterBytes << 3);
243
+ } else {
244
+ num_bits = static_cast<uint32_t>(m);
245
+ }
246
+
247
+ // Round up to lower bound
248
+ if (num_bits < kMinimumBloomFilterBytes << 3) {
249
+ num_bits = kMinimumBloomFilterBytes << 3;
250
+ }
251
+
252
+ // Get next power of 2 if bits is not power of 2.
253
+ if ((num_bits & (num_bits - 1)) != 0) {
254
+ num_bits = static_cast<uint32_t>(::arrow::bit_util::NextPower2(num_bits));
255
+ }
256
+
257
+ // Round down to upper bound
258
+ if (num_bits > kMaximumBloomFilterBytes << 3) {
259
+ num_bits = kMaximumBloomFilterBytes << 3;
260
+ }
261
+
262
+ return num_bits;
263
+ }
264
+
265
+ bool FindHash(uint64_t hash) const override;
266
+ void InsertHash(uint64_t hash) override;
267
+ void InsertHashes(const uint64_t* hashes, int num_values) override;
268
+ void WriteTo(ArrowOutputStream* sink) const override;
269
+ uint32_t GetBitsetSize() const override { return num_bytes_; }
270
+
271
+ uint64_t Hash(int32_t value) const override { return hasher_->Hash(value); }
272
+ uint64_t Hash(int64_t value) const override { return hasher_->Hash(value); }
273
+ uint64_t Hash(float value) const override { return hasher_->Hash(value); }
274
+ uint64_t Hash(double value) const override { return hasher_->Hash(value); }
275
+ uint64_t Hash(const Int96* value) const override { return hasher_->Hash(value); }
276
+ uint64_t Hash(const ByteArray* value) const override { return hasher_->Hash(value); }
277
+ uint64_t Hash(const FLBA* value, uint32_t len) const override {
278
+ return hasher_->Hash(value, len);
279
+ }
280
+
281
+ void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const override {
282
+ hasher_->Hashes(values, num_values, hashes);
283
+ }
284
+ void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const override {
285
+ hasher_->Hashes(values, num_values, hashes);
286
+ }
287
+ void Hashes(const float* values, int num_values, uint64_t* hashes) const override {
288
+ hasher_->Hashes(values, num_values, hashes);
289
+ }
290
+ void Hashes(const double* values, int num_values, uint64_t* hashes) const override {
291
+ hasher_->Hashes(values, num_values, hashes);
292
+ }
293
+ void Hashes(const Int96* values, int num_values, uint64_t* hashes) const override {
294
+ hasher_->Hashes(values, num_values, hashes);
295
+ }
296
+ void Hashes(const ByteArray* values, int num_values, uint64_t* hashes) const override {
297
+ hasher_->Hashes(values, num_values, hashes);
298
+ }
299
+ void Hashes(const FLBA* values, uint32_t type_len, int num_values,
300
+ uint64_t* hashes) const override {
301
+ hasher_->Hashes(values, type_len, num_values, hashes);
302
+ }
303
+
304
+ uint64_t Hash(const int32_t* value) const { return hasher_->Hash(*value); }
305
+ uint64_t Hash(const int64_t* value) const { return hasher_->Hash(*value); }
306
+ uint64_t Hash(const float* value) const { return hasher_->Hash(*value); }
307
+ uint64_t Hash(const double* value) const { return hasher_->Hash(*value); }
308
+
309
+ /// Deserialize the Bloom filter from an input stream. It is used when reconstructing
310
+ /// a Bloom filter from a parquet filter.
311
+ ///
312
+ /// @param properties The parquet reader properties.
313
+ /// @param input_stream The input stream from which to construct the bloom filter.
314
+ /// @param bloom_filter_length The length of the serialized bloom filter including
315
+ /// header.
316
+ /// @return The BlockSplitBloomFilter.
317
+ static BlockSplitBloomFilter Deserialize(
318
+ const ReaderProperties& properties, ArrowInputStream* input_stream,
319
+ std::optional<int64_t> bloom_filter_length = std::nullopt);
320
+
321
+ private:
322
+ inline void InsertHashImpl(uint64_t hash);
323
+
324
+ // Bytes in a tiny Bloom filter block.
325
+ static constexpr int kBytesPerFilterBlock = 32;
326
+
327
+ // The number of bits to be set in each tiny Bloom filter
328
+ static constexpr int kBitsSetPerBlock = 8;
329
+
330
+ // A mask structure used to set bits in each tiny Bloom filter.
331
+ struct BlockMask {
332
+ uint32_t item[kBitsSetPerBlock];
333
+ };
334
+
335
+ // The block-based algorithm needs eight odd SALT values to calculate eight indexes
336
+ // of bit to set, one bit in each 32-bit word.
337
+ static constexpr uint32_t SALT[kBitsSetPerBlock] = {
338
+ 0x47b6137bU, 0x44974d91U, 0x8824ad5bU, 0xa2b7289dU,
339
+ 0x705495c7U, 0x2df1424bU, 0x9efc4947U, 0x5c6bfb31U};
340
+
341
+ // Memory pool to allocate aligned buffer for bitset
342
+ ::arrow::MemoryPool* pool_;
343
+
344
+ // The underlying buffer of bitset.
345
+ std::shared_ptr<Buffer> data_;
346
+
347
+ // The number of bytes of Bloom filter bitset.
348
+ uint32_t num_bytes_;
349
+
350
+ // Hash strategy used in this Bloom filter.
351
+ HashStrategy hash_strategy_;
352
+
353
+ // Algorithm used in this Bloom filter.
354
+ Algorithm algorithm_;
355
+
356
+ // Compression used in this Bloom filter.
357
+ CompressionStrategy compression_strategy_;
358
+
359
+ // The hash pointer points to actual hash class used.
360
+ std::unique_ptr<Hasher> hasher_;
361
+ };
362
+
363
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter_reader.h ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/io/interfaces.h"
21
+ #include "parquet/properties.h"
22
+ #include "parquet/type_fwd.h"
23
+
24
+ namespace parquet {
25
+
26
+ class InternalFileDecryptor;
27
+ class BloomFilter;
28
+
29
/// \brief Interface for reading bloom filters of the column chunks in a single
/// row group.
class PARQUET_EXPORT RowGroupBloomFilterReader {
 public:
  virtual ~RowGroupBloomFilterReader() = default;

  /// \brief Read bloom filter of a column chunk.
  ///
  /// \param[in] i column ordinal of the column chunk.
  /// \returns bloom filter of the column or nullptr if it does not exist.
  /// \throws ParquetException if the index is out of bound, or reading the
  /// bloom filter failed.
  virtual std::unique_ptr<BloomFilter> GetColumnBloomFilter(int i) = 0;
};
41
+
42
+ /// \brief Interface for reading the bloom filter for a Parquet file.
43
class PARQUET_EXPORT BloomFilterReader {
 public:
  virtual ~BloomFilterReader() = default;

  /// \brief Create a BloomFilterReader instance.
  /// \returns a BloomFilterReader instance.
  /// WARNING: The returned BloomFilterReader holds references to all the input
  /// parameters, so it must not outlive any of them. Usually these input
  /// parameters come from the same ParquetFileReader object, so it must not
  /// outlive the reader that creates this BloomFilterReader.
  static std::unique_ptr<BloomFilterReader> Make(
      std::shared_ptr<::arrow::io::RandomAccessFile> input,
      std::shared_ptr<FileMetaData> file_metadata, const ReaderProperties& properties,
      std::shared_ptr<InternalFileDecryptor> file_decryptor = NULLPTR);

  /// \brief Get the bloom filter reader of a specific row group.
  /// \param[in] i row group ordinal to get bloom filter reader.
  /// \returns RowGroupBloomFilterReader of the specified row group. A nullptr may or may
  ///          not be returned if the bloom filter for the row group is unavailable. It
  ///          is the caller's responsibility to check the return value of follow-up calls
  ///          to the RowGroupBloomFilterReader.
  /// \throws ParquetException if the index is out of bound.
  virtual std::shared_ptr<RowGroupBloomFilterReader> RowGroup(int i) = 0;
};
67
+
68
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/column_page.h ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This module defines an abstract interface for iterating through pages in a
19
+ // Parquet column chunk within a row group. It could be extended in the future
20
+ // to iterate through all data pages in all chunks in a file.
21
+
22
+ #pragma once
23
+
24
+ #include <cstdint>
25
+ #include <memory>
26
+ #include <optional>
27
+ #include <string>
28
+
29
+ #include "parquet/statistics.h"
30
+ #include "parquet/types.h"
31
+
32
+ namespace parquet {
33
+
34
+ // TODO: Parallel processing is not yet safe because of memory-ownership
35
+ // semantics (the PageReader may or may not own the memory referenced by a
36
+ // page)
37
+ //
38
+ // TODO(wesm): In the future Parquet implementations may store the crc code
39
+ // in format::PageHeader. parquet-mr currently does not, so we also skip it
40
+ // here, both on the read and write path
41
/// \brief Base class for all Parquet page types: owns a reference to the page
/// buffer and records the page type.
class Page {
 public:
  Page(const std::shared_ptr<Buffer>& buffer, PageType::type type)
      : buffer_(buffer), type_(type) {}

  /// The type of this page (data page, dictionary page, ...).
  PageType::type type() const { return type_; }

  /// The buffer backing this page's data.
  std::shared_ptr<Buffer> buffer() const { return buffer_; }

  // @returns: a pointer to the page's data
  const uint8_t* data() const { return buffer_->data(); }

  // @returns: the total size in bytes of the page's data buffer
  // (narrowed from the buffer's 64-bit size)
  int32_t size() const { return static_cast<int32_t>(buffer_->size()); }

 private:
  std::shared_ptr<Buffer> buffer_;
  PageType::type type_;
};
60
+
61
+ /// \brief Base type for DataPageV1 and DataPageV2 including common attributes
62
/// \brief Base type for DataPageV1 and DataPageV2 including common attributes
class DataPage : public Page {
 public:
  /// Number of values stored in the page.
  int32_t num_values() const { return num_values_; }
  /// Encoding used for the values of the page.
  Encoding::type encoding() const { return encoding_; }
  /// Size of the page data before compression, in bytes.
  int64_t uncompressed_size() const { return uncompressed_size_; }
  /// Encoded statistics carried in the page header.
  const EncodedStatistics& statistics() const { return statistics_; }
  /// Return the row ordinal within the row group to the first row in the data page.
  /// Currently it is only present from data pages created by ColumnWriter in order
  /// to collect page index.
  std::optional<int64_t> first_row_index() const { return first_row_index_; }

  virtual ~DataPage() = default;

 protected:
  // Protected: DataPage is only constructible through DataPageV1/DataPageV2.
  DataPage(PageType::type type, const std::shared_ptr<Buffer>& buffer, int32_t num_values,
           Encoding::type encoding, int64_t uncompressed_size,
           const EncodedStatistics& statistics = EncodedStatistics(),
           std::optional<int64_t> first_row_index = std::nullopt)
      : Page(buffer, type),
        num_values_(num_values),
        encoding_(encoding),
        uncompressed_size_(uncompressed_size),
        statistics_(statistics),
        first_row_index_(std::move(first_row_index)) {}

  int32_t num_values_;
  Encoding::type encoding_;
  int64_t uncompressed_size_;
  EncodedStatistics statistics_;
  /// Row ordinal within the row group to the first row in the data page.
  std::optional<int64_t> first_row_index_;
};
94
+
95
/// \brief V1 data page: values plus definition and repetition levels, each
/// with its own encoding.
class DataPageV1 : public DataPage {
 public:
  DataPageV1(const std::shared_ptr<Buffer>& buffer, int32_t num_values,
             Encoding::type encoding, Encoding::type definition_level_encoding,
             Encoding::type repetition_level_encoding, int64_t uncompressed_size,
             const EncodedStatistics& statistics = EncodedStatistics(),
             std::optional<int64_t> first_row_index = std::nullopt)
      : DataPage(PageType::DATA_PAGE, buffer, num_values, encoding, uncompressed_size,
                 statistics, std::move(first_row_index)),
        definition_level_encoding_(definition_level_encoding),
        repetition_level_encoding_(repetition_level_encoding) {}

  /// Encoding used for the repetition levels of this page.
  Encoding::type repetition_level_encoding() const { return repetition_level_encoding_; }

  /// Encoding used for the definition levels of this page.
  Encoding::type definition_level_encoding() const { return definition_level_encoding_; }

 private:
  Encoding::type definition_level_encoding_;
  Encoding::type repetition_level_encoding_;
};
115
+
116
/// \brief V2 data page: records explicit byte lengths for the level sections,
/// per-page null/row counts, and whether the values section is compressed.
class DataPageV2 : public DataPage {
 public:
  DataPageV2(const std::shared_ptr<Buffer>& buffer, int32_t num_values, int32_t num_nulls,
             int32_t num_rows, Encoding::type encoding,
             int32_t definition_levels_byte_length, int32_t repetition_levels_byte_length,
             int64_t uncompressed_size, bool is_compressed = false,
             const EncodedStatistics& statistics = EncodedStatistics(),
             std::optional<int64_t> first_row_index = std::nullopt)
      : DataPage(PageType::DATA_PAGE_V2, buffer, num_values, encoding, uncompressed_size,
                 statistics, std::move(first_row_index)),
        num_nulls_(num_nulls),
        num_rows_(num_rows),
        definition_levels_byte_length_(definition_levels_byte_length),
        repetition_levels_byte_length_(repetition_levels_byte_length),
        is_compressed_(is_compressed) {}

  /// Number of null values in the page.
  int32_t num_nulls() const { return num_nulls_; }

  /// Number of rows in the page.
  int32_t num_rows() const { return num_rows_; }

  /// Byte length of the definition levels section.
  int32_t definition_levels_byte_length() const { return definition_levels_byte_length_; }

  /// Byte length of the repetition levels section.
  int32_t repetition_levels_byte_length() const { return repetition_levels_byte_length_; }

  /// Whether the page's values section is compressed.
  bool is_compressed() const { return is_compressed_; }

 private:
  int32_t num_nulls_;
  int32_t num_rows_;
  int32_t definition_levels_byte_length_;
  int32_t repetition_levels_byte_length_;
  bool is_compressed_;
};
149
+
150
/// \brief Page holding the dictionary values of a dictionary-encoded column
/// chunk.
class DictionaryPage : public Page {
 public:
  DictionaryPage(const std::shared_ptr<Buffer>& buffer, int32_t num_values,
                 Encoding::type encoding, bool is_sorted = false)
      : Page(buffer, PageType::DICTIONARY_PAGE),
        num_values_(num_values),
        encoding_(encoding),
        is_sorted_(is_sorted) {}

  /// Number of values (entries) in the dictionary.
  int32_t num_values() const { return num_values_; }

  /// Encoding of the dictionary values.
  Encoding::type encoding() const { return encoding_; }

  /// Whether the dictionary entries are sorted.
  bool is_sorted() const { return is_sorted_; }

 private:
  int32_t num_values_;
  Encoding::type encoding_;
  bool is_sorted_;
};
170
+
171
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/column_reader.h ADDED
@@ -0,0 +1,501 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <utility>
23
+ #include <vector>
24
+
25
+ #include "parquet/exception.h"
26
+ #include "parquet/level_conversion.h"
27
+ #include "parquet/metadata.h"
28
+ #include "parquet/platform.h"
29
+ #include "parquet/properties.h"
30
+ #include "parquet/schema.h"
31
+ #include "parquet/types.h"
32
+
33
+ namespace arrow {
34
+
35
+ class Array;
36
+ class ChunkedArray;
37
+
38
+ namespace bit_util {
39
+ class BitReader;
40
+ } // namespace bit_util
41
+
42
+ namespace util {
43
+ class RleDecoder;
44
+ } // namespace util
45
+
46
+ } // namespace arrow
47
+
48
+ namespace parquet {
49
+
50
+ class Decryptor;
51
+ class Page;
52
+
53
// 16 MB is the default maximum page header size
// `inline constexpr` (C++17) yields a single entity shared across translation
// units, instead of one internal-linkage copy per TU from `static constexpr`.
inline constexpr uint32_t kDefaultMaxPageHeaderSize = 16 * 1024 * 1024;

// 16 KB is the default expected page header size
inline constexpr uint32_t kDefaultPageHeaderSize = 16 * 1024;
58
+
59
+ // \brief DataPageStats stores encoded statistics and number of values/rows for
60
+ // a page.
61
struct PARQUET_EXPORT DataPageStats {
  /// \param encoded_statistics statistics from the page header, or nullptr.
  /// \param num_values number of values stored in the page.
  /// \param num_rows number of rows stored in the page, if known.
  DataPageStats(const EncodedStatistics* encoded_statistics, int32_t num_values,
                std::optional<int32_t> num_rows)
      : encoded_statistics(encoded_statistics),
        num_values(num_values),
        num_rows(num_rows) {}

  // Encoded statistics extracted from the page header.
  // Nullptr if there are no statistics in the page header.
  const EncodedStatistics* encoded_statistics;
  // Number of values stored in the page. Filled for both V1 and V2 data pages.
  // For repeated fields, this can be greater than number of rows. For
  // non-repeated fields, this will be the same as the number of rows.
  int32_t num_values;
  // Number of rows stored in the page. std::nullopt if not available.
  std::optional<int32_t> num_rows;
};
78
+
79
/// \brief Decodes definition/repetition levels from a data page.
class PARQUET_EXPORT LevelDecoder {
 public:
  LevelDecoder();
  ~LevelDecoder();

  // Initialize the LevelDecoder state with new data
  // and return the number of bytes consumed
  int SetData(Encoding::type encoding, int16_t max_level, int num_buffered_values,
              const uint8_t* data, int32_t data_size);

  // Initialize the LevelDecoder state for a V2 data page, where the byte
  // length of the encoded levels (num_bytes) is known up front.
  void SetDataV2(int32_t num_bytes, int16_t max_level, int num_buffered_values,
                 const uint8_t* data);

  // Decodes a batch of levels into an array and returns the number of levels decoded
  int Decode(int batch_size, int16_t* levels);

 private:
  int bit_width_;             // bit width used to encode the levels
  int num_values_remaining_;  // levels left to decode from the current data
  Encoding::type encoding_;   // selects RLE vs bit-packed decoding below
  std::unique_ptr<::arrow::util::RleDecoder> rle_decoder_;
  std::unique_ptr<::arrow::bit_util::BitReader> bit_packed_decoder_;
  int16_t max_level_;         // maximum level value for this column
};
103
+
104
// Redundant forward declaration (also declared earlier in this header); kept
// so the struct definition is self-contained.
class Decryptor;

// \brief Decryption state handed to a PageReader for reading an encrypted
// column chunk: the chunk's position in the file plus the decryptors for
// page metadata and page data.
struct CryptoContext {
  CryptoContext(bool start_with_dictionary_page, int16_t rg_ordinal, int16_t col_ordinal,
                std::shared_ptr<Decryptor> meta, std::shared_ptr<Decryptor> data)
      : start_decrypt_with_dictionary_page(start_with_dictionary_page),
        row_group_ordinal(rg_ordinal),
        column_ordinal(col_ordinal),
        meta_decryptor(std::move(meta)),
        data_decryptor(std::move(data)) {}
  // Defaulted instead of a user-written empty body `{}`: identical behavior,
  // but lets the compiler generate the canonical default constructor.
  CryptoContext() = default;

  // Whether the first page to decrypt in this chunk is a dictionary page.
  bool start_decrypt_with_dictionary_page = false;
  // Ordinal of the row group within the file; -1 means unset.
  int16_t row_group_ordinal = -1;
  // Ordinal of the column within the row group; -1 means unset.
  int16_t column_ordinal = -1;
  // Decryptor for page headers/metadata. May be null.
  std::shared_ptr<Decryptor> meta_decryptor;
  // Decryptor for page data. May be null.
  std::shared_ptr<Decryptor> data_decryptor;
};
120
+
121
// Abstract page iterator interface. This way, we can feed column pages to the
// ColumnReader through whatever mechanism we choose
class PARQUET_EXPORT PageReader {
  // Predicate over a data page's statistics; returning true skips the page.
  using DataPageFilter = std::function<bool(const DataPageStats&)>;

 public:
  virtual ~PageReader() = default;

  // Create a PageReader over `stream`, decompressing with `codec`, using an
  // explicitly supplied memory pool. `ctx` (optional) provides decryption
  // state for encrypted chunks.
  static std::unique_ptr<PageReader> Open(
      std::shared_ptr<ArrowInputStream> stream, int64_t total_num_values,
      Compression::type codec, bool always_compressed = false,
      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
      const CryptoContext* ctx = NULLPTR);
  // Overload configured through ReaderProperties instead of a bare pool.
  static std::unique_ptr<PageReader> Open(std::shared_ptr<ArrowInputStream> stream,
                                          int64_t total_num_values,
                                          Compression::type codec,
                                          const ReaderProperties& properties,
                                          bool always_compressed = false,
                                          const CryptoContext* ctx = NULLPTR);

  // If data_page_filter is present (not null), NextPage() will call the
  // callback function exactly once per page in the order the pages appear in
  // the column. If the callback function returns true the page will be
  // skipped. The callback will be called only if the page type is DATA_PAGE or
  // DATA_PAGE_V2. Dictionary pages will not be skipped.
  // Caller is responsible for checking that statistics are correct using
  // ApplicationVersion::HasCorrectStatistics().
  // \note API EXPERIMENTAL
  void set_data_page_filter(DataPageFilter data_page_filter) {
    data_page_filter_ = std::move(data_page_filter);
  }

  // @returns: shared_ptr<Page>(nullptr) on EOS, std::shared_ptr<Page>
  // containing new Page otherwise
  //
  // The returned Page may contain references that aren't guaranteed to live
  // beyond the next call to NextPage().
  virtual std::shared_ptr<Page> NextPage() = 0;

  // Cap the accepted page header size (default: kDefaultMaxPageHeaderSize).
  virtual void set_max_page_header_size(uint32_t size) = 0;

 protected:
  // Callback that decides if we should skip a page or not.
  DataPageFilter data_page_filter_;
};
166
+
167
// \brief Untyped base interface for reading a single column chunk's values
// through a PageReader. Use the TypedColumnReader<DType> subclasses (see
// aliases at the bottom of this header) for typed batch reads.
class PARQUET_EXPORT ColumnReader {
 public:
  virtual ~ColumnReader() = default;

  // Factory: creates the concrete typed reader matching `descr`'s physical
  // type, consuming pages from `pager`.
  static std::shared_ptr<ColumnReader> Make(
      const ColumnDescriptor* descr, std::unique_ptr<PageReader> pager,
      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());

  // Returns true if there are still values in this column.
  virtual bool HasNext() = 0;

  // Physical type of the column.
  virtual Type::type type() const = 0;

  // Schema descriptor of the column being read.
  virtual const ColumnDescriptor* descr() const = 0;

  // Get the encoding that can be exposed by this reader. If it returns
  // dictionary encoding, then ReadBatchWithDictionary can be used to read data.
  //
  // \note API EXPERIMENTAL
  virtual ExposedEncoding GetExposedEncoding() = 0;

 protected:
  friend class RowGroupReader;
  // Set the encoding that can be exposed by this reader.
  //
  // \note API EXPERIMENTAL
  virtual void SetExposedEncoding(ExposedEncoding encoding) = 0;
};
195
+
196
// API to read values from a single column. This is a main client facing API.
template <typename DType>
class TypedColumnReader : public ColumnReader {
 public:
  // Physical C++ value type for this column (e.g. int32_t, ByteArray).
  typedef typename DType::c_type T;

  // Read a batch of repetition levels, definition levels, and values from the
  // column.
  //
  // Since null values are not stored in the values, the number of values read
  // may be less than the number of repetition and definition levels. With
  // nested data this is almost certainly true.
  //
  // Set def_levels or rep_levels to nullptr if you want to skip reading them.
  // This is only safe if you know through some other source that there are no
  // undefined values.
  //
  // To fully exhaust a row group, you must read batches until the number of
  // values read reaches the number of stored values according to the metadata.
  //
  // This API is the same for both V1 and V2 of the DataPage
  //
  // @returns: actual number of levels read (see values_read for number of values read)
  virtual int64_t ReadBatch(int64_t batch_size, int16_t* def_levels, int16_t* rep_levels,
                            T* values, int64_t* values_read) = 0;

  /// Read a batch of repetition levels, definition levels, and values from the
  /// column and leave spaces for null entries on the lowest level in the values
  /// buffer.
  ///
  /// In comparison to ReadBatch the length of repetition and definition levels
  /// is the same as of the number of values read for max_definition_level == 1.
  /// In the case of max_definition_level > 1, the repetition and definition
  /// levels are larger than the values but the values include the null entries
  /// with definition_level == (max_definition_level - 1).
  ///
  /// To fully exhaust a row group, you must read batches until the number of
  /// values read reaches the number of stored values according to the metadata.
  ///
  /// @param batch_size the number of levels to read
  /// @param[out] def_levels The Parquet definition levels, output has
  /// the length levels_read.
  /// @param[out] rep_levels The Parquet repetition levels, output has
  /// the length levels_read.
  /// @param[out] values The values in the lowest nested level including
  /// spacing for nulls on the lowest levels; output has the length
  /// values_read.
  /// @param[out] valid_bits Memory allocated for a bitmap that indicates if
  /// the row is null or on the maximum definition level. For performance
  /// reasons the underlying buffer should be able to store 1 bit more than
  /// required. If this requires an additional byte, this byte is only read
  /// but never written to.
  /// @param valid_bits_offset The offset in bits of the valid_bits where the
  /// first relevant bit resides.
  /// @param[out] levels_read The number of repetition/definition levels that were read.
  /// @param[out] values_read The number of values read, this includes all
  /// non-null entries as well as all null-entries on the lowest level
  /// (i.e. definition_level == max_definition_level - 1)
  /// @param[out] null_count The number of nulls on the lowest levels.
  /// (i.e. (values_read - null_count) is total number of non-null entries)
  ///
  /// \deprecated Since 4.0.0
  ARROW_DEPRECATED("Doesn't handle nesting correctly and unused outside of unit tests.")
  virtual int64_t ReadBatchSpaced(int64_t batch_size, int16_t* def_levels,
                                  int16_t* rep_levels, T* values, uint8_t* valid_bits,
                                  int64_t valid_bits_offset, int64_t* levels_read,
                                  int64_t* values_read, int64_t* null_count) = 0;

  // Skip reading values. This method will work for both repeated and
  // non-repeated fields. Note that this method is skipping values and not
  // records. This distinction is important for repeated fields, meaning that
  // we are not skipping over the values to the next record. For example,
  // consider the following two consecutive records containing one repeated field:
  // {[1, 2, 3]}, {[4, 5]}. If we Skip(2), our next read value will be 3, which
  // is inside the first record.
  // Returns the number of values skipped.
  virtual int64_t Skip(int64_t num_values_to_skip) = 0;

  // Read a batch of repetition levels, definition levels, and indices from the
  // column. And read the dictionary if a dictionary page is encountered during
  // reading pages. This API is similar to ReadBatch(), with ability to read
  // dictionary and indices. It is only valid to call this method when the reader can
  // expose dictionary encoding. (i.e., the reader's GetExposedEncoding() returns
  // DICTIONARY).
  //
  // The dictionary is read along with the data page. When there's no data page,
  // the dictionary won't be returned.
  //
  // @param batch_size The batch size to read
  // @param[out] def_levels The Parquet definition levels.
  // @param[out] rep_levels The Parquet repetition levels.
  // @param[out] indices The dictionary indices.
  // @param[out] indices_read The number of indices read.
  // @param[out] dict The pointer to dictionary values. It will return nullptr if
  // there's no data page. Each column chunk only has one dictionary page. The dictionary
  // is owned by the reader, so the caller is responsible for copying the dictionary
  // values before the reader gets destroyed.
  // @param[out] dict_len The dictionary length. It will return 0 if there's no data
  // page.
  // @returns: actual number of levels read (see indices_read for number of
  // indices read
  //
  // \note API EXPERIMENTAL
  virtual int64_t ReadBatchWithDictionary(int64_t batch_size, int16_t* def_levels,
                                          int16_t* rep_levels, int32_t* indices,
                                          int64_t* indices_read, const T** dict,
                                          int32_t* dict_len) = 0;
};
304
+
305
+ namespace internal {
306
+
307
/// \brief Stateful column reader that delimits semantic records for both flat
/// and nested columns
///
/// \note API EXPERIMENTAL
/// \since 1.3.0
class PARQUET_EXPORT RecordReader {
 public:
  /// \brief Creates a record reader.
  /// @param descr Column descriptor
  /// @param leaf_info Level info, used to determine if a column is nullable or not
  /// @param pool Memory pool to use for buffering values and rep/def levels
  /// @param read_dictionary True if reading directly as Arrow dictionary-encoded
  /// @param read_dense_for_nullable True if reading dense and not leaving space for null
  /// values
  static std::shared_ptr<RecordReader> Make(
      const ColumnDescriptor* descr, LevelInfo leaf_info,
      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
      bool read_dictionary = false, bool read_dense_for_nullable = false);

  virtual ~RecordReader() = default;

  /// \brief Attempt to read indicated number of records from column chunk
  /// Note that for repeated fields, a record may have more than one value
  /// and all of them are read. If read_dense_for_nullable() it will
  /// not leave any space for null values. Otherwise, it will read spaced.
  /// \return number of records read
  virtual int64_t ReadRecords(int64_t num_records) = 0;

  /// \brief Attempt to skip indicated number of records from column chunk.
  /// Note that for repeated fields, a record may have more than one value
  /// and all of them are skipped.
  /// \return number of records skipped
  virtual int64_t SkipRecords(int64_t num_records) = 0;

  /// \brief Pre-allocate space for data. Results in better flat read performance
  virtual void Reserve(int64_t num_values) = 0;

  /// \brief Clear consumed values and repetition/definition levels as the
  /// result of calling ReadRecords
  /// For FLBA and ByteArray types, call GetBuilderChunks() to reset them.
  virtual void Reset() = 0;

  /// \brief Transfer filled values buffer to caller. A new one will be
  /// allocated in subsequent ReadRecords calls
  virtual std::shared_ptr<ResizableBuffer> ReleaseValues() = 0;

  /// \brief Transfer filled validity bitmap buffer to caller. A new one will
  /// be allocated in subsequent ReadRecords calls
  virtual std::shared_ptr<ResizableBuffer> ReleaseIsValid() = 0;

  /// \brief Return true if the record reader has more internal data yet to
  /// process
  virtual bool HasMoreData() const = 0;

  /// \brief Advance record reader to the next row group. Must be set before
  /// any records could be read/skipped.
  /// \param[in] reader obtained from RowGroupReader::GetColumnPageReader
  virtual void SetPageReader(std::unique_ptr<PageReader> reader) = 0;

  /// \brief Returns the underlying column reader's descriptor.
  virtual const ColumnDescriptor* descr() const = 0;

  /// \brief Dump internal state to stdout/stderr for debugging.
  virtual void DebugPrintState() = 0;

  /// \brief Returns the dictionary owned by the current decoder. Throws an
  /// exception if the current decoder is not for dictionary encoding. The caller is
  /// responsible for casting the returned pointer to proper type depending on the
  /// column's physical type. An example:
  /// const ByteArray* dict = reinterpret_cast<const ByteArray*>(ReadDictionary(&len));
  /// or:
  /// const float* dict = reinterpret_cast<const float*>(ReadDictionary(&len));
  /// \param[out] dictionary_length The number of dictionary entries.
  virtual const void* ReadDictionary(int32_t* dictionary_length) = 0;

  /// \brief Decoded definition levels
  int16_t* def_levels() const {
    return reinterpret_cast<int16_t*>(def_levels_->mutable_data());
  }

  /// \brief Decoded repetition levels
  int16_t* rep_levels() const {
    return reinterpret_cast<int16_t*>(rep_levels_->mutable_data());
  }

  /// \brief Decoded values, including nulls, if any
  /// FLBA and ByteArray types do not use this array and read into their own
  /// builders.
  uint8_t* values() const { return values_->mutable_data(); }

  /// \brief Number of values written, including space left for nulls if any.
  /// If this Reader was constructed with read_dense_for_nullable(), there is no space for
  /// nulls and null_count() will be 0. There is no read-ahead/buffering for values. For
  /// FLBA and ByteArray types this value reflects the values written with the last
  /// ReadRecords call since those readers will reset the values after each call.
  int64_t values_written() const { return values_written_; }

  /// \brief Number of definition / repetition levels (from those that have
  /// been decoded) that have been consumed inside the reader.
  int64_t levels_position() const { return levels_position_; }

  /// \brief Number of definition / repetition levels that have been written
  /// internally in the reader. This may be larger than values_written() because
  /// for repeated fields we need to look at the levels in advance to figure out
  /// the record boundaries.
  int64_t levels_written() const { return levels_written_; }

  /// \brief Number of nulls in the leaf that we have read so far into the
  /// values vector. This is only valid when !read_dense_for_nullable(). When
  /// read_dense_for_nullable() it will always be 0.
  int64_t null_count() const { return null_count_; }

  /// \brief True if the leaf values are nullable
  bool nullable_values() const { return nullable_values_; }

  /// \brief True if reading directly as Arrow dictionary-encoded
  bool read_dictionary() const { return read_dictionary_; }

  /// \brief True if reading dense for nullable columns.
  bool read_dense_for_nullable() const { return read_dense_for_nullable_; }

 protected:
  /// \brief Indicates if we can have nullable values. Note that repeated fields
  /// may or may not be nullable.
  bool nullable_values_;

  /// \brief True when positioned at the start of a record (used when
  /// delimiting records across batch boundaries).
  bool at_record_start_;
  /// \brief Count of whole records read so far.
  int64_t records_read_;

  /// \brief Stores values. These values are populated based on each ReadRecords
  /// call. No extra values are buffered for the next call. SkipRecords will not
  /// add any value to this buffer.
  std::shared_ptr<::arrow::ResizableBuffer> values_;
  /// \brief False for BYTE_ARRAY, in which case we don't allocate the values
  /// buffer and we directly read into builder classes.
  bool uses_values_;

  /// \brief Values that we have read into 'values_' + 'null_count_'.
  int64_t values_written_;
  /// \brief Allocated capacity (in values) of 'values_'.
  int64_t values_capacity_;
  int64_t null_count_;

  /// \brief Each bit corresponds to one element in 'values_' and specifies if it
  /// is null or not null. Not set if read_dense_for_nullable_ is true.
  std::shared_ptr<::arrow::ResizableBuffer> valid_bits_;

  /// \brief Buffer for definition levels. May contain more levels than
  /// is actually read. This is because we read levels ahead to
  /// figure out record boundaries for repeated fields.
  /// For flat required fields, 'def_levels_' and 'rep_levels_' are not
  /// populated. For non-repeated fields 'rep_levels_' is not populated.
  /// 'def_levels_' and 'rep_levels_' must be of the same size if present.
  std::shared_ptr<::arrow::ResizableBuffer> def_levels_;
  /// \brief Buffer for repetition levels. Only populated for repeated
  /// fields.
  std::shared_ptr<::arrow::ResizableBuffer> rep_levels_;

  /// \brief Number of definition / repetition levels that have been written
  /// internally in the reader. This may be larger than values_written() since
  /// for repeated fields we need to look at the levels in advance to figure out
  /// the record boundaries.
  int64_t levels_written_;
  /// \brief Position of the next level that should be consumed.
  int64_t levels_position_;
  /// \brief Allocated capacity (in levels) of the level buffers.
  int64_t levels_capacity_;

  bool read_dictionary_ = false;
  // If true, we will not leave any space for the null values in the values_
  // vector.
  bool read_dense_for_nullable_ = false;
};
477
+
478
/// \brief RecordReader variant for binary columns, which read into Arrow
/// builders rather than the flat values buffer (see RecordReader::values()).
class BinaryRecordReader : virtual public RecordReader {
 public:
  /// \brief Transfer the accumulated builder chunks to the caller.
  virtual std::vector<std::shared_ptr<::arrow::Array>> GetBuilderChunks() = 0;
};
482
+
483
/// \brief Read records directly to dictionary-encoded Arrow form (int32
/// indices). Only valid for BYTE_ARRAY columns
class DictionaryRecordReader : virtual public RecordReader {
 public:
  /// \brief Return the accumulated dictionary-encoded data as a chunked array.
  virtual std::shared_ptr<::arrow::ChunkedArray> GetResult() = 0;
};
489
+
490
+ } // namespace internal
491
+
492
// Convenience aliases: one TypedColumnReader instantiation per Parquet
// physical type.
using BoolReader = TypedColumnReader<BooleanType>;
using Int32Reader = TypedColumnReader<Int32Type>;
using Int64Reader = TypedColumnReader<Int64Type>;
using Int96Reader = TypedColumnReader<Int96Type>;
using FloatReader = TypedColumnReader<FloatType>;
using DoubleReader = TypedColumnReader<DoubleType>;
using ByteArrayReader = TypedColumnReader<ByteArrayType>;
using FixedLenByteArrayReader = TypedColumnReader<FLBAType>;
500
+
501
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/column_scanner.h ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <stdio.h>
21
+
22
+ #include <cstdint>
23
+ #include <memory>
24
+ #include <ostream>
25
+ #include <string>
26
+ #include <utility>
27
+ #include <vector>
28
+
29
+ #include "parquet/column_reader.h"
30
+ #include "parquet/exception.h"
31
+ #include "parquet/platform.h"
32
+ #include "parquet/schema.h"
33
+ #include "parquet/types.h"
34
+
35
+ namespace parquet {
36
+
37
// Default number of levels/values a Scanner buffers per ReadBatch call.
static constexpr int64_t DEFAULT_SCANNER_BATCH_SIZE = 128;
38
+
39
// \brief Base class for iterating over a single column's levels and values
// with internal batching. Concrete behavior lives in TypedScanner<DType>.
class PARQUET_EXPORT Scanner {
 public:
  // Buffers up to `batch_size` levels; the level vectors are only sized when
  // the column actually has definition / repetition levels.
  explicit Scanner(std::shared_ptr<ColumnReader> reader,
                   int64_t batch_size = DEFAULT_SCANNER_BATCH_SIZE,
                   ::arrow::MemoryPool* pool = ::arrow::default_memory_pool())
      : batch_size_(batch_size),
        level_offset_(0),
        levels_buffered_(0),
        value_buffer_(AllocateBuffer(pool)),
        value_offset_(0),
        values_buffered_(0),
        reader_(std::move(reader)) {
    def_levels_.resize(
        descr()->max_definition_level() > 0 ? static_cast<size_t>(batch_size_) : 0);
    rep_levels_.resize(
        descr()->max_repetition_level() > 0 ? static_cast<size_t>(batch_size_) : 0);
  }

  virtual ~Scanner() {}

  // Create a scanner appropriate for the given column reader.
  static std::shared_ptr<Scanner> Make(
      std::shared_ptr<ColumnReader> col_reader,
      int64_t batch_size = DEFAULT_SCANNER_BATCH_SIZE,
      ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());

  // Format the next value (optionally with its levels) to `out`.
  virtual void PrintNext(std::ostream& out, int width, bool with_levels = false) = 0;

  // True while buffered levels remain or the reader can produce more.
  bool HasNext() { return level_offset_ < levels_buffered_ || reader_->HasNext(); }

  const ColumnDescriptor* descr() const { return reader_->descr(); }

  int64_t batch_size() const { return batch_size_; }

  // NOTE(review): this only updates the member; the already-allocated
  // level/value buffers are not resized here.
  void SetBatchSize(int64_t batch_size) { batch_size_ = batch_size; }

 protected:
  int64_t batch_size_;

  std::vector<int16_t> def_levels_;
  std::vector<int16_t> rep_levels_;
  int level_offset_;     // index of the next buffered level to consume
  int levels_buffered_;  // number of levels currently buffered

  std::shared_ptr<ResizableBuffer> value_buffer_;
  int value_offset_;         // index of the next buffered value to consume
  int64_t values_buffered_;  // number of values currently buffered
  std::shared_ptr<ColumnReader> reader_;
};
87
+
88
+ template <typename DType>
89
+ class PARQUET_TEMPLATE_CLASS_EXPORT TypedScanner : public Scanner {
90
+ public:
91
+ typedef typename DType::c_type T;
92
+
93
+ explicit TypedScanner(std::shared_ptr<ColumnReader> reader,
94
+ int64_t batch_size = DEFAULT_SCANNER_BATCH_SIZE,
95
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool())
96
+ : Scanner(std::move(reader), batch_size, pool) {
97
+ typed_reader_ = static_cast<TypedColumnReader<DType>*>(reader_.get());
98
+ int value_byte_size = type_traits<DType::type_num>::value_byte_size;
99
+ PARQUET_THROW_NOT_OK(value_buffer_->Resize(batch_size_ * value_byte_size));
100
+ values_ = reinterpret_cast<T*>(value_buffer_->mutable_data());
101
+ }
102
+
103
+ virtual ~TypedScanner() {}
104
+
105
+ bool NextLevels(int16_t* def_level, int16_t* rep_level) {
106
+ if (level_offset_ == levels_buffered_) {
107
+ levels_buffered_ = static_cast<int>(
108
+ typed_reader_->ReadBatch(static_cast<int>(batch_size_), def_levels_.data(),
109
+ rep_levels_.data(), values_, &values_buffered_));
110
+
111
+ value_offset_ = 0;
112
+ level_offset_ = 0;
113
+ if (!levels_buffered_) {
114
+ return false;
115
+ }
116
+ }
117
+ *def_level = descr()->max_definition_level() > 0 ? def_levels_[level_offset_] : 0;
118
+ *rep_level = descr()->max_repetition_level() > 0 ? rep_levels_[level_offset_] : 0;
119
+ level_offset_++;
120
+ return true;
121
+ }
122
+
123
+ bool Next(T* val, int16_t* def_level, int16_t* rep_level, bool* is_null) {
124
+ if (level_offset_ == levels_buffered_) {
125
+ if (!HasNext()) {
126
+ // Out of data pages
127
+ return false;
128
+ }
129
+ }
130
+
131
+ NextLevels(def_level, rep_level);
132
+ *is_null = *def_level < descr()->max_definition_level();
133
+
134
+ if (*is_null) {
135
+ return true;
136
+ }
137
+
138
+ if (value_offset_ == values_buffered_) {
139
+ throw ParquetException("Value was non-null, but has not been buffered");
140
+ }
141
+ *val = values_[value_offset_++];
142
+ return true;
143
+ }
144
+
145
+ // Returns true if there is a next value
146
+ bool NextValue(T* val, bool* is_null) {
147
+ if (level_offset_ == levels_buffered_) {
148
+ if (!HasNext()) {
149
+ // Out of data pages
150
+ return false;
151
+ }
152
+ }
153
+
154
+ // Out of values
155
+ int16_t def_level = -1;
156
+ int16_t rep_level = -1;
157
+ NextLevels(&def_level, &rep_level);
158
+ *is_null = def_level < descr()->max_definition_level();
159
+
160
+ if (*is_null) {
161
+ return true;
162
+ }
163
+
164
+ if (value_offset_ == values_buffered_) {
165
+ throw ParquetException("Value was non-null, but has not been buffered");
166
+ }
167
+ *val = values_[value_offset_++];
168
+ return true;
169
+ }
170
+
171
+ virtual void PrintNext(std::ostream& out, int width, bool with_levels = false) {
172
+ T val{};
173
+ int16_t def_level = -1;
174
+ int16_t rep_level = -1;
175
+ bool is_null = false;
176
+ char buffer[80];
177
+
178
+ if (!Next(&val, &def_level, &rep_level, &is_null)) {
179
+ throw ParquetException("No more values buffered");
180
+ }
181
+
182
+ if (with_levels) {
183
+ out << " D:" << def_level << " R:" << rep_level << " ";
184
+ if (!is_null) {
185
+ out << "V:";
186
+ }
187
+ }
188
+
189
+ if (is_null) {
190
+ std::string null_fmt = format_fwf<ByteArrayType>(width);
191
+ snprintf(buffer, sizeof(buffer), null_fmt.c_str(), "NULL");
192
+ } else {
193
+ FormatValue(&val, buffer, sizeof(buffer), width);
194
+ }
195
+ out << buffer;
196
+ }
197
+
198
+ private:
199
+ // The ownership of this object is expressed through the reader_ variable in the base
200
+ TypedColumnReader<DType>* typed_reader_;
201
+
202
+ inline void FormatValue(void* val, char* buffer, int bufsize, int width);
203
+
204
+ T* values_;
205
+ };
206
+
207
// Generic case: printf the raw value with the fixed-width conversion from
// format_fwf<DType>.
template <typename DType>
inline void TypedScanner<DType>::FormatValue(void* val, char* buffer, int bufsize,
                                             int width) {
  std::string fmt = format_fwf<DType>(width);
  snprintf(buffer, bufsize, fmt.c_str(), *reinterpret_cast<T*>(val));
}

// Int96 specialization: converts the value to its string representation
// before formatting.
template <>
inline void TypedScanner<Int96Type>::FormatValue(void* val, char* buffer, int bufsize,
                                                 int width) {
  std::string fmt = format_fwf<Int96Type>(width);
  std::string result = Int96ToString(*reinterpret_cast<Int96*>(val));
  snprintf(buffer, bufsize, fmt.c_str(), result.c_str());
}

// ByteArray specialization: formats via the value's string representation.
template <>
inline void TypedScanner<ByteArrayType>::FormatValue(void* val, char* buffer, int bufsize,
                                                     int width) {
  std::string fmt = format_fwf<ByteArrayType>(width);
  std::string result = ByteArrayToString(*reinterpret_cast<ByteArray*>(val));
  snprintf(buffer, bufsize, fmt.c_str(), result.c_str());
}

// FLBA specialization: additionally needs the column's declared byte length
// from the descriptor.
template <>
inline void TypedScanner<FLBAType>::FormatValue(void* val, char* buffer, int bufsize,
                                                int width) {
  std::string fmt = format_fwf<FLBAType>(width);
  std::string result = FixedLenByteArrayToString(
      *reinterpret_cast<FixedLenByteArray*>(val), descr()->type_length());
  snprintf(buffer, bufsize, fmt.c_str(), result.c_str());
}
238
+
239
// Convenience aliases: one TypedScanner instantiation per Parquet physical
// type.
typedef TypedScanner<BooleanType> BoolScanner;
typedef TypedScanner<Int32Type> Int32Scanner;
typedef TypedScanner<Int64Type> Int64Scanner;
typedef TypedScanner<Int96Type> Int96Scanner;
typedef TypedScanner<FloatType> FloatScanner;
typedef TypedScanner<DoubleType> DoubleScanner;
typedef TypedScanner<ByteArrayType> ByteArrayScanner;
typedef TypedScanner<FLBAType> FixedLenByteArrayScanner;
247
+
248
+ template <typename RType>
249
+ int64_t ScanAll(int32_t batch_size, int16_t* def_levels, int16_t* rep_levels,
250
+ uint8_t* values, int64_t* values_buffered,
251
+ parquet::ColumnReader* reader) {
252
+ typedef typename RType::T Type;
253
+ auto typed_reader = static_cast<RType*>(reader);
254
+ auto vals = reinterpret_cast<Type*>(&values[0]);
255
+ return typed_reader->ReadBatch(batch_size, def_levels, rep_levels, vals,
256
+ values_buffered);
257
+ }
258
+
259
+ int64_t PARQUET_EXPORT ScanAllValues(int32_t batch_size, int16_t* def_levels,
260
+ int16_t* rep_levels, uint8_t* values,
261
+ int64_t* values_buffered,
262
+ parquet::ColumnReader* reader);
263
+
264
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/column_writer.h ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <cstring>
22
+ #include <memory>
23
+
24
+ #include "arrow/util/compression.h"
25
+ #include "parquet/exception.h"
26
+ #include "parquet/platform.h"
27
+ #include "parquet/types.h"
28
+
29
+ namespace arrow {
30
+
31
+ class Array;
32
+
33
+ namespace bit_util {
34
+ class BitWriter;
35
+ } // namespace bit_util
36
+
37
+ namespace util {
38
+ class RleEncoder;
39
+ class CodecOptions;
40
+ } // namespace util
41
+
42
+ } // namespace arrow
43
+
44
+ namespace parquet {
45
+
46
+ struct ArrowWriteContext;
47
+ class ColumnChunkMetaDataBuilder;
48
+ class ColumnDescriptor;
49
+ class ColumnIndexBuilder;
50
+ class DataPage;
51
+ class DictionaryPage;
52
+ class Encryptor;
53
+ class OffsetIndexBuilder;
54
+ class WriterProperties;
55
+
56
+ class PARQUET_EXPORT LevelEncoder {
57
+ public:
58
+ LevelEncoder();
59
+ ~LevelEncoder();
60
+
61
+ static int MaxBufferSize(Encoding::type encoding, int16_t max_level,
62
+ int num_buffered_values);
63
+
64
+ // Initialize the LevelEncoder.
65
+ void Init(Encoding::type encoding, int16_t max_level, int num_buffered_values,
66
+ uint8_t* data, int data_size);
67
+
68
+ // Encodes a batch of levels from an array and returns the number of levels encoded
69
+ int Encode(int batch_size, const int16_t* levels);
70
+
71
+ int32_t len() {
72
+ if (encoding_ != Encoding::RLE) {
73
+ throw ParquetException("Only implemented for RLE encoding");
74
+ }
75
+ return rle_length_;
76
+ }
77
+
78
+ private:
79
+ int bit_width_;
80
+ int rle_length_;
81
+ Encoding::type encoding_;
82
+ std::unique_ptr<::arrow::util::RleEncoder> rle_encoder_;
83
+ std::unique_ptr<::arrow::bit_util::BitWriter> bit_packed_encoder_;
84
+ };
85
+
86
+ class PARQUET_EXPORT PageWriter {
87
+ public:
88
+ virtual ~PageWriter() {}
89
+
90
+ static std::unique_ptr<PageWriter> Open(
91
+ std::shared_ptr<ArrowOutputStream> sink, Compression::type codec,
92
+ ColumnChunkMetaDataBuilder* metadata, int16_t row_group_ordinal = -1,
93
+ int16_t column_chunk_ordinal = -1,
94
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
95
+ bool buffered_row_group = false,
96
+ std::shared_ptr<Encryptor> header_encryptor = NULLPTR,
97
+ std::shared_ptr<Encryptor> data_encryptor = NULLPTR,
98
+ bool page_write_checksum_enabled = false,
99
+ // column_index_builder MUST outlive the PageWriter
100
+ ColumnIndexBuilder* column_index_builder = NULLPTR,
101
+ // offset_index_builder MUST outlive the PageWriter
102
+ OffsetIndexBuilder* offset_index_builder = NULLPTR,
103
+ const CodecOptions& codec_options = CodecOptions{});
104
+
105
+ ARROW_DEPRECATED("Deprecated in 13.0.0. Use CodecOptions-taking overload instead.")
106
+ static std::unique_ptr<PageWriter> Open(
107
+ std::shared_ptr<ArrowOutputStream> sink, Compression::type codec,
108
+ int compression_level, ColumnChunkMetaDataBuilder* metadata,
109
+ int16_t row_group_ordinal = -1, int16_t column_chunk_ordinal = -1,
110
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
111
+ bool buffered_row_group = false,
112
+ std::shared_ptr<Encryptor> header_encryptor = NULLPTR,
113
+ std::shared_ptr<Encryptor> data_encryptor = NULLPTR,
114
+ bool page_write_checksum_enabled = false,
115
+ // column_index_builder MUST outlive the PageWriter
116
+ ColumnIndexBuilder* column_index_builder = NULLPTR,
117
+ // offset_index_builder MUST outlive the PageWriter
118
+ OffsetIndexBuilder* offset_index_builder = NULLPTR);
119
+
120
+ // The Column Writer decides if dictionary encoding is used if set and
121
+ // if the dictionary encoding has fallen back to default encoding on reaching dictionary
122
+ // page limit
123
+ virtual void Close(bool has_dictionary, bool fallback) = 0;
124
+
125
+ // Return the number of uncompressed bytes written (including header size)
126
+ virtual int64_t WriteDataPage(const DataPage& page) = 0;
127
+
128
+ // Return the number of uncompressed bytes written (including header size)
129
+ virtual int64_t WriteDictionaryPage(const DictionaryPage& page) = 0;
130
+
131
+ /// \brief The total number of bytes written as serialized data and
132
+ /// dictionary pages to the sink so far.
133
+ virtual int64_t total_compressed_bytes_written() const = 0;
134
+
135
+ virtual bool has_compressor() = 0;
136
+
137
+ virtual void Compress(const Buffer& src_buffer, ResizableBuffer* dest_buffer) = 0;
138
+ };
139
+
140
+ class PARQUET_EXPORT ColumnWriter {
141
+ public:
142
+ virtual ~ColumnWriter() = default;
143
+
144
+ static std::shared_ptr<ColumnWriter> Make(ColumnChunkMetaDataBuilder*,
145
+ std::unique_ptr<PageWriter>,
146
+ const WriterProperties* properties);
147
+
148
+ /// \brief Closes the ColumnWriter, commits any buffered values to pages.
149
+ /// \return Total size of the column in bytes
150
+ virtual int64_t Close() = 0;
151
+
152
+ /// \brief The physical Parquet type of the column
153
+ virtual Type::type type() const = 0;
154
+
155
+ /// \brief The schema for the column
156
+ virtual const ColumnDescriptor* descr() const = 0;
157
+
158
+ /// \brief The number of rows written so far
159
+ virtual int64_t rows_written() const = 0;
160
+
161
+ /// \brief The total size of the compressed pages + page headers. Values
162
+ /// are still buffered and not written to a pager yet
163
+ ///
164
+ /// So in un-buffered mode, it always returns 0
165
+ virtual int64_t total_compressed_bytes() const = 0;
166
+
167
+ /// \brief The total number of bytes written as serialized data and
168
+ /// dictionary pages to the ColumnChunk so far
169
+ /// These bytes are uncompressed bytes.
170
+ virtual int64_t total_bytes_written() const = 0;
171
+
172
+ /// \brief The total number of bytes written as serialized data and
173
+ /// dictionary pages to the ColumnChunk so far.
174
+ /// If the column is uncompressed, the value would be equal to
175
+ /// total_bytes_written().
176
+ virtual int64_t total_compressed_bytes_written() const = 0;
177
+
178
+ /// \brief Estimated size of the values that are not written to a page yet.
179
+ virtual int64_t estimated_buffered_value_bytes() const = 0;
180
+
181
+ /// \brief The file-level writer properties
182
+ virtual const WriterProperties* properties() = 0;
183
+
184
+ /// \brief Write Apache Arrow columnar data directly to ColumnWriter. Returns
185
+ /// error status if the array data type is not compatible with the concrete
186
+ /// writer type.
187
+ ///
188
+ /// leaf_array is always a primitive (possibly dictionary encoded type).
189
+ /// Leaf_field_nullable indicates whether the leaf array is considered nullable
190
+ /// according to its schema in a Table or its parent array.
191
+ virtual ::arrow::Status WriteArrow(const int16_t* def_levels, const int16_t* rep_levels,
192
+ int64_t num_levels, const ::arrow::Array& leaf_array,
193
+ ArrowWriteContext* ctx,
194
+ bool leaf_field_nullable) = 0;
195
+ };
196
+
197
+ // API to write values to a single column. This is the main client facing API.
198
+ template <typename DType>
199
+ class TypedColumnWriter : public ColumnWriter {
200
+ public:
201
+ using T = typename DType::c_type;
202
+
203
+ // Write a batch of repetition levels, definition levels, and values to the
204
+ // column.
205
+ // `num_values` is the number of logical leaf values.
206
+ // `def_levels` (resp. `rep_levels`) can be null if the column's max definition level
207
+ // (resp. max repetition level) is 0.
208
+ // If not null, each of `def_levels` and `rep_levels` must have at least
209
+ // `num_values`.
210
+ //
211
+ // The number of physical values written (taken from `values`) is returned.
212
+ // It can be smaller than `num_values` is there are some undefined values.
213
+ virtual int64_t WriteBatch(int64_t num_values, const int16_t* def_levels,
214
+ const int16_t* rep_levels, const T* values) = 0;
215
+
216
+ /// Write a batch of repetition levels, definition levels, and values to the
217
+ /// column.
218
+ ///
219
+ /// In comparison to WriteBatch the length of repetition and definition levels
220
+ /// is the same as of the number of values read for max_definition_level == 1.
221
+ /// In the case of max_definition_level > 1, the repetition and definition
222
+ /// levels are larger than the values but the values include the null entries
223
+ /// with definition_level == (max_definition_level - 1). Thus we have to differentiate
224
+ /// in the parameters of this function if the input has the length of num_values or the
225
+ /// _number of rows in the lowest nesting level_.
226
+ ///
227
+ /// In the case that the most inner node in the Parquet is required, the _number of rows
228
+ /// in the lowest nesting level_ is equal to the number of non-null values. If the
229
+ /// inner-most schema node is optional, the _number of rows in the lowest nesting level_
230
+ /// also includes all values with definition_level == (max_definition_level - 1).
231
+ ///
232
+ /// @param num_values number of levels to write.
233
+ /// @param def_levels The Parquet definition levels, length is num_values
234
+ /// @param rep_levels The Parquet repetition levels, length is num_values
235
+ /// @param valid_bits Bitmap that indicates if the row is null on the lowest nesting
236
+ /// level. The length is number of rows in the lowest nesting level.
237
+ /// @param valid_bits_offset The offset in bits of the valid_bits where the
238
+ /// first relevant bit resides.
239
+ /// @param values The values in the lowest nested level including
240
+ /// spacing for nulls on the lowest levels; input has the length
241
+ /// of the number of rows on the lowest nesting level.
242
+ virtual void WriteBatchSpaced(int64_t num_values, const int16_t* def_levels,
243
+ const int16_t* rep_levels, const uint8_t* valid_bits,
244
+ int64_t valid_bits_offset, const T* values) = 0;
245
+ };
246
+
247
+ using BoolWriter = TypedColumnWriter<BooleanType>;
248
+ using Int32Writer = TypedColumnWriter<Int32Type>;
249
+ using Int64Writer = TypedColumnWriter<Int64Type>;
250
+ using Int96Writer = TypedColumnWriter<Int96Type>;
251
+ using FloatWriter = TypedColumnWriter<FloatType>;
252
+ using DoubleWriter = TypedColumnWriter<DoubleType>;
253
+ using ByteArrayWriter = TypedColumnWriter<ByteArrayType>;
254
+ using FixedLenByteArrayWriter = TypedColumnWriter<FLBAType>;
255
+
256
+ namespace internal {
257
+
258
+ /**
259
+ * Timestamp conversion constants
260
+ */
261
+ constexpr int64_t kJulianEpochOffsetDays = INT64_C(2440588);
262
+
263
+ template <int64_t UnitPerDay, int64_t NanosecondsPerUnit>
264
+ inline void ArrowTimestampToImpalaTimestamp(const int64_t time, Int96* impala_timestamp) {
265
+ int64_t julian_days = (time / UnitPerDay) + kJulianEpochOffsetDays;
266
+ (*impala_timestamp).value[2] = (uint32_t)julian_days;
267
+
268
+ int64_t last_day_units = time % UnitPerDay;
269
+ auto last_day_nanos = last_day_units * NanosecondsPerUnit;
270
+ // impala_timestamp will be unaligned every other entry so do memcpy instead
271
+ // of assign and reinterpret cast to avoid undefined behavior.
272
+ std::memcpy(impala_timestamp, &last_day_nanos, sizeof(int64_t));
273
+ }
274
+
275
+ constexpr int64_t kSecondsInNanos = INT64_C(1000000000);
276
+
277
+ inline void SecondsToImpalaTimestamp(const int64_t seconds, Int96* impala_timestamp) {
278
+ ArrowTimestampToImpalaTimestamp<kSecondsPerDay, kSecondsInNanos>(seconds,
279
+ impala_timestamp);
280
+ }
281
+
282
+ constexpr int64_t kMillisecondsInNanos = kSecondsInNanos / INT64_C(1000);
283
+
284
+ inline void MillisecondsToImpalaTimestamp(const int64_t milliseconds,
285
+ Int96* impala_timestamp) {
286
+ ArrowTimestampToImpalaTimestamp<kMillisecondsPerDay, kMillisecondsInNanos>(
287
+ milliseconds, impala_timestamp);
288
+ }
289
+
290
+ constexpr int64_t kMicrosecondsInNanos = kMillisecondsInNanos / INT64_C(1000);
291
+
292
+ inline void MicrosecondsToImpalaTimestamp(const int64_t microseconds,
293
+ Int96* impala_timestamp) {
294
+ ArrowTimestampToImpalaTimestamp<kMicrosecondsPerDay, kMicrosecondsInNanos>(
295
+ microseconds, impala_timestamp);
296
+ }
297
+
298
+ constexpr int64_t kNanosecondsInNanos = INT64_C(1);
299
+
300
+ inline void NanosecondsToImpalaTimestamp(const int64_t nanoseconds,
301
+ Int96* impala_timestamp) {
302
+ ArrowTimestampToImpalaTimestamp<kNanosecondsPerDay, kNanosecondsInNanos>(
303
+ nanoseconds, impala_timestamp);
304
+ }
305
+
306
+ } // namespace internal
307
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encoding.h ADDED
@@ -0,0 +1,471 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <cstring>
22
+ #include <memory>
23
+ #include <vector>
24
+
25
+ #include "arrow/util/spaced.h"
26
+
27
+ #include "parquet/exception.h"
28
+ #include "parquet/platform.h"
29
+ #include "parquet/types.h"
30
+
31
+ namespace arrow {
32
+
33
+ class Array;
34
+ class ArrayBuilder;
35
+ class BinaryArray;
36
+ class BinaryBuilder;
37
+ class BooleanBuilder;
38
+ class Int32Type;
39
+ class Int64Type;
40
+ class FloatType;
41
+ class DoubleType;
42
+ class FixedSizeBinaryType;
43
+ template <typename T>
44
+ class NumericBuilder;
45
+ class FixedSizeBinaryBuilder;
46
+ template <typename T>
47
+ class Dictionary32Builder;
48
+
49
+ } // namespace arrow
50
+
51
+ namespace parquet {
52
+
53
+ template <typename DType>
54
+ class TypedEncoder;
55
+
56
+ using BooleanEncoder = TypedEncoder<BooleanType>;
57
+ using Int32Encoder = TypedEncoder<Int32Type>;
58
+ using Int64Encoder = TypedEncoder<Int64Type>;
59
+ using Int96Encoder = TypedEncoder<Int96Type>;
60
+ using FloatEncoder = TypedEncoder<FloatType>;
61
+ using DoubleEncoder = TypedEncoder<DoubleType>;
62
+ using ByteArrayEncoder = TypedEncoder<ByteArrayType>;
63
+ using FLBAEncoder = TypedEncoder<FLBAType>;
64
+
65
+ template <typename DType>
66
+ class TypedDecoder;
67
+
68
+ class BooleanDecoder;
69
+ using Int32Decoder = TypedDecoder<Int32Type>;
70
+ using Int64Decoder = TypedDecoder<Int64Type>;
71
+ using Int96Decoder = TypedDecoder<Int96Type>;
72
+ using FloatDecoder = TypedDecoder<FloatType>;
73
+ using DoubleDecoder = TypedDecoder<DoubleType>;
74
+ using ByteArrayDecoder = TypedDecoder<ByteArrayType>;
75
+ class FLBADecoder;
76
+
77
+ template <typename T>
78
+ struct EncodingTraits;
79
+
80
+ template <>
81
+ struct EncodingTraits<BooleanType> {
82
+ using Encoder = BooleanEncoder;
83
+ using Decoder = BooleanDecoder;
84
+
85
+ using ArrowType = ::arrow::BooleanType;
86
+ using Accumulator = ::arrow::BooleanBuilder;
87
+ struct DictAccumulator {};
88
+ };
89
+
90
+ template <>
91
+ struct EncodingTraits<Int32Type> {
92
+ using Encoder = Int32Encoder;
93
+ using Decoder = Int32Decoder;
94
+
95
+ using ArrowType = ::arrow::Int32Type;
96
+ using Accumulator = ::arrow::NumericBuilder<::arrow::Int32Type>;
97
+ using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::Int32Type>;
98
+ };
99
+
100
+ template <>
101
+ struct EncodingTraits<Int64Type> {
102
+ using Encoder = Int64Encoder;
103
+ using Decoder = Int64Decoder;
104
+
105
+ using ArrowType = ::arrow::Int64Type;
106
+ using Accumulator = ::arrow::NumericBuilder<::arrow::Int64Type>;
107
+ using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::Int64Type>;
108
+ };
109
+
110
+ template <>
111
+ struct EncodingTraits<Int96Type> {
112
+ using Encoder = Int96Encoder;
113
+ using Decoder = Int96Decoder;
114
+
115
+ struct Accumulator {};
116
+ struct DictAccumulator {};
117
+ };
118
+
119
+ template <>
120
+ struct EncodingTraits<FloatType> {
121
+ using Encoder = FloatEncoder;
122
+ using Decoder = FloatDecoder;
123
+
124
+ using ArrowType = ::arrow::FloatType;
125
+ using Accumulator = ::arrow::NumericBuilder<::arrow::FloatType>;
126
+ using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::FloatType>;
127
+ };
128
+
129
+ template <>
130
+ struct EncodingTraits<DoubleType> {
131
+ using Encoder = DoubleEncoder;
132
+ using Decoder = DoubleDecoder;
133
+
134
+ using ArrowType = ::arrow::DoubleType;
135
+ using Accumulator = ::arrow::NumericBuilder<::arrow::DoubleType>;
136
+ using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::DoubleType>;
137
+ };
138
+
139
+ template <>
140
+ struct EncodingTraits<ByteArrayType> {
141
+ using Encoder = ByteArrayEncoder;
142
+ using Decoder = ByteArrayDecoder;
143
+
144
+ using ArrowType = ::arrow::BinaryType;
145
+ /// \brief Internal helper class for decoding BYTE_ARRAY data where we can
146
+ /// overflow the capacity of a single arrow::BinaryArray
147
+ struct Accumulator {
148
+ std::unique_ptr<::arrow::BinaryBuilder> builder;
149
+ std::vector<std::shared_ptr<::arrow::Array>> chunks;
150
+ };
151
+ using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::BinaryType>;
152
+ };
153
+
154
+ template <>
155
+ struct EncodingTraits<FLBAType> {
156
+ using Encoder = FLBAEncoder;
157
+ using Decoder = FLBADecoder;
158
+
159
+ using ArrowType = ::arrow::FixedSizeBinaryType;
160
+ using Accumulator = ::arrow::FixedSizeBinaryBuilder;
161
+ using DictAccumulator = ::arrow::Dictionary32Builder<::arrow::FixedSizeBinaryType>;
162
+ };
163
+
164
+ class ColumnDescriptor;
165
+
166
+ // Untyped base for all encoders
167
+ class Encoder {
168
+ public:
169
+ virtual ~Encoder() = default;
170
+
171
+ virtual int64_t EstimatedDataEncodedSize() = 0;
172
+ virtual std::shared_ptr<Buffer> FlushValues() = 0;
173
+ virtual Encoding::type encoding() const = 0;
174
+
175
+ virtual void Put(const ::arrow::Array& values) = 0;
176
+
177
+ virtual MemoryPool* memory_pool() const = 0;
178
+ };
179
+
180
+ // Base class for value encoders. Since encoders may or not have state (e.g.,
181
+ // dictionary encoding) we use a class instance to maintain any state.
182
+ //
183
+ // Encode interfaces are internal, subject to change without deprecation.
184
+ template <typename DType>
185
+ class TypedEncoder : virtual public Encoder {
186
+ public:
187
+ typedef typename DType::c_type T;
188
+
189
+ using Encoder::Put;
190
+
191
+ virtual void Put(const T* src, int num_values) = 0;
192
+
193
+ virtual void Put(const std::vector<T>& src, int num_values = -1);
194
+
195
+ virtual void PutSpaced(const T* src, int num_values, const uint8_t* valid_bits,
196
+ int64_t valid_bits_offset) = 0;
197
+ };
198
+
199
+ template <typename DType>
200
+ void TypedEncoder<DType>::Put(const std::vector<T>& src, int num_values) {
201
+ if (num_values == -1) {
202
+ num_values = static_cast<int>(src.size());
203
+ }
204
+ Put(src.data(), num_values);
205
+ }
206
+
207
+ template <>
208
+ inline void TypedEncoder<BooleanType>::Put(const std::vector<bool>& src, int num_values) {
209
+ // NOTE(wesm): This stub is here only to satisfy the compiler; it is
210
+ // overridden later with the actual implementation
211
+ }
212
+
213
+ // Base class for dictionary encoders
214
+ template <typename DType>
215
+ class DictEncoder : virtual public TypedEncoder<DType> {
216
+ public:
217
+ /// Writes out any buffered indices to buffer preceded by the bit width of this data.
218
+ /// Returns the number of bytes written.
219
+ /// If the supplied buffer is not big enough, returns -1.
220
+ /// buffer must be preallocated with buffer_len bytes. Use EstimatedDataEncodedSize()
221
+ /// to size buffer.
222
+ virtual int WriteIndices(uint8_t* buffer, int buffer_len) = 0;
223
+
224
+ virtual int dict_encoded_size() const = 0;
225
+
226
+ virtual int bit_width() const = 0;
227
+
228
+ /// Writes out the encoded dictionary to buffer. buffer must be preallocated to
229
+ /// dict_encoded_size() bytes.
230
+ virtual void WriteDict(uint8_t* buffer) const = 0;
231
+
232
+ virtual int num_entries() const = 0;
233
+
234
+ /// \brief EXPERIMENTAL: Append dictionary indices into the encoder. It is
235
+ /// assumed (without any boundschecking) that the indices reference
236
+ /// preexisting dictionary values
237
+ /// \param[in] indices the dictionary index values. Only Int32Array currently
238
+ /// supported
239
+ virtual void PutIndices(const ::arrow::Array& indices) = 0;
240
+
241
+ /// \brief EXPERIMENTAL: Append dictionary into encoder, inserting indices
242
+ /// separately. Currently throws exception if the current dictionary memo is
243
+ /// non-empty
244
+ /// \param[in] values the dictionary values. Only valid for certain
245
+ /// Parquet/Arrow type combinations, like BYTE_ARRAY/BinaryArray
246
+ virtual void PutDictionary(const ::arrow::Array& values) = 0;
247
+ };
248
+
249
+ // ----------------------------------------------------------------------
250
+ // Value decoding
251
+
252
+ class Decoder {
253
+ public:
254
+ virtual ~Decoder() = default;
255
+
256
+ // Sets the data for a new page. This will be called multiple times on the same
257
+ // decoder and should reset all internal state.
258
+ virtual void SetData(int num_values, const uint8_t* data, int len) = 0;
259
+
260
+ // Returns the number of values left (for the last call to SetData()). This is
261
+ // the number of values left in this page.
262
+ virtual int values_left() const = 0;
263
+ virtual Encoding::type encoding() const = 0;
264
+ };
265
+
266
+ template <typename DType>
267
+ class TypedDecoder : virtual public Decoder {
268
+ public:
269
+ using T = typename DType::c_type;
270
+
271
+ /// \brief Decode values into a buffer
272
+ ///
273
+ /// Subclasses may override the more specialized Decode methods below.
274
+ ///
275
+ /// \param[in] buffer destination for decoded values
276
+ /// \param[in] max_values maximum number of values to decode
277
+ /// \return The number of values decoded. Should be identical to max_values except
278
+ /// at the end of the current data page.
279
+ virtual int Decode(T* buffer, int max_values) = 0;
280
+
281
+ /// \brief Decode the values in this data page but leave spaces for null entries.
282
+ ///
283
+ /// \param[in] buffer destination for decoded values
284
+ /// \param[in] num_values size of the def_levels and buffer arrays including the number
285
+ /// of null slots
286
+ /// \param[in] null_count number of null slots
287
+ /// \param[in] valid_bits bitmap data indicating position of valid slots
288
+ /// \param[in] valid_bits_offset offset into valid_bits
289
+ /// \return The number of values decoded, including nulls.
290
+ virtual int DecodeSpaced(T* buffer, int num_values, int null_count,
291
+ const uint8_t* valid_bits, int64_t valid_bits_offset) {
292
+ if (null_count > 0) {
293
+ int values_to_read = num_values - null_count;
294
+ int values_read = Decode(buffer, values_to_read);
295
+ if (values_read != values_to_read) {
296
+ throw ParquetException("Number of values / definition_levels read did not match");
297
+ }
298
+
299
+ return ::arrow::util::internal::SpacedExpand<T>(buffer, num_values, null_count,
300
+ valid_bits, valid_bits_offset);
301
+ } else {
302
+ return Decode(buffer, num_values);
303
+ }
304
+ }
305
+
306
+ /// \brief Decode into an ArrayBuilder or other accumulator
307
+ ///
308
+ /// This function assumes the definition levels were already decoded
309
+ /// as a validity bitmap in the given `valid_bits`. `null_count`
310
+ /// is the number of 0s in `valid_bits`.
311
+ /// As a space optimization, it is allowed for `valid_bits` to be null
312
+ /// if `null_count` is zero.
313
+ ///
314
+ /// \return number of values decoded
315
+ virtual int DecodeArrow(int num_values, int null_count, const uint8_t* valid_bits,
316
+ int64_t valid_bits_offset,
317
+ typename EncodingTraits<DType>::Accumulator* out) = 0;
318
+
319
+ /// \brief Decode into an ArrayBuilder or other accumulator ignoring nulls
320
+ ///
321
+ /// \return number of values decoded
322
+ int DecodeArrowNonNull(int num_values,
323
+ typename EncodingTraits<DType>::Accumulator* out) {
324
+ return DecodeArrow(num_values, 0, /*valid_bits=*/NULLPTR, 0, out);
325
+ }
326
+
327
+ /// \brief Decode into a DictionaryBuilder
328
+ ///
329
+ /// This function assumes the definition levels were already decoded
330
+ /// as a validity bitmap in the given `valid_bits`. `null_count`
331
+ /// is the number of 0s in `valid_bits`.
332
+ /// As a space optimization, it is allowed for `valid_bits` to be null
333
+ /// if `null_count` is zero.
334
+ ///
335
+ /// \return number of values decoded
336
+ virtual int DecodeArrow(int num_values, int null_count, const uint8_t* valid_bits,
337
+ int64_t valid_bits_offset,
338
+ typename EncodingTraits<DType>::DictAccumulator* builder) = 0;
339
+
340
+ /// \brief Decode into a DictionaryBuilder ignoring nulls
341
+ ///
342
+ /// \return number of values decoded
343
+ int DecodeArrowNonNull(int num_values,
344
+ typename EncodingTraits<DType>::DictAccumulator* builder) {
345
+ return DecodeArrow(num_values, 0, /*valid_bits=*/NULLPTR, 0, builder);
346
+ }
347
+ };
348
+
349
+ template <typename DType>
350
+ class DictDecoder : virtual public TypedDecoder<DType> {
351
+ public:
352
+ using T = typename DType::c_type;
353
+
354
+ virtual void SetDict(TypedDecoder<DType>* dictionary) = 0;
355
+
356
+ /// \brief Insert dictionary values into the Arrow dictionary builder's memo,
357
+ /// but do not append any indices
358
+ virtual void InsertDictionary(::arrow::ArrayBuilder* builder) = 0;
359
+
360
+ /// \brief Decode only dictionary indices and append to dictionary
361
+ /// builder. The builder must have had the dictionary from this decoder
362
+ /// inserted already.
363
+ ///
364
+ /// \warning Remember to reset the builder each time the dict decoder is initialized
365
+ /// with a new dictionary page
366
+ virtual int DecodeIndicesSpaced(int num_values, int null_count,
367
+ const uint8_t* valid_bits, int64_t valid_bits_offset,
368
+ ::arrow::ArrayBuilder* builder) = 0;
369
+
370
+ /// \brief Decode only dictionary indices (no nulls)
371
+ ///
372
+ /// \warning Remember to reset the builder each time the dict decoder is initialized
373
+ /// with a new dictionary page
374
+ virtual int DecodeIndices(int num_values, ::arrow::ArrayBuilder* builder) = 0;
375
+
376
+ /// \brief Decode only dictionary indices (no nulls). Same as above
377
+ /// DecodeIndices but target is an array instead of a builder.
378
+ ///
379
+ /// \note API EXPERIMENTAL
380
+ virtual int DecodeIndices(int num_values, int32_t* indices) = 0;
381
+
382
+ /// \brief Get dictionary. The reader will call this API when it encounters a
383
+ /// new dictionary.
384
+ ///
385
+ /// @param[out] dictionary The pointer to dictionary values. Dictionary is owned by
386
+ /// the decoder and is destroyed when the decoder is destroyed.
387
+ /// @param[out] dictionary_length The dictionary length.
388
+ ///
389
+ /// \note API EXPERIMENTAL
390
+ virtual void GetDictionary(const T** dictionary, int32_t* dictionary_length) = 0;
391
+ };
392
+
393
+ // ----------------------------------------------------------------------
394
+ // TypedEncoder specializations, traits, and factory functions
395
+
396
+ class BooleanDecoder : virtual public TypedDecoder<BooleanType> {
397
+ public:
398
+ using TypedDecoder<BooleanType>::Decode;
399
+
400
+ /// \brief Decode and bit-pack values into a buffer
401
+ ///
402
+ /// \param[in] buffer destination for decoded values
403
+ /// This buffer will contain bit-packed values. If
404
+ /// max_values is not a multiple of 8, the trailing bits
405
+ /// of the last byte will be undefined.
406
+ /// \param[in] max_values max values to decode.
407
+ /// \return The number of values decoded. Should be identical to max_values except
408
+ /// at the end of the current data page.
409
+ virtual int Decode(uint8_t* buffer, int max_values) = 0;
410
+ };
411
+
412
+ class FLBADecoder : virtual public TypedDecoder<FLBAType> {
413
+ public:
414
+ using TypedDecoder<FLBAType>::DecodeSpaced;
415
+
416
+ // TODO(wesm): As possible follow-up to PARQUET-1508, we should examine if
417
+ // there is value in adding specialized read methods for
418
+ // FIXED_LEN_BYTE_ARRAY. If only Decimal data can occur with this data type
419
+ // then perhaps not
420
+ };
421
+
422
+ PARQUET_EXPORT
423
+ std::unique_ptr<Encoder> MakeEncoder(
424
+ Type::type type_num, Encoding::type encoding, bool use_dictionary = false,
425
+ const ColumnDescriptor* descr = NULLPTR,
426
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
427
+
428
+ template <typename DType>
429
+ std::unique_ptr<typename EncodingTraits<DType>::Encoder> MakeTypedEncoder(
430
+ Encoding::type encoding, bool use_dictionary = false,
431
+ const ColumnDescriptor* descr = NULLPTR,
432
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) {
433
+ using OutType = typename EncodingTraits<DType>::Encoder;
434
+ std::unique_ptr<Encoder> base =
435
+ MakeEncoder(DType::type_num, encoding, use_dictionary, descr, pool);
436
+ return std::unique_ptr<OutType>(dynamic_cast<OutType*>(base.release()));
437
+ }
438
+
439
+ PARQUET_EXPORT
440
+ std::unique_ptr<Decoder> MakeDecoder(
441
+ Type::type type_num, Encoding::type encoding, const ColumnDescriptor* descr = NULLPTR,
442
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
443
+
444
+ namespace detail {
445
+
446
+ PARQUET_EXPORT
447
+ std::unique_ptr<Decoder> MakeDictDecoder(Type::type type_num,
448
+ const ColumnDescriptor* descr,
449
+ ::arrow::MemoryPool* pool);
450
+
451
+ } // namespace detail
452
+
453
+ template <typename DType>
454
+ std::unique_ptr<DictDecoder<DType>> MakeDictDecoder(
455
+ const ColumnDescriptor* descr = NULLPTR,
456
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) {
457
+ using OutType = DictDecoder<DType>;
458
+ auto decoder = detail::MakeDictDecoder(DType::type_num, descr, pool);
459
+ return std::unique_ptr<OutType>(dynamic_cast<OutType*>(decoder.release()));
460
+ }
461
+
462
+ template <typename DType>
463
+ std::unique_ptr<typename EncodingTraits<DType>::Decoder> MakeTypedDecoder(
464
+ Encoding::type encoding, const ColumnDescriptor* descr = NULLPTR,
465
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()) {
466
+ using OutType = typename EncodingTraits<DType>::Decoder;
467
+ std::unique_ptr<Decoder> base = MakeDecoder(DType::type_num, encoding, descr, pool);
468
+ return std::unique_ptr<OutType>(dynamic_cast<OutType*>(base.release()));
469
+ }
470
+
471
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/crypto_factory.h ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+
22
+ #include "parquet/encryption/encryption.h"
23
+ #include "parquet/encryption/file_key_wrapper.h"
24
+ #include "parquet/encryption/key_toolkit.h"
25
+ #include "parquet/encryption/kms_client_factory.h"
26
+ #include "parquet/platform.h"
27
+
28
+ namespace parquet::encryption {
29
+
30
// Default values used by EncryptionConfiguration / DecryptionConfiguration below.
static constexpr ParquetCipher::type kDefaultEncryptionAlgorithm =
    ParquetCipher::AES_GCM_V1;
static constexpr bool kDefaultPlaintextFooter = false;
static constexpr bool kDefaultDoubleWrapping = true;
static constexpr double kDefaultCacheLifetimeSeconds = 600;  // 10 minutes
static constexpr bool kDefaultInternalKeyMaterial = true;
static constexpr bool kDefaultUniformEncryption = false;
static constexpr int32_t kDefaultDataKeyLengthBits = 128;
38
+
39
/// High-level encryption configuration (master key IDs, column list, etc.)
/// consumed by CryptoFactory, which translates it into low-level
/// FileEncryptionProperties.
struct PARQUET_EXPORT EncryptionConfiguration {
  explicit EncryptionConfiguration(const std::string& footer_key)
      : footer_key(footer_key) {}

  /// ID of the master key for footer encryption/signing
  std::string footer_key;

  /// List of columns to encrypt, with master key IDs (see HIVE-21848).
  /// Format: "masterKeyID:colName,colName;masterKeyID:colName..."
  /// Either
  /// (1) column_keys must be set
  /// or
  /// (2) uniform_encryption must be set to true
  /// If none of (1) and (2) are true, or if both are true, an exception will be
  /// thrown.
  std::string column_keys;

  /// Encrypt footer and all columns with the same encryption key.
  bool uniform_encryption = kDefaultUniformEncryption;

  /// Parquet encryption algorithm. Can be "AES_GCM_V1" (default), or "AES_GCM_CTR_V1".
  ParquetCipher::type encryption_algorithm = kDefaultEncryptionAlgorithm;

  /// Write files with plaintext footer.
  /// The default is false - files are written with encrypted footer.
  bool plaintext_footer = kDefaultPlaintextFooter;

  /// Use double wrapping - where data encryption keys (DEKs) are encrypted with key
  /// encryption keys (KEKs), which in turn are encrypted with master keys.
  /// The default is true. If set to false, use single wrapping - where DEKs are
  /// encrypted directly with master keys.
  bool double_wrapping = kDefaultDoubleWrapping;

  /// Lifetime of cached entities (key encryption keys, local wrapping keys, KMS client
  /// objects).
  /// The default is 600 (10 minutes).
  double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds;

  /// Store key material inside Parquet file footers; this mode doesn’t produce
  /// additional files. By default, true. If set to false, key material is stored in
  /// separate files in the same folder, which enables key rotation for immutable
  /// Parquet files.
  bool internal_key_material = kDefaultInternalKeyMaterial;

  /// Length of data encryption keys (DEKs), randomly generated by parquet key
  /// management tools. Can be 128, 192 or 256 bits.
  /// The default is 128 bits.
  int32_t data_key_length_bits = kDefaultDataKeyLengthBits;
};
88
+
89
/// High-level decryption configuration consumed by CryptoFactory, which
/// translates it into low-level FileDecryptionProperties.
struct PARQUET_EXPORT DecryptionConfiguration {
  /// Lifetime of cached entities (key encryption keys, local wrapping keys, KMS client
  /// objects).
  /// The default is 600 (10 minutes).
  double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds;
};
95
+
96
+ /// This is a core class, that translates the parameters of high level encryption (like
97
+ /// the names of encrypted columns, names of master keys, etc), into parameters of low
98
+ /// level encryption (like the key metadata, DEK, etc). A factory that produces the low
99
+ /// level FileEncryptionProperties and FileDecryptionProperties objects, from the high
100
+ /// level parameters.
101
/// This is a core class, that translates the parameters of high level encryption (like
/// the names of encrypted columns, names of master keys, etc), into parameters of low
/// level encryption (like the key metadata, DEK, etc). A factory that produces the low
/// level FileEncryptionProperties and FileDecryptionProperties objects, from the high
/// level parameters.
class PARQUET_EXPORT CryptoFactory {
 public:
  /// a KmsClientFactory object must be registered via this method before calling any of
  /// GetFileEncryptionProperties()/GetFileDecryptionProperties() methods.
  void RegisterKmsClientFactory(std::shared_ptr<KmsClientFactory> kms_client_factory);

  /// Get the encryption properties for a Parquet file.
  /// If external key material is used then a file system and path to the
  /// parquet file must be provided.
  std::shared_ptr<FileEncryptionProperties> GetFileEncryptionProperties(
      const KmsConnectionConfig& kms_connection_config,
      const EncryptionConfiguration& encryption_config, const std::string& file_path = "",
      const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);

  /// Get decryption properties for a Parquet file.
  /// If external key material is used then a file system and path to the
  /// parquet file must be provided.
  std::shared_ptr<FileDecryptionProperties> GetFileDecryptionProperties(
      const KmsConnectionConfig& kms_connection_config,
      const DecryptionConfiguration& decryption_config, const std::string& file_path = "",
      const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);

  /// Drop cached KMS entities (KEKs, wrapping keys, clients) associated with the
  /// given KMS access token only.
  void RemoveCacheEntriesForToken(const std::string& access_token) {
    key_toolkit_->RemoveCacheEntriesForToken(access_token);
  }

  /// Drop all cached KMS entities, regardless of access token.
  void RemoveCacheEntriesForAllTokens() {
    key_toolkit_->RemoveCacheEntriesForAllTokens();
  }

  /// Rotates master encryption keys for a Parquet file that uses external key material.
  /// In single wrapping mode, data encryption keys are decrypted with the old master keys
  /// and then re-encrypted with new master keys.
  /// In double wrapping mode, key encryption keys are decrypted with the old master keys
  /// and then re-encrypted with new master keys.
  /// This relies on the KMS supporting versioning, such that the old master key is
  /// used when unwrapping a key, and the latest version is used when wrapping a key.
  void RotateMasterKeys(const KmsConnectionConfig& kms_connection_config,
                        const std::string& parquet_file_path,
                        const std::shared_ptr<::arrow::fs::FileSystem>& file_system,
                        bool double_wrapping = kDefaultDoubleWrapping,
                        double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds);

 private:
  /// Build per-column encryption properties from the "masterKeyID:col,col;..."
  /// column_keys string, wrapping a fresh DEK of dek_length bits for each column.
  ColumnPathToEncryptionPropertiesMap GetColumnEncryptionProperties(
      int dek_length, const std::string& column_keys, FileKeyWrapper* key_wrapper);

  /// Key utilities object for kms client initialization and cache control
  std::shared_ptr<KeyToolkit> key_toolkit_ = std::make_shared<KeyToolkit>();
};
151
+
152
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/encryption.h ADDED
@@ -0,0 +1,510 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <map>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <utility>
24
+
25
+ #include "parquet/exception.h"
26
+ #include "parquet/schema.h"
27
+ #include "parquet/types.h"
28
+
29
+ namespace parquet {
30
+
31
// Default values used by the builders in this header.
static constexpr ParquetCipher::type kDefaultEncryptionAlgorithm =
    ParquetCipher::AES_GCM_V1;
static constexpr int32_t kMaximalAadMetadataLength = 256;
static constexpr bool kDefaultEncryptedFooter = true;
static constexpr bool kDefaultCheckSignature = true;
static constexpr bool kDefaultAllowPlaintextFiles = false;
// Length in bytes of the random "AAD file unique" suffix of the file AAD.
static constexpr int32_t kAadFileUniqueLength = 8;
38
+
39
// Maps from a column dot-path (see schema::ColumnPath::ToDotString) to the
// per-column encryption/decryption properties.
class ColumnDecryptionProperties;
using ColumnPathToDecryptionPropertiesMap =
    std::map<std::string, std::shared_ptr<ColumnDecryptionProperties>>;

class ColumnEncryptionProperties;
using ColumnPathToEncryptionPropertiesMap =
    std::map<std::string, std::shared_ptr<ColumnEncryptionProperties>>;
46
+
47
/// Callback interface for resolving an encryption key from the key metadata
/// stored in a Parquet file.
class PARQUET_EXPORT DecryptionKeyRetriever {
 public:
  /// Return the key bytes corresponding to the given key metadata.
  virtual std::string GetKey(const std::string& key_metadata) = 0;
  virtual ~DecryptionKeyRetriever() {}
};
52
+
53
+ /// Simple integer key retriever
54
+ class PARQUET_EXPORT IntegerKeyIdRetriever : public DecryptionKeyRetriever {
55
+ public:
56
+ void PutKey(uint32_t key_id, const std::string& key);
57
+ std::string GetKey(const std::string& key_metadata) override;
58
+
59
+ private:
60
+ std::map<uint32_t, std::string> key_map_;
61
+ };
62
+
63
+ // Simple string key retriever
64
+ class PARQUET_EXPORT StringKeyIdRetriever : public DecryptionKeyRetriever {
65
+ public:
66
+ void PutKey(const std::string& key_id, const std::string& key);
67
+ std::string GetKey(const std::string& key_metadata) override;
68
+
69
+ private:
70
+ std::map<std::string, std::string> key_map_;
71
+ };
72
+
73
+ class PARQUET_EXPORT HiddenColumnException : public ParquetException {
74
+ public:
75
+ explicit HiddenColumnException(const std::string& columnPath)
76
+ : ParquetException(columnPath.c_str()) {}
77
+ };
78
+
79
+ class PARQUET_EXPORT KeyAccessDeniedException : public ParquetException {
80
+ public:
81
+ explicit KeyAccessDeniedException(const std::string& columnPath)
82
+ : ParquetException(columnPath.c_str()) {}
83
+ };
84
+
85
+ inline const uint8_t* str2bytes(const std::string& str) {
86
+ if (str.empty()) return NULLPTR;
87
+
88
+ char* cbytes = const_cast<char*>(str.c_str());
89
+ return reinterpret_cast<const uint8_t*>(cbytes);
90
+ }
91
+
92
+ class PARQUET_EXPORT ColumnEncryptionProperties {
93
+ public:
94
+ class PARQUET_EXPORT Builder {
95
+ public:
96
+ /// Convenience builder for encrypted columns.
97
+ explicit Builder(const std::string& name) : Builder(name, true) {}
98
+
99
+ /// Convenience builder for encrypted columns.
100
+ explicit Builder(const std::shared_ptr<schema::ColumnPath>& path)
101
+ : Builder(path->ToDotString(), true) {}
102
+
103
+ /// Set a column-specific key.
104
+ /// If key is not set on an encrypted column, the column will
105
+ /// be encrypted with the footer key.
106
+ /// keyBytes Key length must be either 16, 24 or 32 bytes.
107
+ /// The key is cloned, and will be wiped out (array values set to 0) upon completion
108
+ /// of file writing.
109
+ /// Caller is responsible for wiping out the input key array.
110
+ Builder* key(std::string column_key);
111
+
112
+ /// Set a key retrieval metadata.
113
+ /// use either key_metadata() or key_id(), not both
114
+ Builder* key_metadata(const std::string& key_metadata);
115
+
116
+ /// A convenience function to set key metadata using a string id.
117
+ /// Set a key retrieval metadata (converted from String).
118
+ /// use either key_metadata() or key_id(), not both
119
+ /// key_id will be converted to metadata (UTF-8 array).
120
+ Builder* key_id(const std::string& key_id);
121
+
122
+ std::shared_ptr<ColumnEncryptionProperties> build() {
123
+ return std::shared_ptr<ColumnEncryptionProperties>(
124
+ new ColumnEncryptionProperties(encrypted_, column_path_, key_, key_metadata_));
125
+ }
126
+
127
+ private:
128
+ const std::string column_path_;
129
+ bool encrypted_;
130
+ std::string key_;
131
+ std::string key_metadata_;
132
+
133
+ Builder(const std::string path, bool encrypted)
134
+ : column_path_(path), encrypted_(encrypted) {}
135
+ };
136
+
137
+ std::string column_path() const { return column_path_; }
138
+ bool is_encrypted() const { return encrypted_; }
139
+ bool is_encrypted_with_footer_key() const { return encrypted_with_footer_key_; }
140
+ std::string key() const { return key_; }
141
+ std::string key_metadata() const { return key_metadata_; }
142
+
143
+ /// Upon completion of file writing, the encryption key
144
+ /// will be wiped out.
145
+ void WipeOutEncryptionKey() { key_.clear(); }
146
+
147
+ bool is_utilized() {
148
+ if (key_.empty())
149
+ return false; // can re-use column properties without encryption keys
150
+ return utilized_;
151
+ }
152
+
153
+ /// ColumnEncryptionProperties object can be used for writing one file only.
154
+ /// Mark ColumnEncryptionProperties as utilized once it is used in
155
+ /// FileEncryptionProperties as the encryption key will be wiped out upon
156
+ /// completion of file writing.
157
+ void set_utilized() { utilized_ = true; }
158
+
159
+ std::shared_ptr<ColumnEncryptionProperties> DeepClone() {
160
+ std::string key_copy = key_;
161
+ return std::shared_ptr<ColumnEncryptionProperties>(new ColumnEncryptionProperties(
162
+ encrypted_, column_path_, key_copy, key_metadata_));
163
+ }
164
+
165
+ ColumnEncryptionProperties() = default;
166
+ ColumnEncryptionProperties(const ColumnEncryptionProperties& other) = default;
167
+ ColumnEncryptionProperties(ColumnEncryptionProperties&& other) = default;
168
+
169
+ private:
170
+ const std::string column_path_;
171
+ bool encrypted_;
172
+ bool encrypted_with_footer_key_;
173
+ std::string key_;
174
+ std::string key_metadata_;
175
+ bool utilized_;
176
+ explicit ColumnEncryptionProperties(bool encrypted, const std::string& column_path,
177
+ const std::string& key,
178
+ const std::string& key_metadata);
179
+ };
180
+
181
+ class PARQUET_EXPORT ColumnDecryptionProperties {
182
+ public:
183
+ class PARQUET_EXPORT Builder {
184
+ public:
185
+ explicit Builder(const std::string& name) : column_path_(name) {}
186
+
187
+ explicit Builder(const std::shared_ptr<schema::ColumnPath>& path)
188
+ : Builder(path->ToDotString()) {}
189
+
190
+ /// Set an explicit column key. If applied on a file that contains
191
+ /// key metadata for this column the metadata will be ignored,
192
+ /// the column will be decrypted with this key.
193
+ /// key length must be either 16, 24 or 32 bytes.
194
+ Builder* key(const std::string& key);
195
+
196
+ std::shared_ptr<ColumnDecryptionProperties> build();
197
+
198
+ private:
199
+ const std::string column_path_;
200
+ std::string key_;
201
+ };
202
+
203
+ ColumnDecryptionProperties() = default;
204
+ ColumnDecryptionProperties(const ColumnDecryptionProperties& other) = default;
205
+ ColumnDecryptionProperties(ColumnDecryptionProperties&& other) = default;
206
+
207
+ std::string column_path() const { return column_path_; }
208
+ std::string key() const { return key_; }
209
+ bool is_utilized() { return utilized_; }
210
+
211
+ /// ColumnDecryptionProperties object can be used for reading one file only.
212
+ /// Mark ColumnDecryptionProperties as utilized once it is used in
213
+ /// FileDecryptionProperties as the encryption key will be wiped out upon
214
+ /// completion of file reading.
215
+ void set_utilized() { utilized_ = true; }
216
+
217
+ /// Upon completion of file reading, the encryption key
218
+ /// will be wiped out.
219
+ void WipeOutDecryptionKey();
220
+
221
+ std::shared_ptr<ColumnDecryptionProperties> DeepClone();
222
+
223
+ private:
224
+ const std::string column_path_;
225
+ std::string key_;
226
+ bool utilized_;
227
+
228
+ /// This class is only required for setting explicit column decryption keys -
229
+ /// to override key retriever (or to provide keys when key metadata and/or
230
+ /// key retriever are not available)
231
+ explicit ColumnDecryptionProperties(const std::string& column_path,
232
+ const std::string& key);
233
+ };
234
+
235
/// Callback interface for validating the AAD prefix read from a file.
class PARQUET_EXPORT AADPrefixVerifier {
 public:
  /// Verifies identity (AAD Prefix) of individual file,
  /// or of file collection in a data set.
  /// Throws exception if an AAD prefix is wrong.
  /// In a data set, AAD Prefixes should be collected,
  /// and then checked for missing files.
  virtual void Verify(const std::string& aad_prefix) = 0;
  virtual ~AADPrefixVerifier() {}
};
245
+
246
/// Low-level, per-file decryption settings: explicit footer/column keys, a key
/// retriever callback, AAD prefix handling and plaintext-file policy.
class PARQUET_EXPORT FileDecryptionProperties {
 public:
  class PARQUET_EXPORT Builder {
   public:
    Builder() {
      check_plaintext_footer_integrity_ = kDefaultCheckSignature;
      plaintext_files_allowed_ = kDefaultAllowPlaintextFiles;
    }

    /// Set an explicit footer key. If applied on a file that contains
    /// footer key metadata the metadata will be ignored, the footer
    /// will be decrypted/verified with this key.
    /// If explicit key is not set, footer key will be fetched from
    /// key retriever.
    /// With explicit keys or AAD prefix, new encryption properties object must be
    /// created for each encrypted file.
    /// Explicit encryption keys (footer and column) are cloned.
    /// Upon completion of file reading, the cloned encryption keys in the properties
    /// will be wiped out (array values set to 0).
    /// Caller is responsible for wiping out the input key array.
    /// param footerKey Key length must be either 16, 24 or 32 bytes.
    Builder* footer_key(const std::string footer_key);

    /// Set explicit column keys (decryption properties).
    /// Its also possible to set a key retriever on this property object.
    /// Upon file decryption, availability of explicit keys is checked before
    /// invocation of the retriever callback.
    /// If an explicit key is available for a footer or a column,
    /// its key metadata will be ignored.
    Builder* column_keys(
        const ColumnPathToDecryptionPropertiesMap& column_decryption_properties);

    /// Set a key retriever callback. Its also possible to
    /// set explicit footer or column keys on this file property object.
    /// Upon file decryption, availability of explicit keys is checked before
    /// invocation of the retriever callback.
    /// If an explicit key is available for a footer or a column,
    /// its key metadata will be ignored.
    Builder* key_retriever(const std::shared_ptr<DecryptionKeyRetriever>& key_retriever);

    /// Skip integrity verification of plaintext footers.
    /// If not called, integrity of plaintext footers will be checked in runtime,
    /// and an exception will be thrown in the following situations:
    /// - footer signing key is not available
    /// (not passed, or not found by key retriever)
    /// - footer content and signature don't match
    Builder* disable_footer_signature_verification() {
      check_plaintext_footer_integrity_ = false;
      return this;
    }

    /// Explicitly supply the file AAD prefix.
    /// A must when a prefix is used for file encryption, but not stored in file.
    /// If AAD prefix is stored in file, it will be compared to the explicitly
    /// supplied value and an exception will be thrown if they differ.
    Builder* aad_prefix(const std::string& aad_prefix);

    /// Set callback for verification of AAD Prefixes stored in file.
    Builder* aad_prefix_verifier(std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier);

    /// By default, reading plaintext (unencrypted) files is not
    /// allowed when using a decryptor
    /// - in order to detect files that were not encrypted by mistake.
    /// However, the default behavior can be overridden by calling this method.
    /// The caller should use then a different method to ensure encryption
    /// of files with sensitive data.
    Builder* plaintext_files_allowed() {
      plaintext_files_allowed_ = true;
      return this;
    }

    std::shared_ptr<FileDecryptionProperties> build() {
      return std::shared_ptr<FileDecryptionProperties>(new FileDecryptionProperties(
          footer_key_, key_retriever_, check_plaintext_footer_integrity_, aad_prefix_,
          aad_prefix_verifier_, column_decryption_properties_, plaintext_files_allowed_));
    }

   private:
    std::string footer_key_;
    std::string aad_prefix_;
    std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier_;
    ColumnPathToDecryptionPropertiesMap column_decryption_properties_;

    std::shared_ptr<DecryptionKeyRetriever> key_retriever_;
    bool check_plaintext_footer_integrity_;
    bool plaintext_files_allowed_;
  };

  /// Return the explicit key for the given column dot-path, if one was set.
  std::string column_key(const std::string& column_path) const;

  std::string footer_key() const { return footer_key_; }

  std::string aad_prefix() const { return aad_prefix_; }

  const std::shared_ptr<DecryptionKeyRetriever>& key_retriever() const {
    return key_retriever_;
  }

  bool check_plaintext_footer_integrity() const {
    return check_plaintext_footer_integrity_;
  }

  bool plaintext_files_allowed() const { return plaintext_files_allowed_; }

  const std::shared_ptr<AADPrefixVerifier>& aad_prefix_verifier() const {
    return aad_prefix_verifier_;
  }

  /// Upon completion of file reading, the encryption keys in the properties
  /// will be wiped out (array values set to 0).
  void WipeOutDecryptionKeys();

  bool is_utilized();

  /// FileDecryptionProperties object can be used for reading one file only.
  /// Mark FileDecryptionProperties as utilized once it is used to read a file as the
  /// encryption keys will be wiped out upon completion of file reading.
  void set_utilized() { utilized_ = true; }

  /// FileDecryptionProperties object can be used for reading one file only.
  /// (unless this object keeps the keyRetrieval callback only, and no explicit
  /// keys or aadPrefix).
  /// At the end, keys are wiped out in the memory.
  /// This method allows to clone identical properties for another file,
  /// with an option to update the aadPrefix (if newAadPrefix is null,
  /// aadPrefix will be cloned too)
  std::shared_ptr<FileDecryptionProperties> DeepClone(std::string new_aad_prefix = "");

 private:
  std::string footer_key_;
  std::string aad_prefix_;
  std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier_;

  const std::string empty_string_ = "";
  ColumnPathToDecryptionPropertiesMap column_decryption_properties_;

  std::shared_ptr<DecryptionKeyRetriever> key_retriever_;
  bool check_plaintext_footer_integrity_;
  bool plaintext_files_allowed_;
  bool utilized_;

  FileDecryptionProperties(
      const std::string& footer_key,
      std::shared_ptr<DecryptionKeyRetriever> key_retriever,
      bool check_plaintext_footer_integrity, const std::string& aad_prefix,
      std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier,
      const ColumnPathToDecryptionPropertiesMap& column_decryption_properties,
      bool plaintext_files_allowed);
};
395
+
396
/// Low-level, per-file encryption settings: cipher, footer key and metadata,
/// AAD prefix handling, and the per-column encryption map.
class PARQUET_EXPORT FileEncryptionProperties {
 public:
  class PARQUET_EXPORT Builder {
   public:
    explicit Builder(const std::string& footer_key)
        : parquet_cipher_(kDefaultEncryptionAlgorithm),
          encrypted_footer_(kDefaultEncryptedFooter) {
      footer_key_ = footer_key;
      store_aad_prefix_in_file_ = false;
    }

    /// Create files with plaintext footer.
    /// If not called, the files will be created with encrypted footer (default).
    Builder* set_plaintext_footer() {
      encrypted_footer_ = false;
      return this;
    }

    /// Set encryption algorithm.
    /// If not called, files will be encrypted with AES_GCM_V1 (default).
    Builder* algorithm(ParquetCipher::type parquet_cipher) {
      parquet_cipher_ = parquet_cipher;
      return this;
    }

    /// Set a key retrieval metadata (converted from String).
    /// use either footer_key_metadata or footer_key_id, not both.
    Builder* footer_key_id(const std::string& key_id);

    /// Set a key retrieval metadata.
    /// use either footer_key_metadata or footer_key_id, not both.
    Builder* footer_key_metadata(const std::string& footer_key_metadata);

    /// Set the file AAD Prefix.
    Builder* aad_prefix(const std::string& aad_prefix);

    /// Skip storing AAD Prefix in file.
    /// If not called, and if AAD Prefix is set, it will be stored.
    Builder* disable_aad_prefix_storage();

    /// Set the list of encrypted columns and their properties (keys etc).
    /// If not called, all columns will be encrypted with the footer key.
    /// If called, the file columns not in the list will be left unencrypted.
    Builder* encrypted_columns(
        const ColumnPathToEncryptionPropertiesMap& encrypted_columns);

    std::shared_ptr<FileEncryptionProperties> build() {
      return std::shared_ptr<FileEncryptionProperties>(new FileEncryptionProperties(
          parquet_cipher_, footer_key_, footer_key_metadata_, encrypted_footer_,
          aad_prefix_, store_aad_prefix_in_file_, encrypted_columns_));
    }

   private:
    ParquetCipher::type parquet_cipher_;
    bool encrypted_footer_;
    std::string footer_key_;
    std::string footer_key_metadata_;

    std::string aad_prefix_;
    bool store_aad_prefix_in_file_;
    ColumnPathToEncryptionPropertiesMap encrypted_columns_;
  };
  bool encrypted_footer() const { return encrypted_footer_; }

  EncryptionAlgorithm algorithm() const { return algorithm_; }

  std::string footer_key() const { return footer_key_; }

  std::string footer_key_metadata() const { return footer_key_metadata_; }

  std::string file_aad() const { return file_aad_; }

  /// Return the encryption properties for the given column dot-path, if any.
  std::shared_ptr<ColumnEncryptionProperties> column_encryption_properties(
      const std::string& column_path);

  bool is_utilized() const { return utilized_; }

  /// FileEncryptionProperties object can be used for writing one file only.
  /// Mark FileEncryptionProperties as utilized once it is used to write a file as the
  /// encryption keys will be wiped out upon completion of file writing.
  void set_utilized() { utilized_ = true; }

  /// Upon completion of file writing, the encryption keys
  /// will be wiped out (array values set to 0).
  void WipeOutEncryptionKeys();

  /// FileEncryptionProperties object can be used for writing one file only.
  /// (at the end, keys are wiped out in the memory).
  /// This method allows to clone identical properties for another file,
  /// with an option to update the aadPrefix (if newAadPrefix is null,
  /// aadPrefix will be cloned too)
  std::shared_ptr<FileEncryptionProperties> DeepClone(std::string new_aad_prefix = "");

  ColumnPathToEncryptionPropertiesMap encrypted_columns() const {
    return encrypted_columns_;
  }

 private:
  EncryptionAlgorithm algorithm_;
  std::string footer_key_;
  std::string footer_key_metadata_;
  bool encrypted_footer_;
  std::string file_aad_;
  std::string aad_prefix_;
  bool utilized_;
  bool store_aad_prefix_in_file_;
  ColumnPathToEncryptionPropertiesMap encrypted_columns_;

  FileEncryptionProperties(ParquetCipher::type cipher, const std::string& footer_key,
                           const std::string& footer_key_metadata, bool encrypted_footer,
                           const std::string& aad_prefix, bool store_aad_prefix_in_file,
                           const ColumnPathToEncryptionPropertiesMap& encrypted_columns);
};
509
+
510
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_material_store.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <set>
21
+ #include <string>
22
+ #include <unordered_map>
23
+
24
+ #include "arrow/filesystem/filesystem.h"
25
+ #include "parquet/platform.h"
26
+
27
+ namespace parquet::encryption {
28
+
29
+ /// Stores encryption key material outside the Parquet file, for example in a separate
30
+ /// small file in the same folder. This is important for “key rotation”, when MEKs have to
31
+ /// be changed (if compromised; or periodically, just in case) - without modifying the
32
+ /// Parquet files (often immutable).
33
/// Stores encryption key material outside the Parquet file, for example in a separate
/// small file in the same folder. This is important for “key rotation”, when MEKs have to
/// be changed (if compromised; or periodically, just in case) - without modifying the
/// Parquet files (often immutable).
class PARQUET_EXPORT FileKeyMaterialStore {
 public:
  /// Add key material for one encryption key.
  virtual void AddKeyMaterial(std::string key_id_in_file, std::string key_material) = 0;

  /// Get key material
  virtual std::string GetKeyMaterial(std::string key_id_in_file) = 0;

  /// After key material was added for all keys in the given Parquet file,
  /// save material in persistent store.
  virtual void SaveMaterial() = 0;

  /// Remove key material from persistent store. Used in key rotation.
  virtual void RemoveMaterial() = 0;

  /// Move key material to another store. Used in key rotation.
  virtual void MoveMaterialTo(std::shared_ptr<FileKeyMaterialStore> target_key_store) = 0;

  /// Returns all key IDs in this store (for the given Parquet file).
  virtual std::vector<std::string> GetKeyIDSet() = 0;

  virtual ~FileKeyMaterialStore() {}
};
56
+
57
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_unwrapper.h ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/concurrent_map.h"
21
+
22
+ #include "parquet/encryption/encryption.h"
23
+ #include "parquet/encryption/file_system_key_material_store.h"
24
+ #include "parquet/encryption/key_material.h"
25
+ #include "parquet/encryption/key_toolkit.h"
26
+ #include "parquet/encryption/key_toolkit_internal.h"
27
+ #include "parquet/encryption/kms_client.h"
28
+ #include "parquet/platform.h"
29
+
30
+ namespace parquet::encryption {
31
+
32
// This class will retrieve the key from "key metadata", following these steps:
// 1. Parse "key metadata" (see structure in KeyMetadata class).
// 2. Retrieve "key material" which can be stored inside or outside "key metadata".
// 3. Unwrap the "data encryption key" from "key material". There are 2 modes:
//    3.1. single wrapping: decrypt the wrapped "data encryption key" directly with
//         "master encryption key"
//    3.2. double wrapping: 2 steps:
//         3.2.1. "key encryption key" is decrypted with "master encryption key"
//         3.2.2. "data encryption key" is decrypted with the above "key encryption key"
class PARQUET_EXPORT FileKeyUnwrapper : public DecryptionKeyRetriever {
 public:
  /// key_toolkit and kms_connection_config are used to get a KmsClient from the cache,
  /// or to create one if it is not in the cache yet. cache_lifetime_seconds is the
  /// lifetime of KmsClients in the cache.
  /// If the file uses external key material then the Parquet file path and file
  /// system must be specified.
  FileKeyUnwrapper(std::shared_ptr<KeyToolkit> key_toolkit,
                   const KmsConnectionConfig& kms_connection_config,
                   double cache_lifetime_seconds, const std::string& file_path = "",
                   const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);

  /// Constructor overload that takes a raw (non-owning) pointer to the KeyToolkit;
  /// the toolkit must outlive this unwrapper.
  FileKeyUnwrapper(KeyToolkit* key_toolkit,
                   const KmsConnectionConfig& kms_connection_config,
                   double cache_lifetime_seconds, const std::string& file_path = "",
                   const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);

  /// Constructor overload that takes a raw pointer to the KeyToolkit and
  /// accepts an existing key_material_store rather than using
  /// the file path and file system to create one when needed.
  FileKeyUnwrapper(KeyToolkit* key_toolkit,
                   const KmsConnectionConfig& kms_connection_config,
                   double cache_lifetime_seconds,
                   std::shared_ptr<FileKeyMaterialStore> key_material_store);

  /// Get the data key from key metadata (DecryptionKeyRetriever interface).
  std::string GetKey(const std::string& key_metadata) override;

  /// Get the data key along with the master key id from key material.
  KeyWithMasterId GetDataEncryptionKey(const KeyMaterial& key_material);

 private:
  /// Common constructor delegated to by the public overloads: key_toolkit_owner keeps
  /// the toolkit alive when the caller passed a shared_ptr (null otherwise), while
  /// key_toolkit is the pointer actually used.
  FileKeyUnwrapper(std::shared_ptr<KeyToolkit> key_toolkit_owner, KeyToolkit* key_toolkit,
                   const KmsConnectionConfig& kms_connection_config,
                   double cache_lifetime_seconds,
                   std::shared_ptr<FileKeyMaterialStore> key_material_store,
                   const std::string& file_path,
                   const std::shared_ptr<::arrow::fs::FileSystem>& file_system);

  std::shared_ptr<KmsClient> GetKmsClientFromConfigOrKeyMaterial(
      const KeyMaterial& key_material);

  /// A map of Key Encryption Key (KEK) ID -> KEK bytes, for the current token
  std::shared_ptr<::arrow::util::ConcurrentMap<std::string, std::string>> kek_per_kek_id_;
  /// Owns the toolkit when constructed from a shared_ptr; null for the raw-pointer
  /// overloads.
  std::shared_ptr<KeyToolkit> key_toolkit_;
  /// Non-owning pointer to the toolkit in use.
  KeyToolkit* key_toolkit_;
  KmsConnectionConfig kms_connection_config_;
  const double cache_entry_lifetime_seconds_;
  /// Null when key material is stored inside the Parquet file.
  std::shared_ptr<FileKeyMaterialStore> key_material_store_;
  const std::string file_path_;
  std::shared_ptr<::arrow::fs::FileSystem> file_system_;
};
93
+
94
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_wrapper.h ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+ #include <unordered_map>
23
+
24
+ #include "arrow/util/concurrent_map.h"
25
+
26
+ #include "parquet/encryption/file_key_material_store.h"
27
+ #include "parquet/encryption/key_encryption_key.h"
28
+ #include "parquet/encryption/key_toolkit.h"
29
+ #include "parquet/encryption/kms_client.h"
30
+ #include "parquet/platform.h"
31
+
32
+ namespace parquet::encryption {
33
+
34
// This class will generate "key metadata" from "data encryption key" and "master key",
// following these steps:
// 1. Wrap "data encryption key". There are 2 modes:
//    1.1. single wrapping: encrypt "data encryption key" directly with "master
//         encryption key"
//    1.2. double wrapping: 2 steps:
//         1.2.1. "key encryption key" is randomized (see KeyEncryptionKey class)
//         1.2.2. "data encryption key" is encrypted with the above "key encryption key"
// 2. Create "key material" (see structure in KeyMaterial class)
// 3. Create "key metadata" with "key material" inside or a reference to outside "key
//    material" (see structure in KeyMetadata class).
class PARQUET_EXPORT FileKeyWrapper {
 public:
  // presumably the byte lengths of generated KEKs and their random ids -
  // confirm against the implementation
  static constexpr int kKeyEncryptionKeyLength = 16;
  static constexpr int kKeyEncryptionKeyIdLength = 16;

  /// key_toolkit and kms_connection_config are used to get a KmsClient from the cache,
  /// or to create one if it is not in the cache yet. cache_entry_lifetime_seconds is
  /// the lifetime of KmsClients in the cache. key_material_store is used to store
  /// "key material" outside the parquet file, NULL if "key material" is stored inside
  /// the parquet file.
  FileKeyWrapper(KeyToolkit* key_toolkit,
                 const KmsConnectionConfig& kms_connection_config,
                 std::shared_ptr<FileKeyMaterialStore> key_material_store,
                 double cache_entry_lifetime_seconds, bool double_wrapping);

  /// Creates key_metadata field for a given data key, via wrapping the key with the
  /// master key.
  /// When external key material is used, an identifier is usually generated
  /// automatically but may be specified explicitly to support key rotation,
  /// which requires keeping the same identifiers.
  std::string GetEncryptionKeyMetadata(const std::string& data_key,
                                       const std::string& master_key_id,
                                       bool is_footer_key,
                                       std::string key_id_in_file = "");

 private:
  /// Creates a key encryption key for the given master key.
  KeyEncryptionKey CreateKeyEncryptionKey(const std::string& master_key_id);

  /// A map of Master Encryption Key ID -> KeyEncryptionKey, for the current token
  std::shared_ptr<::arrow::util::ConcurrentMap<std::string, KeyEncryptionKey>>
      kek_per_master_key_id_;

  std::shared_ptr<KmsClient> kms_client_;
  KmsConnectionConfig kms_connection_config_;
  std::shared_ptr<FileKeyMaterialStore> key_material_store_;
  const double cache_entry_lifetime_seconds_;
  const bool double_wrapping_;
  // presumably used to derive unique in-file ids for column keys - see implementation
  uint16_t key_counter_;
};
83
+
84
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_system_key_material_store.h ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <set>
21
+ #include <string>
22
+ #include <unordered_map>
23
+
24
+ #include "arrow/filesystem/filesystem.h"
25
+
26
+ #include "parquet/encryption/file_key_material_store.h"
27
+
28
+ namespace parquet::encryption {
29
+
30
+ /// A FileKeyMaterialStore that stores key material in a file system file in the same
31
+ /// folder as the Parquet file.
32
+ class PARQUET_EXPORT FileSystemKeyMaterialStore : public FileKeyMaterialStore {
33
+ public:
34
+ static constexpr const char kKeyMaterialFilePrefix[] = "_KEY_MATERIAL_FOR_";
35
+ static constexpr const char kTempFilePrefix[] = "_TMP";
36
+ static constexpr const char kKeyMaterialFileSuffix[] = ".json";
37
+
38
+ FileSystemKeyMaterialStore() {}
39
+ FileSystemKeyMaterialStore(const std::string& key_material_file_path,
40
+ const std::shared_ptr<::arrow::fs::FileSystem>& file_system);
41
+
42
+ /// Creates a new file system key material store for a parquet file.
43
+ /// When use_tmp_prefix is true, files are saved with an extra _TMP prefix so they don't
44
+ /// conflict with existing external material files. This is useful during key rotation
45
+ /// so that temporary key material files can be created while using the existing key
46
+ /// material, before moving the key material to the non-temporary location.
47
+ static std::shared_ptr<FileSystemKeyMaterialStore> Make(
48
+ const std::string& parquet_file_path,
49
+ const std::shared_ptr<::arrow::fs::FileSystem>& file_system, bool use_tmp_prefix);
50
+
51
+ /// Add key material for one encryption key.
52
+ void AddKeyMaterial(std::string key_id_in_file, std::string key_material) {
53
+ key_material_map_.insert({key_id_in_file, key_material});
54
+ }
55
+
56
+ /// Get key material
57
+ std::string GetKeyMaterial(std::string key_id_in_file) {
58
+ if (key_material_map_.empty()) {
59
+ LoadKeyMaterialMap();
60
+ }
61
+ auto found = key_material_map_.find(key_id_in_file);
62
+ return found->second;
63
+ }
64
+
65
+ /// After key material was added for all keys in the given Parquet file,
66
+ /// save material in persistent store.
67
+ void SaveMaterial();
68
+
69
+ /// Remove key material from persistent store. Used in key rotation.
70
+ void RemoveMaterial();
71
+
72
+ /// Move key material to another store. Used in key rotation.
73
+ void MoveMaterialTo(std::shared_ptr<FileKeyMaterialStore> target_key_store);
74
+
75
+ /// Returns the Set of all key IDs in this store (for the given Parquet file)
76
+ std::vector<std::string> GetKeyIDSet();
77
+
78
+ private:
79
+ std::string GetStorageFilePath() { return key_material_file_path_; }
80
+
81
+ std::string BuildKeyMaterialMapJson();
82
+ void LoadKeyMaterialMap();
83
+ std::string key_material_file_path_;
84
+ std::shared_ptr<::arrow::fs::FileSystem> file_system_;
85
+ /// Maps ID of a key in Parquet file and key material
86
+ std::unordered_map<std::string, std::string> key_material_map_;
87
+ };
88
+
89
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_encryption_key.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <vector>
22
+
23
+ #include "arrow/util/base64.h"
24
+
25
+ namespace parquet::encryption {
26
+
27
+ // In the double wrapping mode, each "data encryption key" (DEK) is encrypted with a “key
28
+ // encryption key” (KEK), that in turn is encrypted with a "master encryption key" (MEK).
29
+ // In a writer process, a random KEK is generated for each MEK ID, and cached in a <MEK-ID
30
+ // : KEK> map. This allows to perform an interaction with a KMS server only once for each
31
+ // MEK, in order to wrap its KEK. "Data encryption key" (DEK) wrapping is performed
32
+ // locally, and does not involve an interaction with a KMS server.
33
+ class KeyEncryptionKey {
34
+ public:
35
+ KeyEncryptionKey(std::string kek_bytes, std::string kek_id,
36
+ std::string encoded_wrapped_kek)
37
+ : kek_bytes_(std::move(kek_bytes)),
38
+ kek_id_(std::move(kek_id)),
39
+ encoded_kek_id_(::arrow::util::base64_encode(kek_id_)),
40
+ encoded_wrapped_kek_(std::move(encoded_wrapped_kek)) {}
41
+
42
+ const std::string& kek_bytes() const { return kek_bytes_; }
43
+
44
+ const std::string& kek_id() const { return kek_id_; }
45
+
46
+ const std::string& encoded_kek_id() const { return encoded_kek_id_; }
47
+
48
+ const std::string& encoded_wrapped_kek() const { return encoded_wrapped_kek_; }
49
+
50
+ private:
51
+ std::string kek_bytes_;
52
+ std::string kek_id_;
53
+ std::string encoded_kek_id_;
54
+ std::string encoded_wrapped_kek_;
55
+ };
56
+
57
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_material.h ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+
22
+ #include "parquet/platform.h"
23
+
24
+ namespace arrow {
25
+ namespace json {
26
+ namespace internal {
27
+ class ObjectParser;
28
+ } // namespace internal
29
+ } // namespace json
30
+ } // namespace arrow
31
+
32
+ namespace parquet::encryption {
33
+
34
// KeyMaterial class represents the "key material", keeping the information that allows
// readers to recover an encryption key (see description of the KeyMetadata class). The
// keytools package (PARQUET-1373) implements the "envelope encryption" pattern, in a
// "single wrapping" or "double wrapping" mode. In the single wrapping mode, the key
// material is generated by encrypting the "data encryption key" (DEK) by a "master
// key". In the double wrapping mode, the key material is generated by encrypting the
// DEK by a "key encryption key" (KEK), that in turn is encrypted by a "master key".
//
// Key material is kept in a flat json object, with the following fields:
// 1. "keyMaterialType" - a String, with the type of key material. In the current
//    version, only one value is allowed - "PKMT1" (stands for "parquet key management
//    tools, version 1"). For external key material storage, this field is written in
//    both "key metadata" and "key material" jsons. For internal key material storage,
//    this field is written only once in the common json.
// 2. "isFooterKey" - a boolean. If true, means that the material belongs to a file
//    footer key, and keeps additional information (such as KMS instance ID and URL).
//    If false, means that the material belongs to a column key.
// 3. "kmsInstanceID" - a String, with the KMS Instance ID. Written only in footer key
//    material.
// 4. "kmsInstanceURL" - a String, with the KMS Instance URL. Written only in footer
//    key material.
// 5. "masterKeyID" - a String, with the ID of the master key used to generate the
//    material.
// 6. "wrappedDEK" - a String, with the wrapped DEK (base64 encoding).
// 7. "doubleWrapping" - a boolean. If true, means that the material was generated in
//    double wrapping mode. If false - in single wrapping mode.
// 8. "keyEncryptionKeyID" - a String, with the ID of the KEK used to generate the
//    material. Written only in double wrapping mode.
// 9. "wrappedKEK" - a String, with the wrapped KEK (base64 encoding). Written only in
//    double wrapping mode.
class PARQUET_EXPORT KeyMaterial {
 public:
  // these fields are defined in a specification and should never be changed
  static constexpr const char kKeyMaterialTypeField[] = "keyMaterialType";
  static constexpr const char kKeyMaterialType1[] = "PKMT1";

  static constexpr const char kFooterKeyIdInFile[] = "footerKey";
  static constexpr const char kColumnKeyIdInFilePrefix[] = "columnKey";

  static constexpr const char kIsFooterKeyField[] = "isFooterKey";
  static constexpr const char kDoubleWrappingField[] = "doubleWrapping";
  static constexpr const char kKmsInstanceIdField[] = "kmsInstanceID";
  static constexpr const char kKmsInstanceUrlField[] = "kmsInstanceURL";
  static constexpr const char kMasterKeyIdField[] = "masterKeyID";
  static constexpr const char kWrappedDataEncryptionKeyField[] = "wrappedDEK";
  static constexpr const char kKeyEncryptionKeyIdField[] = "keyEncryptionKeyID";
  static constexpr const char kWrappedKeyEncryptionKeyField[] = "wrappedKEK";

 public:
  KeyMaterial() = default;

  /// Parse key material from its serialized json string representation.
  static KeyMaterial Parse(const std::string& key_material_string);

  /// Parse key material from an already-parsed json object.
  static KeyMaterial Parse(
      const ::arrow::json::internal::ObjectParser* key_material_json);

  /// This method returns a json string that will be stored either inside a parquet file
  /// or in a key material store outside the parquet file.
  static std::string SerializeToJson(bool is_footer_key,
                                     const std::string& kms_instance_id,
                                     const std::string& kms_instance_url,
                                     const std::string& master_key_id,
                                     bool is_double_wrapped, const std::string& kek_id,
                                     const std::string& encoded_wrapped_kek,
                                     const std::string& encoded_wrapped_dek,
                                     bool is_internal_storage);

  bool is_footer_key() const { return is_footer_key_; }
  bool is_double_wrapped() const { return is_double_wrapped_; }
  const std::string& master_key_id() const { return master_key_id_; }
  const std::string& wrapped_dek() const { return encoded_wrapped_dek_; }
  const std::string& kek_id() const { return kek_id_; }
  const std::string& wrapped_kek() const { return encoded_wrapped_kek_; }
  const std::string& kms_instance_id() const { return kms_instance_id_; }
  const std::string& kms_instance_url() const { return kms_instance_url_; }

 private:
  /// Used by the Parse factories; fields map 1:1 to the json fields documented above.
  KeyMaterial(bool is_footer_key, const std::string& kms_instance_id,
              const std::string& kms_instance_url, const std::string& master_key_id,
              bool is_double_wrapped, const std::string& kek_id,
              const std::string& encoded_wrapped_kek,
              const std::string& encoded_wrapped_dek);

  bool is_footer_key_;
  std::string kms_instance_id_;
  std::string kms_instance_url_;
  std::string master_key_id_;
  bool is_double_wrapped_;
  std::string kek_id_;
  std::string encoded_wrapped_kek_;
  std::string encoded_wrapped_dek_;
};
128
+
129
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_metadata.h ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+ #include <variant>
22
+
23
+ #include "parquet/encryption/key_material.h"
24
+ #include "parquet/exception.h"
25
+ #include "parquet/platform.h"
26
+
27
+ namespace parquet::encryption {
28
+
29
// Parquet encryption specification defines "key metadata" as an arbitrary byte array,
// generated by file writers for each encryption key, and passed to the low level API
// for storage in the file footer. The "key metadata" field is made available to file
// readers to enable recovery of the key. This interface can be utilized for
// implementation of any key management scheme.
//
// The keytools package (PARQUET-1373) implements one approach, of many possible, to
// key management and to generation of the "key metadata" fields. This approach, based
// on the "envelope encryption" pattern, allows integration with KMS servers. It keeps
// the actual material, required to recover a key, in a "key material" object (see the
// KeyMaterial class for details). This class is implemented to support version 1 of
// the parquet key management tools specification.
//
// KeyMetadata writes (and reads) the "key metadata" field as a flat json object,
// with the following fields:
// 1. "keyMaterialType" - a String, with the type of key material.
// 2. "internalStorage" - a boolean. If true, means that "key material" is kept inside
//    the "key metadata" field. If false, "key material" is kept externally (outside
//    Parquet files) - in this case, "key metadata" keeps a reference to the external
//    "key material".
// 3. "keyReference" - a String, with the reference to the external "key material".
//    Written only if internalStorage is false.
//
// If internalStorage is true, "key material" is a part of "key metadata", and the json
// keeps additional fields, described in the KeyMaterial class.
class PARQUET_EXPORT KeyMetadata {
 public:
  static constexpr const char kKeyMaterialInternalStorageField[] = "internalStorage";
  static constexpr const char kKeyReferenceField[] = "keyReference";

  /// key_metadata_bytes is the key metadata field stored in the parquet file,
  /// in the serialized json object format.
  static KeyMetadata Parse(const std::string& key_metadata_bytes);

  /// Serializes a "key metadata" json that only references external key material.
  static std::string CreateSerializedForExternalMaterial(
      const std::string& key_reference);

  bool key_material_stored_internally() const { return is_internal_storage_; }

  /// The embedded key material.
  /// Throws ParquetException if the material is stored externally.
  const KeyMaterial& key_material() const {
    if (!is_internal_storage_) {
      throw ParquetException("key material is stored externally.");
    }
    return ::std::get<KeyMaterial>(key_material_or_reference_);
  }

  /// The reference to the external key material.
  /// Throws ParquetException if the material is stored internally.
  const std::string& key_reference() const {
    if (is_internal_storage_) {
      throw ParquetException("key material is stored internally.");
    }
    return ::std::get<std::string>(key_material_or_reference_);
  }

 private:
  explicit KeyMetadata(const KeyMaterial& key_material);
  explicit KeyMetadata(const std::string& key_reference);

  bool is_internal_storage_;
  /// If is_internal_storage_ is true, KeyMaterial is set,
  /// else a string referencing to an outside "key material" is set.
  ::std::variant<KeyMaterial, std::string> key_material_or_reference_;
};
90
+
91
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_toolkit.h ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+
23
+ #include "parquet/encryption/key_encryption_key.h"
24
+ #include "parquet/encryption/kms_client.h"
25
+ #include "parquet/encryption/kms_client_factory.h"
26
+ #include "parquet/encryption/two_level_cache_with_expiration.h"
27
+ #include "parquet/platform.h"
28
+
29
+ namespace parquet::encryption {
30
+
31
// Period between cache cleanups performed during key rotation.
// The value is in seconds (60 * 60).
static constexpr uint64_t kCacheCleanPeriodForKeyRotation = 60 * 60;  // 1 hour

// KeyToolkit is a utility that keeps various tools for key management (such as key
// rotation, kms client instantiation, cache control, etc), plus a number of auxiliary
// classes for internal use.
class PARQUET_EXPORT KeyToolkit {
 public:
  KeyToolkit() { last_cache_clean_for_key_rotation_time_ = {}; }

  /// KMS client two level cache: token -> KMSInstanceId -> KmsClient
  TwoLevelCacheWithExpiration<std::shared_ptr<KmsClient>>& kms_client_cache_per_token() {
    return kms_client_cache_;
  }
  /// Key encryption key two level cache for wrapping: token -> MasterEncryptionKeyId ->
  /// KeyEncryptionKey
  TwoLevelCacheWithExpiration<KeyEncryptionKey>& kek_write_cache_per_token() {
    return key_encryption_key_write_cache_;
  }

  /// Key encryption key two level cache for unwrapping: token -> KeyEncryptionKeyId ->
  /// KeyEncryptionKeyBytes
  TwoLevelCacheWithExpiration<std::string>& kek_read_cache_per_token() {
    return key_encryption_key_read_cache_;
  }

  /// Returns a KmsClient for the given connection config, from the cache or newly
  /// created via the registered factory.
  /// NOTE(review): the parameter name says "ms", but sibling APIs in this module pass
  /// cache lifetimes in seconds - confirm the intended unit against the implementation.
  std::shared_ptr<KmsClient> GetKmsClient(
      const KmsConnectionConfig& kms_connection_config, double cache_entry_lifetime_ms);

  /// Flush any caches that are tied to the (compromised) access_token
  void RemoveCacheEntriesForToken(const std::string& access_token);

  void RemoveCacheEntriesForAllTokens();

  /// Registers the factory used by GetKmsClient. May be called at most once;
  /// throws ParquetException if a factory was already registered.
  void RegisterKmsClientFactory(std::shared_ptr<KmsClientFactory> kms_client_factory) {
    if (kms_client_factory_ != NULLPTR) {
      throw ParquetException("KMS client factory has already been registered.");
    }
    kms_client_factory_ = std::move(kms_client_factory);
  }

  /// Key rotation. In the single wrapping mode, decrypts data keys with old master
  /// keys, then encrypts them with new master keys. In the double wrapping mode,
  /// decrypts KEKs (key encryption keys) with old master keys, generates new KEKs and
  /// encrypts them with new master keys. Works only if key material is not stored
  /// internally in file footers. Not supported in local key wrapping mode. Method can
  /// be run by multiple threads, but each thread must work on different files.
  void RotateMasterKeys(const KmsConnectionConfig& kms_connection_config,
                        const std::string& parquet_file_path,
                        const std::shared_ptr<::arrow::fs::FileSystem>& file_system,
                        bool double_wrapping, double cache_lifetime_seconds);

 private:
  TwoLevelCacheWithExpiration<std::shared_ptr<KmsClient>> kms_client_cache_;
  TwoLevelCacheWithExpiration<KeyEncryptionKey> key_encryption_key_write_cache_;
  TwoLevelCacheWithExpiration<std::string> key_encryption_key_read_cache_;
  /// Null until RegisterKmsClientFactory is called.
  std::shared_ptr<KmsClientFactory> kms_client_factory_;
  /// Guards last_cache_clean_for_key_rotation_time_.
  mutable ::arrow::util::Mutex last_cache_clean_for_key_rotation_time_mutex_;
  internal::TimePoint last_cache_clean_for_key_rotation_time_;
};
90
+
91
+ // "data encryption key" and "master key identifier" are paired together as output when
92
+ // parsing from "key material"
93
+ class PARQUET_EXPORT KeyWithMasterId {
94
+ public:
95
+ KeyWithMasterId(std::string key_bytes, std::string master_id)
96
+ : key_bytes_(std::move(key_bytes)), master_id_(std::move(master_id)) {}
97
+
98
+ const std::string& data_key() const { return key_bytes_; }
99
+ const std::string& master_id() const { return master_id_; }
100
+
101
+ private:
102
+ const std::string key_bytes_;
103
+ const std::string master_id_;
104
+ };
105
+
106
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client.h ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+ #include <unordered_map>
23
+
24
+ #include "arrow/util/mutex.h"
25
+
26
+ #include "parquet/exception.h"
27
+ #include "parquet/platform.h"
28
+
29
+ namespace parquet::encryption {
30
+
31
+ /// This class wraps the key access token of a KMS server. If your token changes over
32
+ /// time, you should keep the reference to the KeyAccessToken object and call Refresh()
33
+ /// method every time you have a new token.
34
+ class PARQUET_EXPORT KeyAccessToken {
35
+ public:
36
+ KeyAccessToken() = default;
37
+
38
+ explicit KeyAccessToken(const std::string value) : value_(value) {}
39
+
40
+ void Refresh(const std::string& new_value) {
41
+ auto lock = mutex_.Lock();
42
+ value_ = new_value;
43
+ }
44
+
45
+ const std::string& value() const {
46
+ auto lock = mutex_.Lock();
47
+ return value_;
48
+ }
49
+
50
+ private:
51
+ std::string value_;
52
+ mutable ::arrow::util::Mutex mutex_;
53
+ };
54
+
55
+ struct PARQUET_EXPORT KmsConnectionConfig {
56
+ std::string kms_instance_id;
57
+ std::string kms_instance_url;
58
+ /// If the access token is changed in the future, you should keep a reference to
59
+ /// this object and call Refresh() on it whenever there is a new access token.
60
+ std::shared_ptr<KeyAccessToken> refreshable_key_access_token;
61
+ std::unordered_map<std::string, std::string> custom_kms_conf;
62
+
63
+ KmsConnectionConfig();
64
+
65
+ const std::string& key_access_token() const {
66
+ if (refreshable_key_access_token == NULLPTR ||
67
+ refreshable_key_access_token->value().empty()) {
68
+ throw ParquetException("key access token is not set!");
69
+ }
70
+ return refreshable_key_access_token->value();
71
+ }
72
+
73
+ void SetDefaultIfEmpty();
74
+ };
75
+
76
+ class PARQUET_EXPORT KmsClient {
77
+ public:
78
+ static constexpr const char kKmsInstanceIdDefault[] = "DEFAULT";
79
+ static constexpr const char kKmsInstanceUrlDefault[] = "DEFAULT";
80
+ static constexpr const char kKeyAccessTokenDefault[] = "DEFAULT";
81
+
82
+ /// Wraps a key - encrypts it with the master key, encodes the result
83
+ /// and potentially adds a KMS-specific metadata.
84
+ virtual std::string WrapKey(const std::string& key_bytes,
85
+ const std::string& master_key_identifier) = 0;
86
+
87
+ /// Decrypts (unwraps) a key with the master key.
88
+ virtual std::string UnwrapKey(const std::string& wrapped_key,
89
+ const std::string& master_key_identifier) = 0;
90
+ virtual ~KmsClient() {}
91
+ };
92
+
93
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client_factory.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "parquet/encryption/kms_client.h"
21
+ #include "parquet/platform.h"
22
+
23
+ namespace parquet::encryption {
24
+
25
+ class PARQUET_EXPORT KmsClientFactory {
26
+ public:
27
+ explicit KmsClientFactory(bool wrap_locally = false) : wrap_locally_(wrap_locally) {}
28
+
29
+ virtual ~KmsClientFactory() = default;
30
+
31
+ virtual std::shared_ptr<KmsClient> CreateKmsClient(
32
+ const KmsConnectionConfig& kms_connection_config) = 0;
33
+
34
+ protected:
35
+ bool wrap_locally_;
36
+ };
37
+
38
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/local_wrap_kms_client.h ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <unordered_map>
21
+ #include <vector>
22
+
23
+ #include "arrow/util/concurrent_map.h"
24
+
25
+ #include "parquet/encryption/kms_client.h"
26
+ #include "parquet/platform.h"
27
+
28
+ namespace parquet::encryption {
29
+
30
+ /// This class supports local wrapping mode, master keys will be fetched from the KMS
31
+ /// server and used to encrypt other keys (data encryption keys or key encryption keys).
32
+ class PARQUET_EXPORT LocalWrapKmsClient : public KmsClient {
33
+ public:
34
+ static constexpr const char kLocalWrapNoKeyVersion[] = "NO_VERSION";
35
+
36
+ explicit LocalWrapKmsClient(const KmsConnectionConfig& kms_connection_config);
37
+
38
+ std::string WrapKey(const std::string& key_bytes,
39
+ const std::string& master_key_identifier) override;
40
+
41
+ std::string UnwrapKey(const std::string& wrapped_key,
42
+ const std::string& master_key_identifier) override;
43
+
44
+ protected:
45
+ /// Get master key from the remote KMS server.
46
+ /// Note: this function might be called by multiple threads
47
+ virtual std::string GetMasterKeyFromServer(
48
+ const std::string& master_key_identifier) = 0;
49
+
50
+ private:
51
+ /// KMS systems wrap keys by encrypting them by master keys, and attaching additional
52
+ /// information (such as the version number of the masker key) to the result of
53
+ /// encryption. The master key version is required in key rotation. Currently, the
54
+ /// local wrapping mode does not support key rotation (because not all KMS systems allow
55
+ /// to fetch a master key by its ID and version number). Still, the local wrapping mode
56
+ /// adds a placeholder for the master key version, that will enable support for key
57
+ /// rotation in this mode in the future, with appropriate KMS systems. This will also
58
+ /// enable backward compatibility, where future readers will be able to extract master
59
+ /// key version in the files written by the current code.
60
+ ///
61
+ /// LocalKeyWrap class writes (and reads) the "key wrap" as a flat json with the
62
+ /// following fields:
63
+ /// 1. "masterKeyVersion" - a String, with the master key version. In the current
64
+ /// version, only one value is allowed - "NO_VERSION".
65
+ /// 2. "encryptedKey" - a String, with the key encrypted by the master key
66
+ /// (base64-encoded).
67
+ class LocalKeyWrap {
68
+ public:
69
+ static constexpr const char kLocalWrapKeyVersionField[] = "masterKeyVersion";
70
+ static constexpr const char kLocalWrapEncryptedKeyField[] = "encryptedKey";
71
+
72
+ LocalKeyWrap(std::string master_key_version, std::string encrypted_encoded_key);
73
+
74
+ static std::string CreateSerialized(const std::string& encrypted_encoded_key);
75
+
76
+ static LocalKeyWrap Parse(const std::string& wrapped_key);
77
+
78
+ const std::string& master_key_version() const { return master_key_version_; }
79
+
80
+ const std::string& encrypted_encoded_key() const { return encrypted_encoded_key_; }
81
+
82
+ private:
83
+ std::string encrypted_encoded_key_;
84
+ std::string master_key_version_;
85
+ };
86
+
87
+ std::string GetKeyFromServer(const std::string& key_identifier);
88
+
89
+ protected:
90
+ KmsConnectionConfig kms_connection_config_;
91
+ ::arrow::util::ConcurrentMap<std::string, std::string> master_key_cache_;
92
+ };
93
+
94
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_encryption_util.h ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This module defines an abstract interface for iterating through pages in a
19
+ // Parquet column chunk within a row group. It could be extended in the future
20
+ // to iterate through all data pages in all chunks in a file.
21
+
22
+ #pragma once
23
+
24
+ #include <memory>
25
+ #include <string>
26
+ #include <unordered_map>
27
+
28
+ #include <gtest/gtest.h>
29
+
30
+ #include "arrow/filesystem/filesystem.h"
31
+ #include "arrow/filesystem/localfs.h"
32
+ #include "arrow/status.h"
33
+ #include "arrow/util/io_util.h"
34
+
35
+ #include "parquet/encryption/encryption.h"
36
+ #include "parquet/test_util.h"
37
+
38
+ namespace parquet {
39
+ class ParquetFileReader;
40
+ namespace encryption::test {
41
+
42
+ using ::arrow::internal::TemporaryDir;
43
+
44
+ constexpr int kFixedLength = 10;
45
+
46
+ const char kFooterEncryptionKey[] = "0123456789012345"; // 128bit/16
47
+ const char kColumnEncryptionKey1[] = "1234567890123450";
48
+ const char kColumnEncryptionKey2[] = "1234567890123451";
49
+ const char kFileName[] = "tester";
50
+
51
+ // Get the path of file inside parquet test data directory
52
+ std::string data_file(const char* file);
53
+
54
+ // A temporary directory that contains the encrypted files generated in the tests.
55
+ extern std::unique_ptr<TemporaryDir> temp_dir;
56
+
57
+ inline ::arrow::Result<std::unique_ptr<TemporaryDir>> temp_data_dir() {
58
+ return TemporaryDir::Make("parquet-encryption-test-");
59
+ }
60
+
61
+ const char kDoubleFieldName[] = "double_field";
62
+ const char kFloatFieldName[] = "float_field";
63
+ const char kBooleanFieldName[] = "boolean_field";
64
+ const char kInt32FieldName[] = "int32_field";
65
+ const char kInt64FieldName[] = "int64_field";
66
+ const char kInt96FieldName[] = "int96_field";
67
+ const char kByteArrayFieldName[] = "ba_field";
68
+ const char kFixedLenByteArrayFieldName[] = "flba_field";
69
+
70
+ const char kFooterMasterKey[] = "0123456789012345";
71
+ const char kFooterMasterKeyId[] = "kf";
72
+ const char* const kColumnMasterKeys[] = {"1234567890123450", "1234567890123451",
73
+ "1234567890123452", "1234567890123453",
74
+ "1234567890123454", "1234567890123455"};
75
+ const char* const kColumnMasterKeyIds[] = {"kc1", "kc2", "kc3", "kc4", "kc5", "kc6"};
76
+
77
+ // New master key values used to simulate key rotation
78
+ const char kNewFooterMasterKey[] = "9123456789012345";
79
+ const char* const kNewColumnMasterKeys[] = {"9234567890123450", "9234567890123451",
80
+ "9234567890123452", "9234567890123453",
81
+ "9234567890123454", "9234567890123455"};
82
+
83
+ // The result of this function will be used to set into TestOnlyInMemoryKmsClientFactory
84
+ // as the key mapping to look at.
85
+ std::unordered_map<std::string, std::string> BuildKeyMap(const char* const* column_ids,
86
+ const char* const* column_keys,
87
+ const char* footer_id,
88
+ const char* footer_key);
89
+
90
+ // The result of this function will be used to set into EncryptionConfiguration
91
+ // as column keys.
92
+ std::string BuildColumnKeyMapping();
93
+
94
+ // FileEncryptor and FileDecryptor are helper classes to write/read an encrypted parquet
95
+ // file corresponding to each pair of FileEncryptionProperties/FileDecryptionProperties.
96
+ // FileEncryptor writes the file with fixed data values and FileDecryptor reads the file
97
+ // and verify the correctness of data values.
98
+ class FileEncryptor {
99
+ public:
100
+ FileEncryptor();
101
+
102
+ void EncryptFile(
103
+ std::string file,
104
+ std::shared_ptr<parquet::FileEncryptionProperties> encryption_configurations);
105
+
106
+ private:
107
+ std::shared_ptr<schema::GroupNode> SetupEncryptionSchema();
108
+
109
+ int num_rowgroups_ = 5;
110
+ int rows_per_rowgroup_ = 50;
111
+ std::shared_ptr<schema::GroupNode> schema_;
112
+ };
113
+
114
+ class FileDecryptor {
115
+ public:
116
+ void DecryptFile(
117
+ const std::string& file_name,
118
+ const std::shared_ptr<FileDecryptionProperties>& file_decryption_properties);
119
+ void DecryptPageIndex(
120
+ const std::string& file_name,
121
+ const std::shared_ptr<FileDecryptionProperties>& file_decryption_properties);
122
+
123
+ private:
124
+ void CheckFile(
125
+ parquet::ParquetFileReader* file_reader,
126
+ const std::shared_ptr<FileDecryptionProperties>& file_decryption_properties);
127
+ void CheckPageIndex(
128
+ parquet::ParquetFileReader* file_reader,
129
+ const std::shared_ptr<FileDecryptionProperties>& file_decryption_properties);
130
+ };
131
+
132
+ } // namespace encryption::test
133
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_in_memory_kms.h ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <unordered_map>
21
+
22
+ #include "arrow/util/base64.h"
23
+
24
+ #include "parquet/encryption/kms_client_factory.h"
25
+ #include "parquet/encryption/local_wrap_kms_client.h"
26
+ #include "parquet/platform.h"
27
+
28
+ namespace parquet::encryption {
29
+
30
+ // This is a mock class, built for testing only. Don't use it as an example of
31
+ // LocalWrapKmsClient implementation.
32
+ class TestOnlyLocalWrapInMemoryKms : public LocalWrapKmsClient {
33
+ public:
34
+ explicit TestOnlyLocalWrapInMemoryKms(const KmsConnectionConfig& kms_connection_config);
35
+
36
+ static void InitializeMasterKeys(
37
+ const std::unordered_map<std::string, std::string>& master_keys_map);
38
+
39
+ protected:
40
+ std::string GetMasterKeyFromServer(const std::string& master_key_identifier) override;
41
+
42
+ private:
43
+ static std::unordered_map<std::string, std::string> master_key_map_;
44
+ };
45
+
46
+ // This is a mock class, built for testing only. Don't use it as an example of KmsClient
47
+ // implementation.
48
+ class TestOnlyInServerWrapKms : public KmsClient {
49
+ public:
50
+ static void InitializeMasterKeys(
51
+ const std::unordered_map<std::string, std::string>& master_keys_map);
52
+
53
+ std::string WrapKey(const std::string& key_bytes,
54
+ const std::string& master_key_identifier) override;
55
+
56
+ std::string UnwrapKey(const std::string& wrapped_key,
57
+ const std::string& master_key_identifier) override;
58
+
59
+ static void StartKeyRotation(
60
+ const std::unordered_map<std::string, std::string>& new_master_keys_map);
61
+ static void FinishKeyRotation();
62
+
63
+ private:
64
+ std::string GetMasterKeyFromServer(const std::string& master_key_identifier);
65
+
66
+ // Different wrapping and unwrapping key maps to imitate versioning
67
+ // and support key rotation.
68
+ static std::unordered_map<std::string, std::string> unwrapping_master_key_map_;
69
+ static std::unordered_map<std::string, std::string> wrapping_master_key_map_;
70
+ };
71
+
72
+ // This is a mock class, built for testing only. Don't use it as an example of
73
+ // KmsClientFactory implementation.
74
+ class TestOnlyInMemoryKmsClientFactory : public KmsClientFactory {
75
+ public:
76
+ TestOnlyInMemoryKmsClientFactory(
77
+ bool wrap_locally,
78
+ const std::unordered_map<std::string, std::string>& master_keys_map)
79
+ : KmsClientFactory(wrap_locally) {
80
+ TestOnlyLocalWrapInMemoryKms::InitializeMasterKeys(master_keys_map);
81
+ TestOnlyInServerWrapKms::InitializeMasterKeys(master_keys_map);
82
+ }
83
+
84
+ std::shared_ptr<KmsClient> CreateKmsClient(
85
+ const KmsConnectionConfig& kms_connection_config) {
86
+ if (wrap_locally_) {
87
+ return std::make_shared<TestOnlyLocalWrapInMemoryKms>(kms_connection_config);
88
+ } else {
89
+ return std::make_shared<TestOnlyInServerWrapKms>();
90
+ }
91
+ }
92
+ };
93
+
94
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/two_level_cache_with_expiration.h ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <chrono>
21
+ #include <unordered_map>
22
+
23
+ #include "arrow/util/concurrent_map.h"
24
+ #include "arrow/util/mutex.h"
25
+
26
+ namespace parquet::encryption {
27
+
28
+ using ::arrow::util::ConcurrentMap;
29
+
30
+ namespace internal {
31
+
32
+ using TimePoint =
33
+ std::chrono::time_point<std::chrono::system_clock, std::chrono::duration<double>>;
34
+
35
+ inline TimePoint CurrentTimePoint() { return std::chrono::system_clock::now(); }
36
+
37
+ template <typename E>
38
+ class ExpiringCacheEntry {
39
+ public:
40
+ ExpiringCacheEntry() = default;
41
+
42
+ ExpiringCacheEntry(E cached_item, double expiration_interval_seconds)
43
+ : expiration_timestamp_(CurrentTimePoint() +
44
+ std::chrono::duration<double>(expiration_interval_seconds)),
45
+ cached_item_(std::move(cached_item)) {}
46
+
47
+ bool IsExpired() const {
48
+ const auto now = CurrentTimePoint();
49
+ return (now > expiration_timestamp_);
50
+ }
51
+
52
+ E cached_item() { return cached_item_; }
53
+
54
+ private:
55
+ const TimePoint expiration_timestamp_;
56
+ E cached_item_;
57
+ };
58
+
59
+ // This class is to avoid the below warning when compiling KeyToolkit class with VS2015
60
+ // warning C4503: decorated name length exceeded, name was truncated
61
+ template <typename V>
62
+ class ExpiringCacheMapEntry {
63
+ public:
64
+ ExpiringCacheMapEntry() = default;
65
+
66
+ explicit ExpiringCacheMapEntry(
67
+ std::shared_ptr<ConcurrentMap<std::string, V>> cached_item,
68
+ double expiration_interval_seconds)
69
+ : map_cache_(cached_item, expiration_interval_seconds) {}
70
+
71
+ bool IsExpired() { return map_cache_.IsExpired(); }
72
+
73
+ std::shared_ptr<ConcurrentMap<std::string, V>> cached_item() {
74
+ return map_cache_.cached_item();
75
+ }
76
+
77
+ private:
78
+ // ConcurrentMap object may be accessed and modified at many places at the same time,
79
+ // from multiple threads, or even removed from cache.
80
+ ExpiringCacheEntry<std::shared_ptr<ConcurrentMap<std::string, V>>> map_cache_;
81
+ };
82
+
83
+ } // namespace internal
84
+
85
+ // Two-level cache with expiration of internal caches according to token lifetime.
86
+ // External cache is per token, internal is per string key.
87
+ // Wrapper class around:
88
+ // std::unordered_map<std::string,
89
+ // internal::ExpiringCacheEntry<std::unordered_map<std::string, V>>>
90
+ // This cache is safe to be shared between threads.
91
+ template <typename V>
92
+ class TwoLevelCacheWithExpiration {
93
+ public:
94
+ TwoLevelCacheWithExpiration() {
95
+ last_cache_cleanup_timestamp_ = internal::CurrentTimePoint();
96
+ }
97
+
98
+ std::shared_ptr<ConcurrentMap<std::string, V>> GetOrCreateInternalCache(
99
+ const std::string& access_token, double cache_entry_lifetime_seconds) {
100
+ auto lock = mutex_.Lock();
101
+
102
+ auto external_cache_entry = cache_.find(access_token);
103
+ if (external_cache_entry == cache_.end() ||
104
+ external_cache_entry->second.IsExpired()) {
105
+ cache_.insert({access_token, internal::ExpiringCacheMapEntry<V>(
106
+ std::shared_ptr<ConcurrentMap<std::string, V>>(
107
+ new ConcurrentMap<std::string, V>()),
108
+ cache_entry_lifetime_seconds)});
109
+ }
110
+
111
+ return cache_[access_token].cached_item();
112
+ }
113
+
114
+ void CheckCacheForExpiredTokens(double cache_cleanup_period_seconds) {
115
+ auto lock = mutex_.Lock();
116
+
117
+ const auto now = internal::CurrentTimePoint();
118
+ if (now > (last_cache_cleanup_timestamp_ +
119
+ std::chrono::duration<double>(cache_cleanup_period_seconds))) {
120
+ RemoveExpiredEntriesNoMutex();
121
+ last_cache_cleanup_timestamp_ =
122
+ now + std::chrono::duration<double>(cache_cleanup_period_seconds);
123
+ }
124
+ }
125
+
126
+ void RemoveExpiredEntriesFromCache() {
127
+ auto lock = mutex_.Lock();
128
+
129
+ RemoveExpiredEntriesNoMutex();
130
+ }
131
+
132
+ void Remove(const std::string& access_token) {
133
+ auto lock = mutex_.Lock();
134
+ cache_.erase(access_token);
135
+ }
136
+
137
+ void Clear() {
138
+ auto lock = mutex_.Lock();
139
+ cache_.clear();
140
+ }
141
+
142
+ private:
143
+ void RemoveExpiredEntriesNoMutex() {
144
+ for (auto it = cache_.begin(); it != cache_.end();) {
145
+ if (it->second.IsExpired()) {
146
+ it = cache_.erase(it);
147
+ } else {
148
+ ++it;
149
+ }
150
+ }
151
+ }
152
+ std::unordered_map<std::string, internal::ExpiringCacheMapEntry<V>> cache_;
153
+ internal::TimePoint last_cache_cleanup_timestamp_;
154
+ ::arrow::util::Mutex mutex_;
155
+ };
156
+
157
+ } // namespace parquet::encryption
venv/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/type_fwd.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ namespace parquet {
21
+
22
+ class Decryptor;
23
+ class Encryptor;
24
+
25
+ class InternalFileDecryptor;
26
+ class InternalFileEncryptor;
27
+
28
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/exception.h ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <exception>
21
+ #include <sstream>
22
+ #include <string>
23
+ #include <utility>
24
+
25
+ #include "arrow/type_fwd.h"
26
+ #include "arrow/util/string_builder.h"
27
+ #include "parquet/platform.h"
28
+
29
+ // PARQUET-1085
30
+ #if !defined(ARROW_UNUSED)
31
+ #define ARROW_UNUSED(x) UNUSED(x)
32
+ #endif
33
+
34
+ // Parquet exception to Arrow Status
35
+
36
+ #define BEGIN_PARQUET_CATCH_EXCEPTIONS try {
37
+ #define END_PARQUET_CATCH_EXCEPTIONS \
38
+ } \
39
+ catch (const ::parquet::ParquetStatusException& e) { \
40
+ return e.status(); \
41
+ } \
42
+ catch (const ::parquet::ParquetException& e) { \
43
+ return ::arrow::Status::IOError(e.what()); \
44
+ }
45
+
46
+ // clang-format off
47
+
48
+ #define PARQUET_CATCH_NOT_OK(s) \
49
+ BEGIN_PARQUET_CATCH_EXCEPTIONS \
50
+ (s); \
51
+ END_PARQUET_CATCH_EXCEPTIONS
52
+
53
+ // clang-format on
54
+
55
+ #define PARQUET_CATCH_AND_RETURN(s) \
56
+ BEGIN_PARQUET_CATCH_EXCEPTIONS \
57
+ return (s); \
58
+ END_PARQUET_CATCH_EXCEPTIONS
59
+
60
+ // Arrow Status to Parquet exception
61
+
62
+ #define PARQUET_IGNORE_NOT_OK(s) \
63
+ do { \
64
+ ::arrow::Status _s = ::arrow::internal::GenericToStatus(s); \
65
+ ARROW_UNUSED(_s); \
66
+ } while (0)
67
+
68
+ #define PARQUET_THROW_NOT_OK(s) \
69
+ do { \
70
+ ::arrow::Status _s = ::arrow::internal::GenericToStatus(s); \
71
+ if (!_s.ok()) { \
72
+ throw ::parquet::ParquetStatusException(std::move(_s)); \
73
+ } \
74
+ } while (0)
75
+
76
+ #define PARQUET_ASSIGN_OR_THROW_IMPL(status_name, lhs, rexpr) \
77
+ auto status_name = (rexpr); \
78
+ PARQUET_THROW_NOT_OK(status_name.status()); \
79
+ lhs = std::move(status_name).ValueOrDie();
80
+
81
+ #define PARQUET_ASSIGN_OR_THROW(lhs, rexpr) \
82
+ PARQUET_ASSIGN_OR_THROW_IMPL(ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), \
83
+ lhs, rexpr);
84
+
85
+ namespace parquet {
86
+
87
+ class ParquetException : public std::exception {
88
+ public:
89
+ PARQUET_NORETURN static void EofException(const std::string& msg = "") {
90
+ static std::string prefix = "Unexpected end of stream";
91
+ if (msg.empty()) {
92
+ throw ParquetException(prefix);
93
+ }
94
+ throw ParquetException(prefix, ": ", msg);
95
+ }
96
+
97
+ PARQUET_NORETURN static void NYI(const std::string& msg = "") {
98
+ throw ParquetException("Not yet implemented: ", msg, ".");
99
+ }
100
+
101
+ template <typename... Args>
102
+ explicit ParquetException(Args&&... args)
103
+ : msg_(::arrow::util::StringBuilder(std::forward<Args>(args)...)) {}
104
+
105
+ explicit ParquetException(std::string msg) : msg_(std::move(msg)) {}
106
+
107
+ explicit ParquetException(const char* msg, const std::exception&) : msg_(msg) {}
108
+
109
+ ParquetException(const ParquetException&) = default;
110
+ ParquetException& operator=(const ParquetException&) = default;
111
+ ParquetException(ParquetException&&) = default;
112
+ ParquetException& operator=(ParquetException&&) = default;
113
+
114
+ const char* what() const noexcept override { return msg_.c_str(); }
115
+
116
+ private:
117
+ std::string msg_;
118
+ };
119
+
120
+ // Support printing a ParquetException.
121
+ // This is needed for clang-on-MSVC as there operator<< is not defined for
122
+ // std::exception.
123
+ PARQUET_EXPORT
124
+ std::ostream& operator<<(std::ostream& os, const ParquetException& exception);
125
+
126
+ class ParquetStatusException : public ParquetException {
127
+ public:
128
+ explicit ParquetStatusException(::arrow::Status status)
129
+ : ParquetException(status.ToString()), status_(std::move(status)) {}
130
+
131
+ const ::arrow::Status& status() const { return status_; }
132
+
133
+ private:
134
+ ::arrow::Status status_;
135
+ };
136
+
137
+ // This class exists for the purpose of detecting an invalid or corrupted file.
138
+ class ParquetInvalidOrCorruptedFileException : public ParquetStatusException {
139
+ public:
140
+ ParquetInvalidOrCorruptedFileException(const ParquetInvalidOrCorruptedFileException&) =
141
+ default;
142
+
143
+ template <typename Arg,
144
+ typename std::enable_if<
145
+ !std::is_base_of<ParquetInvalidOrCorruptedFileException, Arg>::value,
146
+ int>::type = 0,
147
+ typename... Args>
148
+ explicit ParquetInvalidOrCorruptedFileException(Arg arg, Args&&... args)
149
+ : ParquetStatusException(::arrow::Status::Invalid(std::forward<Arg>(arg),
150
+ std::forward<Args>(args)...)) {}
151
+ };
152
+
153
+ template <typename StatusReturnBlock>
154
+ void ThrowNotOk(StatusReturnBlock&& b) {
155
+ PARQUET_THROW_NOT_OK(b());
156
+ }
157
+
158
+ } // namespace parquet
venv/lib/python3.10/site-packages/pyarrow/include/parquet/file_reader.h ADDED
@@ -0,0 +1,231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "arrow/io/caching.h"
26
+ #include "arrow/util/type_fwd.h"
27
+ #include "parquet/metadata.h" // IWYU pragma: keep
28
+ #include "parquet/platform.h"
29
+ #include "parquet/properties.h"
30
+
31
+ namespace parquet {
32
+
33
+ class ColumnReader;
34
+ class FileMetaData;
35
+ class PageIndexReader;
36
+ class BloomFilterReader;
37
+ class PageReader;
38
+ class RowGroupMetaData;
39
+
40
+ namespace internal {
41
+ class RecordReader;
42
+ }
43
+
44
+ class PARQUET_EXPORT RowGroupReader {
45
+ public:
46
+ // Forward declare a virtual class 'Contents' to aid dependency injection and more
47
+ // easily create test fixtures
48
+ // An implementation of the Contents class is defined in the .cc file
49
+ struct Contents {
50
+ virtual ~Contents() {}
51
+ virtual std::unique_ptr<PageReader> GetColumnPageReader(int i) = 0;
52
+ virtual const RowGroupMetaData* metadata() const = 0;
53
+ virtual const ReaderProperties* properties() const = 0;
54
+ };
55
+
56
+ explicit RowGroupReader(std::unique_ptr<Contents> contents);
57
+
58
+ // Returns the rowgroup metadata
59
+ const RowGroupMetaData* metadata() const;
60
+
61
+ // Construct a ColumnReader for the indicated row group-relative
62
+ // column. Ownership is shared with the RowGroupReader.
63
+ std::shared_ptr<ColumnReader> Column(int i);
64
+
65
+ // EXPERIMENTAL: Construct a RecordReader for the indicated column of the row group.
66
+ // Ownership is shared with the RowGroupReader.
67
+ std::shared_ptr<internal::RecordReader> RecordReader(int i,
68
+ bool read_dictionary = false);
69
+
70
+ // Construct a ColumnReader, trying to enable exposed encoding.
71
+ //
72
+ // For dictionary encoding, currently we only support column chunks that are fully
73
+ // dictionary encoded, i.e., all data pages in the column chunk are dictionary encoded.
74
+ // If a column chunk uses dictionary encoding but then falls back to plain encoding, the
75
+ // encoding will not be exposed.
76
+ //
77
+ // The returned column reader provides an API GetExposedEncoding() for the
78
+ // users to check the exposed encoding and determine how to read the batches.
79
+ //
80
+ // \note API EXPERIMENTAL
81
+ std::shared_ptr<ColumnReader> ColumnWithExposeEncoding(
82
+ int i, ExposedEncoding encoding_to_expose);
83
+
84
+ // Construct a RecordReader, trying to enable exposed encoding.
85
+ //
86
+ // For dictionary encoding, currently we only support column chunks that are
87
+ // fully dictionary encoded byte arrays. The caller should verify if the reader can read
88
+ // and expose the dictionary by checking the reader's read_dictionary(). If a column
89
+ // chunk uses dictionary encoding but then falls back to plain encoding, the returned
90
+ // reader will read decoded data without exposing the dictionary.
91
+ //
92
+ // \note API EXPERIMENTAL
93
+ std::shared_ptr<internal::RecordReader> RecordReaderWithExposeEncoding(
94
+ int i, ExposedEncoding encoding_to_expose);
95
+
96
+ std::unique_ptr<PageReader> GetColumnPageReader(int i);
97
+
98
+ private:
99
+ // Holds a pointer to an instance of Contents implementation
100
+ std::unique_ptr<Contents> contents_;
101
+ };
102
+
103
+ class PARQUET_EXPORT ParquetFileReader {
104
+ public:
105
+ // Declare a virtual class 'Contents' to aid dependency injection and more
106
+ // easily create test fixtures
107
+ // An implementation of the Contents class is defined in the .cc file
108
+ struct PARQUET_EXPORT Contents {
109
+ static std::unique_ptr<Contents> Open(
110
+ std::shared_ptr<::arrow::io::RandomAccessFile> source,
111
+ const ReaderProperties& props = default_reader_properties(),
112
+ std::shared_ptr<FileMetaData> metadata = NULLPTR);
113
+
114
+ static ::arrow::Future<std::unique_ptr<Contents>> OpenAsync(
115
+ std::shared_ptr<::arrow::io::RandomAccessFile> source,
116
+ const ReaderProperties& props = default_reader_properties(),
117
+ std::shared_ptr<FileMetaData> metadata = NULLPTR);
118
+
119
+ virtual ~Contents() = default;
120
+ // Perform any cleanup associated with the file contents
121
+ virtual void Close() = 0;
122
+ virtual std::shared_ptr<RowGroupReader> GetRowGroup(int i) = 0;
123
+ virtual std::shared_ptr<FileMetaData> metadata() const = 0;
124
+ virtual std::shared_ptr<PageIndexReader> GetPageIndexReader() = 0;
125
+ virtual BloomFilterReader& GetBloomFilterReader() = 0;
126
+ };
127
+
128
+ ParquetFileReader();
129
+ ~ParquetFileReader();
130
+
131
+ // Create a file reader instance from an Arrow file object. Thread-safety is
132
+ // the responsibility of the file implementation
133
+ static std::unique_ptr<ParquetFileReader> Open(
134
+ std::shared_ptr<::arrow::io::RandomAccessFile> source,
135
+ const ReaderProperties& props = default_reader_properties(),
136
+ std::shared_ptr<FileMetaData> metadata = NULLPTR);
137
+
138
+ // API Convenience to open a serialized Parquet file on disk, using Arrow IO
139
+ // interfaces.
140
+ static std::unique_ptr<ParquetFileReader> OpenFile(
141
+ const std::string& path, bool memory_map = false,
142
+ const ReaderProperties& props = default_reader_properties(),
143
+ std::shared_ptr<FileMetaData> metadata = NULLPTR);
144
+
145
+ // Asynchronously open a file reader from an Arrow file object.
146
+ // Does not throw - all errors are reported through the Future.
147
+ static ::arrow::Future<std::unique_ptr<ParquetFileReader>> OpenAsync(
148
+ std::shared_ptr<::arrow::io::RandomAccessFile> source,
149
+ const ReaderProperties& props = default_reader_properties(),
150
+ std::shared_ptr<FileMetaData> metadata = NULLPTR);
151
+
152
+ void Open(std::unique_ptr<Contents> contents);
153
+ void Close();
154
+
155
+ // The RowGroupReader is owned by the FileReader
156
+ std::shared_ptr<RowGroupReader> RowGroup(int i);
157
+
158
+ // Returns the file metadata. Only one instance is ever created
159
+ std::shared_ptr<FileMetaData> metadata() const;
160
+
161
+ /// Returns the PageIndexReader. Only one instance is ever created.
162
+ ///
163
+ /// If the file does not have the page index, nullptr may be returned.
164
+ /// Because it pays to check existence of page index in the file, it
165
+ /// is possible to return a non null value even if page index does
166
+ /// not exist. It is the caller's responsibility to check the return
167
+ /// value and follow-up calls to PageIndexReader.
168
+ ///
169
+ /// WARNING: The returned PageIndexReader must not outlive the ParquetFileReader.
170
+ /// Initialize GetPageIndexReader() is not thread-safety.
171
+ std::shared_ptr<PageIndexReader> GetPageIndexReader();
172
+
173
+ /// Returns the BloomFilterReader. Only one instance is ever created.
174
+ ///
175
+ /// WARNING: The returned BloomFilterReader must not outlive the ParquetFileReader.
176
+ /// Initialize GetBloomFilterReader() is not thread-safety.
177
+ BloomFilterReader& GetBloomFilterReader();
178
+
179
+ /// Pre-buffer the specified column indices in all row groups.
180
+ ///
181
+ /// Readers can optionally call this to cache the necessary slices
182
+ /// of the file in-memory before deserialization. Arrow readers can
183
+ /// automatically do this via an option. This is intended to
184
+ /// increase performance when reading from high-latency filesystems
185
+ /// (e.g. Amazon S3).
186
+ ///
187
+ /// After calling this, creating readers for row groups/column
188
+ /// indices that were not buffered may fail. Creating multiple
189
+ /// readers for the a subset of the buffered regions is
190
+ /// acceptable. This may be called again to buffer a different set
191
+ /// of row groups/columns.
192
+ ///
193
+ /// If memory usage is a concern, note that data will remain
194
+ /// buffered in memory until either \a PreBuffer() is called again,
195
+ /// or the reader itself is destructed. Reading - and buffering -
196
+ /// only one row group at a time may be useful.
197
+ ///
198
+ /// This method may throw.
199
+ void PreBuffer(const std::vector<int>& row_groups,
200
+ const std::vector<int>& column_indices,
201
+ const ::arrow::io::IOContext& ctx,
202
+ const ::arrow::io::CacheOptions& options);
203
+
204
+ /// Wait for the specified row groups and column indices to be pre-buffered.
205
+ ///
206
+ /// After the returned Future completes, reading the specified row
207
+ /// groups/columns will not block.
208
+ ///
209
+ /// PreBuffer must be called first. This method does not throw.
210
+ ::arrow::Future<> WhenBuffered(const std::vector<int>& row_groups,
211
+ const std::vector<int>& column_indices) const;
212
+
213
+ private:
214
+ // Holds a pointer to an instance of Contents implementation
215
+ std::unique_ptr<Contents> contents_;
216
+ };
217
+
218
+ // Read only Parquet file metadata
219
+ std::shared_ptr<FileMetaData> PARQUET_EXPORT
220
+ ReadMetaData(const std::shared_ptr<::arrow::io::RandomAccessFile>& source);
221
+
222
+ /// \brief Scan all values in file. Useful for performance testing
223
+ /// \param[in] columns the column numbers to scan. If empty scans all
224
+ /// \param[in] column_batch_size number of values to read at a time when scanning column
225
+ /// \param[in] reader a ParquetFileReader instance
226
+ /// \return number of semantic rows in file
227
+ PARQUET_EXPORT
228
+ int64_t ScanFileContents(std::vector<int> columns, const int32_t column_batch_size,
229
+ ParquetFileReader* reader);
230
+
231
+ } // namespace parquet