diff --git a/ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..86c13fb2621fd2c5f8af02c8d77d287278d24b0e
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21b20d8bf53ebaf291d4f162e3c52ec5cd294aca8d3f408d48275e53960045bd
+size 50332828
diff --git a/ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6b76b0f9aa3900b662bc419eac120937d3e3e1f6
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3dbb1526ef7931306b92c6108d54aa6221c9e2706bb5648c867ae7d7d5bf8bb7
+size 50332843
diff --git a/ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dd15066b28a2ab432dca81d684a1712d982b07f5
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/13.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35e8e8be6d54a538a55f1231106a2dfbba51e4374c102bde521e92d66b5ea14b
+size 50332749
diff --git a/ckpts/universal/global_step80/zero/21.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/21.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b8fd18585684c62962eb6da36fbee2f5c65c80eb
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/21.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f71e4ab1cf852003e8e853eefb156a01c313e4ecba6cf1fe82388fc77450ac4
+size 16778396
diff --git a/ckpts/universal/global_step80/zero/21.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/21.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3e7684b12aceaf8b3996c8453c4915979ffd482a
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/21.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:578071aa58364efdf2afdccaa11c160da4c4c7d9e089b7f13c264849682a622f
+size 16778411
diff --git a/ckpts/universal/global_step80/zero/23.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/23.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0d7b12ca1149e34cc35e52d5ac3c67a80b9c8bea
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/23.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74b45ef4e3b64f7bb6bb44718ab18ad0c1eb27f74fa04abab87633638b239eaf
+size 50332828
diff --git a/ckpts/universal/global_step80/zero/23.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step80/zero/23.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..50368e999b0ee956a1d2e9d0da114ee171c02d95
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/23.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5030ba0dcc4d2e4a49125bd775f6f8c3126d3f5536469f64f26261fce4e19202
+size 50332749
diff --git a/ckpts/universal/global_step80/zero/4.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/4.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..01fbc8e1c45fc65d8a57a7f85081b49cd79d7dcf
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/4.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7c9be51e695e2cd997713b5178064e2041e67a1db9421cea053286da6eb61c6
+size 16778411
diff --git a/ckpts/universal/global_step80/zero/4.attention.dense.weight/fp32.pt b/ckpts/universal/global_step80/zero/4.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c0cf4b1df955aea38ffbb40291303364adbaa673
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/4.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c44b5e1a01068840c14ac9fd476260493f642101b03cbeba41de06f48303939f
+size 16778317
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h
new file mode 100644
index 0000000000000000000000000000000000000000..a27b8b399ce475f614d6314e527847f8541ec155
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/accumulation_queue.h
@@ -0,0 +1,160 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <functional>
+#include <optional>
+#include <vector>
+
+#include "arrow/compute/exec.h"
+#include "arrow/result.h"
+
+namespace arrow {
+namespace acero {
+namespace util {
+
+using arrow::compute::ExecBatch;
+
+/// \brief A container that accumulates batches until they are ready to
+/// be processed.
+class AccumulationQueue {
+ public:
+  AccumulationQueue() : row_count_(0) {}
+  ~AccumulationQueue() = default;
+
+  // We should never be copying ExecBatch around
+  AccumulationQueue(const AccumulationQueue&) = delete;
+  AccumulationQueue& operator=(const AccumulationQueue&) = delete;
+
+  AccumulationQueue(AccumulationQueue&& that);
+  AccumulationQueue& operator=(AccumulationQueue&& that);
+
+  void Concatenate(AccumulationQueue&& that);
+  void InsertBatch(ExecBatch batch);
+  int64_t row_count() { return row_count_; }
+  size_t batch_count() { return batches_.size(); }
+  bool empty() const { return batches_.empty(); }
+  void Clear();
+  ExecBatch& operator[](size_t i);
+
+ private:
+  int64_t row_count_;
+  std::vector<ExecBatch> batches_;
+};
+
+/// A queue that sequences incoming batches
+///
+/// This can be used when a node needs to do some kind of ordered processing on
+/// the stream.
+///
+/// Batches can be inserted in any order. The process_callback will be called on
+/// the batches, in order, without reentrant calls. For this reason the callback
+/// should be quick.
+///
+/// For example, in a top-n node, the process callback should determine how many
+/// rows need to be delivered for the given batch, and then return a task to actually
+/// deliver those rows.
+class SequencingQueue {
+ public:
+  using Task = std::function<Status()>;
+
+  /// Strategy that describes how to handle items
+  class Processor {
+   public:
+    /// Process the batch, potentially generating a task
+    ///
+    /// This method will be called on each batch in order. Calls to this method
+    /// will be serialized and it will not be called reentrantly. This makes it
+    /// safe to do things that rely on order but minimal time should be spent here
+    /// to avoid becoming a bottleneck.
+    ///
+    /// \return a follow-up task that will be scheduled. The follow-up task(s) are
+    ///         not guaranteed to run in any particular order. If nullopt is
+    ///         returned then nothing will be scheduled.
+    virtual Result<std::optional<Task>> Process(ExecBatch batch) = 0;
+    /// Schedule a task
+    virtual void Schedule(Task task) = 0;
+  };
+
+  virtual ~SequencingQueue() = default;
+
+  /// Insert a batch into the queue
+  ///
+  /// This will insert the batch into the queue. If this batch was the next batch
+  /// to deliver then this will trigger 1+ calls to the process callback to generate
+  /// 1+ tasks.
+  ///
+  /// The task generated by this call will be executed immediately. The remaining
+  /// tasks will be scheduled using the schedule callback.
+  ///
+  /// From a data pipeline perspective the sequencing queue is a "sometimes" breaker. If
+  /// a task arrives in order then this call will usually execute the downstream pipeline.
+  /// If this task arrives early then this call will only queue the data.
+  virtual Status InsertBatch(ExecBatch batch) = 0;
+
+  /// Create a queue
+  /// \param processor describes how to process the batches, must outlive the queue
+  static std::unique_ptr<SequencingQueue> Make(Processor* processor);
+};
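Reviewer note: a minimal sketch of how the `Processor` contract above is typically satisfied. The class name `OrderedCounter` and the inline `Schedule` policy are illustrative assumptions, not part of this header; a real node would hand tasks to the plan's scheduler.

```cpp
#include <optional>

#include "arrow/acero/accumulation_queue.h"

namespace example {

using arrow::Result;
using arrow::Status;
using arrow::acero::util::SequencingQueue;
using arrow::compute::ExecBatch;

// Illustrative processor: does ordered bookkeeping, defers heavy work to a task.
class OrderedCounter : public SequencingQueue::Processor {
 public:
  // Called once per batch, in order, never reentrantly -- keep it quick.
  Result<std::optional<SequencingQueue::Task>> Process(ExecBatch batch) override {
    rows_seen_ += batch.length;  // safe: calls are serialized and ordered
    int64_t rows = batch.length;
    // Heavy work goes into the returned task; tasks may run in any order.
    return std::optional<SequencingQueue::Task>([rows]() -> Status {
      // ... deliver `rows` rows downstream ...
      return Status::OK();
    });
  }
  // Simplest possible scheduler: run the task inline.
  void Schedule(SequencingQueue::Task task) override { std::move(task)(); }

 private:
  int64_t rows_seen_ = 0;
};

}  // namespace example
```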
+
+/// A queue that sequences incoming batches
+///
+/// Unlike SequencingQueue the Process method is not expected to schedule new tasks.
+///
+/// If a batch arrives and another thread is currently processing then the batch
+/// will be queued and control will return. In other words, delivery of batches will
+/// not block on the Process method.
+///
+/// It can be helpful to think of this as if a dedicated thread is running Process as
+/// batches arrive.
+class SerialSequencingQueue {
+ public:
+  /// Strategy that describes how to handle items
+  class Processor {
+   public:
+    /// Process the batch
+    ///
+    /// This method will be called on each batch in order. Calls to this method
+    /// will be serialized and it will not be called reentrantly. This makes it
+    /// safe to do things that rely on order.
+    ///
+    /// If this falls behind then data may accumulate.
+    ///
+    /// TODO: Could add backpressure if needed but right now all uses of this should
+    /// be pretty fast and so are unlikely to block.
+    virtual Status Process(ExecBatch batch) = 0;
+  };
+
+  virtual ~SerialSequencingQueue() = default;
+
+  /// Insert a batch into the queue
+  ///
+  /// This will insert the batch into the queue. If this batch was the next batch
+  /// to deliver then this may trigger calls to the processor which will be run
+  /// as part of this call.
+  virtual Status InsertBatch(ExecBatch batch) = 0;
+
+  /// Create a queue
+  /// \param processor describes how to process the batches, must outlive the queue
+  static std::unique_ptr<SerialSequencingQueue> Make(Processor* processor);
+};
+
+}  // namespace util
+}  // namespace acero
+}  // namespace arrow
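A usage sketch for the serial variant, assuming (this is an assumption about the implementation, not stated in the header) that sequencing keys off `ExecBatch::index`:

```cpp
#include <memory>
#include <utility>

#include "arrow/acero/accumulation_queue.h"
#include "arrow/status.h"

namespace example {

using arrow::Status;
using arrow::acero::util::SerialSequencingQueue;
using arrow::compute::ExecBatch;

class RowTotaler : public SerialSequencingQueue::Processor {
 public:
  Status Process(ExecBatch batch) override {
    total_rows_ += batch.length;  // calls are serialized and arrive in order
    return Status::OK();
  }
  int64_t total_rows_ = 0;
};

Status Demo() {
  RowTotaler processor;  // must outlive the queue
  std::unique_ptr<SerialSequencingQueue> queue =
      SerialSequencingQueue::Make(&processor);
  ExecBatch early, first;
  early.index = 1;
  first.index = 0;
  // Inserting out of order: `early` is held until batch index 0 arrives.
  ARROW_RETURN_NOT_OK(queue->InsertBatch(std::move(early)));
  ARROW_RETURN_NOT_OK(queue->InsertBatch(std::move(first)));
  return Status::OK();
}

}  // namespace example
```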
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h
new file mode 100644
index 0000000000000000000000000000000000000000..790264b2083052c4623e52718f569a65451475d9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/aggregate_node.h
@@ -0,0 +1,57 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This API is EXPERIMENTAL.
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "arrow/acero/visibility.h"
+#include "arrow/compute/api_aggregate.h"
+#include "arrow/compute/type_fwd.h"
+#include "arrow/result.h"
+#include "arrow/type_fwd.h"
+
+namespace arrow {
+namespace acero {
+namespace aggregate {
+
+using compute::Aggregate;
+using compute::default_exec_context;
+using compute::ExecContext;
+
+/// \brief Make the output schema of an aggregate node
+///
+/// The output schema is determined by the aggregation kernels, which may depend on the
+/// ExecContext argument. To guarantee correct results, the same ExecContext argument
+/// should be used in execution.
+///
+/// \param[in] input_schema the schema of the input to the node
+/// \param[in] keys the grouping keys for the aggregation
+/// \param[in] segment_keys the segmenting keys for the aggregation
+/// \param[in] aggregates the aggregates for the aggregation
+/// \param[in] exec_ctx the execution context for the aggregation
+ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
+    const std::shared_ptr<Schema>& input_schema, const std::vector<FieldRef>& keys,
+    const std::vector<FieldRef>& segment_keys, const std::vector<Aggregate>& aggregates,
+    ExecContext* exec_ctx = default_exec_context());
+
+}  // namespace aggregate
+}  // namespace acero
+}  // namespace arrow
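A sketch of pre-computing the schema a grouped count would produce; the field names and the requested output name `value_count` are made up for illustration:

```cpp
#include "arrow/acero/aggregate_node.h"
#include "arrow/api.h"

arrow::Result<std::shared_ptr<arrow::Schema>> CountBySchema() {
  auto input_schema = arrow::schema(
      {arrow::field("key", arrow::utf8()), arrow::field("value", arrow::int64())});
  std::vector<arrow::FieldRef> keys = {"key"};
  std::vector<arrow::FieldRef> segment_keys;  // no segmenting
  // "hash_count" is the grouped variant of "count"; null options means defaults.
  std::vector<arrow::compute::Aggregate> aggregates;
  aggregates.emplace_back("hash_count", /*options=*/nullptr,
                          arrow::FieldRef("value"), /*name=*/"value_count");
  // Does not execute anything; only the kernels' output types are resolved.
  return arrow::acero::aggregate::MakeOutputSchema(input_schema, keys, segment_keys,
                                                   aggregates);
}
```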
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/api.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/api.h
new file mode 100644
index 0000000000000000000000000000000000000000..c9724fd512d0b56dfa3a24647b3885677c92b534
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/api.h
@@ -0,0 +1,32 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// NOTE: API is EXPERIMENTAL and will change without going through a
+// deprecation cycle
+
+#pragma once
+
+/// \defgroup acero-api Utilities for creating and executing execution plans
+/// @{
+/// @}
+
+/// \defgroup acero-nodes Options classes for the various exec nodes
+/// @{
+/// @}
+
+#include "arrow/acero/exec_plan.h"
+#include "arrow/acero/options.h"
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/asof_join_node.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/asof_join_node.h
new file mode 100644
index 0000000000000000000000000000000000000000..6a0ce8fd386b01ac868bac3d4d026a309e351cb3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/asof_join_node.h
@@ -0,0 +1,41 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <memory>
+
+#include "arrow/acero/options.h"
+#include "arrow/acero/visibility.h"
+#include "arrow/compute/exec.h"
+#include "arrow/type.h"
+
+namespace arrow {
+namespace acero {
+namespace asofjoin {
+
+using AsofJoinKeys = AsofJoinNodeOptions::Keys;
+
+/// \brief Make the output schema of an as-of-join node
+///
+/// \param[in] input_schema the schema of each input to the node
+/// \param[in] input_keys the key of each input to the node
+ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
+    const std::vector<std::shared_ptr<Schema>>& input_schema,
+    const std::vector<AsofJoinKeys>& input_keys);
+
+}  // namespace asofjoin
+}  // namespace acero
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/backpressure_handler.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/backpressure_handler.h
new file mode 100644
index 0000000000000000000000000000000000000000..178272315d7fbe859376a6de5c704eaeb5ae73b9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/backpressure_handler.h
@@ -0,0 +1,74 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+#include "arrow/acero/exec_plan.h"
+#include "arrow/acero/options.h"
+
+#include <memory>
+
+namespace arrow::acero {
+
+class BackpressureHandler {
+ private:
+  BackpressureHandler(ExecNode* input, size_t low_threshold, size_t high_threshold,
+                      std::unique_ptr<BackpressureControl> backpressure_control)
+      : input_(input),
+        low_threshold_(low_threshold),
+        high_threshold_(high_threshold),
+        backpressure_control_(std::move(backpressure_control)) {}
+
+ public:
+  static Result<BackpressureHandler> Make(
+      ExecNode* input, size_t low_threshold, size_t high_threshold,
+      std::unique_ptr<BackpressureControl> backpressure_control) {
+    if (low_threshold >= high_threshold) {
+      return Status::Invalid("low threshold (", low_threshold,
+                             ") must be less than high threshold (", high_threshold,
+                             ")");
+    }
+    if (backpressure_control == NULLPTR) {
+      return Status::Invalid("null backpressure control parameter");
+    }
+    BackpressureHandler backpressure_handler(input, low_threshold, high_threshold,
+                                             std::move(backpressure_control));
+    return std::move(backpressure_handler);
+  }
+
+  void Handle(size_t start_level, size_t end_level) {
+    if (start_level < high_threshold_ && end_level >= high_threshold_) {
+      backpressure_control_->Pause();
+    } else if (start_level > low_threshold_ && end_level <= low_threshold_) {
+      backpressure_control_->Resume();
+    }
+  }
+
+  Status ForceShutdown() {
+    // It may be unintuitive to call Resume() here, but this is to avoid a deadlock.
+    // Since acero's executor won't terminate if any one node is paused, we need to
+    // force resume the node before stopping production.
+    backpressure_control_->Resume();
+    return input_->StopProducing();
+  }
+
+ private:
+  ExecNode* input_;
+  size_t low_threshold_;
+  size_t high_threshold_;
+  std::unique_ptr<BackpressureControl> backpressure_control_;
+};
+
+}  // namespace arrow::acero
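A sketch of the threshold semantics against this vendored header: `Pause()` fires only when the queue level crosses `high_threshold` upward, `Resume()` only when it crosses `low_threshold` downward. The `PrintingControl` class and the threshold values are invented for illustration.

```cpp
#include <cstdio>
#include <memory>

#include "arrow/acero/backpressure_handler.h"

namespace example {

class PrintingControl : public arrow::acero::BackpressureControl {
 public:
  void Pause() override { std::puts("pause producing"); }
  void Resume() override { std::puts("resume producing"); }
};

arrow::Status Demo(arrow::acero::ExecNode* input) {
  ARROW_ASSIGN_OR_RAISE(
      auto handler,
      arrow::acero::BackpressureHandler::Make(
          input, /*low_threshold=*/4, /*high_threshold=*/8,
          std::make_unique<PrintingControl>()));
  handler.Handle(/*start_level=*/7, /*end_level=*/9);  // crosses 8 upward: Pause()
  handler.Handle(/*start_level=*/9, /*end_level=*/5);  // still above 4: no-op
  handler.Handle(/*start_level=*/5, /*end_level=*/3);  // crosses 4 downward: Resume()
  return arrow::Status::OK();
}

}  // namespace example
```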
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..0ba8553887c03f876b6e08f031f5641170c2e09f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/benchmark_util.h
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+#include "arrow/acero/exec_plan.h"
+#include "arrow/acero/test_util_internal.h"
+#include "arrow/compute/exec.h"
+
+namespace arrow {
+
+namespace acero {
+
+Status BenchmarkNodeOverhead(benchmark::State& state, int32_t num_batches,
+                             int32_t batch_size, arrow::acero::BatchesWithSchema data,
+                             std::vector<arrow::acero::Declaration>& node_declarations,
+                             arrow::MemoryPool* pool = default_memory_pool());
+
+Status BenchmarkIsolatedNodeOverhead(benchmark::State& state,
+                                     arrow::compute::Expression expr,
+                                     int32_t num_batches, int32_t batch_size,
+                                     arrow::acero::BatchesWithSchema data,
+                                     std::string factory_name,
+                                     arrow::acero::ExecNodeOptions& options,
+                                     arrow::MemoryPool* pool = default_memory_pool());
+
+}  // namespace acero
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/bloom_filter.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/bloom_filter.h
new file mode 100644
index 0000000000000000000000000000000000000000..50d07bfd948e063b9eda1b611312af6b19a6b847
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/bloom_filter.h
@@ -0,0 +1,326 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#if defined(ARROW_HAVE_RUNTIME_AVX2)
+#include <immintrin.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+#include <memory>
+
+#include "arrow/acero/partition_util.h"
+#include "arrow/acero/util.h"
+#include "arrow/memory_pool.h"
+#include "arrow/result.h"
+#include "arrow/status.h"
+
+namespace arrow {
+namespace acero {
+
+// A set of pre-generated bit masks from a 64-bit word.
+//
+// It is used to map selected bits of hash to a bit mask that will be used in
+// a Bloom filter.
+//
+// These bit masks need to look random and need to have similar fractions of
+// bits set in order for a Bloom filter to have a low false positives rate.
+//
+struct ARROW_ACERO_EXPORT BloomFilterMasks {
+  // Generate all masks as a single bit vector. Each bit offset in this bit
+  // vector corresponds to a single mask.
+  // In each consecutive kBitsPerMask bits, there must be between
+  // kMinBitsSet and kMaxBitsSet bits set.
+  //
+  BloomFilterMasks();
+
+  inline uint64_t mask(int bit_offset) {
+#if ARROW_LITTLE_ENDIAN
+    return (arrow::util::SafeLoadAs<uint64_t>(masks_ + bit_offset / 8) >>
+            (bit_offset % 8)) &
+           kFullMask;
+#else
+    return (BYTESWAP(arrow::util::SafeLoadAs<uint64_t>(masks_ + bit_offset / 8)) >>
+            (bit_offset % 8)) &
+           kFullMask;
+#endif
+  }
+
+  // Masks are 57 bits long because then they can be accessed at an
+  // arbitrary bit offset using a single unaligned 64-bit load instruction.
+  //
+  static constexpr int kBitsPerMask = 57;
+  static constexpr uint64_t kFullMask = (1ULL << kBitsPerMask) - 1;
+
+  // Minimum and maximum number of bits set in each mask.
+  // This constraint is enforced when generating the bit masks.
+  // Values should be close to each other and chosen so as to minimize a Bloom
+  // filter false positives rate.
+  //
+  static constexpr int kMinBitsSet = 4;
+  static constexpr int kMaxBitsSet = 5;
+
+  // Number of generated masks.
+  // Having more masks to choose from will improve the false positives rate of
+  // the Bloom filter but will also use more memory, which may lead to more CPU
+  // cache misses.
+  // The chosen value results in using only a few cache-lines for mask lookups,
+  // while providing a good variety of available bit masks.
+  //
+  static constexpr int kLogNumMasks = 10;
+  static constexpr int kNumMasks = 1 << kLogNumMasks;
+
+  // Data of masks. Masks are stored in a single bit vector. Nth mask is
+  // kBitsPerMask bits starting at bit offset N.
+  //
+  static constexpr int kTotalBytes = (kNumMasks + 64) / 8;
+  uint8_t masks_[kTotalBytes];
+};
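How the 64-bit hash is consumed by the filter below, shown as a stand-alone sketch of the same arithmetic (the helper names are mine; the constants mirror the header):

```cpp
#include <cstdint>

constexpr int kLogNumMasks = 10;  // BloomFilterMasks::kLogNumMasks

// Bits [0, 10)   -> which of the 1024 pre-generated masks to use
// Bits [10, 16)  -> how far to rotate that 57-bit mask within a 64-bit word
// Bits [16, ...) -> which 64-bit block of the filter to touch
inline int MaskId(uint64_t hash) {
  return static_cast<int>(hash & ((1 << kLogNumMasks) - 1));
}
inline int Rotation(uint64_t hash) {
  return static_cast<int>((hash >> kLogNumMasks) & 63);
}
inline int64_t BlockId(uint64_t hash, int64_t num_blocks) {
  // num_blocks is a power of two, so the AND is a cheap modulo.
  return static_cast<int64_t>((hash >> (kLogNumMasks + 6)) & (num_blocks - 1));
}
```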
+
+// A variant of a blocked Bloom filter implementation.
+// A Bloom filter is a data structure that provides approximate membership test
+// functionality based only on the hash of the key. Membership test may return
+// false positives but not false negatives. Approximation of the result allows
+// in general case (for arbitrary data types of keys) to save on both memory and
+// lookup cost compared to the accurate membership test.
+// The accurate test may sometimes still be cheaper for specific data types
+// and inputs, e.g. integers from a small range.
+//
+// This blocked Bloom filter is optimized for use in hash joins, to achieve a
+// good balance between the size of the filter, the cost of its building and
+// querying and the rate of false positives.
+//
+class ARROW_ACERO_EXPORT BlockedBloomFilter {
+  friend class BloomFilterBuilder_SingleThreaded;
+  friend class BloomFilterBuilder_Parallel;
+
+ public:
+  BlockedBloomFilter() : log_num_blocks_(0), num_blocks_(0), blocks_(NULLPTR) {}
+
+  inline bool Find(uint64_t hash) const {
+    uint64_t m = mask(hash);
+    uint64_t b = blocks_[block_id(hash)];
+    return (b & m) == m;
+  }
+
+  // Uses SIMD if available for smaller Bloom filters.
+  // Uses memory prefetching for larger Bloom filters.
+  //
+  void Find(int64_t hardware_flags, int64_t num_rows, const uint32_t* hashes,
+            uint8_t* result_bit_vector, bool enable_prefetch = true) const;
+  void Find(int64_t hardware_flags, int64_t num_rows, const uint64_t* hashes,
+            uint8_t* result_bit_vector, bool enable_prefetch = true) const;
+
+  int log_num_blocks() const { return log_num_blocks_; }
+
+  int NumHashBitsUsed() const;
+
+  bool IsSameAs(const BlockedBloomFilter* other) const;
+
+  int64_t NumBitsSet() const;
+
+  // Folding of a block Bloom filter after the initial version
+  // has been built.
+  //
+  // One of the parameters for creation of Bloom filter is the number
+  // of bits allocated for it. The more bits allocated, the lower the
+  // probability of false positives. A good heuristic is to aim for
+  // half of the bits set in the constructed Bloom filter. This should
+  // result in a good trade off between size (and following cost of
+  // memory accesses) and false positives rate.
+  //
+  // There might have been many duplicate keys in the input provided
+  // to Bloom filter builder. In that case the resulting bit vector
+  // would be more sparse than originally intended. It is possible to
+  // easily correct that and cut in half the size of Bloom filter
+  // after it has already been constructed. The process to do that is
+  // approximately equal to OR-ing bits from upper and lower half (the
+  // way we address these bits when inserting or querying a hash makes
+  // such folding in half possible).
+  //
+  // We will keep folding as long as the fraction of bits set is less
+  // than 1/4. The resulting bit vector density should be in the [1/4,
+  // 1/2) range.
+  //
+  void Fold();
+
+ private:
+  Status CreateEmpty(int64_t num_rows_to_insert, MemoryPool* pool);
+
+  inline void Insert(uint64_t hash) {
+    uint64_t m = mask(hash);
+    uint64_t& b = blocks_[block_id(hash)];
+    b |= m;
+  }
+
+  void Insert(int64_t hardware_flags, int64_t num_rows, const uint32_t* hashes);
+  void Insert(int64_t hardware_flags, int64_t num_rows, const uint64_t* hashes);
+
+  inline uint64_t mask(uint64_t hash) const {
+    // The lowest bits of hash are used to pick mask index.
+    //
+    int mask_id = static_cast<int>(hash & (BloomFilterMasks::kNumMasks - 1));
+    uint64_t result = masks_.mask(mask_id);
+
+    // The next set of hash bits is used to pick the amount of bit
+    // rotation of the mask.
+    //
+    int rotation = (hash >> BloomFilterMasks::kLogNumMasks) & 63;
+    result = ROTL64(result, rotation);
+
+    return result;
+  }
+
+  inline int64_t block_id(uint64_t hash) const {
+    // The next set of hash bits following the bits used to select a
+    // mask is used to pick block id (index of 64-bit word in a bit
+    // vector).
+    //
+    return (hash >> (BloomFilterMasks::kLogNumMasks + 6)) & (num_blocks_ - 1);
+  }
+
+  template <typename T>
+  inline void InsertImp(int64_t num_rows, const T* hashes);
+
+  template <typename T>
+  inline void FindImp(int64_t num_rows, const T* hashes, uint8_t* result_bit_vector,
+                      bool enable_prefetch) const;
+
+  void SingleFold(int num_folds);
+
+#if defined(ARROW_HAVE_RUNTIME_AVX2)
+  inline __m256i mask_avx2(__m256i hash) const;
+  inline __m256i block_id_avx2(__m256i hash) const;
+  int64_t Insert_avx2(int64_t num_rows, const uint32_t* hashes);
+  int64_t Insert_avx2(int64_t num_rows, const uint64_t* hashes);
+  template <typename T>
+  int64_t InsertImp_avx2(int64_t num_rows, const T* hashes);
+  int64_t Find_avx2(int64_t num_rows, const uint32_t* hashes,
+                    uint8_t* result_bit_vector) const;
+  int64_t Find_avx2(int64_t num_rows, const uint64_t* hashes,
+                    uint8_t* result_bit_vector) const;
+  template <typename T>
+  int64_t FindImp_avx2(int64_t num_rows, const T* hashes,
+                       uint8_t* result_bit_vector) const;
+#endif
+
+  bool UsePrefetch() const {
+    return num_blocks_ * sizeof(uint64_t) > kPrefetchLimitBytes;
+  }
+
+  static constexpr int64_t kPrefetchLimitBytes = 256 * 1024;
+
+  static BloomFilterMasks masks_;
+
+  // Total number of bits used by block Bloom filter must be a power
+  // of 2.
+  //
+  int log_num_blocks_;
+  int64_t num_blocks_;
+
+  // Buffer allocated to store an array of power of 2 64-bit blocks.
+  //
+  std::shared_ptr<Buffer> buf_;
+  // Pointer to mutable data owned by Buffer
+  //
+  uint64_t* blocks_;
+};
+
+// We have two separate implementations of building a Bloom filter, multi-threaded and
+// single-threaded.
+//
+// The single threaded version is useful in two ways:
+// a) It allows verifying the parallel implementation in tests (the single threaded one
+//    is simpler and can be used as the source of truth).
+// b) It is preferred for small and medium size Bloom filters, because it skips extra
+//    synchronization related steps from the parallel variant (partitioning and taking
+//    locks).
+//
+enum class BloomFilterBuildStrategy {
+  SINGLE_THREADED = 0,
+  PARALLEL = 1,
+};
+
+class ARROW_ACERO_EXPORT BloomFilterBuilder {
+ public:
+  virtual ~BloomFilterBuilder() = default;
+  virtual Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
+                       int64_t num_rows, int64_t num_batches,
+                       BlockedBloomFilter* build_target) = 0;
+  virtual int64_t num_tasks() const { return 0; }
+  virtual Status PushNextBatch(size_t thread_index, int64_t num_rows,
+                               const uint32_t* hashes) = 0;
+  virtual Status PushNextBatch(size_t thread_index, int64_t num_rows,
+                               const uint64_t* hashes) = 0;
+  virtual void CleanUp() {}
+  static std::unique_ptr<BloomFilterBuilder> Make(BloomFilterBuildStrategy strategy);
+};
+
+class ARROW_ACERO_EXPORT BloomFilterBuilder_SingleThreaded : public BloomFilterBuilder {
+ public:
+  Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
+               int64_t num_rows, int64_t num_batches,
+               BlockedBloomFilter* build_target) override;
+
+  Status PushNextBatch(size_t /*thread_index*/, int64_t num_rows,
+                       const uint32_t* hashes) override;
+
+  Status PushNextBatch(size_t /*thread_index*/, int64_t num_rows,
+                       const uint64_t* hashes) override;
+
+ private:
+  template <typename T>
+  void PushNextBatchImp(int64_t num_rows, const T* hashes);
+
+  int64_t hardware_flags_;
+  BlockedBloomFilter* build_target_;
+};
+
+class ARROW_ACERO_EXPORT BloomFilterBuilder_Parallel : public BloomFilterBuilder {
+ public:
+  Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
+               int64_t num_rows, int64_t num_batches,
+               BlockedBloomFilter* build_target) override;
+
+  Status PushNextBatch(size_t thread_id, int64_t num_rows,
+                       const uint32_t* hashes) override;
+
+  Status PushNextBatch(size_t thread_id, int64_t num_rows,
+                       const uint64_t* hashes) override;
+
+  void CleanUp() override;
+
+ private:
+  template <typename T>
+  void PushNextBatchImp(size_t thread_id, int64_t num_rows, const T* hashes);
+
+  int64_t hardware_flags_;
+  BlockedBloomFilter* build_target_;
+  int log_num_prtns_;
+  struct ThreadLocalState {
+    std::vector<uint32_t> partitioned_hashes_32;
+    std::vector<uint64_t> partitioned_hashes_64;
+    std::vector<uint16_t> partition_ranges;
+    std::vector<int> unprocessed_partition_ids;
+  };
+  std::vector<ThreadLocalState> thread_local_states_;
+  PartitionLocks prtn_locks_;
+};
+
+}  // namespace acero
+}  // namespace arrow
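A sketch of the single-threaded build-then-probe flow. The hash values here are made up; in Acero they would come from the key hashing used by the hash join. `hardware_flags = 0` is assumed to mean "no SIMD dispatch".

```cpp
#include <cstdint>
#include <vector>

#include "arrow/acero/bloom_filter.h"
#include "arrow/memory_pool.h"

arrow::Status BuildAndProbe() {
  using arrow::acero::BlockedBloomFilter;
  using arrow::acero::BloomFilterBuilder;
  using arrow::acero::BloomFilterBuildStrategy;

  std::vector<uint64_t> build_hashes = {0x12345678u, 0x9abcdef0u, 0x0fedcba9u};
  BlockedBloomFilter filter;
  auto builder = BloomFilterBuilder::Make(BloomFilterBuildStrategy::SINGLE_THREADED);
  ARROW_RETURN_NOT_OK(builder->Begin(
      /*num_threads=*/1, /*hardware_flags=*/0, arrow::default_memory_pool(),
      /*num_rows=*/static_cast<int64_t>(build_hashes.size()), /*num_batches=*/1,
      &filter));
  ARROW_RETURN_NOT_OK(builder->PushNextBatch(
      /*thread_index=*/0, static_cast<int64_t>(build_hashes.size()),
      build_hashes.data()));
  builder->CleanUp();

  // No false negatives: every inserted hash must be found.
  for (uint64_t h : build_hashes) {
    if (!filter.Find(h)) return arrow::Status::Invalid("false negative");
  }
  return arrow::Status::OK();
}
```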
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h
new file mode 100644
index 0000000000000000000000000000000000000000..dba6c64ddc8379f7a8e6aa666f55555ced6c78aa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/exec_plan.h
@@ -0,0 +1,819 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <utility>
+#include <variant>
+#include <vector>
+
+#include "arrow/acero/type_fwd.h"
+#include "arrow/acero/visibility.h"
+#include "arrow/compute/api_vector.h"
+#include "arrow/compute/exec.h"
+#include "arrow/compute/ordering.h"
+#include "arrow/type_fwd.h"
+#include "arrow/util/future.h"
+#include "arrow/util/macros.h"
+#include "arrow/util/tracing.h"
+#include "arrow/util/type_fwd.h"
+
+namespace arrow {
+
+using compute::ExecBatch;
+using compute::ExecContext;
+using compute::FunctionRegistry;
+using compute::GetFunctionRegistry;
+using compute::Ordering;
+using compute::threaded_exec_context;
+
+namespace acero {
+
+/// \addtogroup acero-internals
+/// @{
+
+class ARROW_ACERO_EXPORT ExecPlan : public std::enable_shared_from_this<ExecPlan> {
+ public:
+  // This allows operators to rely on signed 16-bit indices
+  static const uint32_t kMaxBatchSize = 1 << 15;
+  using NodeVector = std::vector<ExecNode*>;
+
+  virtual ~ExecPlan() = default;
+
+  QueryContext* query_context();
+
+  /// \brief retrieve the nodes in the plan
+  const NodeVector& nodes() const;
+
+  /// Make an empty exec plan
+  static Result<std::shared_ptr<ExecPlan>> Make(
+      QueryOptions options, ExecContext exec_context = *threaded_exec_context(),
+      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
+
+  static Result<std::shared_ptr<ExecPlan>> Make(
+      ExecContext exec_context = *threaded_exec_context(),
+      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
+
+  static Result<std::shared_ptr<ExecPlan>> Make(
+      QueryOptions options, ExecContext* exec_context,
+      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
+
+  static Result<std::shared_ptr<ExecPlan>> Make(
+      ExecContext* exec_context,
+      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);
+
+  ExecNode* AddNode(std::unique_ptr<ExecNode> node);
+
+  template <typename Node, typename... Args>
+  Node* EmplaceNode(Args&&... args) {
+    std::unique_ptr<Node> node{new Node{std::forward<Args>(args)...}};
+    auto out = node.get();
+    AddNode(std::move(node));
+    return out;
+  }
+
+  Status Validate();
+
+  /// \brief Start producing on all nodes
+  ///
+  /// Nodes are started in reverse topological order, such that any node
+  /// is started before all of its inputs.
+  void StartProducing();
+
+  /// \brief Stop producing on all nodes
+  ///
+  /// Triggers all sources to stop producing new data. In order to cleanly stop, the plan
+  /// will continue to run any tasks that are already in progress. The caller should
+  /// still wait for `finished` to complete before destroying the plan.
+  void StopProducing();
+
+  /// \brief A future which will be marked finished when all tasks have finished.
+  Future<> finished();
+
+  /// \brief Return whether the plan has non-empty metadata
+  bool HasMetadata() const;
+
+  /// \brief Return the plan's attached metadata
+  std::shared_ptr<const KeyValueMetadata> metadata() const;
+
+  std::string ToString() const;
+};
+
+// Acero can be extended by providing custom implementations of ExecNode. The methods
+// below are documented in detail and provide careful instruction on how to fulfill the
+// ExecNode contract. It's suggested you familiarize yourself with the Acero
+// documentation in the C++ user guide.
+class ARROW_ACERO_EXPORT ExecNode {
+ public:
+  using NodeVector = std::vector<ExecNode*>;
+
+  virtual ~ExecNode() = default;
+
+  virtual const char* kind_name() const = 0;
+
+  // The number of inputs expected by this node
+  int num_inputs() const { return static_cast<int>(inputs_.size()); }
+
+  /// This node's predecessors in the exec plan
+  const NodeVector& inputs() const { return inputs_; }
+
+  /// True if the plan has no output schema (is a sink)
+  bool is_sink() const { return !output_schema_; }
+
+  /// \brief Labels identifying the function of each input.
+  const std::vector<std::string>& input_labels() const { return input_labels_; }
+
+  /// This node's successor in the exec plan
+  const ExecNode* output() const { return output_; }
+
+  /// The datatypes for batches produced by this node
+  const std::shared_ptr<Schema>& output_schema() const { return output_schema_; }
+
+  /// This node's exec plan
+  ExecPlan* plan() { return plan_; }
+
+  /// \brief An optional label, for display and debugging
+  ///
+  /// There is no guarantee that this value is non-empty or unique.
+  const std::string& label() const { return label_; }
+  void SetLabel(std::string label) { label_ = std::move(label); }
+
+  virtual Status Validate() const;
+
+  /// \brief the ordering of the output batches
+  ///
+  /// This does not guarantee the batches will be emitted by this node
+  /// in order. Instead it guarantees that the batches will have their
+  /// ExecBatch::index property set in a way that respects this ordering.
+  ///
+  /// In other words, given the ordering {{"x", SortOrder::Ascending}} we
+  /// know that all values of x in a batch with index N will be less than
+  /// or equal to all values of x in a batch with index N+k (assuming k > 0).
+  /// Furthermore, we also know that values will be sorted within a batch.
+  /// Any row N will have a value of x that is less than the value for
+  /// any row N+k.
+  ///
+  /// Note that an ordering can be both Ordering::Unordered and Ordering::Implicit.
+  /// A node's output should be marked Ordering::Unordered if the order is
+  /// non-deterministic. For example, a hash-join has no predictable output order.
+  ///
+  /// If the ordering is Ordering::Implicit then there is a meaningful order but that
+  /// ordering is not represented by any column in the data. The most common case for
+  /// this is when reading data from an in-memory table. The data has an implicit "row
+  /// order" which is not necessarily represented in the data set.
+  ///
+  /// A filter or project node will not modify the ordering. Nothing needs to be done
+  /// other than ensure the index assigned to output batches is the same as the
+  /// input batch that was mapped.
+  ///
+  /// Other nodes may introduce order. For example, an order-by node will emit
+  /// a brand new ordering independent of the input ordering.
+  ///
+  /// Finally, as described above, nodes such as a hash-join or aggregation may
+  /// destroy ordering (although these nodes could also choose to establish a
+  /// new ordering based on the hash keys).
+  ///
+  /// Some nodes will require an ordering. For example, a fetch node or an
+  /// asof join node will only function if the input data is ordered (for fetch
+  /// it is enough to be implicitly ordered. For an asof join the ordering must
+  /// be explicit and compatible with the on key.)
+  ///
+  /// Nodes that maintain ordering should be careful to avoid introducing gaps
+  /// in the batch index. This may require emitting empty batches in order to
+  /// maintain continuity.
+  virtual const Ordering& ordering() const;
+
+  /// Upstream API:
+  /// These functions are called by input nodes that want to inform this node
+  /// about an updated condition (a new input batch or an impending
+  /// end of stream).
+  ///
+  /// Implementation rules:
+  /// - these may be called anytime after StartProducing() has succeeded
+  ///   (and even during or after StopProducing())
+  /// - these may be called concurrently
+  /// - these are allowed to call back into PauseProducing(), ResumeProducing()
+  ///   and StopProducing()
+
+  /// Transfer input batch to ExecNode
+  ///
+  /// A node will typically perform some kind of operation on the batch
+  /// and then call InputReceived on its outputs with the result.
+  ///
+  /// Other nodes may need to accumulate some number of inputs before any
+  /// output can be produced. These nodes will add the batch to some kind
+  /// of in-memory accumulation queue and return.
+  virtual Status InputReceived(ExecNode* input, ExecBatch batch) = 0;
+
+  /// Mark the inputs finished after the given number of batches.
+  ///
+  /// This may be called before all inputs are received. This simply fixes
+  /// the total number of incoming batches for an input, so that the ExecNode
+  /// knows when it has received all input, regardless of order.
+  virtual Status InputFinished(ExecNode* input, int total_batches) = 0;
+
+  /// \brief Perform any needed initialization
+  ///
+  /// This hook performs any actions in between creation of ExecPlan and the call to
+  /// StartProducing. An example could be Bloom filter pushdown. The order of ExecNodes
+  /// that executes this method is undefined, but the calls are made synchronously.
+  ///
+  /// At this point a node can rely on all inputs & outputs (and the input schemas)
+  /// being well defined.
+  virtual Status Init();
+
+  /// Lifecycle API:
+  /// - start / stop to initiate and terminate production
+  /// - pause / resume to apply backpressure
+  ///
+  /// Implementation rules:
+  /// - StartProducing() should not recurse into the inputs, as it is
+  ///   handled by ExecPlan::StartProducing()
+  /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
+  ///   concurrently, potentially even before the call to StartProducing
+  ///   has finished.
+  /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
+  ///   by the downstream nodes' InputReceived(), InputFinished() methods
+  ///
+  /// StopProducing may be called due to an error, by the user (e.g. cancel), or
+  /// because a node has all the data it needs (e.g. limit, top-k on sorted data).
+  /// This means the method may be called multiple times and we have the following
+  /// additional rules
+  /// - StopProducing() must be idempotent
+  /// - StopProducing() must be forwarded to inputs (this is needed for the limit/top-k
+  ///   case because we may not be stopping the entire plan)
+
+  // Right now, since synchronous calls happen in both directions (input to
+  // output and then output to input), a node must be careful to be reentrant
+  // against synchronous calls from its output, *and* also concurrent calls from
+  // other threads. The most reliable solution is to update the internal state
+  // first, and notify outputs only at the end.
+  //
+  // Concurrent calls to PauseProducing and ResumeProducing can be hard to sequence
+  // as they may travel at different speeds through the plan.
+  //
+  // For example, consider a resume that comes quickly after a pause. If the source
+  // receives the resume before the pause the source may think the destination is full
+  // and halt production which would lead to deadlock.
+  //
+  // To resolve this a counter is sent for all calls to pause/resume. Only the call with
+  // the highest counter value is valid. So if a call to PauseProducing(5) comes after
+  // a call to ResumeProducing(6) then the source should continue producing.
+
+  /// \brief Start producing
+  ///
+  /// This must only be called once.
+  ///
+  /// This is typically called automatically by ExecPlan::StartProducing().
+  virtual Status StartProducing() = 0;
+
+  /// \brief Pause producing temporarily
+  ///
+  /// \param output Pointer to the output that is full
+  /// \param counter Counter used to sequence calls to pause/resume
+  ///
+  /// This call is a hint that an output node is currently not willing
+  /// to receive data.
+  ///
+  /// This may be called any number of times.
+  /// However, the node is still free to produce data (which may be difficult
+  /// to prevent anyway if data is produced using multiple threads).
+  virtual void PauseProducing(ExecNode* output, int32_t counter) = 0;
+
+  /// \brief Resume producing after a temporary pause
+  ///
+  /// \param output Pointer to the output that is now free
+  /// \param counter Counter used to sequence calls to pause/resume
+  ///
+  /// This call is a hint that an output node is willing to receive data again.
+  ///
+  /// This may be called any number of times.
+  virtual void ResumeProducing(ExecNode* output, int32_t counter) = 0;
+
+  /// \brief Stop producing new data
+  ///
+  /// If this node is a source then the source should stop generating data
+  /// as quickly as possible. If this node is not a source then there is typically
+  /// nothing that needs to be done although a node may choose to start ignoring incoming
+  /// data.
+  ///
+  /// This method will be called when an error occurs in the plan.
+  /// This method may also be called by the user if they wish to end a plan early.
+  /// Finally, this method may be called if a node determines it no longer needs any more
+  /// input (for example, a limit node).
+  ///
+  /// This method may be called multiple times.
+  ///
+  /// This is not a pause. There will be no way to start the source again after this has
+  /// been called.
+  virtual Status StopProducing();
+
+  std::string ToString(int indent = 0) const;
+
+ protected:
+  ExecNode(ExecPlan* plan, NodeVector inputs, std::vector<std::string> input_labels,
+           std::shared_ptr<Schema> output_schema);
+
+  virtual Status StopProducingImpl() = 0;
+
+  /// Provide extra info to include in the string representation.
+  virtual std::string ToStringExtra(int indent = 0) const;
+
+  std::atomic<bool> stopped_;
+  ExecPlan* plan_;
+  std::string label_;
+
+  NodeVector inputs_;
+  std::vector<std::string> input_labels_;
+
+  std::shared_ptr<Schema> output_schema_;
+  ExecNode* output_ = NULLPTR;
+};
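A sketch of the counter rule described in the comments above; `PauseState` is a hypothetical helper, not part of this header, and a real node would guard it with a mutex or atomics since pause/resume may race:

```cpp
#include <cstdint>

// Only the highest counter seen so far decides the paused/running state.
class PauseState {
 public:
  // Returns true if the producer should be paused after this call.
  bool Pause(int32_t counter) { return Update(counter, /*paused=*/true); }
  bool Resume(int32_t counter) { return Update(counter, /*paused=*/false); }

 private:
  bool Update(int32_t counter, bool paused) {
    // Stale calls are ignored: PauseProducing(5) after ResumeProducing(6)
    // is a no-op, so the source keeps producing.
    if (counter > last_counter_) {
      last_counter_ = counter;
      paused_ = paused;
    }
    return paused_;
  }
  int32_t last_counter_ = -1;
  bool paused_ = false;
};
```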
+
+/// \brief An extensible registry for factories of ExecNodes
+class ARROW_ACERO_EXPORT ExecFactoryRegistry {
+ public:
+  using Factory = std::function<Result<ExecNode*>(ExecPlan*, std::vector<ExecNode*>,
+                                                  const ExecNodeOptions&)>;
+
+  virtual ~ExecFactoryRegistry() = default;
+
+  /// \brief Get the named factory from this registry
+  ///
+  /// will raise if factory_name is not found
+  virtual Result<Factory> GetFactory(const std::string& factory_name) = 0;
+
+  /// \brief Add a factory to this registry with the provided name
+  ///
+  /// will raise if factory_name is already in the registry
+  virtual Status AddFactory(std::string factory_name, Factory factory) = 0;
+};
+
+/// The default registry, which includes built-in factories.
+ARROW_ACERO_EXPORT
+ExecFactoryRegistry* default_exec_factory_registry();
+
+/// \brief Construct an ExecNode using the named factory
+inline Result<ExecNode*> MakeExecNode(
+    const std::string& factory_name, ExecPlan* plan, std::vector<ExecNode*> inputs,
+    const ExecNodeOptions& options,
+    ExecFactoryRegistry* registry = default_exec_factory_registry()) {
+  ARROW_ASSIGN_OR_RAISE(auto factory, registry->GetFactory(factory_name));
+  return factory(plan, std::move(inputs), options);
+}
+
+/// @}
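A sketch of registering a custom factory so that `Declaration{"my_passthrough", ...}` can find it. The factory name and `MyPassthroughNode` are invented; a real factory would construct a node via `plan->EmplaceNode<...>(...)`:

```cpp
#include "arrow/acero/exec_plan.h"

arrow::Status RegisterMyNode() {
  using arrow::acero::ExecFactoryRegistry;
  ExecFactoryRegistry* registry = arrow::acero::default_exec_factory_registry();
  return registry->AddFactory(
      "my_passthrough",
      [](arrow::acero::ExecPlan* plan, std::vector<arrow::acero::ExecNode*> inputs,
         const arrow::acero::ExecNodeOptions& options)
          -> arrow::Result<arrow::acero::ExecNode*> {
        // A real factory would validate `inputs`/`options` and call
        // plan->EmplaceNode<MyPassthroughNode>(...).
        return arrow::Status::NotImplemented("illustrative only");
      });
}
```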
+
+/// \addtogroup acero-api
+/// @{
+
+/// \brief Helper class for declaring execution nodes
+///
+/// A Declaration represents an unconstructed ExecNode (and potentially an entire graph
+/// since its inputs may also be Declarations)
+///
+/// A Declaration can be converted to a plan and executed using one of the
+/// DeclarationToXyz methods.
+///
+/// For more direct control, a Declaration can be added to an existing execution
+/// plan with Declaration::AddToPlan, which will recursively construct any inputs as
+/// necessary.
+struct ARROW_ACERO_EXPORT Declaration {
+  using Input = std::variant<ExecNode*, Declaration>;
+
+  Declaration() {}
+
+  /// \brief construct a declaration
+  /// \param factory_name the name of the exec node to construct. The node must have
+  ///        been added to the exec node registry with this name.
+  /// \param inputs the inputs to the node, these should be other declarations
+  /// \param options options that control the behavior of the node. You must use
+  ///        the appropriate subclass. For example, if `factory_name` is
+  ///        "project" then `options` should be ProjectNodeOptions.
+  /// \param label a label to give the node. Can be used to distinguish it from other
+  ///        nodes of the same type in the plan.
+  Declaration(std::string factory_name, std::vector<Input> inputs,
+              std::shared_ptr<ExecNodeOptions> options, std::string label)
+      : factory_name{std::move(factory_name)},
+        inputs{std::move(inputs)},
+        options{std::move(options)},
+        label{std::move(label)} {}
+
+  template <typename Options>
+  Declaration(std::string factory_name, std::vector<Input> inputs, Options options,
+              std::string label)
+      : Declaration{std::move(factory_name), std::move(inputs),
+                    std::shared_ptr<ExecNodeOptions>(
+                        std::make_shared<Options>(std::move(options))),
+                    std::move(label)} {}
+
+  template <typename Options>
+  Declaration(std::string factory_name, std::vector<Input> inputs, Options options)
+      : Declaration{std::move(factory_name), std::move(inputs), std::move(options),
+                    /*label=*/""} {}
+
+  template <typename Options>
+  Declaration(std::string factory_name, Options options)
+      : Declaration{std::move(factory_name), {}, std::move(options), /*label=*/""} {}
+
+  template <typename Options>
+  Declaration(std::string factory_name, Options options, std::string label)
+      : Declaration{std::move(factory_name), {}, std::move(options), std::move(label)} {}
+
+  /// \brief Convenience factory for the common case of a simple sequence of nodes.
+  ///
+  /// Each of decls will be appended to the inputs of the subsequent declaration,
+  /// and the final modified declaration will be returned.
+  ///
+  /// Without this convenience factory, constructing a sequence would require explicit,
+  /// difficult-to-read nesting:
+  ///
+  ///     Declaration{"n3",
+  ///                 {
+  ///                     Declaration{"n2",
+  ///                                 {
+  ///                                     Declaration{"n1",
+  ///                                                 {
+  ///                                                     Declaration{"n0", N0Opts{}},
+  ///                                                 },
+  ///                                                 N1Opts{}},
+  ///                                 },
+  ///                                 N2Opts{}},
+  ///                 },
+  ///                 N3Opts{}};
+  ///
+  /// An equivalent Declaration can be constructed more tersely using Sequence:
+  ///
+  ///     Declaration::Sequence({
+  ///         {"n0", N0Opts{}},
+  ///         {"n1", N1Opts{}},
+  ///         {"n2", N2Opts{}},
+  ///         {"n3", N3Opts{}},
+  ///     });
+  static Declaration Sequence(std::vector<Declaration> decls);
+
+  /// \brief add the declaration to an already created execution plan
+  /// \param plan the plan to add the node to
+  /// \param registry the registry to use to lookup the node factory
+  ///
+  /// This method will recursively call AddToPlan on all of the declaration's inputs.
+  /// This method is only for advanced use when the DeclarationToXyz methods are not
+  /// sufficient.
+  ///
+  /// \return the instantiated execution node
+  Result<ExecNode*> AddToPlan(ExecPlan* plan, ExecFactoryRegistry* registry =
+                                                  default_exec_factory_registry()) const;
+
+  // Validate a declaration
+  bool IsValid(ExecFactoryRegistry* registry = default_exec_factory_registry()) const;
+
+  /// \brief the name of the factory to use when creating a node
+  std::string factory_name;
+  /// \brief the declaration's inputs
+  std::vector<Input> inputs;
+  /// \brief options to control the behavior of the node
+  std::shared_ptr<ExecNodeOptions> options;
+  /// \brief a label to give the node in the plan
+  std::string label;
+};
+
+/// \brief How to handle unaligned buffers
+enum class UnalignedBufferHandling { kWarn, kIgnore, kReallocate, kError };
+
+/// \brief get the default behavior of unaligned buffer handling
+///
+/// This is configurable via the ACERO_ALIGNMENT_HANDLING environment variable which
+/// can be set to "warn", "ignore", "reallocate", or "error". If the environment
+/// variable is not set, or is set to an invalid value, this will return kWarn
+UnalignedBufferHandling GetDefaultUnalignedBufferHandling();
+
+/// \brief plan-wide options that can be specified when executing an execution plan
+struct ARROW_ACERO_EXPORT QueryOptions {
+  /// \brief Should the plan use a legacy batching strategy
+  ///
+  /// This is currently in place only to support the Scanner::ToTable
+  /// method. This method relies on batch indices from the scanner
+  /// remaining consistent. This is impractical in the ExecPlan which
+  /// might slice batches as needed (e.g. for a join)
+  ///
+  /// However, it still works for simple plans and this is the only way
+  /// we have at the moment for maintaining implicit order.
+  bool use_legacy_batching = false;
+
+  /// If the output has a meaningful order then sequence the output of the plan
+  ///
+  /// The default behavior (std::nullopt) will sequence output batches if there
+  /// is a meaningful ordering in the final node and will emit batches immediately
+  /// otherwise.
+  ///
+  /// If explicitly set to true then plan execution will fail if there is no
+  /// meaningful ordering. This can be useful to validate a query that should
+  /// be emitting ordered results.
+  ///
+  /// If explicitly set to false then batches will be emitted immediately even if there
+  /// is a meaningful ordering. This could cause batches to be emitted out of order but
+  /// may offer a small decrease to latency.
+  std::optional<bool> sequence_output = std::nullopt;
+
+  /// \brief should the plan use multiple background threads for CPU-intensive work
+  ///
+  /// If this is false then all CPU work will be done on the calling thread. I/O tasks
+  /// will still happen on the I/O executor and may be multi-threaded (but should not use
+  /// significant CPU resources).
+  ///
+  /// Will be ignored if custom_cpu_executor is set
+  bool use_threads = true;
+
+  /// \brief custom executor to use for CPU-intensive work
+  ///
+  /// Must be null or remain valid for the duration of the plan. If this is null then
+  /// a default thread pool will be chosen whose behavior will be controlled by
+  /// the `use_threads` option.
+  ::arrow::internal::Executor* custom_cpu_executor = NULLPTR;
+
+  /// \brief custom executor to use for IO work
+  ///
+  /// Must be null or remain valid for the duration of the plan. If this is null then
+  /// the global io thread pool will be chosen whose behavior will be controlled by
+  /// the "ARROW_IO_THREADS" environment variable.
+  ::arrow::internal::Executor* custom_io_executor = NULLPTR;
+
+  /// \brief a memory pool to use for allocations
+  ///
+  /// Must remain valid for the duration of the plan.
+  MemoryPool* memory_pool = default_memory_pool();
+
+  /// \brief a function registry to use for the plan
+  ///
+  /// Must remain valid for the duration of the plan.
+  FunctionRegistry* function_registry = GetFunctionRegistry();
+
+  /// \brief the names of the output columns
+  ///
+  /// If this is empty then names will be generated based on the input columns
+  ///
+  /// If set then the number of names must equal the number of output columns
+  std::vector<std::string> field_names;
+
+  /// \brief Policy for unaligned buffers in source data
+  ///
+  /// Various compute functions and acero internals will type pun array
+  /// buffers from uint8_t* to some kind of value type (e.g. we might
+  /// cast to int32_t* to add two int32 arrays)
+  ///
+  /// If the buffer is poorly aligned (e.g. an int32 array is not aligned
+  /// on a 4-byte boundary) then this is technically undefined behavior in C++.
+  /// However, most modern compilers and CPUs are fairly tolerant of this
+  /// behavior and nothing bad (beyond a small hit to performance) is likely
+  /// to happen.
+  ///
+  /// Note that this only applies to source buffers. All buffers allocated internally
+  /// by Acero will be suitably aligned.
+  ///
+  /// If this field is set to kWarn then Acero will check if any buffers are unaligned
+  /// and, if they are, will emit a warning.
+  ///
+  /// If this field is set to kReallocate then Acero will allocate a new, suitably
+  /// aligned buffer and copy the contents from the old buffer into this new buffer.
+  ///
+  /// If this field is set to kError then Acero will gracefully abort the plan instead.
+  ///
+  /// If this field is set to kIgnore then Acero will not even check if the buffers are
+  /// unaligned.
+  ///
+  /// If this field is not set then it will be treated as kWarn unless overridden
+  /// by the ACERO_ALIGNMENT_HANDLING environment variable
+  std::optional<UnalignedBufferHandling> unaligned_buffer_handling;
+};
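A sketch of wiring these options together, using the `DeclarationToTable(Declaration, QueryOptions)` overload declared just below (the declaration itself is assumed to be built elsewhere):

```cpp
#include "arrow/acero/exec_plan.h"

arrow::Result<std::shared_ptr<arrow::Table>> RunOrdered(
    arrow::acero::Declaration decl) {
  arrow::acero::QueryOptions opts;
  opts.sequence_output = true;  // fail if the plan has no meaningful ordering
  opts.use_threads = true;      // CPU work on the default thread pool
  opts.unaligned_buffer_handling =
      arrow::acero::UnalignedBufferHandling::kReallocate;  // copy unaligned inputs
  return arrow::acero::DeclarationToTable(std::move(decl), opts);
}
```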
+///
+/// \return the schema that batches would have after going through the execution plan
+ARROW_ACERO_EXPORT Result<std::shared_ptr<Schema>> DeclarationToSchema(
+    const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR);
+
+/// \brief Create a string representation of a plan
+///
+/// This representation is for debug purposes only.
+///
+/// Conversion to a string may fail if the declaration represents an
+/// invalid plan.
+///
+/// Use Substrait for complete serialization of plans
+///
+/// \param declaration A declaration describing an execution plan
+/// \param function_registry The function registry to use for function execution. If null
+///                          then the default function registry will be used.
+///
+/// \return a string representation of the plan suitable for debugging output
+ARROW_ACERO_EXPORT Result<std::string> DeclarationToString(
+    const Declaration& declaration, FunctionRegistry* function_registry = NULLPTR);
+
+/// \brief Utility method to run a declaration and collect the results into a table
+///
+/// \param declaration A declaration describing the plan to run
+/// \param use_threads If `use_threads` is false then all CPU work will be done on the
+///                    calling thread. I/O tasks will still happen on the I/O executor
+///                    and may be multi-threaded (but should not use significant CPU
+///                    resources).
+/// \param memory_pool The memory pool to use for allocations made while running the plan.
+/// \param function_registry The function registry to use for function execution. If null
+///                          then the default function registry will be used.
+///
+/// This method will add a sink node to the declaration to collect results into a
+/// table. It will then create an ExecPlan from the declaration, start the exec plan,
+/// block until the plan has finished, and return the created table.
+ARROW_ACERO_EXPORT Result<std::shared_ptr<Table>> DeclarationToTable(
+    Declaration declaration, bool use_threads = true,
+    MemoryPool* memory_pool = default_memory_pool(),
+    FunctionRegistry* function_registry = NULLPTR);
+
+ARROW_ACERO_EXPORT Result<std::shared_ptr<Table>> DeclarationToTable(
+    Declaration declaration, QueryOptions query_options);
+
+/// \brief Asynchronous version of \see DeclarationToTable
+///
+/// \param declaration A declaration describing the plan to run
+/// \param use_threads The behavior of use_threads is slightly different than the
+///                    synchronous version since we cannot run synchronously on the
+///                    calling thread. Instead, if use_threads=false then a new thread
+///                    pool will be created with a single thread and this will be used for
+///                    all compute work.
+/// \param memory_pool The memory pool to use for allocations made while running the plan.
+/// \param function_registry The function registry to use for function execution. If null
+///                          then the default function registry will be used.
+ARROW_ACERO_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
+    Declaration declaration, bool use_threads = true,
+    MemoryPool* memory_pool = default_memory_pool(),
+    FunctionRegistry* function_registry = NULLPTR);
+
+/// \brief Overload of \see DeclarationToTableAsync accepting a custom exec context
+///
+/// The executor must be specified (cannot be null) and must be kept alive until the
+/// returned future finishes.
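+///
+/// A usage sketch (illustrative; `plan` and `exec_ctx` are assumed to exist, and
+/// the executor inside `exec_ctx` must outlive the returned future):
+///
+///   Future<std::shared_ptr<Table>> table_fut =
+///       DeclarationToTableAsync(std::move(plan), exec_ctx);
+///   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Table> table, table_fut.result());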
+ARROW_ACERO_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
+    Declaration declaration, ExecContext custom_exec_context);
+
+/// \brief a collection of exec batches with a common schema
+struct BatchesWithCommonSchema {
+  std::vector<ExecBatch> batches;
+  std::shared_ptr<Schema> schema;
+};
+
+/// \brief Utility method to run a declaration and collect the results into ExecBatch
+/// vector
+///
+/// \see DeclarationToTable for details on threading & execution
+ARROW_ACERO_EXPORT Result<BatchesWithCommonSchema> DeclarationToExecBatches(
+    Declaration declaration, bool use_threads = true,
+    MemoryPool* memory_pool = default_memory_pool(),
+    FunctionRegistry* function_registry = NULLPTR);
+
+ARROW_ACERO_EXPORT Result<BatchesWithCommonSchema> DeclarationToExecBatches(
+    Declaration declaration, QueryOptions query_options);
+
+/// \brief Asynchronous version of \see DeclarationToExecBatches
+///
+/// \see DeclarationToTableAsync for details on threading & execution
+ARROW_ACERO_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
+    Declaration declaration, bool use_threads = true,
+    MemoryPool* memory_pool = default_memory_pool(),
+    FunctionRegistry* function_registry = NULLPTR);
+
+/// \brief Overload of \see DeclarationToExecBatchesAsync accepting a custom exec context
+///
+/// \see DeclarationToTableAsync for details on threading & execution
+ARROW_ACERO_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
+    Declaration declaration, ExecContext custom_exec_context);
+
+/// \brief Utility method to run a declaration and collect the results into a vector
+///
+/// \see DeclarationToTable for details on threading & execution
+ARROW_ACERO_EXPORT Result<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatches(
+    Declaration declaration, bool use_threads = true,
+    MemoryPool* memory_pool = default_memory_pool(),
+    FunctionRegistry* function_registry = NULLPTR);
+
+ARROW_ACERO_EXPORT Result<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatches(
+    Declaration declaration, QueryOptions query_options);
+
+/// \brief Asynchronous version of \see DeclarationToBatches
+///
+/// \see DeclarationToTableAsync for details on threading & execution
+ARROW_ACERO_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>>
+DeclarationToBatchesAsync(Declaration declaration, bool use_threads = true,
+                          MemoryPool* memory_pool = default_memory_pool(),
+                          FunctionRegistry* function_registry = NULLPTR);
+
+/// \brief Overload of \see DeclarationToBatchesAsync accepting a custom exec context
+///
+/// \see DeclarationToTableAsync for details on threading & execution
+ARROW_ACERO_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>>
+DeclarationToBatchesAsync(Declaration declaration, ExecContext exec_context);
+
+/// \brief Utility method to run a declaration and return results as a RecordBatchReader
+///
+/// If an exec context is not provided then a default exec context will be used based
+/// on the value of `use_threads`. If `use_threads` is false then the CPU executor will
+/// be a serial executor and all CPU work will be done on the calling thread. I/O tasks
+/// will still happen on the I/O executor and may be multi-threaded.
+///
+/// If `use_threads` is false then all CPU work will happen during the calls to
+/// RecordBatchReader::Next and no CPU work will happen in the background. If
+/// `use_threads` is true then CPU work will happen on the CPU thread pool and tasks may
+/// run in between calls to RecordBatchReader::Next. If the returned reader is not
+/// consumed quickly enough then the plan will eventually pause as the backpressure queue
+/// fills up.
+///
+/// If a custom exec context is provided then the value of `use_threads` will be ignored.
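+///
+/// A typical consumption loop (an illustrative sketch, not part of the original
+/// header; `plan` is assumed to exist):
+///
+///   ARROW_ASSIGN_OR_RAISE(std::unique_ptr<RecordBatchReader> reader,
+///                         DeclarationToReader(std::move(plan)));
+///   std::shared_ptr<RecordBatch> batch;
+///   while (true) {
+///     ARROW_RETURN_NOT_OK(reader->ReadNext(&batch));
+///     if (!batch) break;  // end of stream
+///     // ... process batch ...
+///   }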
+/// +/// The returned RecordBatchReader can be closed early to cancel the computation of record +/// batches. In this case, only errors encountered by the computation may be reported. In +/// particular, no cancellation error may be reported. +ARROW_ACERO_EXPORT Result> DeclarationToReader( + Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +ARROW_ACERO_EXPORT Result> DeclarationToReader( + Declaration declaration, QueryOptions query_options); + +/// \brief Utility method to run a declaration and ignore results +/// +/// This can be useful when the data are consumed as part of the plan itself, for +/// example, when the plan ends with a write node. +/// +/// \see DeclarationToTable for details on threading & execution +ARROW_ACERO_EXPORT Status +DeclarationToStatus(Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +ARROW_ACERO_EXPORT Status DeclarationToStatus(Declaration declaration, + QueryOptions query_options); + +/// \brief Asynchronous version of \see DeclarationToStatus +/// +/// This can be useful when the data are consumed as part of the plan itself, for +/// example, when the plan ends with a write node. +/// +/// \see DeclarationToTableAsync for details on threading & execution +ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync( + Declaration declaration, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool(), + FunctionRegistry* function_registry = NULLPTR); + +/// \brief Overload of \see DeclarationToStatusAsync accepting a custom exec context +/// +/// \see DeclarationToTableAsync for details on threading & execution +ARROW_ACERO_EXPORT Future<> DeclarationToStatusAsync(Declaration declaration, + ExecContext exec_context); + +/// @} + +/// \brief Wrap an ExecBatch generator in a RecordBatchReader. +/// +/// The RecordBatchReader does not impose any ordering on emitted batches. +ARROW_ACERO_EXPORT +std::shared_ptr MakeGeneratorReader( + std::shared_ptr, std::function>()>, + MemoryPool*); + +constexpr int kDefaultBackgroundMaxQ = 32; +constexpr int kDefaultBackgroundQRestart = 16; + +/// \brief Make a generator of RecordBatchReaders +/// +/// Useful as a source node for an Exec plan +ARROW_ACERO_EXPORT +Result>()>> MakeReaderGenerator( + std::shared_ptr reader, arrow::internal::Executor* io_executor, + int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart); + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join.h new file mode 100644 index 0000000000000000000000000000000000000000..a81ff274e5e3a46bab8fe7a12902a1c6c62c0bbd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join.h @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/acero/accumulation_queue.h" +#include "arrow/acero/bloom_filter.h" +#include "arrow/acero/options.h" +#include "arrow/acero/query_context.h" +#include "arrow/acero/schema_util.h" +#include "arrow/acero/task_util.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/util/tracing.h" + +namespace arrow { +namespace acero { + +using util::AccumulationQueue; + +class HashJoinImpl { + public: + using OutputBatchCallback = std::function; + using BuildFinishedCallback = std::function; + using FinishedCallback = std::function; + using RegisterTaskGroupCallback = std::function, std::function)>; + using StartTaskGroupCallback = std::function; + using AbortContinuationImpl = std::function; + + virtual ~HashJoinImpl() = default; + virtual Status Init(QueryContext* ctx, JoinType join_type, size_t num_threads, + const HashJoinProjectionMaps* proj_map_left, + const HashJoinProjectionMaps* proj_map_right, + std::vector key_cmp, Expression filter, + RegisterTaskGroupCallback register_task_group_callback, + StartTaskGroupCallback start_task_group_callback, + OutputBatchCallback output_batch_callback, + FinishedCallback finished_callback) = 0; + + virtual Status BuildHashTable(size_t thread_index, AccumulationQueue batches, + BuildFinishedCallback on_finished) = 0; + virtual Status ProbeSingleBatch(size_t thread_index, ExecBatch batch) = 0; + virtual Status ProbingFinished(size_t thread_index) = 0; + virtual void Abort(TaskScheduler::AbortContinuationImpl pos_abort_callback) = 0; + virtual std::string ToString() const = 0; + + static Result> MakeBasic(); + static Result> MakeSwiss(); + + protected: + arrow::util::tracing::Span span_; +}; + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_dict.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_dict.h new file mode 100644 index 0000000000000000000000000000000000000000..c7d8d785d079eb051a15f1f7ac0ce613d6910bee --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_dict.h @@ -0,0 +1,318 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +#include "arrow/acero/schema_util.h" +#include "arrow/compute/exec.h" +#include "arrow/compute/kernels/row_encoder_internal.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" + +// This file contains hash join logic related to handling of dictionary encoded key +// columns. +// +// A key column from probe side of the join can be matched against a key column from build +// side of the join, as long as the underlying value types are equal. That means that: +// - both scalars and arrays can be used and even mixed in the same column +// - dictionary column can be matched against non-dictionary column if underlying value +// types are equal +// - dictionary column can be matched against dictionary column with a different index +// type, and potentially using a different dictionary, if underlying value types are equal +// +// We currently require in hash join that for all dictionary encoded columns, the same +// dictionary is used in all input exec batches. +// +// In order to allow matching columns with different dictionaries, different dictionary +// index types, and dictionary key against non-dictionary key, internally comparisons will +// be evaluated after remapping values on both sides of the join to a common +// representation (which will be called "unified representation"). This common +// representation is a column of int32() type (not a dictionary column). It represents an +// index in the unified dictionary computed for the (only) dictionary present on build +// side (an empty dictionary is still created for an empty build side). Null value is +// always represented in this common representation as null int32 value, unified +// dictionary will never contain a null value (so there is no ambiguity of representing +// nulls as either index to a null entry in the dictionary or null index). +// +// Unified dictionary represents values present on build side. There may be values on +// probe side that are not present in it. All such values, that are not null, are mapped +// in the common representation to a special constant kMissingValueId. +// + +namespace arrow { + +using compute::ExecBatch; +using compute::ExecContext; +using compute::internal::RowEncoder; + +namespace acero { + +/// Helper class with operations that are stateless and common to processing of dictionary +/// keys on both build and probe side. +class HashJoinDictUtil { + public: + // Null values in unified representation are always represented as null that has + // corresponding integer set to this constant + static constexpr int32_t kNullId = 0; + // Constant representing a value, that is not null, missing on the build side, in + // unified representation. + static constexpr int32_t kMissingValueId = -1; + + // Check if data types of corresponding pair of key column on build and probe side are + // compatible + static bool KeyDataTypesValid(const std::shared_ptr& probe_data_type, + const std::shared_ptr& build_data_type); + + // Input must be dictionary array or dictionary scalar. + // A precomputed and provided here lookup table in the form of int32() array will be + // used to remap input indices to unified representation. + // + static Result> IndexRemapUsingLUT( + ExecContext* ctx, const Datum& indices, int64_t batch_length, + const std::shared_ptr& map_array, + const std::shared_ptr& data_type); + + // Return int32() array that contains indices of input dictionary array or scalar after + // type casting. 
+  static Result<std::shared_ptr<ArrayData>> ConvertToInt32(
+      const std::shared_ptr<DataType>& from_type, const Datum& input,
+      int64_t batch_length, ExecContext* ctx);
+
+  // Return an array that contains elements of input int32() array after casting to a
+  // given integer type. This is used for mapping unified representation stored in the
+  // hash table on build side back to original input data type of hash join, when
+  // outputting hash join results to parent exec node.
+  //
+  static Result<std::shared_ptr<ArrayData>> ConvertFromInt32(
+      const std::shared_ptr<DataType>& to_type, const Datum& input, int64_t batch_length,
+      ExecContext* ctx);
+
+  // Return dictionary referenced in either dictionary array or dictionary scalar
+  static std::shared_ptr<Array> ExtractDictionary(const Datum& data);
+};
+
+/// Implements processing of dictionary arrays/scalars in key columns on the build side of
+/// a hash join.
+/// Each instance of this class corresponds to a single column and stores and
+/// processes only the information related to that column.
+/// Const methods are thread-safe, non-const methods are not (the caller must make sure
+/// that only one thread at any time will access them).
+///
+class HashJoinDictBuild {
+ public:
+  // Returns true if the key column (described in input by its data type) requires any
+  // pre- or post-processing related to handling dictionaries.
+  //
+  static bool KeyNeedsProcessing(const std::shared_ptr<DataType>& build_data_type) {
+    return (build_data_type->id() == Type::DICTIONARY);
+  }
+
+  // Data type of unified representation
+  static std::shared_ptr<DataType> DataTypeAfterRemapping() { return int32(); }
+
+  // Should be called only once in hash join, before processing any build or probe
+  // batches.
+  //
+  // Takes a pointer to the dictionary for a corresponding key column on the build side as
+  // an input. If the build side is empty, it still needs to be called, but with the
+  // dictionary pointer set to null.
+  //
+  // Currently it is required that all input batches on the build side share the same
+  // dictionary. For each input batch during its pre-processing, the dictionary will be
+  // checked and an error will be returned if it is different from the one provided in the
+  // call to this method.
+  //
+  // Unifies the dictionary. The order of the values is still preserved.
+  // Null and duplicate entries are removed. If the dictionary is already unified, its
+  // copy will be produced and stored within this class.
+  //
+  // Prepares the mapping from ids within original dictionary to the ids in the resulting
+  // dictionary. This is used later on to pre-process (map to unified representation) key
+  // column on build side.
+  //
+  // Prepares the reverse mapping (in the form of hash table) from values to the ids in
+  // the resulting dictionary. This will be used later on to pre-process (map to unified
+  // representation) key column on probe side. Values on probe side that are not present
+  // in the original dictionary will be mapped to a special constant kMissingValueId. The
+  // exception is made for nulls, which are always mapped to nulls (both when null is
+  // represented as a dictionary id pointing to a null and a null dictionary id).
+  //
+  Status Init(ExecContext* ctx, std::shared_ptr<Array> dictionary,
+              std::shared_ptr<DataType> index_type, std::shared_ptr<DataType> value_type);
+
+  // Remap array or scalar values into unified representation (array of int32()).
+  // Outputs kMissingValueId if input value is not found in the unified dictionary.
+  // Outputs null for null input value (with corresponding data set to kNullId).
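+  //
+  // For example (illustrative only): given a unified dictionary ["x", "y"], probe
+  // values ["y", "z", null] would be remapped to [1, kMissingValueId, null].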
+ // + Result> RemapInputValues(ExecContext* ctx, + const Datum& values, + int64_t batch_length) const; + + // Remap dictionary array or dictionary scalar on build side to unified representation. + // Dictionary referenced in the input must match the dictionary that was + // given during initialization. + // The output is a dictionary array that references unified dictionary. + // + Result> RemapInput( + ExecContext* ctx, const Datum& indices, int64_t batch_length, + const std::shared_ptr& data_type) const; + + // Outputs dictionary array referencing unified dictionary, given an array with 32-bit + // ids. + // Used to post-process values looked up in a hash table on build side of the hash join + // before outputting to the parent exec node. + // + Result> RemapOutput(const ArrayData& indices32Bit, + ExecContext* ctx) const; + + // Release shared pointers and memory + void CleanUp(); + + private: + // Data type of dictionary ids for the input dictionary on build side + std::shared_ptr index_type_; + // Data type of values for the input dictionary on build side + std::shared_ptr value_type_; + // Mapping from (encoded as string) values to the ids in unified dictionary + std::unordered_map hash_table_; + // Mapping from input dictionary ids to unified dictionary ids + std::shared_ptr remapped_ids_; + // Input dictionary + std::shared_ptr dictionary_; + // Unified dictionary + std::shared_ptr unified_dictionary_; +}; + +/// Implements processing of dictionary arrays/scalars in key columns on the probe side of +/// a hash join. +/// Each instance of this class corresponds to a single column and stores and +/// processes only the information related to that column. +/// It is not thread-safe - every participating thread should use its own instance of +/// this class. +/// +class HashJoinDictProbe { + public: + static bool KeyNeedsProcessing(const std::shared_ptr& probe_data_type, + const std::shared_ptr& build_data_type); + + // Data type of the result of remapping input key column. + // + // The result of remapping is what is used in hash join for matching keys on build and + // probe side. The exact data types may be different, as described below, and therefore + // a common representation is needed for simplifying comparisons of pairs of keys on + // both sides. + // + // We support matching key that is of non-dictionary type with key that is of dictionary + // type, as long as the underlying value types are equal. We support matching when both + // keys are of dictionary type, regardless whether underlying dictionary index types are + // the same or not. + // + static std::shared_ptr DataTypeAfterRemapping( + const std::shared_ptr& build_data_type); + + // Should only be called if KeyNeedsProcessing method returns true for a pair of + // corresponding key columns from build and probe side. + // Converts values in order to match the common representation for + // both build and probe side used in hash table comparison. + // Supports arrays and scalars as input. + // Argument opt_build_side should be null if dictionary key on probe side is matched + // with non-dictionary key on build side. + // + Result> RemapInput( + const HashJoinDictBuild* opt_build_side, const Datum& data, int64_t batch_length, + const std::shared_ptr& probe_data_type, + const std::shared_ptr& build_data_type, ExecContext* ctx); + + void CleanUp(); + + private: + // May be null if probe side key is non-dictionary. 
Otherwise it is used to verify that + // only a single dictionary is referenced in exec batch on probe side of hash join. + std::shared_ptr dictionary_; + // Mapping from dictionary on probe side of hash join (if it is used) to unified + // representation. + std::shared_ptr remapped_ids_; + // Encoder of key columns that uses unified representation instead of original data type + // for key columns that need to use it (have dictionaries on either side of the join). + RowEncoder encoder_; +}; + +// Encapsulates dictionary handling logic for build side of hash join. +// +class HashJoinDictBuildMulti { + public: + Status Init(const SchemaProjectionMaps& proj_map, + const ExecBatch* opt_non_empty_batch, ExecContext* ctx); + static void InitEncoder(const SchemaProjectionMaps& proj_map, + RowEncoder* encoder, ExecContext* ctx); + Status EncodeBatch(size_t thread_index, + const SchemaProjectionMaps& proj_map, + const ExecBatch& batch, RowEncoder* encoder, ExecContext* ctx) const; + Status PostDecode(const SchemaProjectionMaps& proj_map, + ExecBatch* decoded_key_batch, ExecContext* ctx); + const HashJoinDictBuild& get_dict_build(int icol) const { return remap_imp_[icol]; } + + private: + std::vector needs_remap_; + std::vector remap_imp_; +}; + +// Encapsulates dictionary handling logic for probe side of hash join +// +class HashJoinDictProbeMulti { + public: + void Init(size_t num_threads); + bool BatchRemapNeeded(size_t thread_index, + const SchemaProjectionMaps& proj_map_probe, + const SchemaProjectionMaps& proj_map_build, + ExecContext* ctx); + Status EncodeBatch(size_t thread_index, + const SchemaProjectionMaps& proj_map_probe, + const SchemaProjectionMaps& proj_map_build, + const HashJoinDictBuildMulti& dict_build, const ExecBatch& batch, + RowEncoder** out_encoder, ExecBatch* opt_out_key_batch, + ExecContext* ctx); + + private: + void InitLocalStateIfNeeded( + size_t thread_index, const SchemaProjectionMaps& proj_map_probe, + const SchemaProjectionMaps& proj_map_build, ExecContext* ctx); + static void InitEncoder(const SchemaProjectionMaps& proj_map_probe, + const SchemaProjectionMaps& proj_map_build, + RowEncoder* encoder, ExecContext* ctx); + struct ThreadLocalState { + bool is_initialized; + // Whether any key column needs remapping (because of dictionaries used) before doing + // join hash table lookups + bool any_needs_remap; + // Whether each key column needs remapping before doing join hash table lookups + std::vector needs_remap; + std::vector remap_imp; + // Encoder of key columns that uses unified representation instead of original data + // type for key columns that need to use it (have dictionaries on either side of the + // join). + RowEncoder post_remap_encoder; + }; + std::vector local_states_; +}; + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h new file mode 100644 index 0000000000000000000000000000000000000000..ad60019ceabc487cdc58e8eb084be24d59a30172 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/hash_join_node.h @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/acero/options.h" +#include "arrow/acero/schema_util.h" +#include "arrow/result.h" +#include "arrow/status.h" + +namespace arrow { + +using compute::ExecContext; + +namespace acero { + +class ARROW_ACERO_EXPORT HashJoinSchema { + public: + Status Init(JoinType join_type, const Schema& left_schema, + const std::vector& left_keys, const Schema& right_schema, + const std::vector& right_keys, const Expression& filter, + const std::string& left_field_name_prefix, + const std::string& right_field_name_prefix); + + Status Init(JoinType join_type, const Schema& left_schema, + const std::vector& left_keys, + const std::vector& left_output, const Schema& right_schema, + const std::vector& right_keys, + const std::vector& right_output, const Expression& filter, + const std::string& left_field_name_prefix, + const std::string& right_field_name_prefix); + + static Status ValidateSchemas(JoinType join_type, const Schema& left_schema, + const std::vector& left_keys, + const std::vector& left_output, + const Schema& right_schema, + const std::vector& right_keys, + const std::vector& right_output, + const std::string& left_field_name_prefix, + const std::string& right_field_name_prefix); + + bool HasDictionaries() const; + + bool HasLargeBinary() const; + + Result BindFilter(Expression filter, const Schema& left_schema, + const Schema& right_schema, ExecContext* exec_context); + std::shared_ptr MakeOutputSchema(const std::string& left_field_name_suffix, + const std::string& right_field_name_suffix); + + bool LeftPayloadIsEmpty() { return PayloadIsEmpty(0); } + + bool RightPayloadIsEmpty() { return PayloadIsEmpty(1); } + + static int kMissingField() { + return SchemaProjectionMaps::kMissingField; + } + + SchemaProjectionMaps proj_maps[2]; + + private: + static bool IsTypeSupported(const DataType& type); + + Status CollectFilterColumns(std::vector& left_filter, + std::vector& right_filter, + const Expression& filter, const Schema& left_schema, + const Schema& right_schema); + + Expression RewriteFilterToUseFilterSchema(int right_filter_offset, + const SchemaProjectionMap& left_to_filter, + const SchemaProjectionMap& right_to_filter, + const Expression& filter); + + bool PayloadIsEmpty(int side) { + assert(side == 0 || side == 1); + return proj_maps[side].num_cols(HashJoinProjection::PAYLOAD) == 0; + } + + static Result> ComputePayload(const Schema& schema, + const std::vector& output, + const std::vector& filter, + const std::vector& key); +}; + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/map_node.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/map_node.h new file mode 100644 index 0000000000000000000000000000000000000000..8bdd0ab2ca3854c6561aa3735ae143e7c58b4f77 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/map_node.h @@ -0,0 +1,81 @@ +// 
Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/acero/exec_plan.h" +#include "arrow/acero/util.h" +#include "arrow/acero/visibility.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/cancel.h" +#include "arrow/util/type_fwd.h" + +namespace arrow { +namespace acero { + +/// A utility base class for simple exec nodes with one input +/// +/// Pause/Resume Producing are forwarded appropriately +/// There is nothing to do in StopProducingImpl +/// +/// An AtomicCounter is used to keep track of when all data has arrived. When it +/// has the Finish() method will be invoked +class ARROW_ACERO_EXPORT MapNode : public ExecNode, public TracedNode { + public: + MapNode(ExecPlan* plan, std::vector inputs, + std::shared_ptr output_schema); + + Status InputFinished(ExecNode* input, int total_batches) override; + + Status StartProducing() override; + + void PauseProducing(ExecNode* output, int32_t counter) override; + + void ResumeProducing(ExecNode* output, int32_t counter) override; + + Status InputReceived(ExecNode* input, ExecBatch batch) override; + + const Ordering& ordering() const override; + + protected: + Status StopProducingImpl() override; + + /// Transform a batch + /// + /// The output batch will have the same guarantee as the input batch + /// If this was the last batch this call may trigger Finish() + virtual Result ProcessBatch(ExecBatch batch) = 0; + + /// Function called after all data has been received + /// + /// By default this does nothing. Override this to provide a custom implementation. + virtual void Finish(); + + protected: + // Counter for the number of batches received + AtomicCounter input_counter_; +}; + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/options.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/options.h new file mode 100644 index 0000000000000000000000000000000000000000..4447e9c67a19930111c3a3df33ae874fd330700c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/options.h @@ -0,0 +1,866 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/acero/type_fwd.h" +#include "arrow/acero/visibility.h" +#include "arrow/compute/api_aggregate.h" +#include "arrow/compute/api_vector.h" +#include "arrow/compute/exec.h" +#include "arrow/compute/expression.h" +#include "arrow/record_batch.h" +#include "arrow/result.h" +#include "arrow/util/async_generator.h" +#include "arrow/util/async_util.h" + +namespace arrow { + +using compute::Aggregate; +using compute::ExecBatch; +using compute::Expression; +using compute::literal; +using compute::Ordering; +using compute::SelectKOptions; +using compute::SortOptions; + +namespace internal { + +class Executor; + +} // namespace internal + +namespace acero { + +/// \brief This must not be used in release-mode +struct DebugOptions; + +using AsyncExecBatchGenerator = AsyncGenerator>; + +/// \addtogroup acero-nodes +/// @{ + +/// \brief A base class for all options objects +/// +/// The only time this is used directly is when a node has no configuration +class ARROW_ACERO_EXPORT ExecNodeOptions { + public: + virtual ~ExecNodeOptions() = default; + + /// \brief This must not be used in release-mode + std::shared_ptr debug_opts; +}; + +/// \brief A node representing a generic source of data for Acero +/// +/// The source node will start calling `generator` during StartProducing. An initial +/// task will be created that will call `generator`. It will not call `generator` +/// reentrantly. If the source can be read in parallel then those details should be +/// encapsulated within `generator`. +/// +/// For each batch received a new task will be created to push that batch downstream. +/// This task will slice smaller units of size `ExecPlan::kMaxBatchSize` from the +/// parent batch and call InputReceived. Thus, if the `generator` yields a large +/// batch it may result in several calls to InputReceived. +/// +/// The SourceNode will, by default, assign an implicit ordering to outgoing batches. +/// This is valid as long as the generator generates batches in a deterministic fashion. +/// Currently, the only way to override this is to subclass the SourceNode. +/// +/// This node is not generally used directly but can serve as the basis for various +/// specialized nodes. +class ARROW_ACERO_EXPORT SourceNodeOptions : public ExecNodeOptions { + public: + /// Create an instance from values + SourceNodeOptions(std::shared_ptr output_schema, + std::function>()> generator) + : output_schema(std::move(output_schema)), generator(std::move(generator)) {} + + /// \brief the schema for batches that will be generated by this source + std::shared_ptr output_schema; + /// \brief an asynchronous stream of batches ending with std::nullopt + std::function>()> generator; +}; + +/// \brief a node that generates data from a table already loaded in memory +/// +/// The table source node will slice off chunks, defined by `max_batch_size` +/// for parallel processing. The table source node extends source node and so these +/// chunks will be iteratively processed in small batches. \see SourceNodeOptions +/// for details. 
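+///
+/// For example (an illustrative sketch, not part of the original header; `table` is
+/// assumed to be a std::shared_ptr<Table> built elsewhere):
+///
+///   Declaration source{"table_source", TableSourceNodeOptions{table}};
+///   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Table> out,
+///                         DeclarationToTable(std::move(source)));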
+class ARROW_ACERO_EXPORT TableSourceNodeOptions : public ExecNodeOptions {
+ public:
+  static constexpr int64_t kDefaultMaxBatchSize = 1 << 20;
+
+  /// Create an instance from values
+  TableSourceNodeOptions(std::shared_ptr<Table> table,
+                         int64_t max_batch_size = kDefaultMaxBatchSize)
+      : table(std::move(table)), max_batch_size(max_batch_size) {}
+
+  /// \brief a table which acts as the data source
+  std::shared_ptr<Table> table;
+  /// \brief size of batches to emit from this node
+  /// If the table is larger the node will emit multiple batches from the
+  /// table to be processed in parallel.
+  int64_t max_batch_size;
+};
+
+/// \brief define a lazily resolved Arrow table.
+///
+/// The table uniquely identified by the names can typically be resolved at the time when
+/// the plan is to be consumed.
+///
+/// This node is for serialization purposes only and can never be executed.
+class ARROW_ACERO_EXPORT NamedTableNodeOptions : public ExecNodeOptions {
+ public:
+  /// Create an instance from values
+  NamedTableNodeOptions(std::vector<std::string> names, std::shared_ptr<Schema> schema)
+      : names(std::move(names)), schema(std::move(schema)) {}
+
+  /// \brief the names to put in the serialized plan
+  std::vector<std::string> names;
+  /// \brief the output schema of the table
+  std::shared_ptr<Schema> schema;
+};
+
+/// \brief a source node which feeds data from a synchronous iterator of batches
+///
+/// ItMaker is a maker of an iterator of tabular data.
+///
+/// The node can be configured to use an I/O executor. If set then each time the
+/// iterator is polled a new I/O thread task will be created to do the polling. This
+/// allows a blocking iterator to stay off the CPU thread pool.
+template <typename ItMaker>
+class ARROW_ACERO_EXPORT SchemaSourceNodeOptions : public ExecNodeOptions {
+ public:
+  /// Create an instance that will create a new task on io_executor for each iteration
+  SchemaSourceNodeOptions(std::shared_ptr<Schema> schema, ItMaker it_maker,
+                          arrow::internal::Executor* io_executor)
+      : schema(std::move(schema)),
+        it_maker(std::move(it_maker)),
+        io_executor(io_executor),
+        requires_io(true) {}
+
+  /// Create an instance that will either iterate synchronously or use the default I/O
+  /// executor
+  SchemaSourceNodeOptions(std::shared_ptr<Schema> schema, ItMaker it_maker,
+                          bool requires_io = false)
+      : schema(std::move(schema)),
+        it_maker(std::move(it_maker)),
+        io_executor(NULLPTR),
+        requires_io(requires_io) {}
+
+  /// \brief The schema of the record batches from the iterator
+  std::shared_ptr<Schema> schema;
+
+  /// \brief A maker of an iterator which acts as the data source
+  ItMaker it_maker;
+
+  /// \brief The executor to use for scanning the iterator
+  ///
+  /// Defaults to the default I/O executor. Only used if requires_io is true.
+  /// If requires_io is false then this MUST be nullptr.
+  arrow::internal::Executor* io_executor;
+
+  /// \brief If true then items will be fetched from the iterator on a dedicated I/O
+  /// thread to keep I/O off the CPU thread
+  bool requires_io;
+};
+
+/// a source node that reads from a RecordBatchReader
+///
+/// Each iteration of the RecordBatchReader will be run on a new thread task created
+/// on the I/O thread pool.
+class ARROW_ACERO_EXPORT RecordBatchReaderSourceNodeOptions : public ExecNodeOptions {
+ public:
+  /// Create an instance from values
+  RecordBatchReaderSourceNodeOptions(std::shared_ptr<RecordBatchReader> reader,
+                                     arrow::internal::Executor* io_executor = NULLPTR)
+      : reader(std::move(reader)), io_executor(io_executor) {}
+
+  /// \brief The RecordBatchReader which acts as the data source
+  std::shared_ptr<RecordBatchReader> reader;
+
+  /// \brief The executor to use for the reader
+  ///
+  /// Defaults to the default I/O executor.
+ arrow::internal::Executor* io_executor; +}; + +/// a source node that reads from an iterator of array vectors +using ArrayVectorIteratorMaker = std::function>()>; +/// \brief An extended Source node which accepts a schema and array-vectors +class ARROW_ACERO_EXPORT ArrayVectorSourceNodeOptions + : public SchemaSourceNodeOptions { + using SchemaSourceNodeOptions::SchemaSourceNodeOptions; +}; + +/// a source node that reads from an iterator of ExecBatch +using ExecBatchIteratorMaker = std::function>()>; +/// \brief An extended Source node which accepts a schema and exec-batches +class ARROW_ACERO_EXPORT ExecBatchSourceNodeOptions + : public SchemaSourceNodeOptions { + public: + using SchemaSourceNodeOptions::SchemaSourceNodeOptions; + ExecBatchSourceNodeOptions(std::shared_ptr schema, + std::vector batches, + ::arrow::internal::Executor* io_executor); + ExecBatchSourceNodeOptions(std::shared_ptr schema, + std::vector batches, bool requires_io = false); +}; + +using RecordBatchIteratorMaker = std::function>()>; +/// a source node that reads from an iterator of RecordBatch +class ARROW_ACERO_EXPORT RecordBatchSourceNodeOptions + : public SchemaSourceNodeOptions { + using SchemaSourceNodeOptions::SchemaSourceNodeOptions; +}; + +/// \brief a node which excludes some rows from batches passed through it +/// +/// filter_expression will be evaluated against each batch which is pushed to +/// this node. Any rows for which filter_expression does not evaluate to `true` will be +/// excluded in the batch emitted by this node. +/// +/// This node will emit empty batches if all rows are excluded. This is done +/// to avoid gaps in the ordering. +class ARROW_ACERO_EXPORT FilterNodeOptions : public ExecNodeOptions { + public: + /// \brief create an instance from values + explicit FilterNodeOptions(Expression filter_expression) + : filter_expression(std::move(filter_expression)) {} + + /// \brief the expression to filter batches + /// + /// The return type of this expression must be boolean + Expression filter_expression; +}; + +/// \brief a node which selects a specified subset from the input +class ARROW_ACERO_EXPORT FetchNodeOptions : public ExecNodeOptions { + public: + static constexpr std::string_view kName = "fetch"; + /// \brief create an instance from values + FetchNodeOptions(int64_t offset, int64_t count) : offset(offset), count(count) {} + /// \brief the number of rows to skip + int64_t offset; + /// \brief the number of rows to keep (not counting skipped rows) + int64_t count; +}; + +/// \brief a node which executes expressions on input batches, producing batches +/// of the same length with new columns. +/// +/// Each expression will be evaluated against each batch which is pushed to +/// this node to produce a corresponding output column. +/// +/// If names are not provided, the string representations of exprs will be used. +class ARROW_ACERO_EXPORT ProjectNodeOptions : public ExecNodeOptions { + public: + /// \brief create an instance from values + explicit ProjectNodeOptions(std::vector expressions, + std::vector names = {}) + : expressions(std::move(expressions)), names(std::move(names)) {} + + /// \brief the expressions to run on the batches + /// + /// The output will have one column for each expression. If you wish to keep any of + /// the columns from the input then you should create a simple field_ref expression + /// for that column. 
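+  ///
+  /// For example (an illustrative sketch): keep column "a" and add a column that
+  /// doubles "b":
+  ///
+  ///   ProjectNodeOptions{{compute::field_ref("a"),
+  ///                       compute::call("multiply", {compute::field_ref("b"),
+  ///                                                  compute::literal(2)})},
+  ///                      {"a", "b_times_2"}};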
+ std::vector expressions; + /// \brief the names of the output columns + /// + /// If this is not specified then the result of calling ToString on the expression will + /// be used instead + /// + /// This list should either be empty or have the same length as `expressions` + std::vector names; +}; + +/// \brief a node which aggregates input batches and calculates summary statistics +/// +/// The node can summarize the entire input or it can group the input with grouping keys +/// and segment keys. +/// +/// By default, the aggregate node is a pipeline breaker. It must accumulate all input +/// before any output is produced. Segment keys are a performance optimization. If +/// you know your input is already partitioned by one or more columns then you can +/// specify these as segment keys. At each change in the segment keys the node will +/// emit values for all data seen so far. +/// +/// Segment keys are currently limited to single-threaded mode. +/// +/// Both keys and segment-keys determine the group. However segment-keys are also used +/// for determining grouping segments, which should be large, and allow streaming a +/// partial aggregation result after processing each segment. One common use-case for +/// segment-keys is ordered aggregation, in which the segment-key attribute specifies a +/// column with non-decreasing values or a lexicographically-ordered set of such columns. +/// +/// If the keys attribute is a non-empty vector, then each aggregate in `aggregates` is +/// expected to be a HashAggregate function. If the keys attribute is an empty vector, +/// then each aggregate is assumed to be a ScalarAggregate function. +/// +/// If the segment_keys attribute is a non-empty vector, then segmented aggregation, as +/// described above, applies. +/// +/// The keys and segment_keys vectors must be disjoint. +/// +/// If no measures are provided then you will simply get the list of unique keys. +/// +/// This node outputs segment keys first, followed by regular keys, followed by one +/// column for each aggregate. 
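+///
+/// For example (an illustrative sketch; the exact Aggregate initializer shape is an
+/// assumption): a per-key sum of column "x" could be declared as:
+///
+///   AggregateNodeOptions{/*aggregates=*/{Aggregate{"hash_sum", nullptr,
+///                                                  FieldRef("x"), "x_sum"}},
+///                        /*keys=*/{FieldRef("key")}};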
+class ARROW_ACERO_EXPORT AggregateNodeOptions : public ExecNodeOptions {
+ public:
+  /// \brief create an instance from values
+  explicit AggregateNodeOptions(std::vector<Aggregate> aggregates,
+                                std::vector<FieldRef> keys = {},
+                                std::vector<FieldRef> segment_keys = {})
+      : aggregates(std::move(aggregates)),
+        keys(std::move(keys)),
+        segment_keys(std::move(segment_keys)) {}
+
+  // aggregations which will be applied to the targeted fields
+  std::vector<Aggregate> aggregates;
+  // keys by which aggregations will be grouped (optional)
+  std::vector<FieldRef> keys;
+  // keys by which aggregations will be segmented (optional)
+  std::vector<FieldRef> segment_keys;
+};
+
+/// \brief a default value at which backpressure will be applied
+constexpr int32_t kDefaultBackpressureHighBytes = 1 << 30;  // 1GiB
+/// \brief a default value at which backpressure will be removed
+constexpr int32_t kDefaultBackpressureLowBytes = 1 << 28;  // 256MiB
+
+/// \brief an interface that can be queried for backpressure statistics
+class ARROW_ACERO_EXPORT BackpressureMonitor {
+ public:
+  virtual ~BackpressureMonitor() = default;
+  /// \brief fetches the number of bytes currently queued up
+  virtual uint64_t bytes_in_use() = 0;
+  /// \brief checks to see if backpressure is currently applied
+  virtual bool is_paused() = 0;
+};
+
+/// \brief Options to control backpressure behavior
+struct ARROW_ACERO_EXPORT BackpressureOptions {
+  /// \brief Create default options that perform no backpressure
+  BackpressureOptions() : resume_if_below(0), pause_if_above(0) {}
+  /// \brief Create options that will perform backpressure
+  ///
+  /// \param resume_if_below The producer should resume producing if the backpressure
+  ///                        queue has fewer than resume_if_below bytes queued.
+  /// \param pause_if_above The producer should pause producing if the backpressure
+  ///                       queue has more than pause_if_above bytes queued.
+  BackpressureOptions(uint64_t resume_if_below, uint64_t pause_if_above)
+      : resume_if_below(resume_if_below), pause_if_above(pause_if_above) {}
+
+  /// \brief create an instance using default values for backpressure limits
+  static BackpressureOptions DefaultBackpressure() {
+    return BackpressureOptions(kDefaultBackpressureLowBytes,
+                               kDefaultBackpressureHighBytes);
+  }
+
+  /// \brief helper method to determine if backpressure is enabled
+  /// \return true if pause_if_above is greater than zero, false otherwise
+  bool should_apply_backpressure() const { return pause_if_above > 0; }
+
+  /// \brief the number of bytes at which the producer should resume producing
+  uint64_t resume_if_below;
+  /// \brief the number of bytes at which the producer should pause producing
+  ///
+  /// If this is <= 0 then backpressure will be disabled
+  uint64_t pause_if_above;
+};
+
+/// \brief a sink node which collects results in a queue
+///
+/// Emitted batches will only be ordered if there is a meaningful ordering
+/// and sequence_output is not set to false.
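+///
+/// A minimal configuration sketch (illustrative; the generator must be polled by the
+/// caller once the plan is running, or the sink will apply backpressure):
+///
+///   std::function<Future<std::optional<ExecBatch>>()> sink_gen;
+///   SinkNodeOptions sink_opts{&sink_gen, BackpressureOptions::DefaultBackpressure()};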
+class ARROW_ACERO_EXPORT SinkNodeOptions : public ExecNodeOptions { + public: + explicit SinkNodeOptions(std::function>()>* generator, + std::shared_ptr* schema, + BackpressureOptions backpressure = {}, + BackpressureMonitor** backpressure_monitor = NULLPTR, + std::optional sequence_output = std::nullopt) + : generator(generator), + schema(schema), + backpressure(backpressure), + backpressure_monitor(backpressure_monitor), + sequence_output(sequence_output) {} + + explicit SinkNodeOptions(std::function>()>* generator, + BackpressureOptions backpressure = {}, + BackpressureMonitor** backpressure_monitor = NULLPTR, + std::optional sequence_output = std::nullopt) + : generator(generator), + schema(NULLPTR), + backpressure(std::move(backpressure)), + backpressure_monitor(backpressure_monitor), + sequence_output(sequence_output) {} + + /// \brief A pointer to a generator of batches. + /// + /// This will be set when the node is added to the plan and should be used to consume + /// data from the plan. If this function is not called frequently enough then the sink + /// node will start to accumulate data and may apply backpressure. + std::function>()>* generator; + /// \brief A pointer which will be set to the schema of the generated batches + /// + /// This is optional, if nullptr is passed in then it will be ignored. + /// This will be set when the node is added to the plan, before StartProducing is called + std::shared_ptr* schema; + /// \brief Options to control when to apply backpressure + /// + /// This is optional, the default is to never apply backpressure. If the plan is not + /// consumed quickly enough the system may eventually run out of memory. + BackpressureOptions backpressure; + /// \brief A pointer to a backpressure monitor + /// + /// This will be set when the node is added to the plan. This can be used to inspect + /// the amount of data currently queued in the sink node. This is an optional utility + /// and backpressure can be applied even if this is not used. + BackpressureMonitor** backpressure_monitor; + /// \brief Controls whether batches should be emitted immediately or sequenced in order + /// + /// \see QueryOptions for more details + std::optional sequence_output; +}; + +/// \brief Control used by a SinkNodeConsumer to pause & resume +/// +/// Callers should ensure that they do not call Pause and Resume simultaneously and they +/// should sequence things so that a call to Pause() is always followed by an eventual +/// call to Resume() +class ARROW_ACERO_EXPORT BackpressureControl { + public: + virtual ~BackpressureControl() = default; + /// \brief Ask the input to pause + /// + /// This is best effort, batches may continue to arrive + /// Must eventually be followed by a call to Resume() or deadlock will occur + virtual void Pause() = 0; + /// \brief Ask the input to resume + virtual void Resume() = 0; +}; + +/// \brief a sink node that consumes the data as part of the plan using callbacks +class ARROW_ACERO_EXPORT SinkNodeConsumer { + public: + virtual ~SinkNodeConsumer() = default; + /// \brief Prepare any consumer state + /// + /// This will be run once the schema is finalized as the plan is starting and + /// before any calls to Consume. A common use is to save off the schema so that + /// batches can be interpreted. 
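+  ///
+  /// A minimal consumer sketch (illustrative, not part of the original header):
+  ///
+  ///   class RowCountingConsumer : public SinkNodeConsumer {
+  ///    public:
+  ///     Status Init(const std::shared_ptr<Schema>& schema,
+  ///                 BackpressureControl* backpressure_control,
+  ///                 ExecPlan* plan) override {
+  ///       schema_ = schema;  // save the schema so batches can be interpreted
+  ///       return Status::OK();
+  ///     }
+  ///     Status Consume(ExecBatch batch) override {
+  ///       num_rows_ += batch.length;
+  ///       return Status::OK();
+  ///     }
+  ///     Future<> Finish() override { return Future<>::MakeFinished(); }
+  ///
+  ///     std::shared_ptr<Schema> schema_;
+  ///     int64_t num_rows_ = 0;
+  ///   };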
+ virtual Status Init(const std::shared_ptr& schema, + BackpressureControl* backpressure_control, ExecPlan* plan) = 0; + /// \brief Consume a batch of data + virtual Status Consume(ExecBatch batch) = 0; + /// \brief Signal to the consumer that the last batch has been delivered + /// + /// The returned future should only finish when all outstanding tasks have completed + /// + /// If the plan is ended early or aborts due to an error then this will not be + /// called. + virtual Future<> Finish() = 0; +}; + +/// \brief Add a sink node which consumes data within the exec plan run +class ARROW_ACERO_EXPORT ConsumingSinkNodeOptions : public ExecNodeOptions { + public: + explicit ConsumingSinkNodeOptions(std::shared_ptr consumer, + std::vector names = {}, + std::optional sequence_output = std::nullopt) + : consumer(std::move(consumer)), + names(std::move(names)), + sequence_output(sequence_output) {} + + std::shared_ptr consumer; + /// \brief Names to rename the sink's schema fields to + /// + /// If specified then names must be provided for all fields. Currently, only a flat + /// schema is supported (see GH-31875). + /// + /// If not specified then names will be generated based on the source data. + std::vector names; + /// \brief Controls whether batches should be emitted immediately or sequenced in order + /// + /// \see QueryOptions for more details + std::optional sequence_output; +}; + +/// \brief Make a node which sorts rows passed through it +/// +/// All batches pushed to this node will be accumulated, then sorted, by the given +/// fields. Then sorted batches will be forwarded to the generator in sorted order. +class ARROW_ACERO_EXPORT OrderBySinkNodeOptions : public SinkNodeOptions { + public: + /// \brief create an instance from values + explicit OrderBySinkNodeOptions( + SortOptions sort_options, + std::function>()>* generator) + : SinkNodeOptions(generator), sort_options(std::move(sort_options)) {} + + /// \brief options describing which columns and direction to sort + SortOptions sort_options; +}; + +/// \brief Apply a new ordering to data +/// +/// Currently this node works by accumulating all data, sorting, and then emitting +/// the new data with an updated batch index. +/// +/// Larger-than-memory sort is not currently supported. 
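+///
+/// For example (an illustrative sketch; the Ordering constructor shape is an
+/// assumption): sort ascending by a "ts" column:
+///
+///   OrderByNodeOptions{Ordering({compute::SortKey{"ts", compute::SortOrder::Ascending}})};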
+class ARROW_ACERO_EXPORT OrderByNodeOptions : public ExecNodeOptions { + public: + static constexpr std::string_view kName = "order_by"; + explicit OrderByNodeOptions(Ordering ordering) : ordering(std::move(ordering)) {} + + /// \brief The new ordering to apply to outgoing data + Ordering ordering; +}; + +enum class JoinType { + LEFT_SEMI, + RIGHT_SEMI, + LEFT_ANTI, + RIGHT_ANTI, + INNER, + LEFT_OUTER, + RIGHT_OUTER, + FULL_OUTER +}; + +std::string ToString(JoinType t); + +enum class JoinKeyCmp { EQ, IS }; + +/// \brief a node which implements a join operation using a hash table +class ARROW_ACERO_EXPORT HashJoinNodeOptions : public ExecNodeOptions { + public: + static constexpr const char* default_output_suffix_for_left = ""; + static constexpr const char* default_output_suffix_for_right = ""; + /// \brief create an instance from values that outputs all columns + HashJoinNodeOptions( + JoinType in_join_type, std::vector in_left_keys, + std::vector in_right_keys, Expression filter = literal(true), + std::string output_suffix_for_left = default_output_suffix_for_left, + std::string output_suffix_for_right = default_output_suffix_for_right, + bool disable_bloom_filter = false) + : join_type(in_join_type), + left_keys(std::move(in_left_keys)), + right_keys(std::move(in_right_keys)), + output_all(true), + output_suffix_for_left(std::move(output_suffix_for_left)), + output_suffix_for_right(std::move(output_suffix_for_right)), + filter(std::move(filter)), + disable_bloom_filter(disable_bloom_filter) { + this->key_cmp.resize(this->left_keys.size()); + for (size_t i = 0; i < this->left_keys.size(); ++i) { + this->key_cmp[i] = JoinKeyCmp::EQ; + } + } + /// \brief create an instance from keys + /// + /// This will create an inner join that outputs all columns and has no post join filter + /// + /// `in_left_keys` should have the same length and types as `in_right_keys` + /// @param in_left_keys the keys in the left input + /// @param in_right_keys the keys in the right input + HashJoinNodeOptions(std::vector in_left_keys, + std::vector in_right_keys) + : left_keys(std::move(in_left_keys)), right_keys(std::move(in_right_keys)) { + this->join_type = JoinType::INNER; + this->output_all = true; + this->output_suffix_for_left = default_output_suffix_for_left; + this->output_suffix_for_right = default_output_suffix_for_right; + this->key_cmp.resize(this->left_keys.size()); + for (size_t i = 0; i < this->left_keys.size(); ++i) { + this->key_cmp[i] = JoinKeyCmp::EQ; + } + this->filter = literal(true); + } + /// \brief create an instance from values using JoinKeyCmp::EQ for all comparisons + HashJoinNodeOptions( + JoinType join_type, std::vector left_keys, + std::vector right_keys, std::vector left_output, + std::vector right_output, Expression filter = literal(true), + std::string output_suffix_for_left = default_output_suffix_for_left, + std::string output_suffix_for_right = default_output_suffix_for_right, + bool disable_bloom_filter = false) + : join_type(join_type), + left_keys(std::move(left_keys)), + right_keys(std::move(right_keys)), + output_all(false), + left_output(std::move(left_output)), + right_output(std::move(right_output)), + output_suffix_for_left(std::move(output_suffix_for_left)), + output_suffix_for_right(std::move(output_suffix_for_right)), + filter(std::move(filter)), + disable_bloom_filter(disable_bloom_filter) { + this->key_cmp.resize(this->left_keys.size()); + for (size_t i = 0; i < this->left_keys.size(); ++i) { + this->key_cmp[i] = JoinKeyCmp::EQ; + } + } + /// \brief create an 
instance from values + HashJoinNodeOptions( + JoinType join_type, std::vector left_keys, + std::vector right_keys, std::vector left_output, + std::vector right_output, std::vector key_cmp, + Expression filter = literal(true), + std::string output_suffix_for_left = default_output_suffix_for_left, + std::string output_suffix_for_right = default_output_suffix_for_right, + bool disable_bloom_filter = false) + : join_type(join_type), + left_keys(std::move(left_keys)), + right_keys(std::move(right_keys)), + output_all(false), + left_output(std::move(left_output)), + right_output(std::move(right_output)), + key_cmp(std::move(key_cmp)), + output_suffix_for_left(std::move(output_suffix_for_left)), + output_suffix_for_right(std::move(output_suffix_for_right)), + filter(std::move(filter)), + disable_bloom_filter(disable_bloom_filter) {} + + HashJoinNodeOptions() = default; + + // type of join (inner, left, semi...) + JoinType join_type = JoinType::INNER; + // key fields from left input + std::vector left_keys; + // key fields from right input + std::vector right_keys; + // if set all valid fields from both left and right input will be output + // (and field ref vectors for output fields will be ignored) + bool output_all = false; + // output fields passed from left input + std::vector left_output; + // output fields passed from right input + std::vector right_output; + // key comparison function (determines whether a null key is equal another null + // key or not) + std::vector key_cmp; + // suffix added to names of output fields coming from left input (used to distinguish, + // if necessary, between fields of the same name in left and right input and can be left + // empty if there are no name collisions) + std::string output_suffix_for_left; + // suffix added to names of output fields coming from right input + std::string output_suffix_for_right; + // residual filter which is applied to matching rows. Rows that do not match + // the filter are not included. The filter is applied against the + // concatenated input schema (left fields then right fields) and can reference + // fields that are not included in the output. + Expression filter = literal(true); + // whether or not to disable Bloom filters in this join + bool disable_bloom_filter = false; +}; + +/// \brief a node which implements the asof join operation +/// +/// Note, this API is experimental and will change in the future +/// +/// This node takes one left table and any number of right tables, and asof joins them +/// together. Batches produced by each input must be ordered by the "on" key. +/// This node will output one row for each row in the left table. +class ARROW_ACERO_EXPORT AsofJoinNodeOptions : public ExecNodeOptions { + public: + /// \brief Keys for one input table of the AsofJoin operation + /// + /// The keys must be consistent across the input tables: + /// Each "on" key must refer to a field of the same type and units across the tables. + /// Each "by" key must refer to a list of fields of the same types across the tables. + struct Keys { + /// \brief "on" key for the join. + /// + /// The input table must be sorted by the "on" key. Must be a single field of a common + /// type. Inexact match is used on the "on" key. i.e., a row is considered a match iff + /// left_on - tolerance <= right_on <= left_on. + /// Currently, the "on" key must be of an integer, date, or timestamp type. + FieldRef on_key; + /// \brief "by" key for the join. + /// + /// Each input table must have each field of the "by" key. 
Exact equality is used for + /// each field of the "by" key. + /// Currently, each field of the "by" key must be of an integer, date, timestamp, or + /// base-binary type. + std::vector by_key; + }; + + AsofJoinNodeOptions(std::vector input_keys, int64_t tolerance) + : input_keys(std::move(input_keys)), tolerance(tolerance) {} + + /// \brief AsofJoin keys per input table. At least two keys must be given. The first key + /// corresponds to a left table and all other keys correspond to right tables for the + /// as-of-join. + /// + /// \see `Keys` for details. + std::vector input_keys; + /// \brief Tolerance for inexact "on" key matching. A right row is considered a match + /// with the left row if `right.on - left.on <= tolerance`. The `tolerance` may be: + /// - negative, in which case a past-as-of-join occurs; + /// - or positive, in which case a future-as-of-join occurs; + /// - or zero, in which case an exact-as-of-join occurs. + /// + /// The tolerance is interpreted in the same units as the "on" key. + int64_t tolerance; +}; + +/// \brief a node which select top_k/bottom_k rows passed through it +/// +/// All batches pushed to this node will be accumulated, then selected, by the given +/// fields. Then sorted batches will be forwarded to the generator in sorted order. +class ARROW_ACERO_EXPORT SelectKSinkNodeOptions : public SinkNodeOptions { + public: + explicit SelectKSinkNodeOptions( + SelectKOptions select_k_options, + std::function>()>* generator) + : SinkNodeOptions(generator), select_k_options(std::move(select_k_options)) {} + + /// SelectK options + SelectKOptions select_k_options; +}; + +/// \brief a sink node which accumulates all output into a table +class ARROW_ACERO_EXPORT TableSinkNodeOptions : public ExecNodeOptions { + public: + /// \brief create an instance from values + explicit TableSinkNodeOptions(std::shared_ptr
<Table>* output_table, + std::optional<bool> sequence_output = std::nullopt) + : output_table(output_table), sequence_output(sequence_output) {} + + /// \brief an "out parameter" specifying the table that will be created + /// + /// Must not be null and remain valid for the entirety of the plan execution. After the + /// plan has completed this will be set to point to the result table + std::shared_ptr<Table>
* output_table; + /// \brief Controls whether batches should be emitted immediately or sequenced in order + /// + /// \see QueryOptions for more details + std::optional sequence_output; + /// \brief Custom names to use for the columns. + /// + /// If specified then names must be provided for all fields. Currently, only a flat + /// schema is supported (see GH-31875). + /// + /// If not specified then names will be generated based on the source data. + std::vector names; +}; + +/// \brief a row template that describes one row that will be generated for each input row +struct ARROW_ACERO_EXPORT PivotLongerRowTemplate { + PivotLongerRowTemplate(std::vector feature_values, + std::vector> measurement_values) + : feature_values(std::move(feature_values)), + measurement_values(std::move(measurement_values)) {} + /// A (typically unique) set of feature values for the template, usually derived from a + /// column name + /// + /// These will be used to populate the feature columns + std::vector feature_values; + /// The fields containing the measurements to use for this row + /// + /// These will be used to populate the measurement columns. If nullopt then nulls + /// will be inserted for the given value. + std::vector> measurement_values; +}; + +/// \brief Reshape a table by turning some columns into additional rows +/// +/// This operation is sometimes also referred to as UNPIVOT +/// +/// This is typically done when there are multiple observations in each row in order to +/// transform to a table containing a single observation per row. +/// +/// For example: +/// +/// | time | left_temp | right_temp | +/// | ---- | --------- | ---------- | +/// | 1 | 10 | 20 | +/// | 2 | 15 | 18 | +/// +/// The above table contains two observations per row. There is an implicit feature +/// "location" (left vs right) and a measurement "temp". What we really want is: +/// +/// | time | location | temp | +/// | --- | --- | --- | +/// | 1 | left | 10 | +/// | 1 | right | 20 | +/// | 2 | left | 15 | +/// | 2 | right | 18 | +/// +/// For a more complex example consider: +/// +/// | time | ax1 | ay1 | bx1 | ay2 | +/// | ---- | --- | --- | --- | --- | +/// | 0 | 1 | 2 | 3 | 4 | +/// +/// We can pretend a vs b and x vs y are features while 1 and 2 are two different +/// kinds of measurements. We thus want to pivot to +/// +/// | time | a/b | x/y | f1 | f2 | +/// | ---- | --- | --- | ---- | ---- | +/// | 0 | a | x | 1 | null | +/// | 0 | a | y | 2 | 4 | +/// | 0 | b | x | 3 | null | +/// +/// To do this we create a row template for each combination of features. One should +/// be able to do this purely by looking at the column names. For example, given the +/// above columns "ax1", "ay1", "bx1", and "ay2" we know we have three feature +/// combinations (a, x), (a, y), and (b, x). Similarly, we know we have two possible +/// measurements, "1" and "2". +/// +/// For each combination of features we create a row template. In each row template we +/// describe the combination and then list which columns to use for the measurements. +/// If a measurement doesn't exist for a given combination then we use nullopt. 
+/// +/// So, for our above example, we have: +/// +/// (a, x): names={"a", "x"}, values={"ax1", nullopt} +/// (a, y): names={"a", "y"}, values={"ay1", "ay2"} +/// (b, x): names={"b", "x"}, values={"bx1", nullopt} +/// +/// Finishing it off we name our new columns: +/// feature_field_names={"a/b","x/y"} +/// measurement_field_names={"f1", "f2"} +class ARROW_ACERO_EXPORT PivotLongerNodeOptions : public ExecNodeOptions { + public: + static constexpr std::string_view kName = "pivot_longer"; + /// One or more row templates to create new output rows + /// + /// Normally there are at least two row templates. The output # of rows + /// will be the input # of rows * the number of row templates + std::vector row_templates; + /// The names of the columns which describe the new features + std::vector feature_field_names; + /// The names of the columns which represent the measurements + std::vector measurement_field_names; +}; + +/// @} + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/order_by_impl.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/order_by_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..9b5a0f69a69ffc8f23fb5416e82777d2d06f0a00 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/order_by_impl.h @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/acero/options.h" +#include "arrow/record_batch.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" + +namespace arrow { + +using compute::ExecContext; + +namespace acero { + +class OrderByImpl { + public: + virtual ~OrderByImpl() = default; + + virtual void InputReceived(const std::shared_ptr& batch) = 0; + + virtual Result DoFinish() = 0; + + virtual std::string ToString() const = 0; + + static Result> MakeSort( + ExecContext* ctx, const std::shared_ptr& output_schema, + const SortOptions& options); + + static Result> MakeSelectK( + ExecContext* ctx, const std::shared_ptr& output_schema, + const SelectKOptions& options); +}; + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/partition_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/partition_util.h new file mode 100644 index 0000000000000000000000000000000000000000..1413a8326ade01fc264c4800d83d2df85db59acd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/partition_util.h @@ -0,0 +1,184 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include "arrow/acero/util.h" +#include "arrow/buffer.h" +#include "arrow/util/pcg_random.h" + +namespace arrow { +namespace acero { + +class PartitionSort { + public: + /// \brief Bucket sort rows on partition ids in O(num_rows) time. + /// + /// The output also includes the exclusive cumulative sum of bucket sizes. + /// This corresponds to ranges in the sorted array containing all row ids for + /// each of the partitions. + /// + /// prtn_ranges must be initialized and have at least num_prtns + 1 elements. + /// When this method returns, prtn_ranges[i] will contain the total number of + /// elements in partitions 0 through i - 1; prtn_ranges[0] will be 0. + /// + /// prtn_id_impl must be a function that takes in a row id (int) and returns + /// a partition id (int). The returned partition id must be between 0 and + /// num_prtns (exclusive). + /// + /// output_pos_impl is a function that takes in a row id (int) and a position (int) + /// in the bucket sorted output. The function should insert the row in the + /// output.
+ /// + /// For example: + /// + /// in_arr: [5, 7, 2, 3, 5, 4] + /// num_prtns: 3 + /// prtn_id_impl: [&in_arr] (int row_id) { return in_arr[row_id] / 3; } + /// output_pos_impl: [&out_arr] (int row_id, int pos) { out_arr[pos] = row_id; } + /// + /// After Execution + /// out_arr: [2, 5, 3, 5, 4, 7] + /// prtn_ranges: [0, 1, 5, 6] + template + static void Eval(int64_t num_rows, int num_prtns, uint16_t* prtn_ranges, + INPUT_PRTN_ID_FN prtn_id_impl, OUTPUT_POS_FN output_pos_impl) { + ARROW_DCHECK(num_rows > 0 && num_rows <= (1 << 15)); + ARROW_DCHECK(num_prtns >= 1 && num_prtns <= (1 << 15)); + + memset(prtn_ranges, 0, (num_prtns + 1) * sizeof(uint16_t)); + + for (int64_t i = 0; i < num_rows; ++i) { + int prtn_id = static_cast(prtn_id_impl(i)); + ++prtn_ranges[prtn_id + 1]; + } + + uint16_t sum = 0; + for (int i = 0; i < num_prtns; ++i) { + uint16_t sum_next = sum + prtn_ranges[i + 1]; + prtn_ranges[i + 1] = sum; + sum = sum_next; + } + + for (int64_t i = 0; i < num_rows; ++i) { + int prtn_id = static_cast(prtn_id_impl(i)); + int pos = prtn_ranges[prtn_id + 1]++; + output_pos_impl(i, pos); + } + } +}; + +/// \brief A control for synchronizing threads on a partitionable workload +class PartitionLocks { + public: + PartitionLocks(); + ~PartitionLocks(); + /// \brief Initializes the control, must be called before use + /// + /// \param num_threads Maximum number of threads that will access the partitions + /// \param num_prtns Number of partitions to synchronize + void Init(size_t num_threads, int num_prtns); + /// \brief Cleans up the control, it should not be used after this call + void CleanUp(); + /// \brief Acquire a partition to work on one + /// + /// \param thread_id The index of the thread trying to acquire the partition lock + /// \param num_prtns Length of prtns_to_try, must be <= num_prtns used in Init + /// \param prtns_to_try An array of partitions that still have remaining work + /// \param limit_retries If false, this method will spinwait forever until success + /// \param max_retries Max times to attempt checking out work before returning false + /// \param[out] locked_prtn_id The id of the partition locked + /// \param[out] locked_prtn_id_pos The index of the partition locked in prtns_to_try + /// \return True if a partition was locked, false if max_retries was attempted + /// without successfully acquiring a lock + /// + /// This method is thread safe + bool AcquirePartitionLock(size_t thread_id, int num_prtns, const int* prtns_to_try, + bool limit_retries, int max_retries, int* locked_prtn_id, + int* locked_prtn_id_pos); + /// \brief Release a partition so that other threads can work on it + void ReleasePartitionLock(int prtn_id); + + // Executes (synchronously and using current thread) the same operation on a set of + // multiple partitions. Tries to minimize partition locking overhead by randomizing and + // adjusting order in which partitions are processed. + // + // PROCESS_PRTN_FN is a callback which will be executed for each partition after + // acquiring the lock for that partition. It gets partition id as an argument. + // IS_PRTN_EMPTY_FN is a callback which filters out (when returning true) partitions + // with specific ids from processing. 
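The worked example in the Eval comment above can be written out as compilable code. This is my translation of that example, not code from the diff; it bucket-sorts six values into three partitions keyed by `value / 3`:

```cpp
#include <cstdint>
#include <vector>

#include "arrow/acero/partition_util.h"

void PartitionSortDemo() {
  std::vector<int> in_arr = {5, 7, 2, 3, 5, 4};
  std::vector<int> out_arr(in_arr.size());
  uint16_t prtn_ranges[4];  // num_prtns + 1 entries
  arrow::acero::PartitionSort::Eval(
      /*num_rows=*/6, /*num_prtns=*/3, prtn_ranges,
      /*prtn_id_impl=*/[&](int64_t row_id) { return in_arr[row_id] / 3; },
      /*output_pos_impl=*/
      [&](int64_t row_id, int pos) { out_arr[pos] = in_arr[row_id]; });
  // out_arr is now {2, 5, 3, 5, 4, 7} and prtn_ranges is {0, 1, 5, 6},
  // matching the expected output shown in the comment above.
}
```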
+ // + template + Status ForEachPartition(size_t thread_id, + /*scratch space buffer with space for one element per partition; + dirty in and dirty out*/ + int* temp_unprocessed_prtns, IS_PRTN_EMPTY_FN is_prtn_empty_fn, + PROCESS_PRTN_FN process_prtn_fn) { + int num_unprocessed_partitions = 0; + for (int i = 0; i < num_prtns_; ++i) { + bool is_prtn_empty = is_prtn_empty_fn(i); + if (!is_prtn_empty) { + temp_unprocessed_prtns[num_unprocessed_partitions++] = i; + } + } + while (num_unprocessed_partitions > 0) { + int locked_prtn_id; + int locked_prtn_id_pos; + AcquirePartitionLock(thread_id, num_unprocessed_partitions, temp_unprocessed_prtns, + /*limit_retries=*/false, /*max_retries=*/-1, &locked_prtn_id, + &locked_prtn_id_pos); + { + class AutoReleaseLock { + public: + AutoReleaseLock(PartitionLocks* locks, int prtn_id) + : locks(locks), prtn_id(prtn_id) {} + ~AutoReleaseLock() { locks->ReleasePartitionLock(prtn_id); } + PartitionLocks* locks; + int prtn_id; + } auto_release_lock(this, locked_prtn_id); + ARROW_RETURN_NOT_OK(process_prtn_fn(locked_prtn_id)); + } + if (locked_prtn_id_pos < num_unprocessed_partitions - 1) { + temp_unprocessed_prtns[locked_prtn_id_pos] = + temp_unprocessed_prtns[num_unprocessed_partitions - 1]; + } + --num_unprocessed_partitions; + } + return Status::OK(); + } + + private: + std::atomic* lock_ptr(int prtn_id); + int random_int(size_t thread_id, int num_values); + + struct PartitionLock { + static constexpr int kCacheLineBytes = 64; + std::atomic lock; + uint8_t padding[kCacheLineBytes]; + }; + int num_prtns_; + std::unique_ptr locks_; + std::unique_ptr rngs_; +}; + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h new file mode 100644 index 0000000000000000000000000000000000000000..ddb4c120f2a877ffb794b8443f8af1f7707d2cf6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/pch.h @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Often-used headers, for precompiling. +// If updating this header, please make sure you check compilation speed +// before checking in. Adding headers which are not used extremely often +// may incur a slowdown, since it makes the precompiled header heavier to load. 
+ +#include "arrow/pch.h" diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/query_context.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/query_context.h new file mode 100644 index 0000000000000000000000000000000000000000..9ea11679cba0529ad35b7f4114763d383f92c6b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/query_context.h @@ -0,0 +1,157 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +#pragma once + +#include + +#include "arrow/acero/exec_plan.h" +#include "arrow/acero/task_util.h" +#include "arrow/acero/util.h" +#include "arrow/compute/exec.h" +#include "arrow/io/interfaces.h" +#include "arrow/util/async_util.h" +#include "arrow/util/type_fwd.h" + +namespace arrow { + +using compute::default_exec_context; +using io::IOContext; + +namespace acero { + +class ARROW_ACERO_EXPORT QueryContext { + public: + QueryContext(QueryOptions opts = {}, + ExecContext exec_context = *default_exec_context()); + + Status Init(size_t max_num_threads, arrow::util::AsyncTaskScheduler* scheduler); + + const ::arrow::internal::CpuInfo* cpu_info() const; + int64_t hardware_flags() const; + const QueryOptions& options() const { return options_; } + MemoryPool* memory_pool() const { return exec_context_.memory_pool(); } + ::arrow::internal::Executor* executor() const { return exec_context_.executor(); } + ExecContext* exec_context() { return &exec_context_; } + IOContext* io_context() { return &io_context_; } + TaskScheduler* scheduler() { return task_scheduler_.get(); } + arrow::util::AsyncTaskScheduler* async_scheduler() { return async_scheduler_; } + + size_t GetThreadIndex(); + size_t max_concurrency() const; + Result GetTempStack(size_t thread_index); + + /// \brief Start an external task + /// + /// This should be avoided if possible. It is kept in for now for legacy + /// purposes. This should be called before the external task is started. If + /// a valid future is returned then it should be marked complete when the + /// external task has finished. + /// + /// \param name A name to give the task for traceability and debugging + /// + /// \return an invalid future if the plan has already ended, otherwise this + /// returns a future that must be completed when the external task + /// finishes. + Result> BeginExternalTask(std::string_view name); + + /// \brief Add a single function as a task to the query's task group + /// on the compute threadpool. + /// + /// \param fn The task to run. Takes no arguments and returns a Status. + /// \param name A name to give the task for traceability and debugging + void ScheduleTask(std::function fn, std::string_view name); + /// \brief Add a single function as a task to the query's task group + /// on the compute threadpool. 
+ /// + /// \param fn The task to run. Takes the thread index and returns a Status. + /// \param name A name to give the task for traceability and debugging + void ScheduleTask(std::function fn, std::string_view name); + /// \brief Add a single function as a task to the query's task group on + /// the IO thread pool + /// + /// \param fn The task to run. Returns a status. + /// \param name A name to give the task for traceability and debugging + void ScheduleIOTask(std::function fn, std::string_view name); + + // Register/Start TaskGroup is a way of performing a "Parallel For" pattern: + // - The task function takes the thread index and the index of the task + // - The on_finished function takes the thread index + // Returns an integer ID that will be used to reference the task group in + // StartTaskGroup. At runtime, call StartTaskGroup with the ID and the number of times + // you'd like the task to be executed. The need to register a task group before use will + // be removed after we rewrite the scheduler. + /// \brief Register a "parallel for" task group with the scheduler + /// + /// \param task The function implementing the task. Takes the thread_index and + /// the task index. + /// \param on_finished The function that gets run once all tasks have been completed. + /// Takes the thread_index. + /// + /// Must be called inside of ExecNode::Init. + int RegisterTaskGroup(std::function task, + std::function on_finished); + + /// \brief Start the task group with the specified ID. This can only + /// be called once per task_group_id. + /// + /// \param task_group_id The ID of the task group to run + /// \param num_tasks The number of times to run the task + Status StartTaskGroup(int task_group_id, int64_t num_tasks); + + // This is an RAII class for keeping track of in-flight file IO. Useful for getting + // an estimate of memory use, and how much memory we expect to be freed soon. + // Returned by ReportTempFileIO. 
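As a sketch of the register-then-start pattern just described: the group is registered once during node initialization and started later, once the number of tasks is known. The function and parameter names here are hypothetical:

```cpp
#include "arrow/acero/query_context.h"

// Hypothetical init phase: register the "parallel for" group exactly once.
arrow::Status InitPhase(arrow::acero::QueryContext* ctx, int* task_group_id) {
  *task_group_id = ctx->RegisterTaskGroup(
      [](size_t thread_index, int64_t task_id) -> arrow::Status {
        // body of one iteration; task_id ranges over [0, num_tasks)
        return arrow::Status::OK();
      },
      [](size_t thread_index) -> arrow::Status {
        // runs once, after every task in the group has completed
        return arrow::Status::OK();
      });
  return arrow::Status::OK();
}

// Hypothetical run phase: execute the registered task 16 times.
arrow::Status RunPhase(arrow::acero::QueryContext* ctx, int task_group_id) {
  return ctx->StartTaskGroup(task_group_id, /*num_tasks=*/16);
}
```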
+ struct [[nodiscard]] TempFileIOMark { + QueryContext* ctx_; + size_t bytes_; + + TempFileIOMark(QueryContext* ctx, size_t bytes) : ctx_(ctx), bytes_(bytes) { + ctx_->in_flight_bytes_to_disk_.fetch_add(bytes_, std::memory_order_acquire); + } + + ARROW_DISALLOW_COPY_AND_ASSIGN(TempFileIOMark); + + ~TempFileIOMark() { + ctx_->in_flight_bytes_to_disk_.fetch_sub(bytes_, std::memory_order_release); + } + }; + + TempFileIOMark ReportTempFileIO(size_t bytes) { return {this, bytes}; } + + size_t GetCurrentTempFileIO() { return in_flight_bytes_to_disk_.load(); } + + private: + QueryOptions options_; + // To be replaced with Acero-specific context once scheduler is done and + // we don't need ExecContext for kernels + ExecContext exec_context_; + IOContext io_context_; + + arrow::util::AsyncTaskScheduler* async_scheduler_ = NULLPTR; + std::unique_ptr task_scheduler_ = TaskScheduler::Make(); + + ThreadIndexer thread_indexer_; + struct ThreadLocalData { + bool is_init = false; + arrow::util::TempVectorStack stack; + }; + std::vector tld_; + + std::atomic in_flight_bytes_to_disk_{0}; +}; +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h new file mode 100644 index 0000000000000000000000000000000000000000..db3076a58841a6cb85fcc3d5033ef3b74ed18898 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/schema_util.h @@ -0,0 +1,226 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/type.h" // for DataType, FieldRef, Field and Schema + +namespace arrow { + +using internal::checked_cast; + +namespace acero { + +// Identifiers for all different row schemas that are used in a join +// +enum class HashJoinProjection : int { + INPUT = 0, + KEY = 1, + PAYLOAD = 2, + FILTER = 3, + OUTPUT = 4 +}; + +struct SchemaProjectionMap { + static constexpr int kMissingField = -1; + int num_cols; + const int* source_to_base; + const int* base_to_target; + inline int get(int i) const { + assert(i >= 0 && i < num_cols); + assert(source_to_base[i] != kMissingField); + return base_to_target[source_to_base[i]]; + } +}; + +/// Helper class for managing different projections of the same row schema. +/// Used to efficiently map any field in one projection to a corresponding field in +/// another projection. +/// Materialized mappings are generated lazily at the time of the first access. +/// Thread-safe apart from initialization. 
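The projection-map machinery declared next is easiest to see on a tiny schema. A sketch under the assumption that INPUT serves as the base (full) schema and KEY as a projection of it:

```cpp
#include <vector>

#include "arrow/acero/schema_util.h"
#include "arrow/api.h"

arrow::Status ProjectionDemo() {
  namespace ac = arrow::acero;
  std::shared_ptr<arrow::Schema> schema = arrow::schema(
      {arrow::field("k", arrow::int32()), arrow::field("v", arrow::utf8())});
  std::vector<arrow::FieldRef> keys = {arrow::FieldRef("k")};

  ac::HashJoinProjectionMaps maps;
  ARROW_RETURN_NOT_OK(maps.Init(ac::HashJoinProjection::INPUT, *schema,
                                {ac::HashJoinProjection::KEY}, {&keys}));

  // Map key column 0 back to its position in the full input schema.
  ac::SchemaProjectionMap key_to_input =
      maps.map(ac::HashJoinProjection::KEY, ac::HashJoinProjection::INPUT);
  int input_col = key_to_input.get(0);  // 0, i.e. column "k"
  (void)input_col;
  return arrow::Status::OK();
}
```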
+template +class SchemaProjectionMaps { + public: + static constexpr int kMissingField = -1; + + Status Init(ProjectionIdEnum full_schema_handle, const Schema& schema, + const std::vector& projection_handles, + const std::vector*>& projections) { + assert(projection_handles.size() == projections.size()); + ARROW_RETURN_NOT_OK(RegisterSchema(full_schema_handle, schema)); + for (size_t i = 0; i < projections.size(); ++i) { + ARROW_RETURN_NOT_OK( + RegisterProjectedSchema(projection_handles[i], *(projections[i]), schema)); + } + RegisterEnd(); + return Status::OK(); + } + + int num_cols(ProjectionIdEnum schema_handle) const { + int id = schema_id(schema_handle); + return static_cast(schemas_[id].second.data_types.size()); + } + + bool is_empty(ProjectionIdEnum schema_handle) const { + return num_cols(schema_handle) == 0; + } + + const std::string& field_name(ProjectionIdEnum schema_handle, int field_id) const { + int id = schema_id(schema_handle); + return schemas_[id].second.field_names[field_id]; + } + + const std::shared_ptr& data_type(ProjectionIdEnum schema_handle, + int field_id) const { + int id = schema_id(schema_handle); + return schemas_[id].second.data_types[field_id]; + } + + const std::vector>& data_types( + ProjectionIdEnum schema_handle) const { + int id = schema_id(schema_handle); + return schemas_[id].second.data_types; + } + + SchemaProjectionMap map(ProjectionIdEnum from, ProjectionIdEnum to) const { + int id_from = schema_id(from); + int id_to = schema_id(to); + SchemaProjectionMap result; + result.num_cols = num_cols(from); + result.source_to_base = mappings_[id_from].data(); + result.base_to_target = inverse_mappings_[id_to].data(); + return result; + } + + protected: + struct FieldInfos { + std::vector field_paths; + std::vector field_names; + std::vector> data_types; + }; + + Status RegisterSchema(ProjectionIdEnum handle, const Schema& schema) { + FieldInfos out_fields; + const FieldVector& in_fields = schema.fields(); + out_fields.field_paths.resize(in_fields.size()); + out_fields.field_names.resize(in_fields.size()); + out_fields.data_types.resize(in_fields.size()); + for (size_t i = 0; i < in_fields.size(); ++i) { + const std::string& name = in_fields[i]->name(); + const std::shared_ptr& type = in_fields[i]->type(); + out_fields.field_paths[i] = static_cast(i); + out_fields.field_names[i] = name; + out_fields.data_types[i] = type; + } + schemas_.push_back(std::make_pair(handle, out_fields)); + return Status::OK(); + } + + Status RegisterProjectedSchema(ProjectionIdEnum handle, + const std::vector& selected_fields, + const Schema& full_schema) { + FieldInfos out_fields; + const FieldVector& in_fields = full_schema.fields(); + out_fields.field_paths.resize(selected_fields.size()); + out_fields.field_names.resize(selected_fields.size()); + out_fields.data_types.resize(selected_fields.size()); + for (size_t i = 0; i < selected_fields.size(); ++i) { + // All fields must be found in schema without ambiguity + ARROW_ASSIGN_OR_RAISE(auto match, selected_fields[i].FindOne(full_schema)); + const std::string& name = in_fields[match[0]]->name(); + const std::shared_ptr& type = in_fields[match[0]]->type(); + out_fields.field_paths[i] = match[0]; + out_fields.field_names[i] = name; + out_fields.data_types[i] = type; + } + schemas_.push_back(std::make_pair(handle, out_fields)); + return Status::OK(); + } + + void RegisterEnd() { + size_t size = schemas_.size(); + mappings_.resize(size); + inverse_mappings_.resize(size); + int id_base = 0; + for (size_t i = 0; i < size; ++i) { + 
GenerateMapForProjection(static_cast(i), id_base); + } + } + + int schema_id(ProjectionIdEnum schema_handle) const { + for (size_t i = 0; i < schemas_.size(); ++i) { + if (schemas_[i].first == schema_handle) { + return static_cast(i); + } + } + // We should never get here + assert(false); + return -1; + } + + void GenerateMapForProjection(int id_proj, int id_base) { + int num_cols_proj = static_cast(schemas_[id_proj].second.data_types.size()); + int num_cols_base = static_cast(schemas_[id_base].second.data_types.size()); + + std::vector& mapping = mappings_[id_proj]; + std::vector& inverse_mapping = inverse_mappings_[id_proj]; + mapping.resize(num_cols_proj); + inverse_mapping.resize(num_cols_base); + + if (id_proj == id_base) { + for (int i = 0; i < num_cols_base; ++i) { + mapping[i] = inverse_mapping[i] = i; + } + } else { + const FieldInfos& fields_proj = schemas_[id_proj].second; + const FieldInfos& fields_base = schemas_[id_base].second; + for (int i = 0; i < num_cols_base; ++i) { + inverse_mapping[i] = SchemaProjectionMap::kMissingField; + } + for (int i = 0; i < num_cols_proj; ++i) { + int field_id = SchemaProjectionMap::kMissingField; + for (int j = 0; j < num_cols_base; ++j) { + if (fields_proj.field_paths[i] == fields_base.field_paths[j]) { + field_id = j; + // If there are multiple matches for the same input field, + // it will be mapped to the first match. + break; + } + } + assert(field_id != SchemaProjectionMap::kMissingField); + mapping[i] = field_id; + inverse_mapping[field_id] = i; + } + } + } + + // vector used as a mapping from ProjectionIdEnum to fields + std::vector> schemas_; + std::vector> mappings_; + std::vector> inverse_mappings_; +}; + +using HashJoinProjectionMaps = SchemaProjectionMaps; + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/task_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/task_util.h new file mode 100644 index 0000000000000000000000000000000000000000..fbd4af699d12795bd92bd385f23a036d63adde38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/task_util.h @@ -0,0 +1,102 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/acero/visibility.h" +#include "arrow/status.h" +#include "arrow/util/config.h" +#include "arrow/util/logging.h" + +namespace arrow { +namespace acero { + +// Atomic value surrounded by padding bytes to avoid cache line invalidation +// whenever it is modified by a concurrent thread on a different CPU core. 
+// +template +class AtomicWithPadding { + private: + static constexpr int kCacheLineSize = 64; + uint8_t padding_before[kCacheLineSize]; + + public: + std::atomic value; + + private: + uint8_t padding_after[kCacheLineSize]; +}; + +// Used for asynchronous execution of operations that can be broken into +// a fixed number of symmetric tasks that can be executed concurrently. +// +// Implements priorities between multiple such operations, called task groups. +// +// Allows to specify the maximum number of in-flight tasks at any moment. +// +// Also allows for executing next pending tasks immediately using a caller thread. +// +class ARROW_ACERO_EXPORT TaskScheduler { + public: + using TaskImpl = std::function; + using TaskGroupContinuationImpl = std::function; + using ScheduleImpl = std::function; + using AbortContinuationImpl = std::function; + + virtual ~TaskScheduler() = default; + + // Order in which task groups are registered represents priorities of their tasks + // (the first group has the highest priority). + // + // Returns task group identifier that is used to request operations on the task group. + virtual int RegisterTaskGroup(TaskImpl task_impl, + TaskGroupContinuationImpl cont_impl) = 0; + + virtual void RegisterEnd() = 0; + + // total_num_tasks may be zero, in which case task group continuation will be executed + // immediately + virtual Status StartTaskGroup(size_t thread_id, int group_id, + int64_t total_num_tasks) = 0; + + // Execute given number of tasks immediately using caller thread + virtual Status ExecuteMore(size_t thread_id, int num_tasks_to_execute, + bool execute_all) = 0; + + // Begin scheduling tasks using provided callback and + // the limit on the number of in-flight tasks at any moment. + // + // Scheduling will continue as long as there are waiting tasks. + // + // It will automatically resume whenever new task group gets started. + virtual Status StartScheduling(size_t thread_id, ScheduleImpl schedule_impl, + int num_concurrent_tasks, bool use_sync_execution) = 0; + + // Abort scheduling and execution. + // Used in case of being notified about unrecoverable error for the entire query. + virtual void Abort(AbortContinuationImpl impl) = 0; + + static std::unique_ptr Make(); +}; + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/test_nodes.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/test_nodes.h new file mode 100644 index 0000000000000000000000000000000000000000..7e31aa31b34d7b423ab85ff2e77c1cec0087fa5b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/test_nodes.h @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
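Reading the TaskScheduler contract above end to end, one plausible lifecycle is: register groups, freeze registration, start scheduling, then start a group. This is only a sketch; in particular it assumes ScheduleImpl receives a continuation taking a thread id (per the aliases above) and runs everything synchronously on thread 0:

```cpp
#include "arrow/acero/task_util.h"

arrow::Status RunOneGroup(arrow::acero::TaskScheduler* scheduler) {
  using arrow::acero::TaskScheduler;
  // 1. Register groups in priority order, then freeze registration.
  int group = scheduler->RegisterTaskGroup(
      [](size_t thread_id, int64_t task_id) { return arrow::Status::OK(); },
      [](size_t thread_id) { return arrow::Status::OK(); });
  scheduler->RegisterEnd();
  // 2. Provide the callback used to hand pending tasks to an executor
  //    (here: just run them inline).
  ARROW_RETURN_NOT_OK(scheduler->StartScheduling(
      /*thread_id=*/0,
      [](TaskScheduler::TaskGroupContinuationImpl task) { return task(0); },
      /*num_concurrent_tasks=*/1, /*use_sync_execution=*/true));
  // 3. Kick off the group's tasks.
  return scheduler->StartTaskGroup(/*thread_id=*/0, group, /*total_num_tasks=*/8);
}
```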
+ +#pragma once + +#include + +#include "arrow/acero/options.h" +#include "arrow/acero/test_util_internal.h" +#include "arrow/testing/random.h" + +namespace arrow { +namespace acero { + +// \brief Make a delaying source that is optionally noisy (prints when it emits) +AsyncGenerator> MakeDelayedGen( + Iterator> src, std::string label, double delay_sec, + bool noisy = false); + +// \brief Make a delaying source that is optionally noisy (prints when it emits) +AsyncGenerator> MakeDelayedGen( + AsyncGenerator> src, std::string label, double delay_sec, + bool noisy = false); + +// \brief Make a delaying source that is optionally noisy (prints when it emits) +AsyncGenerator> MakeDelayedGen(BatchesWithSchema src, + std::string label, + double delay_sec, + bool noisy = false); + +/// A node that slightly resequences the input at random +struct JitterNodeOptions : public ExecNodeOptions { + random::SeedType seed; + /// The max amount to add to a node's "cost". + int max_jitter_modifier; + + explicit JitterNodeOptions(random::SeedType seed, int max_jitter_modifier = 5) + : seed(seed), max_jitter_modifier(max_jitter_modifier) {} + static constexpr std::string_view kName = "jitter"; +}; + +class GateImpl; + +class Gate { + public: + static std::shared_ptr Make(); + + Gate(); + virtual ~Gate(); + + void ReleaseAllBatches(); + void ReleaseOneBatch(); + Future<> WaitForNextReleasedBatch(); + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(Gate); + + GateImpl* impl_; +}; + +// A node that holds all input batches until a given gate is released +struct GatedNodeOptions : public ExecNodeOptions { + explicit GatedNodeOptions(Gate* gate) : gate(gate) {} + Gate* gate; + + static constexpr std::string_view kName = "gated"; +}; + +void RegisterTestNodes(); + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h new file mode 100644 index 0000000000000000000000000000000000000000..97707f43bf20b95387f463a9c07e37f54c33998c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/time_series_util.h @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
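A sketch of how Gate and GatedNodeOptions from test_nodes.h above might drive a test; the plan-building step is elided:

```cpp
#include "arrow/acero/test_nodes.h"

void GateDemo() {
  arrow::acero::RegisterTestNodes();  // makes the "gated" factory available
  std::shared_ptr<arrow::acero::Gate> gate = arrow::acero::Gate::Make();
  arrow::acero::GatedNodeOptions gate_options(gate.get());
  // ... build a test plan containing a "gated" node using gate_options ...
  gate->ReleaseOneBatch();    // let exactly one held batch through
  gate->ReleaseAllBatches();  // then release everything still held
}
```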
+ +#pragma once + +#include "arrow/record_batch.h" +#include "arrow/type_traits.h" + +namespace arrow::acero { + +// normalize the value to unsigned 64-bits while preserving ordering of values +template ::value, bool> = true> +uint64_t NormalizeTime(T t); + +uint64_t GetTime(const RecordBatch* batch, Type::type time_type, int col, uint64_t row); + +} // namespace arrow::acero diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/tpch_node.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/tpch_node.h new file mode 100644 index 0000000000000000000000000000000000000000..e6476b57ad6b4108af56777c029d932f4af94726 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/tpch_node.h @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/acero/type_fwd.h" +#include "arrow/acero/visibility.h" +#include "arrow/result.h" +#include "arrow/status.h" + +namespace arrow { +namespace acero { +namespace internal { + +class ARROW_ACERO_EXPORT TpchGen { + public: + virtual ~TpchGen() = default; + + /* + * \brief Create a factory for nodes that generate TPC-H data + * + * Note: Individual tables will reference each other. It is important that you only + * create a single TpchGen instance for each plan and then you can create nodes for each + * table from that single TpchGen instance. Note: Every batch will be scheduled as a new + * task using the ExecPlan's scheduler. + */ + static Result> Make( + ExecPlan* plan, double scale_factor = 1.0, int64_t batch_size = 4096, + std::optional seed = std::nullopt); + + // The below methods will create and add an ExecNode to the plan that generates + // data for the desired table. If columns is empty, all columns will be generated. + // The methods return the added ExecNode, which should be used for inputs. 
+ virtual Result Supplier(std::vector columns = {}) = 0; + virtual Result Part(std::vector columns = {}) = 0; + virtual Result PartSupp(std::vector columns = {}) = 0; + virtual Result Customer(std::vector columns = {}) = 0; + virtual Result Orders(std::vector columns = {}) = 0; + virtual Result Lineitem(std::vector columns = {}) = 0; + virtual Result Nation(std::vector columns = {}) = 0; + virtual Result Region(std::vector columns = {}) = 0; +}; + +} // namespace internal +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/type_fwd.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..f0410de9f7830a7d0e55a04eb514ae9d82e6958c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/type_fwd.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/compute/type_fwd.h" + +namespace arrow { + +namespace acero { + +class ExecNode; +class ExecPlan; +class ExecNodeOptions; +class ExecFactoryRegistry; +class QueryContext; +struct QueryOptions; +struct Declaration; +class SinkNodeConsumer; + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/unmaterialized_table.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/unmaterialized_table.h new file mode 100644 index 0000000000000000000000000000000000000000..05d6c866936e0a3bce7f7282dbac67f3586ffe58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/unmaterialized_table.h @@ -0,0 +1,271 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
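For illustration, adding one generated table to a plan with the TpchGen factory above could look like the following sketch (error propagation via the usual Arrow macros):

```cpp
#include "arrow/acero/tpch_node.h"
#include "arrow/result.h"

arrow::Status AddLineitem(arrow::acero::ExecPlan* plan,
                          arrow::acero::ExecNode** out_node) {
  using arrow::acero::internal::TpchGen;
  // One TpchGen instance per plan, as the comment above requires.
  ARROW_ASSIGN_OR_RAISE(std::unique_ptr<TpchGen> gen,
                        TpchGen::Make(plan, /*scale_factor=*/0.1));
  ARROW_ASSIGN_OR_RAISE(*out_node, gen->Lineitem());  // all columns
  return arrow::Status::OK();
}
```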
+ +#pragma once + +#include +#include +#include "arrow/array/builder_base.h" +#include "arrow/array/builder_binary.h" +#include "arrow/array/builder_primitive.h" +#include "arrow/memory_pool.h" +#include "arrow/record_batch.h" +#include "arrow/type_traits.h" +#include "arrow/util/logging.h" + +namespace arrow::acero { + +/// Lightweight representation of a cell of an unmaterialized table. +/// +struct CompositeEntry { + RecordBatch* batch; + uint64_t start; + uint64_t end; +}; + +// Forward declare the builder +template +class UnmaterializedSliceBuilder; + +/// A table of composite reference rows. Rows maintain pointers to the +/// constituent record batches, but the overall table retains shared_ptr +/// references to ensure memory remains resident while the table is live. +/// +/// The main reason for this is that, especially for wide tables, some operations +/// such as sorted_merge or asof_join are effectively row-oriented, rather than +/// column-oriented. Separating the join part from the columnar materialization +/// part simplifies the logic around data types and increases efficiency. +/// +/// We don't put the shared_ptr's into the rows for efficiency reasons. Use +/// UnmaterializedSliceBuilder to add ranges of record batches to this table +template +class UnmaterializedCompositeTable { + public: + UnmaterializedCompositeTable( + const std::shared_ptr& output_schema, size_t num_composite_tables, + std::unordered_map> output_col_to_src_, + arrow::MemoryPool* pool_ = arrow::default_memory_pool()) + : schema(output_schema), + num_composite_tables(num_composite_tables), + output_col_to_src(std::move(output_col_to_src_)), + pool{pool_} {} + + // Shallow wrappers around std::vector for performance + inline size_t capacity() { return slices.capacity(); } + inline void reserve(size_t num_slices) { slices.reserve(num_slices); } + + inline size_t Size() const { return num_rows; } + inline size_t Empty() const { return num_rows == 0; } + + Result>> Materialize() { + // Don't build empty batches + if (Empty()) { + return std::nullopt; + } + DCHECK_LE(Size(), (uint64_t)std::numeric_limits::max()); + std::vector> arrays(schema->num_fields()); + +#define MATERIALIZE_CASE(id) \ + case arrow::Type::id: { \ + using T = typename arrow::TypeIdTraits::Type; \ + ARROW_ASSIGN_OR_RAISE(arrays.at(i_col), materializeColumn(field_type, i_col)); \ + break; \ + } + + // Build the arrays column-by-column from the rows + for (int i_col = 0; i_col < schema->num_fields(); ++i_col) { + const std::shared_ptr& field = schema->field(i_col); + const auto& field_type = field->type(); + + switch (field_type->id()) { + MATERIALIZE_CASE(BOOL) + MATERIALIZE_CASE(INT8) + MATERIALIZE_CASE(INT16) + MATERIALIZE_CASE(INT32) + MATERIALIZE_CASE(INT64) + MATERIALIZE_CASE(UINT8) + MATERIALIZE_CASE(UINT16) + MATERIALIZE_CASE(UINT32) + MATERIALIZE_CASE(UINT64) + MATERIALIZE_CASE(FLOAT) + MATERIALIZE_CASE(DOUBLE) + MATERIALIZE_CASE(DATE32) + MATERIALIZE_CASE(DATE64) + MATERIALIZE_CASE(TIME32) + MATERIALIZE_CASE(TIME64) + MATERIALIZE_CASE(TIMESTAMP) + MATERIALIZE_CASE(STRING) + MATERIALIZE_CASE(LARGE_STRING) + MATERIALIZE_CASE(BINARY) + MATERIALIZE_CASE(LARGE_BINARY) + default: + return arrow::Status::Invalid("Unsupported data type ", + field->type()->ToString(), " for field ", + field->name()); + } + } + +#undef MATERIALIZE_CASE + + std::shared_ptr r = + arrow::RecordBatch::Make(schema, (int64_t)num_rows, arrays); + return r; + } + + private: + struct UnmaterializedSlice { + CompositeEntry components[MAX_COMPOSITE_TABLES]; + size_t 
num_components; + + inline int64_t Size() const { + if (num_components == 0) { + return 0; + } + return components[0].end - components[0].start; + } + }; + + // Mapping from an output column ID to a source table ID and column ID + std::shared_ptr schema; + size_t num_composite_tables; + std::unordered_map> output_col_to_src; + + arrow::MemoryPool* pool; + + /// A map from address of a record batch to the record batch. Used to + /// maintain the lifetime of the record batch in case it goes out of scope + /// by the main exec node thread + std::unordered_map> ptr2Ref = {}; + std::vector slices; + + size_t num_rows = 0; + + // for AddRecordBatchRef/AddSlice and access to UnmaterializedSlice + friend class UnmaterializedSliceBuilder; + + void AddRecordBatchRef(const std::shared_ptr& ref) { + ptr2Ref[(uintptr_t)ref.get()] = ref; + } + void AddSlice(const UnmaterializedSlice& slice) { + slices.push_back(slice); + num_rows += slice.Size(); + } + + template ::BuilderType> + enable_if_boolean static BuilderAppend( + Builder& builder, const std::shared_ptr& source, uint64_t row) { + if (source->IsNull(row)) { + builder.UnsafeAppendNull(); + return Status::OK(); + } + builder.UnsafeAppend(bit_util::GetBit(source->template GetValues(1), row)); + return Status::OK(); + } + + template ::BuilderType> + enable_if_t::value && !is_boolean_type::value, + Status> static BuilderAppend(Builder& builder, + const std::shared_ptr& source, + uint64_t row) { + if (source->IsNull(row)) { + builder.UnsafeAppendNull(); + return Status::OK(); + } + using CType = typename TypeTraits::CType; + builder.UnsafeAppend(source->template GetValues(1)[row]); + return Status::OK(); + } + + template ::BuilderType> + enable_if_base_binary static BuilderAppend( + Builder& builder, const std::shared_ptr& source, uint64_t row) { + if (source->IsNull(row)) { + return builder.AppendNull(); + } + using offset_type = typename Type::offset_type; + const uint8_t* data = source->buffers[2]->data(); + const offset_type* offsets = source->GetValues(1); + const offset_type offset0 = offsets[row]; + const offset_type offset1 = offsets[row + 1]; + return builder.Append(data + offset0, offset1 - offset0); + } + + template ::BuilderType> + arrow::Result> materializeColumn( + const std::shared_ptr& type, int i_col) { + ARROW_ASSIGN_OR_RAISE(auto builderPtr, arrow::MakeBuilder(type, pool)); + Builder& builder = *arrow::internal::checked_cast(builderPtr.get()); + ARROW_RETURN_NOT_OK(builder.Reserve(num_rows)); + + const auto& [table_index, column_index] = output_col_to_src[i_col]; + + for (const auto& unmaterialized_slice : slices) { + const auto& [batch, start, end] = unmaterialized_slice.components[table_index]; + if (batch) { + for (uint64_t rowNum = start; rowNum < end; ++rowNum) { + arrow::Status st = BuilderAppend( + builder, batch->column_data(column_index), rowNum); + ARROW_RETURN_NOT_OK(st); + } + } else { + for (uint64_t rowNum = start; rowNum < end; ++rowNum) { + ARROW_RETURN_NOT_OK(builder.AppendNull()); + } + } + } + std::shared_ptr result; + ARROW_RETURN_NOT_OK(builder.Finish(&result)); + return Result{std::move(result)}; + } +}; + +/// A builder class that can append blocks of data to a row. A "slice" +/// is built by horizontally concatenating record batches. 
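A sketch of the intended builder flow for the class declared next; the batch names and row ranges are illustrative, and both ranges must have equal length since together they form one composite row slice:

```cpp
#include "arrow/acero/unmaterialized_table.h"

void AppendCompositeRows(
    arrow::acero::UnmaterializedCompositeTable<2>* table,
    std::shared_ptr<arrow::RecordBatch> left,
    std::shared_ptr<arrow::RecordBatch> right) {
  arrow::acero::UnmaterializedSliceBuilder<2> builder(table);
  builder.AddEntry(std::move(left), /*start=*/0, /*end=*/4);    // rows [0, 4)
  builder.AddEntry(std::move(right), /*start=*/8, /*end=*/12);  // rows [8, 12)
  builder.Finalize();  // commits one 4-row slice spanning both batches
}
```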
+template +class UnmaterializedSliceBuilder { + public: + explicit UnmaterializedSliceBuilder( + UnmaterializedCompositeTable* table_) + : table(table_) {} + + void AddEntry(std::shared_ptr rb, uint64_t start, uint64_t end) { + if (rb) { + table->AddRecordBatchRef(rb); + } + if (slice.num_components) { + size_t last_index = slice.num_components - 1; + DCHECK_EQ(slice.components[last_index].end - slice.components[last_index].start, + end - start) + << "Slices should be the same length. "; + } + slice.components[slice.num_components++] = CompositeEntry{rb.get(), start, end}; + } + + void Finalize() { table->AddSlice(slice); } + int64_t Size() { return slice.Size(); } + + private: + using TUnmaterializedCompositeTable = + UnmaterializedCompositeTable; + using TUnmaterializedSlice = + typename TUnmaterializedCompositeTable::UnmaterializedSlice; + + TUnmaterializedCompositeTable* table; + TUnmaterializedSlice slice{}; +}; + +} // namespace arrow::acero diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/util.h new file mode 100644 index 0000000000000000000000000000000000000000..0eb9f4c87e1809fb261b131909cadb60b701e4b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/util.h @@ -0,0 +1,184 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/acero/options.h" +#include "arrow/acero/type_fwd.h" +#include "arrow/buffer.h" +#include "arrow/compute/expression.h" +#include "arrow/compute/util.h" +#include "arrow/memory_pool.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/cpu_info.h" +#include "arrow/util/logging.h" +#include "arrow/util/mutex.h" +#include "arrow/util/thread_pool.h" +#include "arrow/util/type_fwd.h" + +namespace arrow { + +namespace acero { + +ARROW_ACERO_EXPORT +Status ValidateExecNodeInputs(ExecPlan* plan, const std::vector& inputs, + int expected_num_inputs, const char* kind_name); + +ARROW_ACERO_EXPORT +Result> TableFromExecBatches( + const std::shared_ptr& schema, const std::vector& exec_batches); + +class ARROW_ACERO_EXPORT AtomicCounter { + public: + AtomicCounter() = default; + + int count() const { return count_.load(); } + + std::optional total() const { + int total = total_.load(); + if (total == -1) return {}; + return total; + } + + // return true if the counter is complete + bool Increment() { + DCHECK_NE(count_.load(), total_.load()); + int count = count_.fetch_add(1) + 1; + if (count != total_.load()) return false; + return DoneOnce(); + } + + // return true if the counter is complete + bool SetTotal(int total) { + total_.store(total); + if (count_.load() != total) return false; + return DoneOnce(); + } + + // return true if the counter has not already been completed + bool Cancel() { return DoneOnce(); } + + // return true if the counter has finished or been cancelled + bool Completed() { return complete_.load(); } + + private: + // ensure there is only one true return from Increment(), SetTotal(), or Cancel() + bool DoneOnce() { + bool expected = false; + return complete_.compare_exchange_strong(expected, true); + } + + std::atomic count_{0}, total_{-1}; + std::atomic complete_{false}; +}; + +class ARROW_ACERO_EXPORT ThreadIndexer { + public: + size_t operator()(); + + static size_t Capacity(); + + private: + static size_t Check(size_t thread_index); + + arrow::util::Mutex mutex_; + std::unordered_map id_to_index_; +}; + +/// \brief A consumer that collects results into an in-memory table +struct ARROW_ACERO_EXPORT TableSinkNodeConsumer : public SinkNodeConsumer { + public: + TableSinkNodeConsumer(std::shared_ptr
<Table>* out, MemoryPool* pool) + : out_(out), pool_(pool) {} + Status Init(const std::shared_ptr<Schema>& schema, + BackpressureControl* backpressure_control, ExecPlan* plan) override; + Status Consume(ExecBatch batch) override; + Future<> Finish() override; + + private: + std::shared_ptr<Table>
* out_; + MemoryPool* pool_; + std::shared_ptr schema_; + std::vector> batches_; + arrow::util::Mutex consume_mutex_; +}; + +class ARROW_ACERO_EXPORT NullSinkNodeConsumer : public SinkNodeConsumer { + public: + Status Init(const std::shared_ptr&, BackpressureControl*, + ExecPlan* plan) override { + return Status::OK(); + } + Status Consume(ExecBatch exec_batch) override { return Status::OK(); } + Future<> Finish() override { return Status::OK(); } + + public: + static std::shared_ptr Make() { + return std::make_shared(); + } +}; + +/// CRTP helper for tracing helper functions + +class ARROW_ACERO_EXPORT TracedNode { + public: + // All nodes should call TraceStartProducing or NoteStartProducing exactly once + // Most nodes will be fine with a call to NoteStartProducing since the StartProducing + // call is usually fairly cheap and simply schedules tasks to fetch the actual data. + + explicit TracedNode(ExecNode* node) : node_(node) {} + + // Create a span to record the StartProducing work + [[nodiscard]] ::arrow::internal::tracing::Scope TraceStartProducing( + std::string extra_details) const; + + // Record a call to StartProducing without creating with a span + void NoteStartProducing(std::string extra_details) const; + + // All nodes should call TraceInputReceived for each batch they receive. This call + // should track the time spent processing the batch. NoteInputReceived is available + // but usually won't be used unless a node is simply adding batches to a trivial queue. + + // Create a span to record the InputReceived work + [[nodiscard]] ::arrow::internal::tracing::Scope TraceInputReceived( + const ExecBatch& batch) const; + + // Record a call to InputReceived without creating with a span + void NoteInputReceived(const ExecBatch& batch) const; + + // Create a span to record any "finish" work. This should NOT be called as part of + // InputFinished and many nodes may not need to call this at all. This should be used + // when a node has some extra work that has to be done once it has received all of its + // data. For example, an aggregation node calculating aggregations. This will + // typically be called as a result of InputFinished OR InputReceived. + [[nodiscard]] ::arrow::internal::tracing::Scope TraceFinish() const; + + private: + ExecNode* node_; +}; + +} // namespace acero +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..02382232b69ddcba11cb89d808eef4a52ca17e3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/acero/visibility.h @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
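The AtomicCounter in util.h above guarantees that exactly one of Increment(), SetTotal(), or Cancel() returns true, however producer threads and the scheduler interleave. A small sketch of that protocol, with OnBatch/OnTotalKnown as hypothetical callbacks:

#include "arrow/acero/util.h"

// Counts batches arriving from any worker thread.
static arrow::acero::AtomicCounter batches_received;

void OnBatch() {
  if (batches_received.Increment()) {
    // Final expected batch: exactly one thread ever takes this branch.
  }
}

void OnTotalKnown(int total) {
  if (batches_received.SetTotal(total)) {
    // Every batch had already arrived before the total became known.
  }
}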
+ +// This API is EXPERIMENTAL. + +#pragma once + +#if defined(_WIN32) || defined(__CYGWIN__) +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable : 4251) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#ifdef ARROW_ACERO_STATIC +#define ARROW_ACERO_EXPORT +#elif defined(ARROW_ACERO_EXPORTING) +#define ARROW_ACERO_EXPORT __declspec(dllexport) +#else +#define ARROW_ACERO_EXPORT __declspec(dllimport) +#endif + +#define ARROW_ACERO_NO_EXPORT +#else // Not Windows +#ifndef ARROW_ACERO_EXPORT +#define ARROW_ACERO_EXPORT __attribute__((visibility("default"))) +#endif +#ifndef ARROW_ACERO_NO_EXPORT +#define ARROW_ACERO_NO_EXPORT __attribute__((visibility("hidden"))) +#endif +#endif // Not-Windows + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h new file mode 100644 index 0000000000000000000000000000000000000000..6abe866b5f6f65e3e1f1bb69728a0c53adf1943f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/abi.h @@ -0,0 +1,233 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +/// \file abi.h Arrow C Data Interface +/// +/// The Arrow C Data interface defines a very small, stable set +/// of C definitions which can be easily copied into any project's +/// source code and vendored to be used for columnar data interchange +/// in the Arrow format. For non-C/C++ languages and runtimes, +/// it should be almost as easy to translate the C definitions into +/// the corresponding C FFI declarations. +/// +/// Applications and libraries can therefore work with Arrow memory +/// without necessarily using the Arrow libraries or reinventing +/// the wheel. Developers can choose between tight integration +/// with the Arrow software project or minimal integration with +/// the Arrow format only. 
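A minimal consumer-side sketch of the ownership contract described above, in terms of the structs declared just below: the producer fills the struct, and the consumer must call the release callback exactly once (a NULL release marks an already-released struct).

#include "arrow/c/abi.h"

void ConsumeSchema(struct ArrowSchema* schema) {
  // ... inspect schema->format, schema->name, schema->children, ...
  if (schema->release != nullptr) {
    schema->release(schema);  // hands all resources back to the producer
  }
}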
+ +#pragma once + +#include + +// Spec and documentation: https://arrow.apache.org/docs/format/CDataInterface.html + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef ARROW_C_DATA_INTERFACE +#define ARROW_C_DATA_INTERFACE + +#define ARROW_FLAG_DICTIONARY_ORDERED 1 +#define ARROW_FLAG_NULLABLE 2 +#define ARROW_FLAG_MAP_KEYS_SORTED 4 + +struct ArrowSchema { + // Array type description + const char* format; + const char* name; + const char* metadata; + int64_t flags; + int64_t n_children; + struct ArrowSchema** children; + struct ArrowSchema* dictionary; + + // Release callback + void (*release)(struct ArrowSchema*); + // Opaque producer-specific data + void* private_data; +}; + +struct ArrowArray { + // Array data description + int64_t length; + int64_t null_count; + int64_t offset; + int64_t n_buffers; + int64_t n_children; + const void** buffers; + struct ArrowArray** children; + struct ArrowArray* dictionary; + + // Release callback + void (*release)(struct ArrowArray*); + // Opaque producer-specific data + void* private_data; +}; + +#endif // ARROW_C_DATA_INTERFACE + +#ifndef ARROW_C_DEVICE_DATA_INTERFACE +#define ARROW_C_DEVICE_DATA_INTERFACE + +// Spec and Documentation: https://arrow.apache.org/docs/format/CDeviceDataInterface.html + +// DeviceType for the allocated memory +typedef int32_t ArrowDeviceType; + +// CPU device, same as using ArrowArray directly +#define ARROW_DEVICE_CPU 1 +// CUDA GPU Device +#define ARROW_DEVICE_CUDA 2 +// Pinned CUDA CPU memory by cudaMallocHost +#define ARROW_DEVICE_CUDA_HOST 3 +// OpenCL Device +#define ARROW_DEVICE_OPENCL 4 +// Vulkan buffer for next-gen graphics +#define ARROW_DEVICE_VULKAN 7 +// Metal for Apple GPU +#define ARROW_DEVICE_METAL 8 +// Verilog simulator buffer +#define ARROW_DEVICE_VPI 9 +// ROCm GPUs for AMD GPUs +#define ARROW_DEVICE_ROCM 10 +// Pinned ROCm CPU memory allocated by hipMallocHost +#define ARROW_DEVICE_ROCM_HOST 11 +// Reserved for extension +#define ARROW_DEVICE_EXT_DEV 12 +// CUDA managed/unified memory allocated by cudaMallocManaged +#define ARROW_DEVICE_CUDA_MANAGED 13 +// unified shared memory allocated on a oneAPI non-partitioned device. +#define ARROW_DEVICE_ONEAPI 14 +// GPU support for next-gen WebGPU standard +#define ARROW_DEVICE_WEBGPU 15 +// Qualcomm Hexagon DSP +#define ARROW_DEVICE_HEXAGON 16 + +struct ArrowDeviceArray { + // the Allocated Array + // + // the buffers in the array (along with the buffers of any + // children) are what is allocated on the device. + struct ArrowArray array; + // The device id to identify a specific device + int64_t device_id; + // The type of device which can access this memory. + ArrowDeviceType device_type; + // An event-like object to synchronize on if needed. + void* sync_event; + // Reserved bytes for future expansion. + int64_t reserved[3]; +}; + +#endif // ARROW_C_DEVICE_DATA_INTERFACE + +#ifndef ARROW_C_STREAM_INTERFACE +#define ARROW_C_STREAM_INTERFACE + +struct ArrowArrayStream { + // Callback to get the stream type + // (will be the same for all arrays in the stream). + // + // Return value: 0 if successful, an `errno`-compatible error code otherwise. + // + // If successful, the ArrowSchema must be released independently from the stream. + int (*get_schema)(struct ArrowArrayStream*, struct ArrowSchema* out); + + // Callback to get the next array + // (if no error and the array is released, the stream has ended) + // + // Return value: 0 if successful, an `errno`-compatible error code otherwise. 
+ // + // If successful, the ArrowArray must be released independently from the stream. + int (*get_next)(struct ArrowArrayStream*, struct ArrowArray* out); + + // Callback to get optional detailed error information. + // This must only be called if the last stream operation failed + // with a non-0 return code. + // + // Return value: pointer to a null-terminated character array describing + // the last error, or NULL if no description is available. + // + // The returned pointer is only valid until the next operation on this stream + // (including release). + const char* (*get_last_error)(struct ArrowArrayStream*); + + // Release callback: release the stream's own resources. + // Note that arrays returned by `get_next` must be individually released. + void (*release)(struct ArrowArrayStream*); + + // Opaque producer-specific data + void* private_data; +}; + +#endif // ARROW_C_STREAM_INTERFACE + +#ifndef ARROW_C_DEVICE_STREAM_INTERFACE +#define ARROW_C_DEVICE_STREAM_INTERFACE + +// Equivalent to ArrowArrayStream, but for ArrowDeviceArrays. +// +// This stream is intended to provide a stream of data on a single +// device, if a producer wants data to be produced on multiple devices +// then multiple streams should be provided. One per device. +struct ArrowDeviceArrayStream { + // The device that this stream produces data on. + ArrowDeviceType device_type; + + // Callback to get the stream schema + // (will be the same for all arrays in the stream). + // + // Return value 0 if successful, an `errno`-compatible error code otherwise. + // + // If successful, the ArrowSchema must be released independently from the stream. + // The schema should be accessible via CPU memory. + int (*get_schema)(struct ArrowDeviceArrayStream* self, struct ArrowSchema* out); + + // Callback to get the next array + // (if no error and the array is released, the stream has ended) + // + // Return value: 0 if successful, an `errno`-compatible error code otherwise. + // + // If successful, the ArrowDeviceArray must be released independently from the stream. + int (*get_next)(struct ArrowDeviceArrayStream* self, struct ArrowDeviceArray* out); + + // Callback to get optional detailed error information. + // This must only be called if the last stream operation failed + // with a non-0 return code. + // + // Return value: pointer to a null-terminated character array describing + // the last error, or NULL if no description is available. + // + // The returned pointer is only valid until the next operation on this stream + // (including release). + const char* (*get_last_error)(struct ArrowDeviceArrayStream* self); + + // Release callback: release the stream's own resources. + // Note that arrays returned by `get_next` must be individually released. + void (*release)(struct ArrowDeviceArrayStream* self); + + // Opaque producer-specific data + void* private_data; +}; + +#endif // ARROW_C_DEVICE_STREAM_INTERFACE + +#ifdef __cplusplus +} +#endif diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/bridge.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/bridge.h new file mode 100644 index 0000000000000000000000000000000000000000..74a302be4c27d491e300d206427ec0a6ff235cdb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/bridge.h @@ -0,0 +1,348 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/c/abi.h" +#include "arrow/device.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \defgroup c-data-interface Functions for working with the C data interface. +/// +/// @{ + +/// \brief Export C++ DataType using the C data interface format. +/// +/// The root type is considered to have empty name and metadata. +/// If you want the root type to have a name and/or metadata, pass +/// a Field instead. +/// +/// \param[in] type DataType object to export +/// \param[out] out C struct where to export the datatype +ARROW_EXPORT +Status ExportType(const DataType& type, struct ArrowSchema* out); + +/// \brief Export C++ Field using the C data interface format. +/// +/// \param[in] field Field object to export +/// \param[out] out C struct where to export the field +ARROW_EXPORT +Status ExportField(const Field& field, struct ArrowSchema* out); + +/// \brief Export C++ Schema using the C data interface format. +/// +/// \param[in] schema Schema object to export +/// \param[out] out C struct where to export the field +ARROW_EXPORT +Status ExportSchema(const Schema& schema, struct ArrowSchema* out); + +/// \brief Export C++ Array using the C data interface format. +/// +/// The resulting ArrowArray struct keeps the array data and buffers alive +/// until its release callback is called by the consumer. +/// +/// \param[in] array Array object to export +/// \param[out] out C struct where to export the array +/// \param[out] out_schema optional C struct where to export the array type +ARROW_EXPORT +Status ExportArray(const Array& array, struct ArrowArray* out, + struct ArrowSchema* out_schema = NULLPTR); + +/// \brief Export C++ RecordBatch using the C data interface format. +/// +/// The record batch is exported as if it were a struct array. +/// The resulting ArrowArray struct keeps the record batch data and buffers alive +/// until its release callback is called by the consumer. +/// +/// \param[in] batch Record batch to export +/// \param[out] out C struct where to export the record batch +/// \param[out] out_schema optional C struct where to export the record batch schema +ARROW_EXPORT +Status ExportRecordBatch(const RecordBatch& batch, struct ArrowArray* out, + struct ArrowSchema* out_schema = NULLPTR); + +/// \brief Import C++ DataType from the C data interface. +/// +/// The given ArrowSchema struct is released (as per the C data interface +/// specification), even if this function fails. +/// +/// \param[in,out] schema C data interface struct representing the data type +/// \return Imported type object +ARROW_EXPORT +Result> ImportType(struct ArrowSchema* schema); + +/// \brief Import C++ Field from the C data interface. 
+/// +/// The given ArrowSchema struct is released (as per the C data interface +/// specification), even if this function fails. +/// +/// \param[in,out] schema C data interface struct representing the field +/// \return Imported field object +ARROW_EXPORT +Result> ImportField(struct ArrowSchema* schema); + +/// \brief Import C++ Schema from the C data interface. +/// +/// The given ArrowSchema struct is released (as per the C data interface +/// specification), even if this function fails. +/// +/// \param[in,out] schema C data interface struct representing the field +/// \return Imported field object +ARROW_EXPORT +Result> ImportSchema(struct ArrowSchema* schema); + +/// \brief Import C++ array from the C data interface. +/// +/// The ArrowArray struct has its contents moved (as per the C data interface +/// specification) to a private object held alive by the resulting array. +/// +/// \param[in,out] array C data interface struct holding the array data +/// \param[in] type type of the imported array +/// \return Imported array object +ARROW_EXPORT +Result> ImportArray(struct ArrowArray* array, + std::shared_ptr type); + +/// \brief Import C++ array and its type from the C data interface. +/// +/// The ArrowArray struct has its contents moved (as per the C data interface +/// specification) to a private object held alive by the resulting array. +/// The ArrowSchema struct is released, even if this function fails. +/// +/// \param[in,out] array C data interface struct holding the array data +/// \param[in,out] type C data interface struct holding the array type +/// \return Imported array object +ARROW_EXPORT +Result> ImportArray(struct ArrowArray* array, + struct ArrowSchema* type); + +/// \brief Import C++ record batch from the C data interface. +/// +/// The ArrowArray struct has its contents moved (as per the C data interface +/// specification) to a private object held alive by the resulting record batch. +/// +/// \param[in,out] array C data interface struct holding the record batch data +/// \param[in] schema schema of the imported record batch +/// \return Imported record batch object +ARROW_EXPORT +Result> ImportRecordBatch(struct ArrowArray* array, + std::shared_ptr schema); + +/// \brief Import C++ record batch and its schema from the C data interface. +/// +/// The type represented by the ArrowSchema struct must be a struct type array. +/// The ArrowArray struct has its contents moved (as per the C data interface +/// specification) to a private object held alive by the resulting record batch. +/// The ArrowSchema struct is released, even if this function fails. +/// +/// \param[in,out] array C data interface struct holding the record batch data +/// \param[in,out] schema C data interface struct holding the record batch schema +/// \return Imported record batch object +ARROW_EXPORT +Result> ImportRecordBatch(struct ArrowArray* array, + struct ArrowSchema* schema); + +/// @} + +/// \defgroup c-data-device-interface Functions for working with the C data device +/// interface. +/// +/// @{ + +/// \brief EXPERIMENTAL: Export C++ Array as an ArrowDeviceArray. +/// +/// The resulting ArrowDeviceArray struct keeps the array data and buffers alive +/// until its release callback is called by the consumer. All buffers in +/// the provided array MUST have the same device_type, otherwise an error +/// will be returned. +/// +/// If sync is non-null, get_event will be called on it in order to +/// potentially provide an event for consumers to synchronize on. 
+/// +/// \param[in] array Array object to export +/// \param[in] sync shared_ptr to object derived from Device::SyncEvent or null +/// \param[out] out C struct to export the array to +/// \param[out] out_schema optional C struct to export the array type to +ARROW_EXPORT +Status ExportDeviceArray(const Array& array, std::shared_ptr sync, + struct ArrowDeviceArray* out, + struct ArrowSchema* out_schema = NULLPTR); + +/// \brief EXPERIMENTAL: Export C++ RecordBatch as an ArrowDeviceArray. +/// +/// The record batch is exported as if it were a struct array. +/// The resulting ArrowDeviceArray struct keeps the record batch data and buffers alive +/// until its release callback is called by the consumer. +/// +/// All buffers of all columns in the record batch must have the same device_type +/// otherwise an error will be returned. If columns are on different devices, +/// they should be exported using different ArrowDeviceArray instances. +/// +/// If sync is non-null, get_event will be called on it in order to +/// potentially provide an event for consumers to synchronize on. +/// +/// \param[in] batch Record batch to export +/// \param[in] sync shared_ptr to object derived from Device::SyncEvent or null +/// \param[out] out C struct where to export the record batch +/// \param[out] out_schema optional C struct where to export the record batch schema +ARROW_EXPORT +Status ExportDeviceRecordBatch(const RecordBatch& batch, + std::shared_ptr sync, + struct ArrowDeviceArray* out, + struct ArrowSchema* out_schema = NULLPTR); + +using DeviceMemoryMapper = + std::function>(ArrowDeviceType, int64_t)>; + +ARROW_EXPORT +Result> DefaultDeviceMemoryMapper( + ArrowDeviceType device_type, int64_t device_id); + +/// \brief EXPERIMENTAL: Import C++ device array from the C data interface. +/// +/// The ArrowArray struct has its contents moved (as per the C data interface +/// specification) to a private object held alive by the resulting array. The +/// buffers of the Array are located on the device indicated by the device_type. +/// +/// \param[in,out] array C data interface struct holding the array data +/// \param[in] type type of the imported array +/// \param[in] mapper A function to map device + id to memory manager. If not +/// specified, defaults to map "cpu" to the built-in default memory manager. +/// \return Imported array object +ARROW_EXPORT +Result> ImportDeviceArray( + struct ArrowDeviceArray* array, std::shared_ptr type, + const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper); + +/// \brief EXPERIMENTAL: Import C++ device array and its type from the C data interface. +/// +/// The ArrowArray struct has its contents moved (as per the C data interface +/// specification) to a private object held alive by the resulting array. +/// The ArrowSchema struct is released, even if this function fails. The +/// buffers of the Array are located on the device indicated by the device_type. +/// +/// \param[in,out] array C data interface struct holding the array data +/// \param[in,out] type C data interface struct holding the array type +/// \param[in] mapper A function to map device + id to memory manager. If not +/// specified, defaults to map "cpu" to the built-in default memory manager. +/// \return Imported array object +ARROW_EXPORT +Result> ImportDeviceArray( + struct ArrowDeviceArray* array, struct ArrowSchema* type, + const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper); + +/// \brief EXPERIMENTAL: Import C++ record batch with buffers on a device from the C data +/// interface. 
+/// +/// The ArrowArray struct has its contents moved (as per the C data interface +/// specification) to a private object held alive by the resulting record batch. +/// The buffers of all columns of the record batch are located on the device +/// indicated by the device type. +/// +/// \param[in,out] array C data interface struct holding the record batch data +/// \param[in] schema schema of the imported record batch +/// \param[in] mapper A function to map device + id to memory manager. If not +/// specified, defaults to map "cpu" to the built-in default memory manager. +/// \return Imported record batch object +ARROW_EXPORT +Result> ImportDeviceRecordBatch( + struct ArrowDeviceArray* array, std::shared_ptr schema, + const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper); + +/// \brief EXPERIMENTAL: Import C++ record batch with buffers on a device and its schema +/// from the C data interface. +/// +/// The type represented by the ArrowSchema struct must be a struct type array. +/// The ArrowArray struct has its contents moved (as per the C data interface +/// specification) to a private object held alive by the resulting record batch. +/// The ArrowSchema struct is released, even if this function fails. The buffers +/// of all columns of the record batch are located on the device indicated by the +/// device type. +/// +/// \param[in,out] array C data interface struct holding the record batch data +/// \param[in,out] schema C data interface struct holding the record batch schema +/// \param[in] mapper A function to map device + id to memory manager. If not +/// specified, defaults to map "cpu" to the built-in default memory manager. +/// \return Imported record batch object +ARROW_EXPORT +Result> ImportDeviceRecordBatch( + struct ArrowDeviceArray* array, struct ArrowSchema* schema, + const DeviceMemoryMapper& mapper = DefaultDeviceMemoryMapper); + +/// @} + +/// \defgroup c-stream-interface Functions for working with the C data interface. +/// +/// @{ + +/// \brief Export C++ RecordBatchReader using the C stream interface. +/// +/// The resulting ArrowArrayStream struct keeps the record batch reader alive +/// until its release callback is called by the consumer. +/// +/// \param[in] reader RecordBatchReader object to export +/// \param[out] out C struct where to export the stream +ARROW_EXPORT +Status ExportRecordBatchReader(std::shared_ptr reader, + struct ArrowArrayStream* out); + +/// \brief Export C++ ChunkedArray using the C data interface format. +/// +/// The resulting ArrowArrayStream struct keeps the chunked array data and buffers alive +/// until its release callback is called by the consumer. +/// +/// \param[in] chunked_array ChunkedArray object to export +/// \param[out] out C struct where to export the stream +ARROW_EXPORT +Status ExportChunkedArray(std::shared_ptr chunked_array, + struct ArrowArrayStream* out); + +/// \brief Import C++ RecordBatchReader from the C stream interface. +/// +/// The ArrowArrayStream struct has its contents moved to a private object +/// held alive by the resulting record batch reader. +/// +/// \param[in,out] stream C stream interface struct +/// \return Imported RecordBatchReader object +ARROW_EXPORT +Result> ImportRecordBatchReader( + struct ArrowArrayStream* stream); + +/// \brief Import C++ ChunkedArray from the C stream interface +/// +/// The ArrowArrayStream struct has its contents moved to a private object, +/// is consumed in its entirity, and released before returning all chunks +/// as a ChunkedArray. 
+/// +/// \param[in,out] stream C stream interface struct +/// \return Imported ChunkedArray object +ARROW_EXPORT +Result> ImportChunkedArray(struct ArrowArrayStream* stream); + +/// @} + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h new file mode 100644 index 0000000000000000000000000000000000000000..d11ccfc1fd72253600501d7de3a150944608ca06 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack.h @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/array/array_base.h" +#include "arrow/c/dlpack_abi.h" + +namespace arrow::dlpack { + +/// \brief Export Arrow array as DLPack tensor. +/// +/// DLMangedTensor is produced as defined by the DLPack protocol, +/// see https://dmlc.github.io/dlpack/latest/. +/// +/// Data types for which the protocol is supported are +/// integer and floating-point data types. +/// +/// DLPack protocol only supports arrays with one contiguous +/// memory region which means Arrow Arrays with validity buffers +/// are not supported. +/// +/// \param[in] arr Arrow array +/// \return DLManagedTensor struct +ARROW_EXPORT +Result ExportArray(const std::shared_ptr& arr); + +/// \brief Get DLDevice with enumerator specifying the +/// type of the device data is stored on and index of the +/// device which is 0 by default for CPU. +/// +/// \param[in] arr Arrow array +/// \return DLDevice struct +ARROW_EXPORT +Result ExportDevice(const std::shared_ptr& arr); + +} // namespace arrow::dlpack diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h new file mode 100644 index 0000000000000000000000000000000000000000..4af557a7ed5d7cf3ace070c32888971d65b797a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/dlpack_abi.h @@ -0,0 +1,321 @@ +// Taken from: +// https://github.com/dmlc/dlpack/blob/ca4d00ad3e2e0f410eeab3264d21b8a39397f362/include/dlpack/dlpack.h +/*! + * Copyright (c) 2017 by Contributors + * \file dlpack.h + * \brief The common header of DLPack. + */ +#ifndef DLPACK_DLPACK_H_ +#define DLPACK_DLPACK_H_ + +/** + * \brief Compatibility with C++ + */ +#ifdef __cplusplus +#define DLPACK_EXTERN_C extern "C" +#else +#define DLPACK_EXTERN_C +#endif + +/*! \brief The current major version of dlpack */ +#define DLPACK_MAJOR_VERSION 1 + +/*! \brief The current minor version of dlpack */ +#define DLPACK_MINOR_VERSION 0 + +/*! 
\brief DLPACK_DLL prefix for windows */ +#ifdef _WIN32 +#ifdef DLPACK_EXPORTS +#define DLPACK_DLL __declspec(dllexport) +#else +#define DLPACK_DLL __declspec(dllimport) +#endif +#else +#define DLPACK_DLL +#endif + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * \brief The DLPack version. + * + * A change in major version indicates that we have changed the + * data layout of the ABI - DLManagedTensorVersioned. + * + * A change in minor version indicates that we have added new + * code, such as a new device type, but the ABI is kept the same. + * + * If an obtained DLPack tensor has a major version that disagrees + * with the version number specified in this header file + * (i.e. major != DLPACK_MAJOR_VERSION), the consumer must call the deleter + * (and it is safe to do so). It is not safe to access any other fields + * as the memory layout will have changed. + * + * In the case of a minor version mismatch, the tensor can be safely used as + * long as the consumer knows how to interpret all fields. Minor version + * updates indicate the addition of enumeration values. + */ +typedef struct { + /*! \brief DLPack major version. */ + uint32_t major; + /*! \brief DLPack minor version. */ + uint32_t minor; +} DLPackVersion; + +/*! + * \brief The device type in DLDevice. + */ +#ifdef __cplusplus +typedef enum : int32_t { +#else +typedef enum { +#endif + /*! \brief CPU device */ + kDLCPU = 1, + /*! \brief CUDA GPU device */ + kDLCUDA = 2, + /*! + * \brief Pinned CUDA CPU memory by cudaMallocHost + */ + kDLCUDAHost = 3, + /*! \brief OpenCL devices. */ + kDLOpenCL = 4, + /*! \brief Vulkan buffer for next generation graphics. */ + kDLVulkan = 7, + /*! \brief Metal for Apple GPU. */ + kDLMetal = 8, + /*! \brief Verilog simulator buffer */ + kDLVPI = 9, + /*! \brief ROCm GPUs for AMD GPUs */ + kDLROCM = 10, + /*! + * \brief Pinned ROCm CPU memory allocated by hipMallocHost + */ + kDLROCMHost = 11, + /*! + * \brief Reserved extension device type, + * used for quickly test extension device + * The semantics can differ depending on the implementation. + */ + kDLExtDev = 12, + /*! + * \brief CUDA managed/unified memory allocated by cudaMallocManaged + */ + kDLCUDAManaged = 13, + /*! + * \brief Unified shared memory allocated on a oneAPI non-partititioned + * device. Call to oneAPI runtime is required to determine the device + * type, the USM allocation type and the sycl context it is bound to. + * + */ + kDLOneAPI = 14, + /*! \brief GPU support for next generation WebGPU standard. */ + kDLWebGPU = 15, + /*! \brief Qualcomm Hexagon DSP */ + kDLHexagon = 16, +} DLDeviceType; + +/*! + * \brief A Device for Tensor and operator. + */ +typedef struct { + /*! \brief The device type used in the device. */ + DLDeviceType device_type; + /*! + * \brief The device index. + * For vanilla CPU memory, pinned memory, or managed memory, this is set to 0. + */ + int32_t device_id; +} DLDevice; + +/*! + * \brief The type code options DLDataType. + */ +typedef enum { + /*! \brief signed integer */ + kDLInt = 0U, + /*! \brief unsigned integer */ + kDLUInt = 1U, + /*! \brief IEEE floating point */ + kDLFloat = 2U, + /*! + * \brief Opaque handle type, reserved for testing purposes. + * Frameworks need to agree on the handle data type for the exchange to be well-defined. + */ + kDLOpaqueHandle = 3U, + /*! \brief bfloat16 */ + kDLBfloat = 4U, + /*! + * \brief complex number + * (C/C++/Python layout: compact struct per complex number) + */ + kDLComplex = 5U, + /*! 
\brief boolean */ + kDLBool = 6U, +} DLDataTypeCode; + +/*! + * \brief The data type the tensor can hold. The data type is assumed to follow the + * native endian-ness. An explicit error message should be raised when attempting to + * export an array with non-native endianness + * + * Examples + * - float: type_code = 2, bits = 32, lanes = 1 + * - float4(vectorized 4 float): type_code = 2, bits = 32, lanes = 4 + * - int8: type_code = 0, bits = 8, lanes = 1 + * - std::complex: type_code = 5, bits = 64, lanes = 1 + * - bool: type_code = 6, bits = 8, lanes = 1 (as per common array library convention, + * the underlying storage size of bool is 8 bits) + */ +typedef struct { + /*! + * \brief Type code of base types. + * We keep it uint8_t instead of DLDataTypeCode for minimal memory + * footprint, but the value should be one of DLDataTypeCode enum values. + * */ + uint8_t code; + /*! + * \brief Number of bits, common choices are 8, 16, 32. + */ + uint8_t bits; + /*! \brief Number of lanes in the type, used for vector types. */ + uint16_t lanes; +} DLDataType; + +/*! + * \brief Plain C Tensor object, does not manage memory. + */ +typedef struct { + /*! + * \brief The data pointer points to the allocated data. This will be CUDA + * device pointer or cl_mem handle in OpenCL. It may be opaque on some device + * types. This pointer is always aligned to 256 bytes as in CUDA. The + * `byte_offset` field should be used to point to the beginning of the data. + * + * Note that as of Nov 2021, multiply libraries (CuPy, PyTorch, TensorFlow, + * TVM, perhaps others) do not adhere to this 256 byte aligment requirement + * on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed + * (after which this note will be updated); at the moment it is recommended + * to not rely on the data pointer being correctly aligned. + * + * For given DLTensor, the size of memory required to store the contents of + * data is calculated as follows: + * + * \code{.c} + * static inline size_t GetDataSize(const DLTensor* t) { + * size_t size = 1; + * for (tvm_index_t i = 0; i < t->ndim; ++i) { + * size *= t->shape[i]; + * } + * size *= (t->dtype.bits * t->dtype.lanes + 7) / 8; + * return size; + * } + * \endcode + */ + void* data; + /*! \brief The device of the tensor */ + DLDevice device; + /*! \brief Number of dimensions */ + int32_t ndim; + /*! \brief The data type of the pointer*/ + DLDataType dtype; + /*! \brief The shape of the tensor */ + int64_t* shape; + /*! + * \brief strides of the tensor (in number of elements, not bytes) + * can be NULL, indicating tensor is compact and row-majored. + */ + int64_t* strides; + /*! \brief The offset in bytes to the beginning pointer to data */ + uint64_t byte_offset; +} DLTensor; + +/*! + * \brief C Tensor object, manage memory of DLTensor. This data structure is + * intended to facilitate the borrowing of DLTensor by another framework. It is + * not meant to transfer the tensor. When the borrowing framework doesn't need + * the tensor, it should call the deleter to notify the host that the resource + * is no longer needed. + * + * \note This data structure is used as Legacy DLManagedTensor + * in DLPack exchange and is deprecated after DLPack v0.8 + * Use DLManagedTensorVersioned instead. + * This data structure may get renamed or deleted in future versions. + * + * \sa DLManagedTensorVersioned + */ +typedef struct DLManagedTensor { + /*! \brief DLTensor which is being memory managed */ + DLTensor dl_tensor; + /*! 
\brief the context of the original host framework of DLManagedTensor in + * which DLManagedTensor is used in the framework. It can also be NULL. + */ + void* manager_ctx; + /*! + * \brief Destructor - this should be called + * to destruct the manager_ctx which backs the DLManagedTensor. It can be + * NULL if there is no way for the caller to provide a reasonable destructor. + * The destructors deletes the argument self as well. + */ + void (*deleter)(struct DLManagedTensor* self); +} DLManagedTensor; + +// bit masks used in in the DLManagedTensorVersioned + +/*! \brief bit mask to indicate that the tensor is read only. */ +#define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL) + +/*! + * \brief A versioned and managed C Tensor object, manage memory of DLTensor. + * + * This data structure is intended to facilitate the borrowing of DLTensor by + * another framework. It is not meant to transfer the tensor. When the borrowing + * framework doesn't need the tensor, it should call the deleter to notify the + * host that the resource is no longer needed. + * + * \note This is the current standard DLPack exchange data structure. + */ +struct DLManagedTensorVersioned { + /*! + * \brief The API and ABI version of the current managed Tensor + */ + DLPackVersion version; + /*! + * \brief the context of the original host framework. + * + * Stores DLManagedTensorVersioned is used in the + * framework. It can also be NULL. + */ + void* manager_ctx; + /*! + * \brief Destructor. + * + * This should be called to destruct manager_ctx which holds the + * DLManagedTensorVersioned. It can be NULL if there is no way for the caller to provide + * a reasonable destructor. The destructors deletes the argument self as well. + */ + void (*deleter)(struct DLManagedTensorVersioned* self); + /*! + * \brief Additional bitmask flags information about the tensor. + * + * By default the flags should be set to 0. + * + * \note Future ABI changes should keep everything until this field + * stable, to ensure that deleter can be correctly called. + * + * \sa DLPACK_FLAG_BITMASK_READ_ONLY + */ + uint64_t flags; + /*! \brief DLTensor which is being memory managed */ + DLTensor dl_tensor; +}; + +#ifdef __cplusplus +} // DLPACK_EXTERN_C +#endif +#endif // DLPACK_DLPACK_H_ diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/helpers.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..a24f272feac81a4607a75eb580974335b802271e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/c/helpers.h @@ -0,0 +1,129 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
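A short sketch of the DLPack export path declared in arrow/c/dlpack.h above, assuming ExportArray yields a Result<DLManagedTensor*>; per the notes above it only succeeds for primitive arrays without a validity bitmap.

#include <memory>
#include "arrow/api.h"
#include "arrow/c/dlpack.h"

arrow::Status ShareViaDlpack(const std::shared_ptr<arrow::Array>& arr) {
  ARROW_ASSIGN_OR_RAISE(DLManagedTensor* tensor, arrow::dlpack::ExportArray(arr));
  // ... hand `tensor` across the FFI boundary; when the borrower is done:
  if (tensor->deleter != nullptr) {
    tensor->deleter(tensor);  // frees manager_ctx and the tensor itself
  }
  return arrow::Status::OK();
}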
+ +#pragma once + +#include +#include +#include + +#include "arrow/c/abi.h" + +#define ARROW_C_ASSERT(condition, msg) \ + do { \ + if (!(condition)) { \ + fprintf(stderr, "%s:%d:: %s", __FILE__, __LINE__, (msg)); \ + abort(); \ + } \ + } while (0) + +#ifdef __cplusplus +extern "C" { +#endif + +/// Query whether the C schema is released +inline int ArrowSchemaIsReleased(const struct ArrowSchema* schema) { + return schema->release == NULL; +} + +/// Mark the C schema released (for use in release callbacks) +inline void ArrowSchemaMarkReleased(struct ArrowSchema* schema) { + schema->release = NULL; +} + +/// Move the C schema from `src` to `dest` +/// +/// Note `dest` must *not* point to a valid schema already, otherwise there +/// will be a memory leak. +inline void ArrowSchemaMove(struct ArrowSchema* src, struct ArrowSchema* dest) { + assert(dest != src); + assert(!ArrowSchemaIsReleased(src)); + memcpy(dest, src, sizeof(struct ArrowSchema)); + ArrowSchemaMarkReleased(src); +} + +/// Release the C schema, if necessary, by calling its release callback +inline void ArrowSchemaRelease(struct ArrowSchema* schema) { + if (!ArrowSchemaIsReleased(schema)) { + schema->release(schema); + ARROW_C_ASSERT(ArrowSchemaIsReleased(schema), + "ArrowSchemaRelease did not cleanup release callback"); + } +} + +/// Query whether the C array is released +inline int ArrowArrayIsReleased(const struct ArrowArray* array) { + return array->release == NULL; +} + +/// Mark the C array released (for use in release callbacks) +inline void ArrowArrayMarkReleased(struct ArrowArray* array) { array->release = NULL; } + +/// Move the C array from `src` to `dest` +/// +/// Note `dest` must *not* point to a valid array already, otherwise there +/// will be a memory leak. +inline void ArrowArrayMove(struct ArrowArray* src, struct ArrowArray* dest) { + assert(dest != src); + assert(!ArrowArrayIsReleased(src)); + memcpy(dest, src, sizeof(struct ArrowArray)); + ArrowArrayMarkReleased(src); +} + +/// Release the C array, if necessary, by calling its release callback +inline void ArrowArrayRelease(struct ArrowArray* array) { + if (!ArrowArrayIsReleased(array)) { + array->release(array); + ARROW_C_ASSERT(ArrowArrayIsReleased(array), + "ArrowArrayRelease did not cleanup release callback"); + } +} + +/// Query whether the C array stream is released +inline int ArrowArrayStreamIsReleased(const struct ArrowArrayStream* stream) { + return stream->release == NULL; +} + +/// Mark the C array stream released (for use in release callbacks) +inline void ArrowArrayStreamMarkReleased(struct ArrowArrayStream* stream) { + stream->release = NULL; +} + +/// Move the C array stream from `src` to `dest` +/// +/// Note `dest` must *not* point to a valid stream already, otherwise there +/// will be a memory leak. 
+inline void ArrowArrayStreamMove(struct ArrowArrayStream* src, + struct ArrowArrayStream* dest) { + assert(dest != src); + assert(!ArrowArrayStreamIsReleased(src)); + memcpy(dest, src, sizeof(struct ArrowArrayStream)); + ArrowArrayStreamMarkReleased(src); +} + +/// Release the C array stream, if necessary, by calling its release callback +inline void ArrowArrayStreamRelease(struct ArrowArrayStream* stream) { + if (!ArrowArrayStreamIsReleased(stream)) { + stream->release(stream); + ARROW_C_ASSERT(ArrowArrayStreamIsReleased(stream), + "ArrowArrayStreamRelease did not cleanup release callback"); + } +} + +#ifdef __cplusplus +} +#endif diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h new file mode 100644 index 0000000000000000000000000000000000000000..4af1835cd709d43e0abe3b39b46531cae9a047fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h @@ -0,0 +1,22 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/csv/options.h" +#include "arrow/csv/reader.h" +#include "arrow/csv/writer.h" diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h new file mode 100644 index 0000000000000000000000000000000000000000..662b16ec40a9485547ce01b32ea0325a23122711 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/chunker.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
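A round-trip sketch tying the bridge.h Export/Import pair to the inline helpers above. Because ImportArray moves the structs' contents (and releases the schema even on failure), both C structs end up marked released and need no further cleanup.

#include <cassert>
#include <memory>
#include "arrow/api.h"
#include "arrow/c/bridge.h"
#include "arrow/c/helpers.h"

arrow::Status RoundTrip(const std::shared_ptr<arrow::Array>& arr) {
  struct ArrowArray c_array;
  struct ArrowSchema c_schema;
  ARROW_RETURN_NOT_OK(arrow::ExportArray(*arr, &c_array, &c_schema));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Array> copy,
                        arrow::ImportArray(&c_array, &c_schema));
  assert(ArrowArrayIsReleased(&c_array));    // contents were moved on import
  assert(ArrowSchemaIsReleased(&c_schema));  // released even if import fails
  return copy->Equals(*arr) ? arrow::Status::OK()
                            : arrow::Status::Invalid("round trip changed data");
}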
+ +#pragma once + +#include +#include + +#include "arrow/csv/options.h" +#include "arrow/status.h" +#include "arrow/util/delimiting.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace csv { + +ARROW_EXPORT +std::unique_ptr MakeChunker(const ParseOptions& options); + +} // namespace csv +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..07279db313e92d2daeb93be12d0ab307d0c25201 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace csv { + +class BlockParser; +struct ConvertOptions; + +class ARROW_EXPORT ColumnBuilder { + public: + virtual ~ColumnBuilder() = default; + + /// Spawn a task that will try to convert and append the given CSV block. + /// All calls to Append() should happen on the same thread, otherwise + /// call Insert() instead. + virtual void Append(const std::shared_ptr& parser) = 0; + + /// Spawn a task that will try to convert and insert the given CSV block + virtual void Insert(int64_t block_index, + const std::shared_ptr& parser) = 0; + + /// Return the final chunked array. The TaskGroup _must_ have finished! + virtual Result> Finish() = 0; + + std::shared_ptr task_group() { return task_group_; } + + /// Construct a strictly-typed ColumnBuilder. + static Result> Make( + MemoryPool* pool, const std::shared_ptr& type, int32_t col_index, + const ConvertOptions& options, + const std::shared_ptr& task_group); + + /// Construct a type-inferring ColumnBuilder. + static Result> Make( + MemoryPool* pool, int32_t col_index, const ConvertOptions& options, + const std::shared_ptr& task_group); + + /// Construct a ColumnBuilder for a column of nulls + /// (i.e. not present in the CSV file). 
+ static Result> MakeNull( + MemoryPool* pool, const std::shared_ptr& type, + const std::shared_ptr& task_group); + + protected: + explicit ColumnBuilder(std::shared_ptr task_group) + : task_group_(std::move(task_group)) {} + + std::shared_ptr task_group_; +}; + +} // namespace csv +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h new file mode 100644 index 0000000000000000000000000000000000000000..5fbbd5df58b1c588b88e16b68da50b9399211abc --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace csv { + +class BlockParser; +struct ConvertOptions; + +class ARROW_EXPORT ColumnDecoder { + public: + virtual ~ColumnDecoder() = default; + + /// Spawn a task that will try to convert and insert the given CSV block + virtual Future> Decode( + const std::shared_ptr& parser) = 0; + + /// Construct a strictly-typed ColumnDecoder. + static Result> Make(MemoryPool* pool, + std::shared_ptr type, + int32_t col_index, + const ConvertOptions& options); + + /// Construct a type-inferring ColumnDecoder. + /// Inference will run only on the first block, the type will be frozen afterwards. + static Result> Make(MemoryPool* pool, int32_t col_index, + const ConvertOptions& options); + + /// Construct a ColumnDecoder for a column of nulls + /// (i.e. not present in the CSV file). + static Result> MakeNull(MemoryPool* pool, + std::shared_ptr type); + + protected: + ColumnDecoder() = default; +}; + +} // namespace csv +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h new file mode 100644 index 0000000000000000000000000000000000000000..639f692f26a1ba3a134caac68a432ac22f068917 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/converter.h @@ -0,0 +1,82 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/csv/options.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace csv { + +class BlockParser; + +class ARROW_EXPORT Converter { + public: + Converter(const std::shared_ptr& type, const ConvertOptions& options, + MemoryPool* pool); + virtual ~Converter() = default; + + virtual Result> Convert(const BlockParser& parser, + int32_t col_index) = 0; + + std::shared_ptr type() const { return type_; } + + // Create a Converter for the given data type + static Result> Make( + const std::shared_ptr& type, const ConvertOptions& options, + MemoryPool* pool = default_memory_pool()); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(Converter); + + virtual Status Initialize() = 0; + + // CAUTION: ConvertOptions can grow large (if it customizes hundreds or + // thousands of columns), so avoid copying it in each Converter. + const ConvertOptions& options_; + MemoryPool* pool_; + std::shared_ptr type_; +}; + +class ARROW_EXPORT DictionaryConverter : public Converter { + public: + DictionaryConverter(const std::shared_ptr& value_type, + const ConvertOptions& options, MemoryPool* pool); + + // If the dictionary length goes above this value, conversion will fail + // with Status::IndexError. + virtual void SetMaxCardinality(int32_t max_length) = 0; + + // Create a Converter for the given dictionary value type. + // The dictionary index type will always be Int32. + static Result> Make( + const std::shared_ptr& value_type, const ConvertOptions& options, + MemoryPool* pool = default_memory_pool()); + + protected: + std::shared_ptr value_type_; +}; + +} // namespace csv +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h new file mode 100644 index 0000000000000000000000000000000000000000..4360ceaaea6ac07dd218c93ce13c3ab14c16fc63 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/invalid_row.h @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
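Normally the CSV reader drives Converter internally; here is a hedged sketch of invoking it directly, assuming a parsed BlockParser is already at hand. Note the header's caution that the converter keeps a reference to ConvertOptions, so the options object must outlive it, as it does within this function.

#include <memory>
#include "arrow/api.h"
#include "arrow/csv/converter.h"

arrow::Result<std::shared_ptr<arrow::Array>> ConvertInt64Column(
    const arrow::csv::BlockParser& parser, int32_t col_index) {
  auto options = arrow::csv::ConvertOptions::Defaults();  // outlives the converter
  ARROW_ASSIGN_OR_RAISE(auto converter,
                        arrow::csv::Converter::Make(arrow::int64(), options));
  return converter->Convert(parser, col_index);
}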
+ +#pragma once + +#include +#include + +namespace arrow { +namespace csv { + +/// \brief Description of an invalid row +struct InvalidRow { + /// \brief Number of columns expected in the row + int32_t expected_columns; + /// \brief Actual number of columns found in the row + int32_t actual_columns; + /// \brief The physical row number if known or -1 + /// + /// This number is one-based and also accounts for non-data rows (such as + /// CSV header rows). + int64_t number; + /// \brief View of the entire row. Memory will be freed after callback returns + const std::string_view text; +}; + +/// \brief Result returned by an InvalidRowHandler +enum class InvalidRowResult { + // Generate an error describing this row + Error, + // Skip over this row + Skip +}; + +/// \brief callback for handling a row with an invalid number of columns while parsing +/// \return result indicating if an error should be returned from the parser or the row is +/// skipped +using InvalidRowHandler = std::function; + +} // namespace csv +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h new file mode 100644 index 0000000000000000000000000000000000000000..7723dcedc611e922c932d5f9e09e984044ab3c21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/options.h @@ -0,0 +1,220 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/csv/invalid_row.h" +#include "arrow/csv/type_fwd.h" +#include "arrow/io/interfaces.h" +#include "arrow/status.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class DataType; +class TimestampParser; + +namespace csv { + +// Silly workaround for https://github.com/michaeljones/breathe/issues/453 +constexpr char kDefaultEscapeChar = '\\'; + +struct ARROW_EXPORT ParseOptions { + // Parsing options + + /// Field delimiter + char delimiter = ','; + /// Whether quoting is used + bool quoting = true; + /// Quoting character (if `quoting` is true) + char quote_char = '"'; + /// Whether a quote inside a value is double-quoted + bool double_quote = true; + /// Whether escaping is used + bool escaping = false; + /// Escaping character (if `escaping` is true) + char escape_char = kDefaultEscapeChar; + /// Whether values are allowed to contain CR (0x0d) and LF (0x0a) characters + bool newlines_in_values = false; + /// Whether empty lines are ignored. If false, an empty line represents + /// a single empty value (assuming a one-column CSV file). 
+ bool ignore_empty_lines = true; + /// A handler function for rows which do not have the correct number of columns + InvalidRowHandler invalid_row_handler; + + /// Create parsing options with default values + static ParseOptions Defaults(); + + /// \brief Test that all set options are valid + Status Validate() const; +}; + +struct ARROW_EXPORT ConvertOptions { + // Conversion options + + /// Whether to check UTF8 validity of string columns + bool check_utf8 = true; + /// Optional per-column types (disabling type inference on those columns) + std::unordered_map> column_types; + /// Recognized spellings for null values + std::vector null_values; + /// Recognized spellings for boolean true values + std::vector true_values; + /// Recognized spellings for boolean false values + std::vector false_values; + + /// Whether string / binary columns can have null values. + /// + /// If true, then strings in "null_values" are considered null for string columns. + /// If false, then all strings are valid string values. + bool strings_can_be_null = false; + + /// Whether quoted values can be null. + /// + /// If true, then strings in "null_values" are also considered null when they + /// appear quoted in the CSV file. Otherwise, quoted values are never considered null. + bool quoted_strings_can_be_null = true; + + /// Whether to try to automatically dict-encode string / binary data. + /// If true, then when type inference detects a string or binary column, + /// it is dict-encoded up to `auto_dict_max_cardinality` distinct values + /// (per chunk), after which it switches to regular encoding. + /// + /// This setting is ignored for non-inferred columns (those in `column_types`). + bool auto_dict_encode = false; + int32_t auto_dict_max_cardinality = 50; + + /// Decimal point character for floating-point and decimal data + char decimal_point = '.'; + + // XXX Should we have a separate FilterOptions? + + /// If non-empty, indicates the names of columns from the CSV file that should + /// be actually read and converted (in the vector's order). + /// Columns not in this vector will be ignored. + std::vector include_columns; + /// If false, columns in `include_columns` but not in the CSV file will error out. + /// If true, columns in `include_columns` but not in the CSV file will produce + /// a column of nulls (whose type is selected using `column_types`, + /// or null by default) + /// This option is ignored if `include_columns` is empty. + bool include_missing_columns = false; + + /// User-defined timestamp parsers, using the virtual parser interface in + /// arrow/util/value_parsing.h. More than one parser can be specified, and + /// the CSV conversion logic will try parsing values starting from the + /// beginning of this vector. If no parsers are specified, we use the default + /// built-in ISO-8601 parser. + std::vector> timestamp_parsers; + + /// Create conversion options with default values, including conventional + /// values for `null_values`, `true_values` and `false_values` + static ConvertOptions Defaults(); + + /// \brief Test that all set options are valid + Status Validate() const; +}; + +struct ARROW_EXPORT ReadOptions { + // Reader options + + /// Whether to use the global CPU thread pool + bool use_threads = true; + + /// \brief Block size we request from the IO layer. + /// + /// This will determine multi-threading granularity as well as + /// the size of individual record batches. 
+ /// Minimum valid value for block size is 1 + int32_t block_size = 1 << 20; // 1 MB + + /// Number of header rows to skip (not including the row of column names, if any) + int32_t skip_rows = 0; + + /// Number of rows to skip after the column names are read, if any + int32_t skip_rows_after_names = 0; + + /// Column names for the target table. + /// If empty, fall back on autogenerate_column_names. + std::vector column_names; + + /// Whether to autogenerate column names if `column_names` is empty. + /// If true, column names will be of the form "f0", "f1"... + /// If false, column names will be read from the first CSV row after `skip_rows`. + bool autogenerate_column_names = false; + + /// Create read options with default values + static ReadOptions Defaults(); + + /// \brief Test that all set options are valid + Status Validate() const; +}; + +/// \brief Quoting style for CSV writing +enum class ARROW_EXPORT QuotingStyle { + /// Only enclose values in quotes which need them, because their CSV rendering can + /// itself contain quotes (e.g. strings or binary values) + Needed, + /// Enclose all valid values in quotes. Nulls are not quoted. May cause readers to + /// interpret all values as strings if schema is inferred. + AllValid, + /// Do not enclose any values in quotes. Prevents values from containing quotes ("), + /// cell delimiters (,) or line endings (\\r, \\n) (following RFC4180). If values + /// contain these characters, an error is raised when attempting to write. + None +}; + +struct ARROW_EXPORT WriteOptions { + /// Whether to write an initial header line with column names + bool include_header = true; + + /// \brief Maximum number of rows processed at a time + /// + /// The CSV writer converts and writes data in batches of N rows. + /// This number can impact performance. + int32_t batch_size = 1024; + + /// Field delimiter + char delimiter = ','; + + /// \brief The string to write for null values. Quotes are not allowed in this string. + std::string null_string; + + /// \brief IO context for writing. + io::IOContext io_context; + + /// \brief The end of line character to use for ending rows + std::string eol = "\n"; + + /// \brief Quoting style + QuotingStyle quoting_style = QuotingStyle::Needed; + + /// Create write options with default values + static WriteOptions Defaults(); + + /// \brief Test that all set options are valid + Status Validate() const; +}; + +} // namespace csv +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h new file mode 100644 index 0000000000000000000000000000000000000000..c73e52ce831ed95b4abe83084b483c15660bae7e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h @@ -0,0 +1,228 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied.
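Taken together, the three options structs above fully describe a CSV read. A brief configuration sketch (illustrative; the "price" column name and the skip-bad-rows policy are arbitrary choices, not defaults):

// Sketch: configure CSV parse, convert, and read options.
#include "arrow/csv/options.h"
#include "arrow/type_fwd.h"

arrow::Status ConfigureOptions() {
  auto parse_options = arrow::csv::ParseOptions::Defaults();
  parse_options.delimiter = ';';
  // Skip rows with the wrong number of columns instead of erroring out
  parse_options.invalid_row_handler =
      [](const arrow::csv::InvalidRow&) { return arrow::csv::InvalidRowResult::Skip; };

  auto convert_options = arrow::csv::ConvertOptions::Defaults();
  convert_options.column_types["price"] = arrow::float64();  // disable inference here
  convert_options.strings_can_be_null = true;

  auto read_options = arrow::csv::ReadOptions::Defaults();
  read_options.block_size = 4 << 20;  // 4 MB blocks; larger blocks aid type inference
  read_options.skip_rows = 1;

  ARROW_RETURN_NOT_OK(parse_options.Validate());
  ARROW_RETURN_NOT_OK(convert_options.Validate());
  return read_options.Validate();
}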
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/csv/options.h" +#include "arrow/csv/type_fwd.h" +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class MemoryPool; + +namespace csv { + +/// Skip at most num_rows from the given input. The input pointer is updated +/// and the number of actually skipped rows is returned (which may be less than +/// requested if the input is too short). +ARROW_EXPORT +int32_t SkipRows(const uint8_t* data, uint32_t size, int32_t num_rows, + const uint8_t** out_data); + +class BlockParserImpl; + +namespace detail { + +struct ParsedValueDesc { + uint32_t offset : 31; + bool quoted : 1; +}; + +class ARROW_EXPORT DataBatch { + public: + explicit DataBatch(int32_t num_cols) : num_cols_(num_cols) {} + + /// \brief Return the number of parsed rows (not skipped) + int32_t num_rows() const { return num_rows_; } + /// \brief Return the number of parsed columns + int32_t num_cols() const { return num_cols_; } + /// \brief Return the total size in bytes of parsed data + uint32_t num_bytes() const { return parsed_size_; } + /// \brief Return the number of skipped rows + int32_t num_skipped_rows() const { return static_cast(skipped_rows_.size()); } + + template + Status VisitColumn(int32_t col_index, int64_t first_row, Visitor&& visit) const { + using detail::ParsedValueDesc; + + int32_t batch_row = 0; + for (size_t buf_index = 0; buf_index < values_buffers_.size(); ++buf_index) { + const auto& values_buffer = values_buffers_[buf_index]; + const auto values = reinterpret_cast(values_buffer->data()); + const auto max_pos = + static_cast(values_buffer->size() / sizeof(ParsedValueDesc)) - 1; + for (int32_t pos = col_index; pos < max_pos; pos += num_cols_, ++batch_row) { + auto start = values[pos].offset; + auto stop = values[pos + 1].offset; + auto quoted = values[pos + 1].quoted; + Status status = visit(parsed_ + start, stop - start, quoted); + if (ARROW_PREDICT_FALSE(!status.ok())) { + return DecorateWithRowNumber(std::move(status), first_row, batch_row); + } + } + } + return Status::OK(); + } + + template + Status VisitLastRow(Visitor&& visit) const { + using detail::ParsedValueDesc; + + const auto& values_buffer = values_buffers_.back(); + const auto values = reinterpret_cast(values_buffer->data()); + const auto start_pos = + static_cast(values_buffer->size() / sizeof(ParsedValueDesc)) - + num_cols_ - 1; + for (int32_t col_index = 0; col_index < num_cols_; ++col_index) { + auto start = values[start_pos + col_index].offset; + auto stop = values[start_pos + col_index + 1].offset; + auto quoted = values[start_pos + col_index + 1].quoted; + ARROW_RETURN_NOT_OK(visit(parsed_ + start, stop - start, quoted)); + } + return Status::OK(); + } + + protected: + Status DecorateWithRowNumber(Status&& status, int64_t first_row, + int32_t batch_row) const { + if (first_row >= 0) { + // `skipped_rows_` is in ascending order by construction, so use bisection + // to find out how many rows were skipped before `batch_row`.
+ const auto skips_before = + std::upper_bound(skipped_rows_.begin(), skipped_rows_.end(), batch_row) - + skipped_rows_.begin(); + status = status.WithMessage("Row #", batch_row + skips_before + first_row, ": ", + status.message()); + } + // Use return_if so that when extra context is enabled it will be added + ARROW_RETURN_IF_(true, std::move(status), ARROW_STRINGIFY(status)); + return std::move(status); + } + + // The number of rows in this batch (not including any skipped ones) + int32_t num_rows_ = 0; + // The number of columns + int32_t num_cols_ = 0; + + // XXX should we ensure the parsed buffer is padded with 8 or 16 excess zero bytes? + // It may help with null parsing... + std::vector> values_buffers_; + std::shared_ptr parsed_buffer_; + const uint8_t* parsed_ = NULLPTR; + int32_t parsed_size_ = 0; + + // Record the current num_rows_ each time a row is skipped + std::vector skipped_rows_; + + friend class ::arrow::csv::BlockParserImpl; +}; + +} // namespace detail + +constexpr int32_t kMaxParserNumRows = 100000; + +/// \class BlockParser +/// \brief A reusable block-based parser for CSV data +/// +/// The parser takes a block of CSV data and delimits rows and fields, +/// unquoting and unescaping them on the fly. Parsed data is owned by the +/// parser, so the original buffer can be discarded after Parse() returns. +/// +/// If the block is truncated (i.e. not all data can be parsed), it is up +/// to the caller to arrange the next block to start with the trailing data. +/// Also, if the previous block ends with CR (0x0d) and a new block starts +/// with LF (0x0a), the parser will consider the leading newline as an empty +/// line; the caller should therefore strip it. +class ARROW_EXPORT BlockParser { + public: + explicit BlockParser(ParseOptions options, int32_t num_cols = -1, + int64_t first_row = -1, int32_t max_num_rows = kMaxParserNumRows); + explicit BlockParser(MemoryPool* pool, ParseOptions options, int32_t num_cols = -1, + int64_t first_row = -1, int32_t max_num_rows = kMaxParserNumRows); + ~BlockParser(); + + /// \brief Parse a block of data + /// + /// Parse a block of CSV data, ingesting up to max_num_rows rows. + /// The number of bytes actually parsed is returned in out_size. + Status Parse(std::string_view data, uint32_t* out_size); + + /// \brief Parse sequential blocks of data + /// + /// Only the last block is allowed to be truncated. + Status Parse(const std::vector& data, uint32_t* out_size); + + /// \brief Parse the final block of data + /// + /// Like Parse(), but called with the final block in a file. + /// The last row may lack a trailing line separator. + Status ParseFinal(std::string_view data, uint32_t* out_size); + + /// \brief Parse the final sequential blocks of data + /// + /// Only the last block is allowed to be truncated.
+ Status ParseFinal(const std::vector& data, uint32_t* out_size); + + /// \brief Return the number of parsed rows + int32_t num_rows() const { return parsed_batch().num_rows(); } + /// \brief Return the number of parsed columns + int32_t num_cols() const { return parsed_batch().num_cols(); } + /// \brief Return the total size in bytes of parsed data + uint32_t num_bytes() const { return parsed_batch().num_bytes(); } + + /// \brief Return the total number of rows including rows which were skipped + int32_t total_num_rows() const { + return parsed_batch().num_rows() + parsed_batch().num_skipped_rows(); + } + + /// \brief Return the row number of the first row in the block or -1 if unsupported + int64_t first_row_num() const; + + /// \brief Visit parsed values in a column + /// + /// The signature of the visitor is + /// Status(const uint8_t* data, uint32_t size, bool quoted) + template + Status VisitColumn(int32_t col_index, Visitor&& visit) const { + return parsed_batch().VisitColumn(col_index, first_row_num(), + std::forward(visit)); + } + + template + Status VisitLastRow(Visitor&& visit) const { + return parsed_batch().VisitLastRow(std::forward(visit)); + } + + protected: + std::unique_ptr impl_; + + const detail::DataBatch& parsed_batch() const; +}; + +} // namespace csv +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h new file mode 100644 index 0000000000000000000000000000000000000000..bae301dc14815a6fdf9388a08c4f9068155f20a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/reader.h @@ -0,0 +1,112 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
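To make the BlockParser API above concrete, here is a minimal sketch (illustrative, with error handling reduced to status propagation) that parses an in-memory two-column block and prints the cells of column 0:

// Sketch: parse a CSV block and visit the values of column 0.
#include <iostream>
#include <string>
#include "arrow/csv/parser.h"

arrow::Status ParseAndVisit() {
  auto options = arrow::csv::ParseOptions::Defaults();
  arrow::csv::BlockParser parser(options, /*num_cols=*/2);

  std::string data = "a,1\nb,2\nc,3\n";
  uint32_t parsed_size = 0;
  ARROW_RETURN_NOT_OK(parser.Parse(data, &parsed_size));

  // The visitor receives each unquoted/unescaped cell of the column
  return parser.VisitColumn(
      /*col_index=*/0, [](const uint8_t* cell, uint32_t size, bool quoted) {
        std::cout << std::string(reinterpret_cast<const char*>(cell), size) << "\n";
        return arrow::Status::OK();
      });
}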
+ +#pragma once + +#include + +#include "arrow/csv/options.h" // IWYU pragma: keep +#include "arrow/io/interfaces.h" +#include "arrow/record_batch.h" +#include "arrow/result.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/util/future.h" +#include "arrow/util/thread_pool.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { +class InputStream; +} // namespace io + +namespace csv { + +/// A class that reads an entire CSV file into an Arrow Table +class ARROW_EXPORT TableReader { + public: + virtual ~TableReader() = default; + + /// Read the entire CSV file and convert it to an Arrow Table + virtual Result> Read() = 0; + /// Read the entire CSV file and convert it to an Arrow Table + virtual Future> ReadAsync() = 0; + + /// Create a TableReader instance + static Result> Make(io::IOContext io_context, + std::shared_ptr input, + const ReadOptions&, + const ParseOptions&, + const ConvertOptions&); +}; + +/// \brief A class that reads a CSV file incrementally +/// +/// Caveats: +/// - For now, this is always single-threaded (regardless of `ReadOptions::use_threads`). +/// - Type inference is done on the first block and types are frozen afterwards; +/// to make sure the right data types are inferred, either set +/// `ReadOptions::block_size` to a large enough value, or use +/// `ConvertOptions::column_types` to set the desired data types explicitly. +class ARROW_EXPORT StreamingReader : public RecordBatchReader { + public: + virtual ~StreamingReader() = default; + + virtual Future> ReadNextAsync() = 0; + + /// \brief Return the number of bytes which have been read and processed + /// + /// The returned number includes CSV bytes which the StreamingReader has + /// finished processing, but not bytes for which some processing (e.g. + /// CSV parsing or conversion to Arrow layout) is still ongoing. + /// + /// Furthermore, the following rules apply: + /// - bytes skipped by `ReadOptions.skip_rows` are counted as being read before + /// any records are returned. + /// - bytes read while parsing the header are counted as being read before any + /// records are returned. + /// - bytes skipped by `ReadOptions.skip_rows_after_names` are counted after the + /// first batch is returned. + virtual int64_t bytes_read() const = 0; + + /// Create a StreamingReader instance + /// + /// This involves some I/O as the first batch must be loaded during the creation + /// process, so it is returned as a future. + /// + /// Currently, the StreamingReader is not async-reentrant and does not do any fan-out + /// parsing (see ARROW-11889) + static Future> MakeAsync( + io::IOContext io_context, std::shared_ptr input, + arrow::internal::Executor* cpu_executor, const ReadOptions&, const ParseOptions&, + const ConvertOptions&); + + static Result> Make( + io::IOContext io_context, std::shared_ptr input, + const ReadOptions&, const ParseOptions&, const ConvertOptions&); +}; + +/// \brief Count the logical rows of data in a CSV file (i.e. the +/// number of rows you would get if you read the file into a table).
+ARROW_EXPORT +Future CountRowsAsync(io::IOContext io_context, + std::shared_ptr input, + arrow::internal::Executor* cpu_executor, + const ReadOptions&, const ParseOptions&); + +} // namespace csv +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h new file mode 100644 index 0000000000000000000000000000000000000000..07a41604478e81ac760e8d0b3501ef24996b0a4e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/test_common.h @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/csv/parser.h" +#include "arrow/testing/visibility.h" + +namespace arrow { +namespace csv { + +ARROW_TESTING_EXPORT +std::string MakeCSVData(std::vector lines); + +// Make a BlockParser from a vector of lines representing a CSV file +ARROW_TESTING_EXPORT +void MakeCSVParser(std::vector lines, ParseOptions options, int32_t num_cols, + MemoryPool* pool, std::shared_ptr* out); + +ARROW_TESTING_EXPORT +void MakeCSVParser(std::vector lines, ParseOptions options, + std::shared_ptr* out); + +ARROW_TESTING_EXPORT +void MakeCSVParser(std::vector lines, std::shared_ptr* out); + +// Make a BlockParser from a vector of strings representing a single CSV column +ARROW_TESTING_EXPORT +void MakeColumnParser(std::vector items, std::shared_ptr* out); + +ARROW_TESTING_EXPORT +Result> MakeSampleCsvBuffer( + size_t num_rows, std::function is_valid = {}); + +} // namespace csv +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/type_fwd.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..c0a53847a90ddb82067e0c9ac955cf4222c61742 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/type_fwd.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
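A minimal end-to-end sketch of the TableReader above (illustrative; arrow::io::ReadableFile and default_io_context() come from the Arrow IO layer rather than this header):

// Sketch: read a CSV file from disk into an arrow::Table.
#include <string>
#include "arrow/csv/reader.h"
#include "arrow/io/file.h"

arrow::Result<std::shared_ptr<arrow::Table>> ReadCsvFile(const std::string& path) {
  ARROW_ASSIGN_OR_RAISE(auto input, arrow::io::ReadableFile::Open(path));
  ARROW_ASSIGN_OR_RAISE(
      auto reader,
      arrow::csv::TableReader::Make(arrow::io::default_io_context(), input,
                                    arrow::csv::ReadOptions::Defaults(),
                                    arrow::csv::ParseOptions::Defaults(),
                                    arrow::csv::ConvertOptions::Defaults()));
  return reader->Read();  // blocks; use ReadAsync() for a Future instead
}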
See the License for the +// specific language governing permissions and limitations +// under the License. + +namespace arrow { +namespace csv { + +class TableReader; +struct ConvertOptions; +struct ReadOptions; +struct ParseOptions; +struct WriteOptions; + +} // namespace csv +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/writer.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/writer.h new file mode 100644 index 0000000000000000000000000000000000000000..43233372124728ba41f0bead721dd3f8006f19b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/csv/writer.h @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/csv/options.h" +#include "arrow/io/interfaces.h" +#include "arrow/ipc/type_fwd.h" +#include "arrow/record_batch.h" +#include "arrow/table.h" + +namespace arrow { +namespace csv { + +// Functionality for converting Arrow data to comma-separated value (CSV) text. +// This library supports all primitive types that can be cast to a StringArray. +// It applies the following formatting rules: +// - For non-binary types no quotes surround values. Nulls are represented as the empty +// string. +// - For binary types all non-null data is quoted (and quotes within data are escaped +// with an additional quote). +// Null values are empty and unquoted. + +/// \defgroup csv-write-functions High-level functions for writing CSV files +/// @{ + +/// \brief Convert table to CSV and write the result to output. +/// Experimental +ARROW_EXPORT Status WriteCSV(const Table& table, const WriteOptions& options, + arrow::io::OutputStream* output); +/// \brief Convert batch to CSV and write the result to output. +/// Experimental +ARROW_EXPORT Status WriteCSV(const RecordBatch& batch, const WriteOptions& options, + arrow::io::OutputStream* output); +/// \brief Convert batches read through a RecordBatchReader +/// to CSV and write the results to output. +/// Experimental +ARROW_EXPORT Status WriteCSV(const std::shared_ptr& reader, + const WriteOptions& options, + arrow::io::OutputStream* output); + +/// @} + +/// \defgroup csv-writer-factories Functions for creating an incremental CSV writer +/// @{ + +/// \brief Create a new CSV writer. User is responsible for closing the +/// actual OutputStream. +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result> +ARROW_EXPORT +Result> MakeCSVWriter( + std::shared_ptr sink, const std::shared_ptr& schema, + const WriteOptions& options = WriteOptions::Defaults()); + +/// \brief Create a new CSV writer.
+/// +/// \param[in] sink output stream to write to (does not take ownership) +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result> +ARROW_EXPORT +Result> MakeCSVWriter( + io::OutputStream* sink, const std::shared_ptr& schema, + const WriteOptions& options = WriteOptions::Defaults()); + +/// @} + +} // namespace csv +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/extension/fixed_shape_tensor.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/extension/fixed_shape_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..3fec79b5c2a3c75ac2aa68e8d3d88312b56a06a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/extension/fixed_shape_tensor.h @@ -0,0 +1,128 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "arrow/extension_type.h" + +namespace arrow { +namespace extension { + +class ARROW_EXPORT FixedShapeTensorArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; + + /// \brief Create a FixedShapeTensorArray from a Tensor + /// + /// This method will create a FixedShapeTensorArray from a Tensor, taking its first + /// dimension as the number of elements in the resulting array and the remaining + /// dimensions as the shape of the individual tensors. If Tensor provides strides, + /// they will be used to determine dimension permutation. Otherwise, row-major layout + /// (i.e. no permutation) will be assumed. + /// + /// \param[in] tensor The Tensor to convert to a FixedShapeTensorArray + static Result> FromTensor( + const std::shared_ptr& tensor); + + /// \brief Create a Tensor from FixedShapeTensorArray + /// + /// This method will create a Tensor from a FixedShapeTensorArray, setting its first + /// dimension as length equal to the FixedShapeTensorArray's length and the remaining + /// dimensions as the FixedShapeTensorType's shape. Shape and dim_names will be + /// permuted according to permutation stored in the FixedShapeTensorType metadata. + const Result> ToTensor() const; +}; + +/// \brief Concrete type class for constant-size Tensor data. +/// This is a canonical arrow extension type. 
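A short sketch of the write path declared above (illustrative; arrow::io::BufferOutputStream is from the Arrow IO layer and stands in for any OutputStream):

// Sketch: serialize a Table as CSV into an in-memory buffer.
#include "arrow/csv/writer.h"
#include "arrow/io/memory.h"

arrow::Result<std::shared_ptr<arrow::Buffer>> TableToCsv(const arrow::Table& table) {
  ARROW_ASSIGN_OR_RAISE(auto sink, arrow::io::BufferOutputStream::Create());
  ARROW_RETURN_NOT_OK(
      arrow::csv::WriteCSV(table, arrow::csv::WriteOptions::Defaults(), sink.get()));
  return sink->Finish();  // hand back the accumulated CSV bytes
}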
+/// See: https://arrow.apache.org/docs/format/CanonicalExtensions.html +class ARROW_EXPORT FixedShapeTensorType : public ExtensionType { + public: + FixedShapeTensorType(const std::shared_ptr& value_type, const int32_t& size, + const std::vector& shape, + const std::vector& permutation = {}, + const std::vector& dim_names = {}) + : ExtensionType(fixed_size_list(value_type, size)), + value_type_(value_type), + shape_(shape), + permutation_(permutation), + dim_names_(dim_names) {} + + std::string extension_name() const override { return "arrow.fixed_shape_tensor"; } + std::string ToString(bool show_metadata = false) const override; + + /// Number of dimensions of tensor elements + size_t ndim() const { return shape_.size(); } + + /// Shape of tensor elements + const std::vector shape() const { return shape_; } + + /// Value type of tensor elements + const std::shared_ptr value_type() const { return value_type_; } + + /// Strides of tensor elements. Strides state offset in bytes between adjacent + /// elements along each dimension. In case permutation is non-empty strides are + /// computed from permuted tensor element's shape. + const std::vector& strides(); + + /// Permutation mapping from logical to physical memory layout of tensor elements + const std::vector& permutation() const { return permutation_; } + + /// Dimension names of tensor elements. Dimensions are ordered physically. + const std::vector& dim_names() const { return dim_names_; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::string Serialize() const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized_data) const override; + + /// Create a FixedShapeTensorArray from ArrayData + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + /// \brief Create a Tensor from an ExtensionScalar from a FixedShapeTensorArray + /// + /// This method will return a Tensor from ExtensionScalar with strides + /// derived from shape and permutation of FixedShapeTensorType. Shape and + /// dim_names will be permuted according to permutation stored in the + /// FixedShapeTensorType metadata. + static Result> MakeTensor( + const std::shared_ptr& scalar); + + /// \brief Create a FixedShapeTensorType instance + static Result> Make( + const std::shared_ptr& value_type, const std::vector& shape, + const std::vector& permutation = {}, + const std::vector& dim_names = {}); + + private: + std::shared_ptr storage_type_; + std::shared_ptr value_type_; + std::vector shape_; + std::vector strides_; + std::vector permutation_; + std::vector dim_names_; +}; + +/// \brief Return a FixedShapeTensorType instance. +ARROW_EXPORT std::shared_ptr fixed_shape_tensor( + const std::shared_ptr& storage_type, const std::vector& shape, + const std::vector& permutation = {}, + const std::vector& dim_names = {}); + +} // namespace extension +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/api.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/api.h new file mode 100644 index 0000000000000000000000000000000000000000..ed31b5c8fa41f39d915d8ecbeb40b37b51ac26d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/api.h @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
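A small sketch of declaring such a type with the Make factory above (illustrative; the float32 value type and 3x4 shape are arbitrary):

// Sketch: declare a fixed-shape tensor extension type whose elements
// are 3x4 row-major tensors of float32 values.
#include "arrow/extension/fixed_shape_tensor.h"

arrow::Result<std::shared_ptr<arrow::DataType>> MakeTensorType() {
  return arrow::extension::FixedShapeTensorType::Make(
      arrow::float32(), /*shape=*/{3, 4});
}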
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/flight/client.h" +#include "arrow/flight/client_auth.h" +#include "arrow/flight/client_middleware.h" +#include "arrow/flight/client_tracing_middleware.h" +#include "arrow/flight/middleware.h" +#include "arrow/flight/server.h" +#include "arrow/flight/server_auth.h" +#include "arrow/flight/server_middleware.h" +#include "arrow/flight/server_tracing_middleware.h" +#include "arrow/flight/types.h" +#include "arrow/flight/types_async.h" diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_cookie_middleware.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_cookie_middleware.h new file mode 100644 index 0000000000000000000000000000000000000000..6a56a632dfbd220ee1aaf749f1c7fb2b9ab0852e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_cookie_middleware.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Middleware implementation for sending and receiving HTTP cookies. + +#pragma once + +#include + +#include "arrow/flight/client_middleware.h" + +namespace arrow { +namespace flight { + +/// \brief Returns a ClientMiddlewareFactory that handles sending and receiving cookies. +ARROW_FLIGHT_EXPORT std::shared_ptr GetCookieFactory(); + +} // namespace flight +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_middleware.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_middleware.h new file mode 100644 index 0000000000000000000000000000000000000000..8e3126553a953b9d8f2fcdb94b72f9214b690de1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/client_middleware.h @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Interfaces for defining middleware for Flight clients. Currently +// experimental. + +#pragma once + +#include + +#include "arrow/flight/middleware.h" +#include "arrow/flight/visibility.h" // IWYU pragma: keep +#include "arrow/status.h" + +namespace arrow { +namespace flight { + +/// \brief Client-side middleware for a call, instantiated per RPC. +/// +/// Middleware should be fast and must be infallible: there is no way +/// to reject the call or report errors from the middleware instance. +class ARROW_FLIGHT_EXPORT ClientMiddleware { + public: + virtual ~ClientMiddleware() = default; + + /// \brief A callback before headers are sent. Extra headers can be + /// added, but existing ones cannot be read. + virtual void SendingHeaders(AddCallHeaders* outgoing_headers) = 0; + + /// \brief A callback when headers are received from the server. + /// + /// This may be called more than once, since servers send both + /// headers and trailers. Some implementations (e.g. gRPC-Java, and + /// hence Arrow Flight in Java) may consolidate headers into + /// trailers if the RPC errored. + virtual void ReceivedHeaders(const CallHeaders& incoming_headers) = 0; + + /// \brief A callback after the call has completed. + virtual void CallCompleted(const Status& status) = 0; +}; + +/// \brief A factory for new middleware instances. +/// +/// If added to a client, this will be called for each RPC (including +/// Handshake) to give the opportunity to intercept the call. +/// +/// It is guaranteed that all client middleware methods are called +/// from the same thread that calls the RPC method implementation. +class ARROW_FLIGHT_EXPORT ClientMiddlewareFactory { + public: + virtual ~ClientMiddlewareFactory() = default; + + /// \brief A callback for the start of a new call. + /// + /// \param info Information about the call. + /// \param[out] middleware The middleware instance for this call. If + /// unset, will not add middleware to this call instance from + /// this factory. + virtual void StartCall(const CallInfo& info, + std::unique_ptr* middleware) = 0; +}; + +} // namespace flight +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/middleware.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/middleware.h new file mode 100644 index 0000000000000000000000000000000000000000..84448097ff01995cbef59b65ca6e170f4b737745 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/middleware.h @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
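As a usage sketch of the middleware hooks above (illustrative; the header name and value are arbitrary, and CallHeaders comes from arrow/flight/types.h):

// Sketch: a client middleware that attaches a static header to every
// outgoing RPC and ignores responses.
#include <memory>
#include "arrow/flight/client_middleware.h"

class TaggingMiddleware : public arrow::flight::ClientMiddleware {
 public:
  void SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) override {
    outgoing_headers->AddHeader("x-my-app", "example");  // hypothetical header
  }
  void ReceivedHeaders(const arrow::flight::CallHeaders&) override {}
  void CallCompleted(const arrow::Status&) override {}
};

class TaggingMiddlewareFactory : public arrow::flight::ClientMiddlewareFactory {
 public:
  void StartCall(const arrow::flight::CallInfo&,
                 std::unique_ptr<arrow::flight::ClientMiddleware>* middleware) override {
    *middleware = std::make_unique<TaggingMiddleware>();
  }
};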
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Interfaces for defining middleware for Flight clients and +// servers. Currently experimental. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/flight/types.h" +#include "arrow/status.h" + +namespace arrow { +namespace flight { + +/// \brief A write-only wrapper around headers for an RPC call. +class ARROW_FLIGHT_EXPORT AddCallHeaders { + public: + virtual ~AddCallHeaders() = default; + + /// \brief Add a header to be sent to the client. + /// + /// \param[in] key The header name. Must be lowercase ASCII; some + /// transports may reject invalid header names. + /// \param[in] value The header value. Some transports may only + /// accept binary header values if the header name ends in "-bin". + virtual void AddHeader(const std::string& key, const std::string& value) = 0; +}; + +/// \brief An enumeration of the RPC methods Flight implements. +enum class FlightMethod : char { + Invalid = 0, + Handshake = 1, + ListFlights = 2, + GetFlightInfo = 3, + GetSchema = 4, + DoGet = 5, + DoPut = 6, + DoAction = 7, + ListActions = 8, + DoExchange = 9, + PollFlightInfo = 10, +}; + +/// \brief Get a human-readable name for a Flight method. +ARROW_FLIGHT_EXPORT +std::string ToString(FlightMethod method); + +/// \brief Information about an instance of a Flight RPC. +struct ARROW_FLIGHT_EXPORT CallInfo { + public: + /// \brief The RPC method of this call. + FlightMethod method; +}; + +} // namespace flight +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h new file mode 100644 index 0000000000000000000000000000000000000000..ffcffe12e3c78bb92452f33644a8293c03aa8175 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/server.h @@ -0,0 +1,328 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Interfaces to use for defining Flight RPC servers. 
API should be considered +// experimental for now + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/flight/server_auth.h" +#include "arrow/flight/type_fwd.h" +#include "arrow/flight/types.h" // IWYU pragma: keep +#include "arrow/flight/visibility.h" // IWYU pragma: keep +#include "arrow/ipc/dictionary.h" +#include "arrow/ipc/options.h" +#include "arrow/record_batch.h" + +namespace arrow { + +class Schema; +class Status; + +namespace flight { + +/// \brief Interface that produces a sequence of IPC payloads to be sent in +/// FlightData protobuf messages +class ARROW_FLIGHT_EXPORT FlightDataStream { + public: + virtual ~FlightDataStream(); + + virtual std::shared_ptr schema() = 0; + + /// \brief Compute FlightPayload containing serialized RecordBatch schema + virtual arrow::Result GetSchemaPayload() = 0; + + // When the stream is completed, the last payload written will have null + // metadata + virtual arrow::Result Next() = 0; + + virtual Status Close(); +}; + +/// \brief A basic implementation of FlightDataStream that will provide +/// a sequence of FlightData messages to be written to a stream +class ARROW_FLIGHT_EXPORT RecordBatchStream : public FlightDataStream { + public: + /// \param[in] reader produces a sequence of record batches + /// \param[in] options IPC options for writing + explicit RecordBatchStream( + const std::shared_ptr& reader, + const ipc::IpcWriteOptions& options = ipc::IpcWriteOptions::Defaults()); + ~RecordBatchStream() override; + + // inherit deprecated API + using FlightDataStream::GetSchemaPayload; + using FlightDataStream::Next; + + std::shared_ptr schema() override; + arrow::Result GetSchemaPayload() override; + + arrow::Result Next() override; + Status Close() override; + + private: + class RecordBatchStreamImpl; + std::unique_ptr impl_; +}; + +/// \brief A reader for IPC payloads uploaded by a client. Also allows +/// reading application-defined metadata via the Flight protocol. +class ARROW_FLIGHT_EXPORT FlightMessageReader : public MetadataRecordBatchReader { + public: + /// \brief Get the descriptor for this upload. + virtual const FlightDescriptor& descriptor() const = 0; +}; + +/// \brief A writer for application-specific metadata sent back to the +/// client during an upload. +class ARROW_FLIGHT_EXPORT FlightMetadataWriter { + public: + virtual ~FlightMetadataWriter(); + /// \brief Send a message to the client. + virtual Status WriteMetadata(const Buffer& app_metadata) = 0; +}; + +/// \brief A writer for IPC payloads to a client. Also allows sending +/// application-defined metadata via the Flight protocol. +/// +/// This class offers more control compared to FlightDataStream, +/// including the option to write metadata without data and the +/// ability to interleave reading and writing. +class ARROW_FLIGHT_EXPORT FlightMessageWriter : public MetadataRecordBatchWriter { + public: + virtual ~FlightMessageWriter() = default; +}; + +/// \brief Call state/contextual data. +class ARROW_FLIGHT_EXPORT ServerCallContext { + public: + virtual ~ServerCallContext() = default; + /// \brief The name of the authenticated peer (may be the empty string) + virtual const std::string& peer_identity() const = 0; + /// \brief The peer address (not validated) + virtual const std::string& peer() const = 0; + /// \brief Add a response header. 
This is only valid before the server + /// starts sending the response; generally this isn't an issue unless you + /// are implementing FlightDataStream, ResultStream, or similar interfaces + /// yourself, or during a DoExchange or DoPut. + virtual void AddHeader(const std::string& key, const std::string& value) const = 0; + /// \brief Add a response trailer. This is only valid before the server + /// sends the final status; generally this isn't an issue unless your RPC + /// handler launches a thread or similar. + virtual void AddTrailer(const std::string& key, const std::string& value) const = 0; + /// \brief Look up a middleware by key. Do not maintain a reference + /// to the object beyond the request body. + /// \return The middleware, or nullptr if not found. + virtual ServerMiddleware* GetMiddleware(const std::string& key) const = 0; + /// \brief Check if the current RPC has been cancelled (by the client, by + /// a network error, etc.). + virtual bool is_cancelled() const = 0; + /// \brief The headers sent by the client for this call. + virtual const CallHeaders& incoming_headers() const = 0; +}; + +class ARROW_FLIGHT_EXPORT FlightServerOptions { + public: + explicit FlightServerOptions(const Location& location_); + + ~FlightServerOptions(); + + /// \brief The host & port (or domain socket path) to listen on. + /// Use port 0 to bind to an available port. + Location location; + /// \brief The authentication handler to use. + std::shared_ptr auth_handler; + /// \brief A list of TLS certificate+key pairs to use. + std::vector tls_certificates; + /// \brief Enable mTLS and require that the client present a certificate. + bool verify_client; + /// \brief If using mTLS, the PEM-encoded root certificate to use. + std::string root_certificates; + /// \brief A list of server middleware to apply, along with a key to + /// identify them by. + /// + /// Middleware are always applied in the order provided. Duplicate + /// keys are an error. + std::vector>> + middleware; + + /// \brief An optional memory manager to control where to allocate incoming data. + std::shared_ptr memory_manager; + + /// \brief A Flight implementation-specific callback to customize + /// transport-specific options. + /// + /// Not guaranteed to be called. The type of the parameter is + /// specific to the Flight implementation. Users should take care to + /// link to the same transport implementation as Flight to avoid + /// runtime problems. See "Using Arrow C++ in your own project" in + /// the documentation for more details. + std::function builder_hook; +}; + +/// \brief Skeleton RPC server implementation which can be used to create +/// custom servers by implementing its abstract methods +class ARROW_FLIGHT_EXPORT FlightServerBase { + public: + FlightServerBase(); + virtual ~FlightServerBase(); + + // Lifecycle methods. + + /// \brief Initialize a Flight server listening at the given location. + /// This method must be called before any other method. + /// \param[in] options The configuration for this server. + Status Init(const FlightServerOptions& options); + + /// \brief Get the port that the Flight server is listening on. + /// This method must only be called after Init(). Will return a + /// non-positive value if no port exists (e.g. when listening on a + /// domain socket). + int port() const; + + /// \brief Get the address that the Flight server is listening on. + /// This method must only be called after Init(). 
+ Location location() const; + + /// \brief Set the server to stop when receiving any of the given signal + /// numbers. + /// This method must be called before Serve(). + Status SetShutdownOnSignals(const std::vector sigs); + + /// \brief Start serving. + /// This method blocks until the server shuts down. + /// + /// The server will start to shut down when either Shutdown() is called + /// or one of the signals registered in SetShutdownOnSignals() is received. + Status Serve(); + + /// \brief Query whether Serve() was interrupted by a signal. + /// This method must be called after Serve() has returned. + /// + /// \return int the signal number that interrupted Serve(), if any, otherwise 0 + int GotSignal() const; + + /// \brief Shut down the server, blocking until current requests finish. + /// + /// Can be called from a signal handler or another thread while Serve() + /// blocks. Optionally a deadline can be set. Once the deadline expires + /// server will wait until remaining running calls complete. + /// + /// Should only be called once. + Status Shutdown(const std::chrono::system_clock::time_point* deadline = NULLPTR); + + /// \brief Block until server shuts down with Shutdown. + /// + /// Does not respond to signals like Serve(). + Status Wait(); + + // Implement these methods to create your own server. The default + // implementations will return a not-implemented result to the client + + /// \brief Retrieve a list of available fields given an optional opaque + /// criteria + /// \param[in] context The call context. + /// \param[in] criteria may be null + /// \param[out] listings the returned listings iterator + /// \return Status + virtual Status ListFlights(const ServerCallContext& context, const Criteria* criteria, + std::unique_ptr* listings); + + /// \brief Retrieve the schema and an access plan for the indicated + /// descriptor + /// \param[in] context The call context. + /// \param[in] request the dataset request, whether a named dataset or command + /// \param[out] info the returned flight info provider + /// \return Status + virtual Status GetFlightInfo(const ServerCallContext& context, + const FlightDescriptor& request, + std::unique_ptr* info); + + /// \brief Retrieve the current status of the target query + /// \param[in] context The call context. + /// \param[in] request the dataset request or a descriptor returned by a + /// prior PollFlightInfo call + /// \param[out] info the returned retry info provider + /// \return Status + virtual Status PollFlightInfo(const ServerCallContext& context, + const FlightDescriptor& request, + std::unique_ptr* info); + + /// \brief Retrieve the schema for the indicated descriptor + /// \param[in] context The call context. + /// \param[in] request the dataset request, whether a named dataset or command + /// \param[out] schema the returned flight schema provider + /// \return Status + virtual Status GetSchema(const ServerCallContext& context, + const FlightDescriptor& request, + std::unique_ptr* schema); + + /// \brief Get a stream of IPC payloads to put on the wire + /// \param[in] context The call context. + /// \param[in] request an opaque ticket + /// \param[out] stream the returned stream provider + /// \return Status + virtual Status DoGet(const ServerCallContext& context, const Ticket& request, + std::unique_ptr* stream); + + /// \brief Process a stream of IPC payloads sent from a client + /// \param[in] context The call context. 
+ /// \param[in] reader a sequence of uploaded record batches + /// \param[in] writer send metadata back to the client + /// \return Status + virtual Status DoPut(const ServerCallContext& context, + std::unique_ptr reader, + std::unique_ptr writer); + + /// \brief Process a bidirectional stream of IPC payloads + /// \param[in] context The call context. + /// \param[in] reader a sequence of uploaded record batches + /// \param[in] writer send data back to the client + /// \return Status + virtual Status DoExchange(const ServerCallContext& context, + std::unique_ptr reader, + std::unique_ptr writer); + + /// \brief Execute an action, return stream of zero or more results + /// \param[in] context The call context. + /// \param[in] action the action to execute, with type and body + /// \param[out] result the result iterator + /// \return Status + virtual Status DoAction(const ServerCallContext& context, const Action& action, + std::unique_ptr* result); + + /// \brief Retrieve the list of available actions + /// \param[in] context The call context. + /// \param[out] actions a vector of available action types + /// \return Status + virtual Status ListActions(const ServerCallContext& context, + std::vector* actions); + + private: + struct Impl; + std::unique_ptr impl_; +}; + +} // namespace flight +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_definitions.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_definitions.h new file mode 100644 index 0000000000000000000000000000000000000000..1e0e8c209ac94a5071e7e2817ab384e20ac7cff2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_definitions.h @@ -0,0 +1,317 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Common test definitions for Flight. Individual transport +// implementations can instantiate these tests. +// +// While Googletest's value-parameterized tests would be a more +// natural way to do this, they cause runtime issues on MinGW/MSVC +// (Googletest thinks the test suite has been defined twice). 
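Pulling the server surface above together, a minimal sketch (illustrative; Location::ForGrpcTcp is declared in the Flight types header, not shown here):

// Sketch: a do-nothing Flight server bound to an OS-chosen port.
#include <csignal>
#include <iostream>
#include "arrow/flight/server.h"

class NoopServer : public arrow::flight::FlightServerBase {
 public:
  arrow::Status ListActions(const arrow::flight::ServerCallContext&,
                            std::vector<arrow::flight::ActionType>* actions) override {
    actions->clear();  // no actions supported
    return arrow::Status::OK();
  }
};

arrow::Status RunServer() {
  ARROW_ASSIGN_OR_RAISE(auto location,
                        arrow::flight::Location::ForGrpcTcp("0.0.0.0", 0));
  arrow::flight::FlightServerOptions options(location);
  NoopServer server;
  ARROW_RETURN_NOT_OK(server.Init(options));
  std::cout << "Listening on port " << server.port() << std::endl;
  ARROW_RETURN_NOT_OK(server.SetShutdownOnSignals({SIGTERM}));
  return server.Serve();  // blocks until Shutdown() or SIGTERM
}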
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_definitions.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_definitions.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e0e8c209ac94a5071e7e2817ab384e20ac7cff2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/test_definitions.h
@@ -0,0 +1,317 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Common test definitions for Flight. Individual transport
+// implementations can instantiate these tests.
+//
+// While Googletest's value-parameterized tests would be a more
+// natural way to do this, they cause runtime issues on MinGW/MSVC
+// (Googletest thinks the test suite has been defined twice).
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "arrow/flight/server.h"
+#include "arrow/flight/types.h"
+#include "arrow/util/macros.h"
+
+namespace arrow {
+namespace flight {
+
+class ARROW_FLIGHT_EXPORT FlightTest {
+ protected:
+  virtual std::string transport() const = 0;
+  virtual bool supports_async() const { return false; }
+  virtual void SetUpTest() {}
+  virtual void TearDownTest() {}
+};
+
+/// Common tests of startup/shutdown
+class ARROW_FLIGHT_EXPORT ConnectivityTest : public FlightTest {
+ public:
+  // Test methods
+  void TestGetPort();
+  void TestBuilderHook();
+  void TestShutdown();
+  void TestShutdownWithDeadline();
+  void TestBrokenConnection();
+};
+
+#define ARROW_FLIGHT_TEST_CONNECTIVITY(FIXTURE)                                  \
+  static_assert(std::is_base_of<ConnectivityTest, FIXTURE>::value,               \
+                ARROW_STRINGIFY(FIXTURE) " must inherit from ConnectivityTest"); \
+  TEST_F(FIXTURE, GetPort) { TestGetPort(); }                                    \
+  TEST_F(FIXTURE, BuilderHook) { TestBuilderHook(); }                            \
+  TEST_F(FIXTURE, Shutdown) { TestShutdown(); }                                  \
+  TEST_F(FIXTURE, ShutdownWithDeadline) { TestShutdownWithDeadline(); }          \
+  TEST_F(FIXTURE, BrokenConnection) { TestBrokenConnection(); }
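A suite like ConnectivityTest is bound to a concrete transport by a small fixture in the instantiating test file. A minimal sketch, assuming a hypothetical `ExampleConnectivityTest` fixture and a "grpc" transport string, with gtest included by that file:

#include <gtest/gtest.h>

#include "arrow/flight/test_definitions.h"

// Hypothetical fixture binding the generic suite to one concrete transport.
class ExampleConnectivityTest : public arrow::flight::ConnectivityTest,
                                public ::testing::Test {
 protected:
  std::string transport() const override { return "grpc"; }
  void SetUp() override { SetUpTest(); }
  void TearDown() override { TearDownTest(); }
};

// Expands into one TEST_F per Test* method declared on ConnectivityTest.
ARROW_FLIGHT_TEST_CONNECTIVITY(ExampleConnectivityTest);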
+
+/// Common tests of data plane methods
+class ARROW_FLIGHT_EXPORT DataTest : public FlightTest {
+ public:
+  void SetUpTest() override;
+  void TearDownTest() override;
+  Status ConnectClient();
+
+  // Test methods
+  void TestDoGetInts();
+  void TestDoGetFloats();
+  void TestDoGetDicts();
+  void TestDoGetLargeBatch();
+  void TestFlightDataStreamError();
+  void TestOverflowServerBatch();
+  void TestOverflowClientBatch();
+  void TestDoExchange();
+  void TestDoExchangeNoData();
+  void TestDoExchangeWriteOnlySchema();
+  void TestDoExchangeGet();
+  void TestDoExchangePut();
+  void TestDoExchangeEcho();
+  void TestDoExchangeTotal();
+  void TestDoExchangeError();
+  void TestDoExchangeConcurrency();
+  void TestDoExchangeUndrained();
+  void TestIssue5095();
+
+ private:
+  void CheckDoGet(
+      const FlightDescriptor& descr, const RecordBatchVector& expected_batches,
+      std::function<void(const std::vector<FlightEndpoint>&)> check_endpoints);
+  void CheckDoGet(const Ticket& ticket, const RecordBatchVector& expected_batches);
+
+  std::unique_ptr<FlightClient> client_;
+  std::unique_ptr<FlightServerBase> server_;
+};
+
+#define ARROW_FLIGHT_TEST_DATA(FIXTURE)                                              \
+  static_assert(std::is_base_of<DataTest, FIXTURE>::value,                           \
+                ARROW_STRINGIFY(FIXTURE) " must inherit from DataTest");             \
+  TEST_F(FIXTURE, TestDoGetInts) { TestDoGetInts(); }                                \
+  TEST_F(FIXTURE, TestDoGetFloats) { TestDoGetFloats(); }                            \
+  TEST_F(FIXTURE, TestDoGetDicts) { TestDoGetDicts(); }                              \
+  TEST_F(FIXTURE, TestDoGetLargeBatch) { TestDoGetLargeBatch(); }                    \
+  TEST_F(FIXTURE, TestFlightDataStreamError) { TestFlightDataStreamError(); }        \
+  TEST_F(FIXTURE, TestOverflowServerBatch) { TestOverflowServerBatch(); }            \
+  TEST_F(FIXTURE, TestOverflowClientBatch) { TestOverflowClientBatch(); }            \
+  TEST_F(FIXTURE, TestDoExchange) { TestDoExchange(); }                              \
+  TEST_F(FIXTURE, TestDoExchangeNoData) { TestDoExchangeNoData(); }                  \
+  TEST_F(FIXTURE, TestDoExchangeWriteOnlySchema) { TestDoExchangeWriteOnlySchema(); } \
+  TEST_F(FIXTURE, TestDoExchangeGet) { TestDoExchangeGet(); }                        \
+  TEST_F(FIXTURE, TestDoExchangePut) { TestDoExchangePut(); }                        \
+  TEST_F(FIXTURE, TestDoExchangeEcho) { TestDoExchangeEcho(); }                      \
+  TEST_F(FIXTURE, TestDoExchangeTotal) { TestDoExchangeTotal(); }                    \
+  TEST_F(FIXTURE, TestDoExchangeError) { TestDoExchangeError(); }                    \
+  TEST_F(FIXTURE, TestDoExchangeConcurrency) { TestDoExchangeConcurrency(); }        \
+  TEST_F(FIXTURE, TestDoExchangeUndrained) { TestDoExchangeUndrained(); }            \
+  TEST_F(FIXTURE, TestIssue5095) { TestIssue5095(); }
+
+/// \brief Specific tests of DoPut.
+class ARROW_FLIGHT_EXPORT DoPutTest : public FlightTest {
+ public:
+  void SetUpTest() override;
+  void TearDownTest() override;
+  void CheckBatches(const FlightDescriptor& expected_descriptor,
+                    const RecordBatchVector& expected_batches);
+  void CheckDoPut(const FlightDescriptor& descr, const std::shared_ptr<Schema>& schema,
+                  const RecordBatchVector& batches);
+
+  // Test methods
+  void TestInts();
+  void TestFloats();
+  void TestEmptyBatch();
+  void TestDicts();
+  void TestLargeBatch();
+  void TestSizeLimit();
+  void TestUndrained();
+
+ private:
+  std::unique_ptr<FlightClient> client_;
+  std::unique_ptr<FlightServerBase> server_;
+};
+
+#define ARROW_FLIGHT_TEST_DO_PUT(FIXTURE)                                 \
+  static_assert(std::is_base_of<DoPutTest, FIXTURE>::value,               \
+                ARROW_STRINGIFY(FIXTURE) " must inherit from DoPutTest"); \
+  TEST_F(FIXTURE, TestInts) { TestInts(); }                               \
+  TEST_F(FIXTURE, TestFloats) { TestFloats(); }                           \
+  TEST_F(FIXTURE, TestEmptyBatch) { TestEmptyBatch(); }                   \
+  TEST_F(FIXTURE, TestDicts) { TestDicts(); }                             \
+  TEST_F(FIXTURE, TestLargeBatch) { TestLargeBatch(); }                   \
+  TEST_F(FIXTURE, TestSizeLimit) { TestSizeLimit(); }                     \
+  TEST_F(FIXTURE, TestUndrained) { TestUndrained(); }
+
+class ARROW_FLIGHT_EXPORT AppMetadataTestServer : public FlightServerBase {
+ public:
+  virtual ~AppMetadataTestServer() = default;
+
+  Status DoGet(const ServerCallContext& context, const Ticket& request,
+               std::unique_ptr<FlightDataStream>* data_stream) override;
+
+  Status DoPut(const ServerCallContext& context,
+               std::unique_ptr<FlightMessageReader> reader,
+               std::unique_ptr<FlightMetadataWriter> writer) override;
+};
+
+/// \brief Tests of app_metadata in data plane methods.
+class ARROW_FLIGHT_EXPORT AppMetadataTest : public FlightTest {
+ public:
+  void SetUpTest() override;
+  void TearDownTest() override;
+
+  // Test methods
+  void TestDoGet();
+  void TestDoGetDictionaries();
+  void TestDoPut();
+  void TestDoPutDictionaries();
+  void TestDoPutReadMetadata();
+
+ private:
+  std::unique_ptr<FlightClient> client_;
+  std::unique_ptr<FlightServerBase> server_;
+};
+
+#define ARROW_FLIGHT_TEST_APP_METADATA(FIXTURE)                                 \
+  static_assert(std::is_base_of<AppMetadataTest, FIXTURE>::value,               \
+                ARROW_STRINGIFY(FIXTURE) " must inherit from AppMetadataTest"); \
+  TEST_F(FIXTURE, TestDoGet) { TestDoGet(); }                                   \
+  TEST_F(FIXTURE, TestDoGetDictionaries) { TestDoGetDictionaries(); }           \
+  TEST_F(FIXTURE, TestDoPut) { TestDoPut(); }                                   \
+  TEST_F(FIXTURE, TestDoPutDictionaries) { TestDoPutDictionaries(); }           \
+  TEST_F(FIXTURE, TestDoPutReadMetadata) { TestDoPutReadMetadata(); }
+
+/// \brief Tests of IPC options in data plane methods.
+class ARROW_FLIGHT_EXPORT IpcOptionsTest : public FlightTest {
+ public:
+  void SetUpTest() override;
+  void TearDownTest() override;
+
+  // Test methods
+  void TestDoGetReadOptions();
+  void TestDoPutWriteOptions();
+  void TestDoExchangeClientWriteOptions();
+  void TestDoExchangeClientWriteOptionsBegin();
+  void TestDoExchangeServerWriteOptions();
+
+ private:
+  std::unique_ptr<FlightClient> client_;
+  std::unique_ptr<FlightServerBase> server_;
+};
+
+#define ARROW_FLIGHT_TEST_IPC_OPTIONS(FIXTURE)                                 \
+  static_assert(std::is_base_of<IpcOptionsTest, FIXTURE>::value,               \
+                ARROW_STRINGIFY(FIXTURE) " must inherit from IpcOptionsTest"); \
+  TEST_F(FIXTURE, TestDoGetReadOptions) { TestDoGetReadOptions(); }            \
+  TEST_F(FIXTURE, TestDoPutWriteOptions) { TestDoPutWriteOptions(); }          \
+  TEST_F(FIXTURE, TestDoExchangeClientWriteOptions) {                          \
+    TestDoExchangeClientWriteOptions();                                        \
+  }                                                                            \
+  TEST_F(FIXTURE, TestDoExchangeClientWriteOptionsBegin) {                     \
+    TestDoExchangeClientWriteOptionsBegin();                                   \
+  }                                                                            \
+  TEST_F(FIXTURE, TestDoExchangeServerWriteOptions) {                          \
+    TestDoExchangeServerWriteOptions();                                        \
+  }
+
+/// \brief Tests of data plane methods with CUDA memory.
+///
+/// If not built with ARROW_CUDA, tests are no-ops.
+class ARROW_FLIGHT_EXPORT CudaDataTest : public FlightTest {
+ public:
+  void SetUpTest() override;
+  void TearDownTest() override;
+
+  // Test methods
+  void TestDoGet();
+  void TestDoPut();
+  void TestDoExchange();
+
+ private:
+  class Impl;
+  std::unique_ptr<FlightClient> client_;
+  std::unique_ptr<FlightServerBase> server_;
+  std::shared_ptr<Impl> impl_;
+};
+
+#define ARROW_FLIGHT_TEST_CUDA_DATA(FIXTURE)                                 \
+  static_assert(std::is_base_of<CudaDataTest, FIXTURE>::value,               \
+                ARROW_STRINGIFY(FIXTURE) " must inherit from CudaDataTest"); \
+  TEST_F(FIXTURE, TestDoGet) { TestDoGet(); }                                \
+  TEST_F(FIXTURE, TestDoPut) { TestDoPut(); }                                \
+  TEST_F(FIXTURE, TestDoExchange) { TestDoExchange(); }
+
+/// \brief Tests of error handling.
+class ARROW_FLIGHT_EXPORT ErrorHandlingTest : public FlightTest {
+ public:
+  void SetUpTest() override;
+  void TearDownTest() override;
+
+  // Test methods
+  void TestGetFlightInfo();
+  void TestGetFlightInfoMetadata();
+  void TestAsyncGetFlightInfo();
+  void TestDoPut();
+  void TestDoExchange();
+
+ protected:
+  struct Impl;
+
+  std::vector<std::pair<std::string, std::string>> GetHeaders();
+
+  std::shared_ptr<Impl> impl_;
+  std::unique_ptr<FlightClient> client_;
+  std::unique_ptr<FlightServerBase> server_;
+};
+
+#define ARROW_FLIGHT_TEST_ERROR_HANDLING(FIXTURE)                                 \
+  static_assert(std::is_base_of<ErrorHandlingTest, FIXTURE>::value,               \
+                ARROW_STRINGIFY(FIXTURE) " must inherit from ErrorHandlingTest"); \
+  TEST_F(FIXTURE, TestAsyncGetFlightInfo) { TestAsyncGetFlightInfo(); }           \
+  TEST_F(FIXTURE, TestGetFlightInfo) { TestGetFlightInfo(); }                     \
+  TEST_F(FIXTURE, TestGetFlightInfoMetadata) { TestGetFlightInfoMetadata(); }     \
+  TEST_F(FIXTURE, TestDoPut) { TestDoPut(); }                                     \
+  TEST_F(FIXTURE, TestDoExchange) { TestDoExchange(); }
+
+/// \brief Tests of the async client.
+class ARROW_FLIGHT_EXPORT AsyncClientTest : public FlightTest {
+ public:
+  void SetUpTest() override;
+  void TearDownTest() override;
+
+  // Test methods
+  void TestGetFlightInfo();
+  void TestGetFlightInfoFuture();
+  void TestListenerLifetime();
+
+ private:
+  std::unique_ptr<FlightClient> client_;
+  std::unique_ptr<FlightServerBase> server_;
+};
+
+#define ARROW_FLIGHT_TEST_ASYNC_CLIENT(FIXTURE)                                 \
+  static_assert(std::is_base_of<AsyncClientTest, FIXTURE>::value,               \
+                ARROW_STRINGIFY(FIXTURE) " must inherit from AsyncClientTest"); \
+  TEST_F(FIXTURE, TestGetFlightInfo) { TestGetFlightInfo(); }                   \
+  TEST_F(FIXTURE, TestGetFlightInfoFuture) { TestGetFlightInfoFuture(); }       \
+  TEST_F(FIXTURE, TestListenerLifetime) { TestListenerLifetime(); }
+
+}  // namespace flight
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport.h
new file mode 100644
index 0000000000000000000000000000000000000000..4029aa5223debf714c9ab67e2127529470c26314
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/transport.h
@@ -0,0 +1,302 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+/// \file
+/// Internal (but not private) interface for implementing
+/// alternate network transports in Flight.
+///
+/// \warning EXPERIMENTAL. Subject to change.
+///
+/// To implement a transport, implement ServerTransport and
+/// ClientTransport, and register the desired URI schemes with
+/// TransportRegistry. Flight takes care of most of the per-RPC
+/// details; transports only handle connections and providing an I/O
+/// stream implementation (TransportDataStream).
+///
+/// On the server side:
+///
+/// 1. Applications subclass FlightServerBase and override RPC handlers.
+/// 2. FlightServerBase::Init will look up and create a ServerTransport
+///    based on the scheme of the Location given to it.
+/// 3. The ServerTransport will start the actual server. (For instance,
+///    for gRPC, it creates a gRPC server and registers a gRPC service.)
+///    That server will handle connections.
+/// 4. The transport should forward incoming calls to the server to the RPC
+///    handlers defined on ServerTransport, which implements the actual
+///    RPC handler using the interfaces here. Any I/O the RPC handler needs
+///    to do is managed by transport-specific implementations of
+///    TransportDataStream.
+/// 5. ServerTransport calls FlightServerBase for the actual application
+///    logic.
+///
+/// On the client side:
+///
+/// 1. Applications create a FlightClient with a Location.
+/// 2. FlightClient will look up and create a ClientTransport based on
+///    the scheme of the Location given to it.
+/// 3. When calling a method on FlightClient, FlightClient will delegate to
+///    the ClientTransport. There is some indirection, e.g. for DoGet,
+///    FlightClient only requests that the ClientTransport start the
+///    call and provide it with an I/O stream. The "Flight implementation"
+///    itself still lives in FlightClient.
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <optional>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "arrow/flight/type_fwd.h"
+#include "arrow/flight/types.h"
+#include "arrow/flight/visibility.h"
+#include "arrow/ipc/options.h"
+#include "arrow/type_fwd.h"
+
+namespace arrow {
+namespace ipc {
+class Message;
+}
+namespace flight {
+class FlightStatusDetail;
+namespace internal {
+
+/// Internal, not user-visible type used for memory-efficient reads
+struct FlightData {
+  /// Used only for puts, may be null
+  std::unique_ptr<FlightDescriptor> descriptor;
+
+  /// Non-length-prefixed Message header as described in format/Message.fbs
+  std::shared_ptr<Buffer> metadata;
+
+  /// Application-defined metadata
+  std::shared_ptr<Buffer> app_metadata;
+
+  /// Message body
+  std::shared_ptr<Buffer> body;
+
+  /// Open IPC message from the metadata and body
+  ::arrow::Result<std::unique_ptr<ipc::Message>> OpenMessage();
+};
+
+/// \brief A transport-specific interface for reading/writing Arrow data.
+///
+/// New transports will implement this to read/write IPC payloads to
+/// the underlying stream.
+class ARROW_FLIGHT_EXPORT TransportDataStream {
+ public:
+  virtual ~TransportDataStream() = default;
+  /// \brief Attempt to read the next FlightData message.
+  ///
+  /// \return success true if data was populated, false if there was
+  ///   an error. For clients, the error can be retrieved from
+  ///   Finish(Status).
+  virtual bool ReadData(FlightData* data);
+  /// \brief Attempt to write a FlightPayload.
+  ///
+  /// \param[in] payload The data to write.
+  /// \return true if the message was accepted by the transport, false
+  ///   if not (e.g. due to client/server disconnect), Status if there
+  ///   was an error (e.g. with the payload itself).
+  virtual arrow::Result<bool> WriteData(const FlightPayload& payload);
+  /// \brief Indicate that there are no more writes on this stream.
+  ///
+  /// This is only a hint for the underlying transport and may not
+  /// actually do anything.
+  virtual Status WritesDone();
+};
+
+/// \brief A transport-specific interface for reading/writing Arrow
+/// data for a client.
+class ARROW_FLIGHT_EXPORT ClientDataStream : public TransportDataStream {
+ public:
+  /// \brief Attempt to read a non-data message.
+  ///
+  /// Only implemented for DoPut; mutually exclusive with
+  /// ReadData(FlightData*).
+  virtual bool ReadPutMetadata(std::shared_ptr<Buffer>* out);
+  /// \brief Attempt to cancel the call.
+  ///
+  /// This is only a hint and may not take effect immediately. The
+  /// client should still finish the call with Finish(Status) as usual.
+  virtual void TryCancel() {}
+  /// \brief Finish the call, reporting the server-sent status and/or
+  /// any client-side errors as appropriate.
+  ///
+  /// Implies WritesDone() and DoFinish().
+  ///
+  /// \param[in] st A client-side status to combine with the
+  ///   server-side error. That is, if an error occurs on the
+  ///   client-side, call Finish(Status) to finish the server-side
+  ///   call, get the server-side status, and merge the statuses
+  ///   together so context is not lost.
+  Status Finish(Status st);
+
+ protected:
+  /// \brief End the call, returning the final server status.
+  ///
+  /// For implementors: should imply WritesDone() (even if it does not
+  /// directly call it).
+  virtual Status DoFinish() = 0;
+};
+
+/// An implementation of a Flight client for a particular transport.
+///
+/// Transports should override the methods they are capable of
+/// supporting. The default method implementations return an error.
+class ARROW_FLIGHT_EXPORT ClientTransport {
+ public:
+  virtual ~ClientTransport() = default;
+
+  /// Initialize the client.
+  virtual Status Init(const FlightClientOptions& options, const Location& location,
+                      const arrow::util::Uri& uri) = 0;
+  /// Close the client. Once this returns, the client is no longer usable.
+  virtual Status Close() = 0;
+
+  virtual Status Authenticate(const FlightCallOptions& options,
+                              std::unique_ptr<ClientAuthHandler> auth_handler);
+  virtual arrow::Result<std::pair<std::string, std::string>> AuthenticateBasicToken(
+      const FlightCallOptions& options, const std::string& username,
+      const std::string& password);
+  virtual Status DoAction(const FlightCallOptions& options, const Action& action,
+                          std::unique_ptr<ResultStream>* results);
+  virtual Status ListActions(const FlightCallOptions& options,
+                             std::vector<ActionType>* actions);
+  virtual Status GetFlightInfo(const FlightCallOptions& options,
+                               const FlightDescriptor& descriptor,
+                               std::unique_ptr<FlightInfo>* info);
+  virtual void GetFlightInfoAsync(const FlightCallOptions& options,
+                                  const FlightDescriptor& descriptor,
+                                  std::shared_ptr<AsyncListener<FlightInfo>> listener);
+  virtual Status PollFlightInfo(const FlightCallOptions& options,
+                                const FlightDescriptor& descriptor,
+                                std::unique_ptr<PollInfo>* info);
+  virtual arrow::Result<std::unique_ptr<SchemaResult>> GetSchema(
+      const FlightCallOptions& options, const FlightDescriptor& descriptor);
+  virtual Status ListFlights(const FlightCallOptions& options, const Criteria& criteria,
+                             std::unique_ptr<FlightListing>* listing);
+  virtual Status DoGet(const FlightCallOptions& options, const Ticket& ticket,
+                       std::unique_ptr<ClientDataStream>* stream);
+  virtual Status DoPut(const FlightCallOptions& options,
+                       std::unique_ptr<ClientDataStream>* stream);
+  virtual Status DoExchange(const FlightCallOptions& options,
+                            std::unique_ptr<ClientDataStream>* stream);
+
+  bool supports_async() const { return CheckAsyncSupport().ok(); }
+  virtual Status CheckAsyncSupport() const {
+    return Status::NotImplemented(
+        "this Flight transport does not support async operations");
+  }
+
+  static void SetAsyncRpc(AsyncListenerBase* listener, std::unique_ptr<AsyncRpc>&& rpc);
+  static AsyncRpc* GetAsyncRpc(AsyncListenerBase* listener);
+  static std::unique_ptr<AsyncRpc> ReleaseAsyncRpc(AsyncListenerBase* listener);
+};
+
+/// A registry of transport implementations.
+class ARROW_FLIGHT_EXPORT TransportRegistry {
+ public:
+  using ClientFactory = std::function<arrow::Result<std::unique_ptr<ClientTransport>>()>;
+  using ServerFactory = std::function<arrow::Result<std::unique_ptr<ServerTransport>>(
+      FlightServerBase*, std::shared_ptr<MemoryManager> memory_manager)>;
+
+  TransportRegistry();
+  ~TransportRegistry();
+
+  arrow::Result<std::unique_ptr<ClientTransport>> MakeClient(
+      const std::string& scheme) const;
+  arrow::Result<std::unique_ptr<ServerTransport>> MakeServer(
+      const std::string& scheme, FlightServerBase* base,
+      std::shared_ptr<MemoryManager> memory_manager) const;
+
+  Status RegisterClient(const std::string& scheme, ClientFactory factory);
+  Status RegisterServer(const std::string& scheme, ServerFactory factory);
+
+ private:
+  class Impl;
+  std::unique_ptr<Impl> impl_;
+};
+
+/// \brief Get the registry of transport implementations.
+ARROW_FLIGHT_EXPORT
+TransportRegistry* GetDefaultTransportRegistry();
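To make the registry's role concrete, a sketch of registering a client-side transport, assuming a hypothetical "demo" scheme and `DemoClientTransport` class; only the pure-virtual methods are overridden, so every RPC falls back to the base class's not-implemented defaults:

#include <memory>

#include "arrow/flight/transport.h"

namespace {
// Hypothetical transport: only the pure-virtual methods are overridden.
class DemoClientTransport : public arrow::flight::internal::ClientTransport {
 public:
  arrow::Status Init(const arrow::flight::FlightClientOptions&,
                     const arrow::flight::Location&,
                     const arrow::util::Uri&) override {
    return arrow::Status::OK();  // a real transport would open a connection here
  }
  arrow::Status Close() override { return arrow::Status::OK(); }
};
}  // namespace

arrow::Status RegisterDemoTransport() {
  auto* registry = arrow::flight::internal::GetDefaultTransportRegistry();
  // Afterwards, locations such as "demo://host:1234" resolve to this transport.
  return registry->RegisterClient("demo", [] {
    return arrow::Result<std::unique_ptr<arrow::flight::internal::ClientTransport>>(
        std::make_unique<DemoClientTransport>());
  });
}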
+
+//------------------------------------------------------------
+// Async APIs
+
+/// \brief Transport-specific state for an async RPC.
+///
+/// Transport implementations may subclass this to store their own
+/// state, and stash an instance in a user-supplied AsyncListener via
+/// ClientTransport::GetAsyncRpc and ClientTransport::SetAsyncRpc.
+///
+/// This API is EXPERIMENTAL.
+class ARROW_FLIGHT_EXPORT AsyncRpc {
+ public:
+  virtual ~AsyncRpc() = default;
+  /// \brief Request cancellation of the RPC.
+  virtual void TryCancel() {}
+
+  /// Only needed for DoPut/DoExchange
+  virtual void Begin(const FlightDescriptor& descriptor,
+                     std::shared_ptr<Schema> schema) {}
+  /// Only needed for DoPut/DoExchange
+  virtual void Write(arrow::flight::FlightStreamChunk chunk) {}
+  /// Only needed for DoPut/DoExchange
+  virtual void DoneWriting() {}
+};
+
+//------------------------------------------------------------
+// Error propagation helpers
+
+/// \brief Abstract error status.
+///
+/// Transport implementations may use side channels (e.g. HTTP
+/// trailers) to convey additional information to reconstruct the
+/// original C++ status for implementations that can use it.
+struct ARROW_FLIGHT_EXPORT TransportStatus {
+  TransportStatusCode code;
+  std::string message;
+
+  /// \brief Convert a C++ status to an abstract transport status.
+  static TransportStatus FromStatus(const Status& arrow_status);
+
+  /// \brief Reconstruct a string-encoded TransportStatus.
+  static TransportStatus FromCodeStringAndMessage(const std::string& code_str,
+                                                  std::string message);
+
+  /// \brief Convert an abstract transport status to a C++ status.
+  Status ToStatus() const;
+};
+
+/// \brief Convert the string representation of an Arrow status code
+/// back to an Arrow status.
+ARROW_FLIGHT_EXPORT
+Status ReconstructStatus(const std::string& code_str, const Status& current_status,
+                         std::optional<std::string> message,
+                         std::optional<std::string> detail_message,
+                         std::optional<std::string> detail_bin,
+                         std::shared_ptr<FlightStatusDetail> detail);
+
+}  // namespace internal
+}  // namespace flight
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3df8377b8ffdec6b2c8aeea4710cc540ad3279d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types.h
@@ -0,0 +1,1147 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Data structure for Flight RPC.
API should be considered experimental for now + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/flight/type_fwd.h" +#include "arrow/flight/visibility.h" +#include "arrow/ipc/options.h" +#include "arrow/ipc/writer.h" +#include "arrow/result.h" +#include "arrow/status.h" + +namespace arrow { + +class Buffer; +class RecordBatch; +class Schema; +class Status; +class Table; + +namespace ipc { + +class DictionaryMemo; + +} // namespace ipc + +namespace util { + +class Uri; + +} // namespace util + +namespace flight { + +/// \brief A timestamp compatible with Protocol Buffer's +/// google.protobuf.Timestamp: +/// +/// https://protobuf.dev/reference/protobuf/google.protobuf/#timestamp +/// +/// > A Timestamp represents a point in time independent of any time +/// > zone or calendar, represented as seconds and fractions of +/// > seconds at nanosecond resolution in UTC Epoch time. It is +/// > encoded using the Proleptic Gregorian Calendar which extends the +/// > Gregorian calendar backwards to year one. It is encoded assuming +/// > all minutes are 60 seconds long, i.e. leap seconds are "smeared" +/// > so that no leap second table is needed for interpretation. Range +/// > is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +using Timestamp = std::chrono::system_clock::time_point; + +/// \brief A Flight-specific status code. Used to encode some +/// additional status codes into an Arrow Status. +enum class FlightStatusCode : int8_t { + /// An implementation error has occurred. + Internal, + /// A request timed out. + TimedOut, + /// A request was cancelled. + Cancelled, + /// We are not authenticated to the remote service. + Unauthenticated, + /// We do not have permission to make this request. + Unauthorized, + /// The remote service cannot handle this request at the moment. + Unavailable, + /// A request failed for some other reason + Failed +}; + +// Silence warning +// "non dll-interface class RecordBatchReader used as base for dll-interface class" +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4275) +#endif + +/// \brief Flight-specific error information in a Status. +class ARROW_FLIGHT_EXPORT FlightStatusDetail : public arrow::StatusDetail { + public: + explicit FlightStatusDetail(FlightStatusCode code) : code_{code} {} + explicit FlightStatusDetail(FlightStatusCode code, std::string extra_info) + : code_{code}, extra_info_(std::move(extra_info)) {} + const char* type_id() const override; + std::string ToString() const override; + + /// \brief Get the Flight status code. + FlightStatusCode code() const; + /// \brief Get the extra error info + std::string extra_info() const; + /// \brief Get the human-readable name of the status code. + std::string CodeAsString() const; + /// \brief Set the extra error info + void set_extra_info(std::string extra_info); + + /// \brief Try to extract a \a FlightStatusDetail from any Arrow + /// status. + /// + /// \return a \a FlightStatusDetail if it could be unwrapped, \a + /// nullptr otherwise + static std::shared_ptr UnwrapStatus(const arrow::Status& status); + + private: + FlightStatusCode code_; + std::string extra_info_; +}; + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +/// \brief Make an appropriate Arrow status for the given +/// Flight-specific status. +/// +/// \param code The Flight status code. +/// \param message The message for the error. 
+/// \param extra_info Optional extra binary info for the error (eg protobuf) +ARROW_FLIGHT_EXPORT +Status MakeFlightError(FlightStatusCode code, std::string message, + std::string extra_info = {}); + +/// \brief Headers sent from the client or server. +/// +/// Header values are ordered. +using CallHeaders = std::multimap; + +/// \brief A TLS certificate plus key. +struct ARROW_FLIGHT_EXPORT CertKeyPair { + /// \brief The certificate in PEM format. + std::string pem_cert; + + /// \brief The key in PEM format. + std::string pem_key; +}; + +/// \brief A type of action that can be performed with the DoAction RPC. +struct ARROW_FLIGHT_EXPORT ActionType { + /// \brief The name of the action. + std::string type; + + /// \brief A human-readable description of the action. + std::string description; + + std::string ToString() const; + bool Equals(const ActionType& other) const; + + friend bool operator==(const ActionType& left, const ActionType& right) { + return left.Equals(right); + } + friend bool operator!=(const ActionType& left, const ActionType& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); + + static const ActionType kCancelFlightInfo; + static const ActionType kRenewFlightEndpoint; + static const ActionType kSetSessionOptions; + static const ActionType kGetSessionOptions; + static const ActionType kCloseSession; +}; + +/// \brief Opaque selection criteria for ListFlights RPC +struct ARROW_FLIGHT_EXPORT Criteria { + /// Opaque criteria expression, dependent on server implementation + std::string expression; + + std::string ToString() const; + bool Equals(const Criteria& other) const; + + friend bool operator==(const Criteria& left, const Criteria& right) { + return left.Equals(right); + } + friend bool operator!=(const Criteria& left, const Criteria& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +/// \brief An action to perform with the DoAction RPC +struct ARROW_FLIGHT_EXPORT Action { + /// The action type + std::string type; + + /// The action content as a Buffer + std::shared_ptr body; + + std::string ToString() const; + bool Equals(const Action& other) const; + + friend bool operator==(const Action& left, const Action& right) { + return left.Equals(right); + } + friend bool operator!=(const Action& left, const Action& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +/// \brief Opaque result returned after executing an action +struct ARROW_FLIGHT_EXPORT Result { + std::shared_ptr body; + + std::string ToString() const; + bool Equals(const Result& other) const; + + friend bool operator==(const Result& left, const Result& right) { + return left.Equals(right); + } + friend bool operator!=(const Result& left, const Result& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. 
+ arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +enum class CancelStatus { + /// The cancellation status is unknown. Servers should avoid using + /// this value (send a kNotCancellable if the requested FlightInfo + /// is not known). Clients can retry the request. + kUnspecified = 0, + /// The cancellation request is complete. Subsequent requests with + /// the same payload may return kCancelled or a kNotCancellable error. + kCancelled = 1, + /// The cancellation request is in progress. The client may retry + /// the cancellation request. + kCancelling = 2, + // The FlightInfo is not cancellable. The client should not retry the + // cancellation request. + kNotCancellable = 3, +}; + +/// \brief The result of the CancelFlightInfo action. +struct ARROW_FLIGHT_EXPORT CancelFlightInfoResult { + CancelStatus status; + + std::string ToString() const; + bool Equals(const CancelFlightInfoResult& other) const; + + friend bool operator==(const CancelFlightInfoResult& left, + const CancelFlightInfoResult& right) { + return left.Equals(right); + } + friend bool operator!=(const CancelFlightInfoResult& left, + const CancelFlightInfoResult& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +ARROW_FLIGHT_EXPORT +std::ostream& operator<<(std::ostream& os, CancelStatus status); + +/// \brief message for simple auth +struct ARROW_FLIGHT_EXPORT BasicAuth { + std::string username; + std::string password; + + std::string ToString() const; + bool Equals(const BasicAuth& other) const; + + friend bool operator==(const BasicAuth& left, const BasicAuth& right) { + return left.Equals(right); + } + friend bool operator!=(const BasicAuth& left, const BasicAuth& right) { + return !(left == right); + } + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; +}; + +/// \brief A request to retrieve or generate a dataset +struct ARROW_FLIGHT_EXPORT FlightDescriptor { + enum DescriptorType { + UNKNOWN = 0, /// Unused + PATH = 1, /// Named path identifying a dataset + CMD = 2 /// Opaque command to generate a dataset + }; + + /// The descriptor type + DescriptorType type; + + /// Opaque value used to express a command. Should only be defined when type + /// is CMD + std::string cmd; + + /// List of strings identifying a particular dataset. Should only be defined + /// when type is PATH + std::vector path; + + bool Equals(const FlightDescriptor& other) const; + + /// \brief Get a human-readable form of this descriptor. + std::string ToString() const; + + /// \brief Get the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. + arrow::Result SerializeToString() const; + + /// \brief Parse the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. 
+ static arrow::Result Deserialize(std::string_view serialized); + + // Convenience factory functions + + static FlightDescriptor Command(const std::string& c) { + return FlightDescriptor{CMD, c, {}}; + } + + static FlightDescriptor Path(const std::vector& p) { + return FlightDescriptor{PATH, "", p}; + } + + friend bool operator==(const FlightDescriptor& left, const FlightDescriptor& right) { + return left.Equals(right); + } + friend bool operator!=(const FlightDescriptor& left, const FlightDescriptor& right) { + return !(left == right); + } +}; + +/// \brief Data structure providing an opaque identifier or credential to use +/// when requesting a data stream with the DoGet RPC +struct ARROW_FLIGHT_EXPORT Ticket { + std::string ticket; + + std::string ToString() const; + bool Equals(const Ticket& other) const; + + friend bool operator==(const Ticket& left, const Ticket& right) { + return left.Equals(right); + } + friend bool operator!=(const Ticket& left, const Ticket& right) { + return !(left == right); + } + + /// \brief Get the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. + arrow::Result SerializeToString() const; + + /// \brief Parse the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. + static arrow::Result Deserialize(std::string_view serialized); +}; + +class FlightClient; +class FlightServerBase; + +ARROW_FLIGHT_EXPORT +extern const char* kSchemeGrpc; +ARROW_FLIGHT_EXPORT +extern const char* kSchemeGrpcTcp; +ARROW_FLIGHT_EXPORT +extern const char* kSchemeGrpcUnix; +ARROW_FLIGHT_EXPORT +extern const char* kSchemeGrpcTls; + +/// \brief A host location (a URI) +struct ARROW_FLIGHT_EXPORT Location { + public: + /// \brief Initialize a blank location. + Location(); + + /// \brief Initialize a location by parsing a URI string + static arrow::Result Parse(const std::string& uri_string); + + /// \brief Get the fallback URI. + /// + /// arrow-flight-reuse-connection://? means that a client may attempt to + /// reuse an existing connection to a Flight service to fetch data instead + /// of creating a new connection to one of the other locations listed in a + /// FlightEndpoint response. + static const Location& ReuseConnection(); + + /// \brief Initialize a location for a non-TLS, gRPC-based Flight + /// service from a host and port + /// \param[in] host The hostname to connect to + /// \param[in] port The port + /// \return Arrow result with the resulting location + static arrow::Result ForGrpcTcp(const std::string& host, const int port); + + /// \brief Initialize a location for a TLS-enabled, gRPC-based Flight + /// service from a host and port + /// \param[in] host The hostname to connect to + /// \param[in] port The port + /// \return Arrow result with the resulting location + static arrow::Result ForGrpcTls(const std::string& host, const int port); + + /// \brief Initialize a location for a domain socket-based Flight + /// service + /// \param[in] path The path to the domain socket + /// \return Arrow result with the resulting location + static arrow::Result ForGrpcUnix(const std::string& path); + + /// \brief Initialize a location based on a URI scheme + static arrow::Result ForScheme(const std::string& scheme, + const std::string& host, const int port); + + /// \brief Get a representation of this URI as a string. 
+ std::string ToString() const; + + /// \brief Get the scheme of this URI. + std::string scheme() const; + + bool Equals(const Location& other) const; + + friend bool operator==(const Location& left, const Location& right) { + return left.Equals(right); + } + friend bool operator!=(const Location& left, const Location& right) { + return !(left == right); + } + + private: + friend class FlightClient; + friend class FlightServerBase; + std::shared_ptr uri_; +}; + +/// \brief A flight ticket and list of locations where the ticket can be +/// redeemed +struct ARROW_FLIGHT_EXPORT FlightEndpoint { + /// Opaque ticket identify; use with DoGet RPC + Ticket ticket; + + /// List of locations where ticket can be redeemed. If the list is empty, the + /// ticket can only be redeemed on the current service where the ticket was + /// generated + std::vector locations; + + /// Expiration time of this stream. If present, clients may assume + /// they can retry DoGet requests. Otherwise, clients should avoid + /// retrying DoGet requests. + std::optional expiration_time; + + /// Opaque Application-defined metadata + std::string app_metadata; + + std::string ToString() const; + bool Equals(const FlightEndpoint& other) const; + + friend bool operator==(const FlightEndpoint& left, const FlightEndpoint& right) { + return left.Equals(right); + } + friend bool operator!=(const FlightEndpoint& left, const FlightEndpoint& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +/// \brief The request of the RenewFlightEndpoint action. +struct ARROW_FLIGHT_EXPORT RenewFlightEndpointRequest { + FlightEndpoint endpoint; + + std::string ToString() const; + bool Equals(const RenewFlightEndpointRequest& other) const; + + friend bool operator==(const RenewFlightEndpointRequest& left, + const RenewFlightEndpointRequest& right) { + return left.Equals(right); + } + friend bool operator!=(const RenewFlightEndpointRequest& left, + const RenewFlightEndpointRequest& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize( + std::string_view serialized); +}; + +/// \brief Staging data structure for messages about to be put on the wire +/// +/// This structure corresponds to FlightData in the protocol. +struct ARROW_FLIGHT_EXPORT FlightPayload { + std::shared_ptr descriptor; + std::shared_ptr app_metadata; + ipc::IpcPayload ipc_message; + + /// \brief Check that the payload can be written to the wire. + Status Validate() const; +}; + +/// \brief Schema result returned after a schema request RPC +struct ARROW_FLIGHT_EXPORT SchemaResult { + public: + SchemaResult() = default; + explicit SchemaResult(std::string schema) : raw_schema_(std::move(schema)) {} + + /// \brief Factory method to construct a SchemaResult. 
+ static arrow::Result> Make(const Schema& schema); + + /// \brief return schema + /// \param[in,out] dictionary_memo for dictionary bookkeeping, will + /// be modified + /// \return Arrow result with the reconstructed Schema + arrow::Result> GetSchema( + ipc::DictionaryMemo* dictionary_memo) const; + + const std::string& serialized_schema() const { return raw_schema_; } + + std::string ToString() const; + bool Equals(const SchemaResult& other) const; + + friend bool operator==(const SchemaResult& left, const SchemaResult& right) { + return left.Equals(right); + } + friend bool operator!=(const SchemaResult& left, const SchemaResult& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); + + private: + std::string raw_schema_; +}; + +/// \brief The access coordinates for retrieval of a dataset, returned by +/// GetFlightInfo +class ARROW_FLIGHT_EXPORT FlightInfo { + public: + struct Data { + std::string schema; + FlightDescriptor descriptor; + std::vector endpoints; + int64_t total_records = -1; + int64_t total_bytes = -1; + bool ordered = false; + std::string app_metadata; + }; + + explicit FlightInfo(Data data) : data_(std::move(data)), reconstructed_schema_(false) {} + + /// \brief Factory method to construct a FlightInfo. + static arrow::Result Make(const Schema& schema, + const FlightDescriptor& descriptor, + const std::vector& endpoints, + int64_t total_records, int64_t total_bytes, + bool ordered = false, + std::string app_metadata = ""); + + /// \brief Deserialize the Arrow schema of the dataset. Populate any + /// dictionary encoded fields into a DictionaryMemo for + /// bookkeeping + /// \param[in,out] dictionary_memo for dictionary bookkeeping, will + /// be modified + /// \return Arrow result with the reconstructed Schema + arrow::Result> GetSchema( + ipc::DictionaryMemo* dictionary_memo) const; + + const std::string& serialized_schema() const { return data_.schema; } + + /// The descriptor associated with this flight, may not be set + const FlightDescriptor& descriptor() const { return data_.descriptor; } + + /// A list of endpoints associated with the flight (dataset). To consume the + /// whole flight, all endpoints must be consumed + const std::vector& endpoints() const { return data_.endpoints; } + + /// The total number of records (rows) in the dataset. If unknown, set to -1 + int64_t total_records() const { return data_.total_records; } + + /// The total number of bytes in the dataset. If unknown, set to -1 + int64_t total_bytes() const { return data_.total_bytes; } + + /// Whether endpoints are in the same order as the data. + bool ordered() const { return data_.ordered; } + + /// Application-defined opaque metadata + const std::string& app_metadata() const { return data_.app_metadata; } + + /// \brief Get the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. + arrow::Result SerializeToString() const; + + /// \brief Parse the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. 
+ static arrow::Result> Deserialize( + std::string_view serialized); + + std::string ToString() const; + + /// Compare two FlightInfo for equality. This will compare the + /// serialized schema representations, NOT the logical equality of + /// the schemas. + bool Equals(const FlightInfo& other) const; + + friend bool operator==(const FlightInfo& left, const FlightInfo& right) { + return left.Equals(right); + } + friend bool operator!=(const FlightInfo& left, const FlightInfo& right) { + return !(left == right); + } + + private: + Data data_; + mutable std::shared_ptr schema_; + mutable bool reconstructed_schema_; +}; + +/// \brief The information to process a long-running query. +class ARROW_FLIGHT_EXPORT PollInfo { + public: + /// The currently available results so far. + std::unique_ptr info = NULLPTR; + /// The descriptor the client should use on the next try. If unset, + /// the query is complete. + std::optional descriptor = std::nullopt; + /// Query progress. Must be in [0.0, 1.0] but need not be + /// monotonic or nondecreasing. If unknown, do not set. + std::optional progress = std::nullopt; + /// Expiration time for this request. After this passes, the server + /// might not accept the poll descriptor anymore (and the query may + /// be cancelled). This may be updated on a call to PollFlightInfo. + std::optional expiration_time = std::nullopt; + + PollInfo() + : info(NULLPTR), + descriptor(std::nullopt), + progress(std::nullopt), + expiration_time(std::nullopt) {} + + explicit PollInfo(std::unique_ptr info, + std::optional descriptor, + std::optional progress, + std::optional expiration_time) + : info(std::move(info)), + descriptor(std::move(descriptor)), + progress(progress), + expiration_time(expiration_time) {} + + // Must not be explicit; to declare one we must declare all ("rule of five") + PollInfo(const PollInfo& other) // NOLINT(runtime/explicit) + : info(other.info ? std::make_unique(*other.info) : NULLPTR), + descriptor(other.descriptor), + progress(other.progress), + expiration_time(other.expiration_time) {} + PollInfo(PollInfo&& other) noexcept = default; // NOLINT(runtime/explicit) + ~PollInfo() = default; + PollInfo& operator=(const PollInfo& other) { + info = other.info ? std::make_unique(*other.info) : NULLPTR; + descriptor = other.descriptor; + progress = other.progress; + expiration_time = other.expiration_time; + return *this; + } + PollInfo& operator=(PollInfo&& other) = default; + + /// \brief Get the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. + arrow::Result SerializeToString() const; + + /// \brief Parse the wire-format representation of this type. + /// + /// Useful when interoperating with non-Flight systems (e.g. REST + /// services) that may want to return Flight types. + static arrow::Result> Deserialize( + std::string_view serialized); + + std::string ToString() const; + + /// Compare two PollInfo for equality. This will compare the + /// serialized schema representations, NOT the logical equality of + /// the schemas. + bool Equals(const PollInfo& other) const; + + friend bool operator==(const PollInfo& left, const PollInfo& right) { + return left.Equals(right); + } + friend bool operator!=(const PollInfo& left, const PollInfo& right) { + return !(left == right); + } +}; + +/// \brief The request of the CancelFlightInfoRequest action. 
+struct ARROW_FLIGHT_EXPORT CancelFlightInfoRequest {
+  std::unique_ptr<FlightInfo> info;
+
+  std::string ToString() const;
+  bool Equals(const CancelFlightInfoRequest& other) const;
+
+  friend bool operator==(const CancelFlightInfoRequest& left,
+                         const CancelFlightInfoRequest& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const CancelFlightInfoRequest& left,
+                         const CancelFlightInfoRequest& right) {
+    return !(left == right);
+  }
+
+  /// \brief Serialize this message to its wire-format representation.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Deserialize this message from its wire-format representation.
+  static arrow::Result<CancelFlightInfoRequest> Deserialize(std::string_view serialized);
+};
+
+/// \brief Variant supporting all possible value types for {Set,Get}SessionOptions
+///
+/// By convention, an attempt to set a valueless (std::monostate) SessionOptionValue
+/// should attempt to unset or clear the named option value on the server.
+using SessionOptionValue = std::variant<std::monostate, std::string, bool, int64_t,
+                                        double, std::vector<std::string>>;
+
+/// \brief The result of setting a session option.
+enum class SetSessionOptionErrorValue : int8_t {
+  /// \brief The status of setting the option is unknown.
+  ///
+  /// Servers should avoid using this value (send a NOT_FOUND error if the requested
+  /// session is not known). Clients can retry the request.
+  kUnspecified,
+  /// \brief The given session option name is invalid.
+  kInvalidName,
+  /// \brief The session option value or type is invalid.
+  kInvalidValue,
+  /// \brief The session option cannot be set.
+  kError
+};
+std::string ToString(const SetSessionOptionErrorValue& error_value);
+std::ostream& operator<<(std::ostream& os,
+                         const SetSessionOptionErrorValue& error_value);
+
+/// \brief The result of closing a session.
+enum class CloseSessionStatus : int8_t {
+  // \brief The session close status is unknown.
+  //
+  // Servers should avoid using this value (send a NOT_FOUND error if the requested
+  // session is not known). Clients can retry the request.
+  kUnspecified,
+  // \brief The session close request is complete.
+  //
+  // Subsequent requests with the same session produce a NOT_FOUND error.
+  kClosed,
+  // \brief The session close request is in progress.
+  //
+  // The client may retry the request.
+  kClosing,
+  // \brief The session is not closeable.
+  //
+  // The client should not retry the request.
+  kNotClosable
+};
+std::string ToString(const CloseSessionStatus& status);
+std::ostream& operator<<(std::ostream& os, const CloseSessionStatus& status);
+
+/// \brief A request to set a set of session options by name/value.
+struct ARROW_FLIGHT_EXPORT SetSessionOptionsRequest {
+  std::map<std::string, SessionOptionValue> session_options;
+
+  std::string ToString() const;
+  bool Equals(const SetSessionOptionsRequest& other) const;
+
+  friend bool operator==(const SetSessionOptionsRequest& left,
+                         const SetSessionOptionsRequest& right) {
+    return left.Equals(right);
+  }
+  friend bool operator!=(const SetSessionOptionsRequest& left,
+                         const SetSessionOptionsRequest& right) {
+    return !(left == right);
+  }
+
+  /// \brief Serialize this message to its wire-format representation.
+  arrow::Result<std::string> SerializeToString() const;
+
+  /// \brief Deserialize this message from its wire-format representation.
+  static arrow::Result<SetSessionOptionsRequest> Deserialize(std::string_view serialized);
+};
+
+/// \brief The result(s) of setting session option(s).
+struct ARROW_FLIGHT_EXPORT SetSessionOptionsResult { + struct Error { + SetSessionOptionErrorValue value; + + bool Equals(const Error& other) const { return value == other.value; } + friend bool operator==(const Error& left, const Error& right) { + return left.Equals(right); + } + friend bool operator!=(const Error& left, const Error& right) { + return !(left == right); + } + }; + + std::map errors; + + std::string ToString() const; + bool Equals(const SetSessionOptionsResult& other) const; + + friend bool operator==(const SetSessionOptionsResult& left, + const SetSessionOptionsResult& right) { + return left.Equals(right); + } + friend bool operator!=(const SetSessionOptionsResult& left, + const SetSessionOptionsResult& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +/// \brief A request to get current session options. +struct ARROW_FLIGHT_EXPORT GetSessionOptionsRequest { + std::string ToString() const; + bool Equals(const GetSessionOptionsRequest& other) const; + + friend bool operator==(const GetSessionOptionsRequest& left, + const GetSessionOptionsRequest& right) { + return left.Equals(right); + } + friend bool operator!=(const GetSessionOptionsRequest& left, + const GetSessionOptionsRequest& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +/// \brief The current session options. +struct ARROW_FLIGHT_EXPORT GetSessionOptionsResult { + std::map session_options; + + std::string ToString() const; + bool Equals(const GetSessionOptionsResult& other) const; + + friend bool operator==(const GetSessionOptionsResult& left, + const GetSessionOptionsResult& right) { + return left.Equals(right); + } + friend bool operator!=(const GetSessionOptionsResult& left, + const GetSessionOptionsResult& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +/// \brief A request to close the open client session. +struct ARROW_FLIGHT_EXPORT CloseSessionRequest { + std::string ToString() const; + bool Equals(const CloseSessionRequest& other) const; + + friend bool operator==(const CloseSessionRequest& left, + const CloseSessionRequest& right) { + return left.Equals(right); + } + friend bool operator!=(const CloseSessionRequest& left, + const CloseSessionRequest& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +/// \brief The result of attempting to close the client session. 
+struct ARROW_FLIGHT_EXPORT CloseSessionResult { + CloseSessionStatus status; + + std::string ToString() const; + bool Equals(const CloseSessionResult& other) const; + + friend bool operator==(const CloseSessionResult& left, + const CloseSessionResult& right) { + return left.Equals(right); + } + friend bool operator!=(const CloseSessionResult& left, + const CloseSessionResult& right) { + return !(left == right); + } + + /// \brief Serialize this message to its wire-format representation. + arrow::Result SerializeToString() const; + + /// \brief Deserialize this message from its wire-format representation. + static arrow::Result Deserialize(std::string_view serialized); +}; + +/// \brief An iterator to FlightInfo instances returned by ListFlights. +class ARROW_FLIGHT_EXPORT FlightListing { + public: + virtual ~FlightListing() = default; + + /// \brief Retrieve the next FlightInfo from the iterator. + /// \return Arrow result with a single FlightInfo. Set to \a nullptr if there + /// are none left. + virtual arrow::Result> Next() = 0; +}; + +/// \brief An iterator to Result instances returned by DoAction. +class ARROW_FLIGHT_EXPORT ResultStream { + public: + virtual ~ResultStream() = default; + + /// \brief Retrieve the next Result from the iterator. + /// \return Arrow result with a single Result. Set to \a nullptr if there are none left. + virtual arrow::Result> Next() = 0; + + /// \brief Read and drop the remaining messages to get the error (if any) from a server. + /// \return Status OK if this is no error from a server, any other status if a + /// server returns an error. + Status Drain(); +}; + +/// \brief A holder for a RecordBatch with associated Flight metadata. +struct ARROW_FLIGHT_EXPORT FlightStreamChunk { + public: + std::shared_ptr data; + std::shared_ptr app_metadata; +}; + +/// \brief An interface to read Flight data with metadata. +class ARROW_FLIGHT_EXPORT MetadataRecordBatchReader { + public: + virtual ~MetadataRecordBatchReader() = default; + + /// \brief Get the schema for this stream. + virtual arrow::Result> GetSchema() = 0; + + /// \brief Get the next message from Flight. If the stream is + /// finished, then the members of \a FlightStreamChunk will be + /// nullptr. + virtual arrow::Result Next() = 0; + + /// \brief Consume entire stream as a vector of record batches + virtual arrow::Result>> ToRecordBatches(); + + /// \brief Consume entire stream as a Table + virtual arrow::Result> ToTable(); +}; + +/// \brief Convert a MetadataRecordBatchReader to a regular RecordBatchReader. +ARROW_FLIGHT_EXPORT +arrow::Result> MakeRecordBatchReader( + std::shared_ptr reader); + +/// \brief An interface to write IPC payloads with metadata. +class ARROW_FLIGHT_EXPORT MetadataRecordBatchWriter : public ipc::RecordBatchWriter { + public: + virtual ~MetadataRecordBatchWriter() = default; + /// \brief Begin writing data with the given schema. Only used with \a DoExchange. + virtual Status Begin(const std::shared_ptr& schema, + const ipc::IpcWriteOptions& options) = 0; + virtual Status Begin(const std::shared_ptr& schema); + virtual Status WriteMetadata(std::shared_ptr app_metadata) = 0; + virtual Status WriteWithMetadata(const RecordBatch& batch, + std::shared_ptr app_metadata) = 0; +}; + +/// \brief A FlightListing implementation based on a vector of +/// FlightInfo objects. +/// +/// This can be iterated once, then it is consumed. 
+class ARROW_FLIGHT_EXPORT SimpleFlightListing : public FlightListing { + public: + explicit SimpleFlightListing(const std::vector& flights); + explicit SimpleFlightListing(std::vector&& flights); + + arrow::Result> Next() override; + + private: + int position_; + std::vector flights_; +}; + +/// \brief A ResultStream implementation based on a vector of +/// Result objects. +/// +/// This can be iterated once, then it is consumed. +class ARROW_FLIGHT_EXPORT SimpleResultStream : public ResultStream { + public: + explicit SimpleResultStream(std::vector&& results); + arrow::Result> Next() override; + + private: + std::vector results_; + size_t position_; +}; + +/// \defgroup flight-error Error Handling +/// Types for handling errors from RPCs. Flight uses a set of status +/// codes standardized across Flight implementations, so these types +/// let applications work directly with those codes instead of having +/// to translate to and from Arrow Status. +/// @{ + +/// \brief Abstract status code for an RPC as per the Flight +/// specification. +enum class TransportStatusCode { + /// \brief No error. + kOk = 0, + /// \brief An unknown error occurred. + kUnknown = 1, + /// \brief An error occurred in the transport implementation, or an + /// error internal to the service implementation occurred. + kInternal = 2, + /// \brief An argument is invalid. + kInvalidArgument = 3, + /// \brief The request timed out. + kTimedOut = 4, + /// \brief An argument is not necessarily invalid, but references + /// some resource that does not exist. Prefer over + /// kInvalidArgument where applicable. + kNotFound = 5, + /// \brief The request attempted to create some resource that does + /// not exist. + kAlreadyExists = 6, + /// \brief The request was explicitly cancelled. + kCancelled = 7, + /// \brief The client is not authenticated. + kUnauthenticated = 8, + /// \brief The client is not authorized to perform this request. + kUnauthorized = 9, + /// \brief The request is not implemented + kUnimplemented = 10, + /// \brief There is a network connectivity error, or some resource + /// is otherwise unavailable. Most likely a temporary condition. + kUnavailable = 11, +}; + +/// \brief Convert a code to a string. +std::string ToString(TransportStatusCode code); + +/// \brief An error from an RPC call, using Flight error codes directly +/// instead of trying to translate to Arrow Status. +/// +/// Currently, only attached to the Status passed to AsyncListener::OnFinish. +/// +/// This API is EXPERIMENTAL. 
+/// \brief An error from an RPC call, using Flight error codes directly
+/// instead of trying to translate to Arrow Status.
+///
+/// Currently, only attached to the Status passed to AsyncListener::OnFinish.
+///
+/// This API is EXPERIMENTAL.
+class ARROW_FLIGHT_EXPORT TransportStatusDetail : public StatusDetail {
+ public:
+  constexpr static const char* kTypeId = "flight::TransportStatusDetail";
+  explicit TransportStatusDetail(TransportStatusCode code, std::string message,
+                                 std::vector<std::pair<std::string, std::string>> details)
+      : code_(code), message_(std::move(message)), details_(std::move(details)) {}
+  const char* type_id() const override { return kTypeId; }
+  std::string ToString() const override;
+
+  static std::optional<std::reference_wrapper<const TransportStatusDetail>> Unwrap(
+      const Status& status);
+
+  TransportStatusCode code() const { return code_; }
+  std::string_view message() const { return message_; }
+  const std::vector<std::pair<std::string, std::string>>& details() const {
+    return details_;
+  }
+
+ private:
+  TransportStatusCode code_;
+  std::string message_;
+  std::vector<std::pair<std::string, std::string>> details_;
+};
+
+/// @}
+
+}  // namespace flight
+}  // namespace arrow
diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types_async.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types_async.h
new file mode 100644
index 0000000000000000000000000000000000000000..a241e64fb4e4999f7a3ffcb8e860c2d2c5928d2c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/flight/types_async.h
@@ -0,0 +1,80 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "arrow/flight/type_fwd.h"
+#include "arrow/flight/types.h"
+#include "arrow/ipc/options.h"
+#include "arrow/type_fwd.h"
+
+namespace arrow::flight {
+
+/// \defgroup flight-async Async Flight Types
+/// Common types used for asynchronous Flight APIs.
+/// @{
+
+/// \brief Non-templated state for an async RPC.
+///
+/// This API is EXPERIMENTAL.
+class ARROW_FLIGHT_EXPORT AsyncListenerBase {
+ public:
+  AsyncListenerBase();
+  virtual ~AsyncListenerBase();
+
+  /// \brief Request cancellation of the RPC.
+  ///
+  /// The RPC is not cancelled until AsyncListener::OnFinish is called.
+  void TryCancel();
+
+ private:
+  friend class arrow::flight::internal::ClientTransport;
+
+  /// Transport-specific state for this RPC. Transport
+  /// implementations may store and retrieve state here via
+  /// ClientTransport::SetAsyncRpc and ClientTransport::GetAsyncRpc.
+  std::unique_ptr<internal::AsyncRpc> rpc_state_;
+};
+
+/// \brief Callbacks for results from async RPCs.
+///
+/// A single listener may not be used for multiple concurrent RPC
+/// calls. The application MUST hold the listener alive until
+/// OnFinish() is called and has finished.
+///
+/// This API is EXPERIMENTAL.
+template <typename T>
+class ARROW_FLIGHT_EXPORT AsyncListener : public AsyncListenerBase {
+ public:
+  /// \brief Get the next server result.
+  ///
+  /// This will never be called concurrently with itself or OnFinish.
+  virtual void OnNext(T message) = 0;
+  /// \brief Get the final status.
+ /// + /// This will never be called concurrently with itself or OnNext. If the + /// error comes from the remote server, then a TransportStatusDetail will be + /// attached. Otherwise, the error is generated by the client-side + /// transport and will not have a TransportStatusDetail. + virtual void OnFinish(Status status) = 0; +}; + +/// @} + +} // namespace arrow::flight diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/buffered.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/buffered.h new file mode 100644 index 0000000000000000000000000000000000000000..01c0a016daba06c6b635cd02fcc5912d975bf924 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/buffered.h @@ -0,0 +1,167 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Buffered stream implementations + +#pragma once + +#include +#include +#include + +#include "arrow/io/concurrency.h" +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; +class MemoryPool; +class Status; + +namespace io { + +class ARROW_EXPORT BufferedOutputStream : public OutputStream { + public: + ~BufferedOutputStream() override; + + /// \brief Create a buffered output stream wrapping the given output stream. + /// \param[in] buffer_size the size of the temporary write buffer + /// \param[in] pool a MemoryPool to use for allocations + /// \param[in] raw another OutputStream + /// \return the created BufferedOutputStream + static Result> Create( + int64_t buffer_size, MemoryPool* pool, std::shared_ptr raw); + + /// \brief Resize internal buffer + /// \param[in] new_buffer_size the new buffer size + /// \return Status + Status SetBufferSize(int64_t new_buffer_size); + + /// \brief Return the current size of the internal buffer + int64_t buffer_size() const; + + /// \brief Return the number of remaining bytes that have not been flushed to + /// the raw OutputStream + int64_t bytes_buffered() const; + + /// \brief Flush any buffered writes and release the raw + /// OutputStream. Further operations on this object are invalid + /// \return the underlying OutputStream + Result> Detach(); + + // OutputStream interface + + /// \brief Close the buffered output stream. This implicitly closes the + /// underlying raw output stream. + Status Close() override; + Status Abort() override; + bool closed() const override; + + Result Tell() const override; + // Write bytes to the stream. Thread-safe + Status Write(const void* data, int64_t nbytes) override; + Status Write(const std::shared_ptr& data) override; + + Status Flush() override; + + /// \brief Return the underlying raw output stream. 
+ std::shared_ptr raw() const; + + private: + explicit BufferedOutputStream(std::shared_ptr raw, MemoryPool* pool); + + class ARROW_NO_EXPORT Impl; + std::unique_ptr impl_; +}; + +/// \class BufferedInputStream +/// \brief An InputStream that performs buffered reads from an unbuffered +/// InputStream, which can mitigate the overhead of many small reads in some +/// cases +class ARROW_EXPORT BufferedInputStream + : public internal::InputStreamConcurrencyWrapper { + public: + ~BufferedInputStream() override; + + /// \brief Create a BufferedInputStream from a raw InputStream + /// \param[in] buffer_size the size of the temporary read buffer + /// \param[in] pool a MemoryPool to use for allocations + /// \param[in] raw a raw InputStream + /// \param[in] raw_read_bound a bound on the maximum number of bytes + /// to read from the raw input stream. The default -1 indicates that + /// it is unbounded + /// \return the created BufferedInputStream + static Result> Create( + int64_t buffer_size, MemoryPool* pool, std::shared_ptr raw, + int64_t raw_read_bound = -1); + + /// \brief Resize internal read buffer; calls to Read(...) will read at least + /// \param[in] new_buffer_size the new read buffer size + /// \return Status + Status SetBufferSize(int64_t new_buffer_size); + + /// \brief Return the number of remaining bytes in the read buffer + int64_t bytes_buffered() const; + + /// \brief Return the current size of the internal buffer + int64_t buffer_size() const; + + /// \brief Release the raw InputStream. Any data buffered will be + /// discarded. Further operations on this object are invalid + /// \return raw the underlying InputStream + std::shared_ptr Detach(); + + /// \brief Return the unbuffered InputStream + std::shared_ptr raw() const; + + // InputStream APIs + + bool closed() const override; + Result> ReadMetadata() override; + Future> ReadMetadataAsync( + const IOContext& io_context) override; + + private: + friend InputStreamConcurrencyWrapper; + + explicit BufferedInputStream(std::shared_ptr raw, MemoryPool* pool, + int64_t raw_total_bytes_bound); + + Status DoClose(); + Status DoAbort() override; + + /// \brief Returns the position of the buffered stream, though the position + /// of the unbuffered stream may be further advanced. + Result DoTell() const; + + Result DoRead(int64_t nbytes, void* out); + + /// \brief Read into buffer. + Result> DoRead(int64_t nbytes); + + /// \brief Return a zero-copy string view referencing buffered data, + /// but do not advance the position of the stream. Buffers data and + /// expands the buffer size if necessary + Result DoPeek(int64_t nbytes) override; + + class ARROW_NO_EXPORT Impl; + std::unique_ptr impl_; +}; + +} // namespace io +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/caching.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/caching.h new file mode 100644 index 0000000000000000000000000000000000000000..e2b911fafdbbc2ec95d0de4233b6bbb663ffa44e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/caching.h @@ -0,0 +1,157 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/io/interfaces.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { + +struct ARROW_EXPORT CacheOptions { + static constexpr double kDefaultIdealBandwidthUtilizationFrac = 0.9; + static constexpr int64_t kDefaultMaxIdealRequestSizeMib = 64; + + /// \brief The maximum distance in bytes between two consecutive + /// ranges; beyond this value, ranges are not combined + int64_t hole_size_limit; + /// \brief The maximum size in bytes of a combined range; if + /// combining two consecutive ranges would produce a range of a + /// size greater than this, they are not combined + int64_t range_size_limit; + /// \brief A lazy cache does not perform any I/O until requested. + /// lazy = false: request all byte ranges when PreBuffer or WillNeed is called. + /// lazy = True, prefetch_limit = 0: request merged byte ranges only after the reader + /// needs them. + /// lazy = True, prefetch_limit = k: prefetch up to k merged byte ranges ahead of the + /// range that is currently being read. + bool lazy; + /// \brief The maximum number of ranges to be prefetched. This is only used + /// for lazy cache to asynchronously read some ranges after reading the target range. + int64_t prefetch_limit = 0; + + bool operator==(const CacheOptions& other) const { + return hole_size_limit == other.hole_size_limit && + range_size_limit == other.range_size_limit && lazy == other.lazy && + prefetch_limit == other.prefetch_limit; + } + + /// \brief Construct CacheOptions from network storage metrics (e.g. S3). + /// + /// \param[in] time_to_first_byte_millis Seek-time or Time-To-First-Byte (TTFB) in + /// milliseconds, also called call setup latency of a new read request. + /// The value is a positive integer. + /// \param[in] transfer_bandwidth_mib_per_sec Data transfer Bandwidth (BW) in MiB/sec + /// (per connection). + /// The value is a positive integer. + /// \param[in] ideal_bandwidth_utilization_frac Transfer bandwidth utilization fraction + /// (per connection) to maximize the net data load. + /// The value is a positive double precision number less than 1. + /// \param[in] max_ideal_request_size_mib The maximum single data request size (in MiB) + /// to maximize the net data load. + /// The value is a positive integer. + /// \return A new instance of CacheOptions. + static CacheOptions MakeFromNetworkMetrics( + int64_t time_to_first_byte_millis, int64_t transfer_bandwidth_mib_per_sec, + double ideal_bandwidth_utilization_frac = kDefaultIdealBandwidthUtilizationFrac, + int64_t max_ideal_request_size_mib = kDefaultMaxIdealRequestSizeMib); + + static CacheOptions Defaults(); + static CacheOptions LazyDefaults(); +}; + +namespace internal { + +/// \brief A read cache designed to hide IO latencies when reading. +/// +/// This class takes multiple byte ranges that an application expects to read, and +/// coalesces them into fewer, larger read requests, which benefits performance on some +/// filesystems, particularly remote ones like Amazon S3. 
By default, it also issues +/// these read requests in parallel up front. +/// +/// To use: +/// 1. Cache() the ranges you expect to read in the future. Ideally, these ranges have +/// the exact offset and length that will later be read. The cache will combine those +/// ranges according to parameters (see constructor). +/// +/// By default, the cache will also start fetching the combined ranges in parallel in +/// the background, unless CacheOptions.lazy is set. +/// +/// 2. Call WaitFor() to be notified when the given ranges have been read. If +/// CacheOptions.lazy is set, I/O will be triggered in the background here instead. +/// This can be done in parallel (e.g. if parsing a file, call WaitFor() for each +/// chunk of the file that can be parsed in parallel). +/// +/// 3. Call Read() to retrieve the actual data for the given ranges. +/// A synchronous application may skip WaitFor() and just call Read() - it will still +/// benefit from coalescing and parallel fetching. +class ARROW_EXPORT ReadRangeCache { + public: + static constexpr int64_t kDefaultHoleSizeLimit = 8192; + static constexpr int64_t kDefaultRangeSizeLimit = 32 * 1024 * 1024; + + /// Construct a read cache with default + explicit ReadRangeCache(std::shared_ptr file, IOContext ctx) + : ReadRangeCache(file, file.get(), std::move(ctx), CacheOptions::Defaults()) {} + + /// Construct a read cache with given options + explicit ReadRangeCache(std::shared_ptr file, IOContext ctx, + CacheOptions options) + : ReadRangeCache(file, file.get(), std::move(ctx), options) {} + + /// Construct a read cache with an unowned file + ReadRangeCache(RandomAccessFile* file, IOContext ctx, CacheOptions options) + : ReadRangeCache(NULLPTR, file, std::move(ctx), options) {} + + ~ReadRangeCache(); + + /// \brief Cache the given ranges in the background. + /// + /// The caller must ensure that the ranges do not overlap with each other, + /// nor with previously cached ranges. Otherwise, behaviour will be undefined. + Status Cache(std::vector ranges); + + /// \brief Read a range previously given to Cache(). + Result> Read(ReadRange range); + + /// \brief Wait until all ranges added so far have been cached. + Future<> Wait(); + + /// \brief Wait until all given ranges have been cached. + Future<> WaitFor(std::vector ranges); + + protected: + struct Impl; + struct LazyImpl; + + ReadRangeCache(std::shared_ptr owned_file, RandomAccessFile* file, + IOContext ctx, CacheOptions options); + + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace io +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/compressed.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/compressed.h new file mode 100644 index 0000000000000000000000000000000000000000..6b4e7ab4d7248829e26bc4bbef9cb3e628f5f906 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/compressed.h @@ -0,0 +1,124 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Compressed stream implementations + +#pragma once + +#include +#include + +#include "arrow/io/concurrency.h" +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class MemoryPool; +class Status; + +namespace util { + +class Codec; + +} // namespace util + +namespace io { + +class ARROW_EXPORT CompressedOutputStream : public OutputStream { + public: + ~CompressedOutputStream() override; + + /// \brief Create a compressed output stream wrapping the given output stream. + /// + /// The codec must be capable of streaming compression. Some codecs, + /// like Snappy, are not able to do so. + static Result> Make( + util::Codec* codec, const std::shared_ptr& raw, + MemoryPool* pool = default_memory_pool()); + + // OutputStream interface + + /// \brief Close the compressed output stream. This implicitly closes the + /// underlying raw output stream. + Status Close() override; + Status Abort() override; + bool closed() const override; + + Result Tell() const override; + + Status Write(const void* data, int64_t nbytes) override; + /// \cond FALSE + using Writable::Write; + /// \endcond + Status Flush() override; + + /// \brief Return the underlying raw output stream. + std::shared_ptr raw() const; + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(CompressedOutputStream); + + CompressedOutputStream() = default; + + class ARROW_NO_EXPORT Impl; + std::unique_ptr impl_; +}; + +class ARROW_EXPORT CompressedInputStream + : public internal::InputStreamConcurrencyWrapper { + public: + ~CompressedInputStream() override; + + /// \brief Create a compressed input stream wrapping the given input stream. + /// + /// The codec must be capable of streaming decompression. Some codecs, + /// like Snappy, are not able to do so. + static Result> Make( + util::Codec* codec, const std::shared_ptr& raw, + MemoryPool* pool = default_memory_pool()); + + // InputStream interface + + bool closed() const override; + Result> ReadMetadata() override; + Future> ReadMetadataAsync( + const IOContext& io_context) override; + + /// \brief Return the underlying raw input stream. + std::shared_ptr raw() const; + + private: + friend InputStreamConcurrencyWrapper; + ARROW_DISALLOW_COPY_AND_ASSIGN(CompressedInputStream); + + CompressedInputStream() = default; + + /// \brief Close the compressed input stream. This implicitly closes the + /// underlying raw input stream. + Status DoClose(); + Status DoAbort() override; + Result DoTell() const; + Result DoRead(int64_t nbytes, void* out); + Result> DoRead(int64_t nbytes); + + class ARROW_NO_EXPORT Impl; + std::unique_ptr impl_; +}; + +} // namespace io +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/file.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/file.h new file mode 100644 index 0000000000000000000000000000000000000000..50d4f2c4dfc90f8ffb8061f68125b24ae82bb7ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/file.h @@ -0,0 +1,221 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// IO interface implementations for OS files + +#pragma once + +#include +#include +#include +#include + +#include "arrow/io/concurrency.h" +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; +class MemoryPool; +class Status; + +namespace io { + +/// \brief An operating system file open in write-only mode. +class ARROW_EXPORT FileOutputStream : public OutputStream { + public: + ~FileOutputStream() override; + + /// \brief Open a local file for writing, truncating any existing file + /// \param[in] path with UTF8 encoding + /// \param[in] append append to existing file, otherwise truncate to 0 bytes + /// \return an open FileOutputStream + /// + /// When opening a new file, any existing file with the indicated path is + /// truncated to 0 bytes, deleting any existing data + static Result> Open(const std::string& path, + bool append = false); + + /// \brief Open a file descriptor for writing. The underlying file isn't + /// truncated. + /// \param[in] fd file descriptor + /// \return an open FileOutputStream + /// + /// The file descriptor becomes owned by the OutputStream, and will be closed + /// on Close() or destruction. + static Result> Open(int fd); + + // OutputStream interface + Status Close() override; + bool closed() const override; + Result Tell() const override; + + // Write bytes to the stream. Thread-safe + Status Write(const void* data, int64_t nbytes) override; + /// \cond FALSE + using Writable::Write; + /// \endcond + + int file_descriptor() const; + + private: + FileOutputStream(); + + class ARROW_NO_EXPORT FileOutputStreamImpl; + std::unique_ptr impl_; +}; + +/// \brief An operating system file open in read-only mode. +/// +/// Reads through this implementation are unbuffered. If many small reads +/// need to be issued, it is recommended to use a buffering layer for good +/// performance. +class ARROW_EXPORT ReadableFile + : public internal::RandomAccessFileConcurrencyWrapper { + public: + ~ReadableFile() override; + + /// \brief Open a local file for reading + /// \param[in] path with UTF8 encoding + /// \param[in] pool a MemoryPool for memory allocations + /// \return ReadableFile instance + static Result> Open( + const std::string& path, MemoryPool* pool = default_memory_pool()); + + /// \brief Open a local file for reading + /// \param[in] fd file descriptor + /// \param[in] pool a MemoryPool for memory allocations + /// \return ReadableFile instance + /// + /// The file descriptor becomes owned by the ReadableFile, and will be closed + /// on Close() or destruction. 
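+  ///
+  /// A possible usage sketch (illustrative; `fd` is a descriptor the caller
+  /// already holds and hands over to the file object):
+  ///
+  ///   ARROW_ASSIGN_OR_RAISE(auto file, io::ReadableFile::Open(fd));
+  ///   ARROW_ASSIGN_OR_RAISE(auto buffer, file->ReadAt(/*position=*/0, /*nbytes=*/64));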
+ static Result> Open( + int fd, MemoryPool* pool = default_memory_pool()); + + bool closed() const override; + + int file_descriptor() const; + + Status WillNeed(const std::vector& ranges) override; + + private: + friend RandomAccessFileConcurrencyWrapper; + + explicit ReadableFile(MemoryPool* pool); + + Status DoClose(); + Result DoTell() const; + Result DoRead(int64_t nbytes, void* buffer); + Result> DoRead(int64_t nbytes); + + /// \brief Thread-safe implementation of ReadAt + Result DoReadAt(int64_t position, int64_t nbytes, void* out); + + /// \brief Thread-safe implementation of ReadAt + Result> DoReadAt(int64_t position, int64_t nbytes); + + Result DoGetSize(); + Status DoSeek(int64_t position); + + class ARROW_NO_EXPORT ReadableFileImpl; + std::unique_ptr impl_; +}; + +/// \brief A file interface that uses memory-mapped files for memory interactions +/// +/// This implementation supports zero-copy reads. The same class is used +/// for both reading and writing. +/// +/// If opening a file in a writable mode, it is not truncated first as with +/// FileOutputStream. +class ARROW_EXPORT MemoryMappedFile : public ReadWriteFileInterface { + public: + ~MemoryMappedFile() override; + + /// Create new file with indicated size, return in read/write mode + static Result> Create(const std::string& path, + int64_t size); + + // mmap() with whole file + static Result> Open(const std::string& path, + FileMode::type mode); + + // mmap() with a region of file, the offset must be a multiple of the page size + static Result> Open(const std::string& path, + FileMode::type mode, + const int64_t offset, + const int64_t length); + + Status Close() override; + + bool closed() const override; + + Result Tell() const override; + + Status Seek(int64_t position) override; + + // Required by RandomAccessFile, copies memory into out. Not thread-safe + Result Read(int64_t nbytes, void* out) override; + + // Zero copy read, moves position pointer. Not thread-safe + Result> Read(int64_t nbytes) override; + + // Zero-copy read, leaves position unchanged. Acquires a reader lock + // for the duration of slice creation (typically very short). Is thread-safe. + Result> ReadAt(int64_t position, int64_t nbytes) override; + + // Raw copy of the memory at specified position. Thread-safe, but + // locks out other readers for the duration of memcpy. Prefer the + // zero copy method + Result ReadAt(int64_t position, int64_t nbytes, void* out) override; + + // Synchronous ReadAsync override + Future> ReadAsync(const IOContext&, int64_t position, + int64_t nbytes) override; + + Status WillNeed(const std::vector& ranges) override; + + bool supports_zero_copy() const override; + + /// Write data at the current position in the file. Thread-safe + Status Write(const void* data, int64_t nbytes) override; + /// \cond FALSE + using Writable::Write; + /// \endcond + + /// Set the size of the map to new_size. + Status Resize(int64_t new_size); + + /// Write data at a particular position in the file. 
Thread-safe + Status WriteAt(int64_t position, const void* data, int64_t nbytes) override; + + Result GetSize() override; + + int file_descriptor() const; + + private: + MemoryMappedFile(); + + Status WriteInternal(const void* data, int64_t nbytes); + + class ARROW_NO_EXPORT MemoryMap; + std::shared_ptr memory_map_; +}; + +} // namespace io +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/interfaces.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/interfaces.h new file mode 100644 index 0000000000000000000000000000000000000000..b36c38c6d48688a793c2588477f97648a8b550c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/interfaces.h @@ -0,0 +1,362 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/io/type_fwd.h" +#include "arrow/type_fwd.h" +#include "arrow/util/cancel.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { + +struct ReadRange { + int64_t offset; + int64_t length; + + friend bool operator==(const ReadRange& left, const ReadRange& right) { + return (left.offset == right.offset && left.length == right.length); + } + friend bool operator!=(const ReadRange& left, const ReadRange& right) { + return !(left == right); + } + + bool Contains(const ReadRange& other) const { + return (offset <= other.offset && offset + length >= other.offset + other.length); + } +}; + +/// EXPERIMENTAL: options provider for IO tasks +/// +/// Includes an Executor (which will be used to execute asynchronous reads), +/// a MemoryPool (which will be used to allocate buffers when zero copy reads +/// are not possible), and an external id (in case the executor receives tasks from +/// multiple sources and must distinguish tasks associated with this IOContext). 
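+///
+/// A possible construction sketch (illustrative; `stop_source` is assumed to
+/// outlive the I/O it controls):
+///
+///   arrow::StopSource stop_source;
+///   arrow::io::IOContext ctx(arrow::default_memory_pool(), stop_source.token());
+///   // Calling stop_source.RequestStop() from another thread cancels pending
+///   // I/O that checks this context's stop token.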
+struct ARROW_EXPORT IOContext { + // No specified executor: will use a global IO thread pool + IOContext() : IOContext(default_memory_pool(), StopToken::Unstoppable()) {} + + explicit IOContext(StopToken stop_token) + : IOContext(default_memory_pool(), std::move(stop_token)) {} + + explicit IOContext(MemoryPool* pool, StopToken stop_token = StopToken::Unstoppable()); + + explicit IOContext(MemoryPool* pool, ::arrow::internal::Executor* executor, + StopToken stop_token = StopToken::Unstoppable(), + int64_t external_id = -1) + : pool_(pool), + executor_(executor), + external_id_(external_id), + stop_token_(std::move(stop_token)) {} + + explicit IOContext(::arrow::internal::Executor* executor, + StopToken stop_token = StopToken::Unstoppable(), + int64_t external_id = -1) + : pool_(default_memory_pool()), + executor_(executor), + external_id_(external_id), + stop_token_(std::move(stop_token)) {} + + MemoryPool* pool() const { return pool_; } + + ::arrow::internal::Executor* executor() const { return executor_; } + + // An application-specific ID, forwarded to executor task submissions + int64_t external_id() const { return external_id_; } + + StopToken stop_token() const { return stop_token_; } + + private: + MemoryPool* pool_; + ::arrow::internal::Executor* executor_; + int64_t external_id_; + StopToken stop_token_; +}; + +class ARROW_EXPORT FileInterface : public std::enable_shared_from_this { + public: + virtual ~FileInterface() = 0; + + /// \brief Close the stream cleanly + /// + /// For writable streams, this will attempt to flush any pending data + /// before releasing the underlying resource. + /// + /// After Close() is called, closed() returns true and the stream is not + /// available for further operations. + virtual Status Close() = 0; + + /// \brief Close the stream asynchronously + /// + /// By default, this will just submit the synchronous Close() to the + /// default I/O thread pool. Subclasses may implement this in a more + /// efficient manner. + virtual Future<> CloseAsync(); + + /// \brief Close the stream abruptly + /// + /// This method does not guarantee that any pending data is flushed. + /// It merely releases any underlying resource used by the stream for + /// its operation. + /// + /// After Abort() is called, closed() returns true and the stream is not + /// available for further operations. + virtual Status Abort(); + + /// \brief Return the position in this stream + virtual Result Tell() const = 0; + + /// \brief Return whether the stream is closed + virtual bool closed() const = 0; + + FileMode::type mode() const { return mode_; } + + protected: + FileInterface() : mode_(FileMode::READ) {} + FileMode::type mode_; + void set_mode(FileMode::type mode) { mode_ = mode; } + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(FileInterface); +}; + +class ARROW_EXPORT Seekable { + public: + virtual ~Seekable() = default; + virtual Status Seek(int64_t position) = 0; +}; + +class ARROW_EXPORT Writable { + public: + virtual ~Writable() = default; + + /// \brief Write the given data to the stream + /// + /// This method always processes the bytes in full. Depending on the + /// semantics of the stream, the data may be written out immediately, + /// held in a buffer, or written asynchronously. In the case where + /// the stream buffers the data, it will be copied. To avoid potentially + /// large copies, use the Write variant that takes an owned Buffer. 
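+  ///
+  /// For illustration, a sketch contrasting the two variants (`sink`, `data`,
+  /// `size` and `owned` are assumed caller-side names):
+  ///
+  ///   ARROW_RETURN_NOT_OK(sink->Write(data, size));  // copied if buffered
+  ///   std::shared_ptr<Buffer> owned = /* caller-owned buffer */;
+  ///   ARROW_RETURN_NOT_OK(sink->Write(owned));       // copy can be elided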
+ virtual Status Write(const void* data, int64_t nbytes) = 0; + + /// \brief Write the given data to the stream + /// + /// Since the Buffer owns its memory, this method can avoid a copy if + /// buffering is required. See Write(const void*, int64_t) for details. + virtual Status Write(const std::shared_ptr& data); + + /// \brief Flush buffered bytes, if any + virtual Status Flush(); + + Status Write(std::string_view data); +}; + +class ARROW_EXPORT Readable { + public: + virtual ~Readable() = default; + + /// \brief Read data from current file position. + /// + /// Read at most `nbytes` from the current file position into `out`. + /// The number of bytes read is returned. + virtual Result Read(int64_t nbytes, void* out) = 0; + + /// \brief Read data from current file position. + /// + /// Read at most `nbytes` from the current file position. Less bytes may + /// be read if EOF is reached. This method updates the current file position. + /// + /// In some cases (e.g. a memory-mapped file), this method may avoid a + /// memory copy. + virtual Result> Read(int64_t nbytes) = 0; + + /// EXPERIMENTAL: The IOContext associated with this file. + /// + /// By default, this is the same as default_io_context(), but it may be + /// overridden by subclasses. + virtual const IOContext& io_context() const; +}; + +class ARROW_EXPORT OutputStream : virtual public FileInterface, public Writable { + protected: + OutputStream() = default; +}; + +class ARROW_EXPORT InputStream : virtual public FileInterface, virtual public Readable { + public: + /// \brief Advance or skip stream indicated number of bytes + /// \param[in] nbytes the number to move forward + /// \return Status + Status Advance(int64_t nbytes); + + /// \brief Return zero-copy string_view to upcoming bytes. + /// + /// Do not modify the stream position. The view becomes invalid after + /// any operation on the stream. May trigger buffering if the requested + /// size is larger than the number of buffered bytes. + /// + /// May return NotImplemented on streams that don't support it. + /// + /// \param[in] nbytes the maximum number of bytes to see + virtual Result Peek(int64_t nbytes); + + /// \brief Return true if InputStream is capable of zero copy Buffer reads + /// + /// Zero copy reads imply the use of Buffer-returning Read() overloads. + virtual bool supports_zero_copy() const; + + /// \brief Read and return stream metadata + /// + /// If the stream implementation doesn't support metadata, empty metadata + /// is returned. Note that it is allowed to return a null pointer rather + /// than an allocated empty metadata. + virtual Result> ReadMetadata(); + + /// \brief Read stream metadata asynchronously + virtual Future> ReadMetadataAsync( + const IOContext& io_context); + Future> ReadMetadataAsync(); + + protected: + InputStream() = default; +}; + +class ARROW_EXPORT RandomAccessFile : public InputStream, public Seekable { + public: + /// Necessary because we hold a std::unique_ptr + ~RandomAccessFile() override; + + /// \brief Create an isolated InputStream that reads a segment of a + /// RandomAccessFile. Multiple such stream can be created and used + /// independently without interference + /// \param[in] file a file instance + /// \param[in] file_offset the starting position in the file + /// \param[in] nbytes the extent of bytes to read. The file should have + /// sufficient bytes available + static Result> GetStream( + std::shared_ptr file, int64_t file_offset, int64_t nbytes); + + /// \brief Return the total file size in bytes. 
+ /// + /// This method does not read or move the current file position, so is safe + /// to call concurrently with e.g. ReadAt(). + virtual Result GetSize() = 0; + + /// \brief Read data from given file position. + /// + /// At most `nbytes` bytes are read. The number of bytes read is returned + /// (it can be less than `nbytes` if EOF is reached). + /// + /// This method can be safely called from multiple threads concurrently. + /// It is unspecified whether this method updates the file position or not. + /// + /// The default RandomAccessFile-provided implementation uses Seek() and Read(), + /// but subclasses may override it with a more efficient implementation + /// that doesn't depend on implicit file positioning. + /// + /// \param[in] position Where to read bytes from + /// \param[in] nbytes The number of bytes to read + /// \param[out] out The buffer to read bytes into + /// \return The number of bytes read, or an error + virtual Result ReadAt(int64_t position, int64_t nbytes, void* out); + + /// \brief Read data from given file position. + /// + /// At most `nbytes` bytes are read, but it can be less if EOF is reached. + /// + /// \param[in] position Where to read bytes from + /// \param[in] nbytes The number of bytes to read + /// \return A buffer containing the bytes read, or an error + virtual Result> ReadAt(int64_t position, int64_t nbytes); + + /// EXPERIMENTAL: Read data asynchronously. + virtual Future> ReadAsync(const IOContext&, int64_t position, + int64_t nbytes); + + /// EXPERIMENTAL: Read data asynchronously, using the file's IOContext. + Future> ReadAsync(int64_t position, int64_t nbytes); + + /// EXPERIMENTAL: Explicit multi-read. + /// \brief Request multiple reads at once + /// + /// The underlying filesystem may optimize these reads by coalescing small reads into + /// large reads or by breaking up large reads into multiple parallel smaller reads. The + /// reads should be issued in parallel if it makes sense for the filesystem. + /// + /// One future will be returned for each input read range. Multiple returned futures + /// may correspond to a single read. Or, a single returned future may be a combined + /// result of several individual reads. + /// + /// \param[in] ranges The ranges to read + /// \return A future that will complete with the data from the requested range is + /// available + virtual std::vector>> ReadManyAsync( + const IOContext&, const std::vector& ranges); + + /// EXPERIMENTAL: Explicit multi-read, using the file's IOContext. + std::vector>> ReadManyAsync( + const std::vector& ranges); + + /// EXPERIMENTAL: Inform that the given ranges may be read soon. + /// + /// Some implementations might arrange to prefetch some of the data. + /// However, no guarantee is made and the default implementation does nothing. + /// For robust prefetching, use ReadAt() or ReadAsync(). 
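+  ///
+  /// A possible hint sketch (illustrative; `file` is an assumed
+  /// RandomAccessFile):
+  ///
+  ///   std::vector<io::ReadRange> ranges = {{0, 4096}, {1 << 20, 4096}};
+  ///   ARROW_RETURN_NOT_OK(file->WillNeed(ranges));  // advisory; may be a no-op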
+ virtual Status WillNeed(const std::vector& ranges); + + protected: + RandomAccessFile(); + + private: + struct ARROW_NO_EXPORT Impl; + std::unique_ptr interface_impl_; +}; + +class ARROW_EXPORT WritableFile : public OutputStream, public Seekable { + public: + virtual Status WriteAt(int64_t position, const void* data, int64_t nbytes) = 0; + + protected: + WritableFile() = default; +}; + +class ARROW_EXPORT ReadWriteFileInterface : public RandomAccessFile, public WritableFile { + protected: + ReadWriteFileInterface() { RandomAccessFile::set_mode(FileMode::READWRITE); } +}; + +/// \brief Return an iterator on an input stream +/// +/// The iterator yields a fixed-size block on each Next() call, except the +/// last block in the stream which may be smaller. +/// Once the end of stream is reached, Next() returns nullptr +/// (unlike InputStream::Read() which returns an empty buffer). +ARROW_EXPORT +Result>> MakeInputStreamIterator( + std::shared_ptr stream, int64_t block_size); + +} // namespace io +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/memory.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..5b760a2b5a9cfe1feca6066edb9a594467bc06fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/memory.h @@ -0,0 +1,213 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Public API for different memory sharing / IO mechanisms + +#pragma once + +#include +#include +#include +#include + +#include "arrow/io/concurrency.h" +#include "arrow/io/interfaces.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Status; + +namespace io { + +/// \brief An output stream that writes to a resizable buffer +class ARROW_EXPORT BufferOutputStream : public OutputStream { + public: + explicit BufferOutputStream(const std::shared_ptr& buffer); + + /// \brief Create in-memory output stream with indicated capacity using a + /// memory pool + /// \param[in] initial_capacity the initial allocated internal capacity of + /// the OutputStream + /// \param[in,out] pool a MemoryPool to use for allocations + /// \return the created stream + static Result> Create( + int64_t initial_capacity = 4096, MemoryPool* pool = default_memory_pool()); + + ~BufferOutputStream() override; + + // Implement the OutputStream interface + + /// Close the stream, preserving the buffer (retrieve it with Finish()). 
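+  ///
+  /// A typical round trip, as an illustrative sketch:
+  ///
+  ///   ARROW_ASSIGN_OR_RAISE(auto sink, io::BufferOutputStream::Create(1024));
+  ///   ARROW_RETURN_NOT_OK(sink->Write("hello", 5));
+  ///   ARROW_ASSIGN_OR_RAISE(std::shared_ptr<Buffer> buf, sink->Finish());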
+ Status Close() override; + bool closed() const override; + Result Tell() const override; + Status Write(const void* data, int64_t nbytes) override; + + /// \cond FALSE + using OutputStream::Write; + /// \endcond + + /// Close the stream and return the buffer + Result> Finish(); + + /// \brief Initialize state of OutputStream with newly allocated memory and + /// set position to 0 + /// \param[in] initial_capacity the starting allocated capacity + /// \param[in,out] pool the memory pool to use for allocations + /// \return Status + Status Reset(int64_t initial_capacity = 1024, MemoryPool* pool = default_memory_pool()); + + int64_t capacity() const { return capacity_; } + + private: + BufferOutputStream(); + + // Ensures there is sufficient space available to write nbytes + Status Reserve(int64_t nbytes); + + std::shared_ptr buffer_; + bool is_open_; + int64_t capacity_; + int64_t position_; + uint8_t* mutable_data_; +}; + +/// \brief A helper class to track the size of allocations +/// +/// Writes to this stream do not copy or retain any data, they just bump +/// a size counter that can be later used to know exactly which data size +/// needs to be allocated for actual writing. +class ARROW_EXPORT MockOutputStream : public OutputStream { + public: + MockOutputStream() : extent_bytes_written_(0), is_open_(true) {} + + // Implement the OutputStream interface + Status Close() override; + bool closed() const override; + Result Tell() const override; + Status Write(const void* data, int64_t nbytes) override; + /// \cond FALSE + using Writable::Write; + /// \endcond + + int64_t GetExtentBytesWritten() const { return extent_bytes_written_; } + + private: + int64_t extent_bytes_written_; + bool is_open_; +}; + +/// \brief An output stream that writes into a fixed-size mutable buffer +class ARROW_EXPORT FixedSizeBufferWriter : public WritableFile { + public: + /// Input buffer must be mutable, will abort if not + explicit FixedSizeBufferWriter(const std::shared_ptr& buffer); + ~FixedSizeBufferWriter() override; + + Status Close() override; + bool closed() const override; + Status Seek(int64_t position) override; + Result Tell() const override; + Status Write(const void* data, int64_t nbytes) override; + /// \cond FALSE + using Writable::Write; + /// \endcond + + Status WriteAt(int64_t position, const void* data, int64_t nbytes) override; + + void set_memcopy_threads(int num_threads); + void set_memcopy_blocksize(int64_t blocksize); + void set_memcopy_threshold(int64_t threshold); + + protected: + class FixedSizeBufferWriterImpl; + std::unique_ptr impl_; +}; + +/// \class BufferReader +/// \brief Random access zero-copy reads on an arrow::Buffer +class ARROW_EXPORT BufferReader + : public internal::RandomAccessFileConcurrencyWrapper { + public: + /// \brief Instantiate from std::shared_ptr. + /// + /// This is a zero-copy constructor. + explicit BufferReader(std::shared_ptr buffer); + ARROW_DEPRECATED( + "Deprecated in 14.0.0. Use FromString or BufferReader(std::shared_ptr " + "buffer) instead.") + explicit BufferReader(const Buffer& buffer); + ARROW_DEPRECATED( + "Deprecated in 14.0.0. Use FromString or BufferReader(std::shared_ptr " + "buffer) instead.") + BufferReader(const uint8_t* data, int64_t size); + + /// \brief Instantiate from std::string_view. Does not own data + /// \deprecated Deprecated in 14.0.0. Use FromString or + /// BufferReader(std::shared_ptr buffer) instead. + ARROW_DEPRECATED( + "Deprecated in 14.0.0. 
Use FromString or BufferReader(std::shared_ptr " + "buffer) instead.") + explicit BufferReader(std::string_view data); + + /// \brief Instantiate from std::string. Owns data. + static std::unique_ptr FromString(std::string data); + + bool closed() const override; + + bool supports_zero_copy() const override; + + std::shared_ptr buffer() const { return buffer_; } + + // Synchronous ReadAsync override + Future> ReadAsync(const IOContext&, int64_t position, + int64_t nbytes) override; + Status WillNeed(const std::vector& ranges) override; + + protected: + friend RandomAccessFileConcurrencyWrapper; + + Status DoClose(); + + Result DoRead(int64_t nbytes, void* buffer); + Result> DoRead(int64_t nbytes); + Result DoReadAt(int64_t position, int64_t nbytes, void* out); + Result> DoReadAt(int64_t position, int64_t nbytes); + Result DoPeek(int64_t nbytes) override; + + Result DoTell() const; + Status DoSeek(int64_t position); + Result DoGetSize(); + + Status CheckClosed() const { + if (!is_open_) { + return Status::Invalid("Operation forbidden on closed BufferReader"); + } + return Status::OK(); + } + + std::shared_ptr buffer_; + const uint8_t* data_; + int64_t size_; + int64_t position_; + bool is_open_; +}; + +} // namespace io +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/slow.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/slow.h new file mode 100644 index 0000000000000000000000000000000000000000..fdcc56dfa6af622fcfd9fd10984c1d0a87414149 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/slow.h @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Slow stream implementations, mainly for testing and benchmarking + +#pragma once + +#include +#include +#include + +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; +class Status; + +namespace io { + +class ARROW_EXPORT LatencyGenerator { + public: + virtual ~LatencyGenerator(); + + void Sleep(); + + virtual double NextLatency() = 0; + + static std::shared_ptr Make(double average_latency); + static std::shared_ptr Make(double average_latency, int32_t seed); +}; + +// XXX use ConcurrencyWrapper? It could increase chances of finding a race. 
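+
+// A possible benchmarking sketch for the SlowInputStream wrapper declared
+// below (illustrative; `raw` is an assumed InputStream):
+//
+//   auto slow = std::make_shared<SlowInputStream>(raw, /*average_latency=*/0.005);
+//   // Reads through `slow` now sleep ~5 ms on average before forwarding.
+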
+ +template +class SlowInputStreamBase : public StreamType { + public: + SlowInputStreamBase(std::shared_ptr stream, + std::shared_ptr latencies) + : stream_(std::move(stream)), latencies_(std::move(latencies)) {} + + SlowInputStreamBase(std::shared_ptr stream, double average_latency) + : stream_(std::move(stream)), latencies_(LatencyGenerator::Make(average_latency)) {} + + SlowInputStreamBase(std::shared_ptr stream, double average_latency, + int32_t seed) + : stream_(std::move(stream)), + latencies_(LatencyGenerator::Make(average_latency, seed)) {} + + protected: + std::shared_ptr stream_; + std::shared_ptr latencies_; +}; + +/// \brief An InputStream wrapper that makes reads slower. +/// +/// Read() calls are made slower by an average latency (in seconds). +/// Actual latencies form a normal distribution closely centered +/// on the average latency. +/// Other calls are forwarded directly. +class ARROW_EXPORT SlowInputStream : public SlowInputStreamBase { + public: + ~SlowInputStream() override; + + using SlowInputStreamBase::SlowInputStreamBase; + + Status Close() override; + Status Abort() override; + bool closed() const override; + + Result Read(int64_t nbytes, void* out) override; + Result> Read(int64_t nbytes) override; + Result Peek(int64_t nbytes) override; + + Result Tell() const override; +}; + +/// \brief A RandomAccessFile wrapper that makes reads slower. +/// +/// Similar to SlowInputStream, but allows random access and seeking. +class ARROW_EXPORT SlowRandomAccessFile : public SlowInputStreamBase { + public: + ~SlowRandomAccessFile() override; + + using SlowInputStreamBase::SlowInputStreamBase; + + Status Close() override; + Status Abort() override; + bool closed() const override; + + Result Read(int64_t nbytes, void* out) override; + Result> Read(int64_t nbytes) override; + Result ReadAt(int64_t position, int64_t nbytes, void* out) override; + Result> ReadAt(int64_t position, int64_t nbytes) override; + Result Peek(int64_t nbytes) override; + + Result GetSize() override; + Status Seek(int64_t position) override; + Result Tell() const override; +}; + +} // namespace io +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/stdio.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/stdio.h new file mode 100644 index 0000000000000000000000000000000000000000..9484ac7712427733862ecbc7d9ee932c5dfc0907 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/stdio.h @@ -0,0 +1,82 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { + +// Output stream that just writes to stdout. 
+class ARROW_EXPORT StdoutStream : public OutputStream { + public: + StdoutStream(); + ~StdoutStream() override {} + + Status Close() override; + bool closed() const override; + + Result Tell() const override; + + Status Write(const void* data, int64_t nbytes) override; + + private: + int64_t pos_; +}; + +// Output stream that just writes to stderr. +class ARROW_EXPORT StderrStream : public OutputStream { + public: + StderrStream(); + ~StderrStream() override {} + + Status Close() override; + bool closed() const override; + + Result Tell() const override; + + Status Write(const void* data, int64_t nbytes) override; + + private: + int64_t pos_; +}; + +// Input stream that just reads from stdin. +class ARROW_EXPORT StdinStream : public InputStream { + public: + StdinStream(); + ~StdinStream() override {} + + Status Close() override; + bool closed() const override; + + Result Tell() const override; + + Result Read(int64_t nbytes, void* out) override; + + Result> Read(int64_t nbytes) override; + + private: + int64_t pos_; +}; + +} // namespace io +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/transform.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/transform.h new file mode 100644 index 0000000000000000000000000000000000000000..7afe29b10194efa39fec8e3b2008e16e5a3ee8e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/transform.h @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
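+
+// A possible transform sketch for the TransformInputStream declared in this
+// header (illustrative; it upper-cases ASCII bytes as they stream through,
+// and `wrapped` is an assumed InputStream):
+//
+//   TransformInputStream::TransformFunc to_upper =
+//       [](const std::shared_ptr<Buffer>& buf) -> Result<std::shared_ptr<Buffer>> {
+//     ARROW_ASSIGN_OR_RAISE(auto out, AllocateBuffer(buf->size()));
+//     for (int64_t i = 0; i < buf->size(); ++i) {
+//       out->mutable_data()[i] = static_cast<uint8_t>(std::toupper(buf->data()[i]));
+//     }
+//     return std::shared_ptr<Buffer>(std::move(out));
+//   };
+//   TransformInputStream stream(wrapped, std::move(to_upper));
+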
+ +// Transform stream implementations + +#pragma once + +#include +#include +#include +#include + +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { + +class ARROW_EXPORT TransformInputStream : public InputStream { + public: + using TransformFunc = + std::function>(const std::shared_ptr&)>; + + TransformInputStream(std::shared_ptr wrapped, TransformFunc transform); + ~TransformInputStream() override; + + Status Close() override; + Status Abort() override; + bool closed() const override; + + Result Read(int64_t nbytes, void* out) override; + Result> Read(int64_t nbytes) override; + + Result> ReadMetadata() override; + Future> ReadMetadataAsync( + const IOContext& io_context) override; + + Result Tell() const override; + + protected: + struct Impl; + std::unique_ptr impl_; +}; + +} // namespace io +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/type_fwd.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..a1b9e626bba289a030d87d0a14bfa2f1fb2dc29d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/io/type_fwd.h @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { + +struct FileMode { + enum type { READ, WRITE, READWRITE }; +}; + +struct IOContext; +struct CacheOptions; + +/// EXPERIMENTAL: convenience global singleton for default IOContext settings +ARROW_EXPORT +const IOContext& default_io_context(); + +/// \brief Get the capacity of the global I/O thread pool +/// +/// Return the number of worker threads in the thread pool to which +/// Arrow dispatches various I/O-bound tasks. This is an ideal number, +/// not necessarily the exact number of threads at a given point in time. +/// +/// You can change this number using SetIOThreadPoolCapacity(). +ARROW_EXPORT int GetIOThreadPoolCapacity(); + +/// \brief Set the capacity of the global I/O thread pool +/// +/// Set the number of worker threads in the thread pool to which +/// Arrow dispatches various I/O-bound tasks. +/// +/// The current number is returned by GetIOThreadPoolCapacity(). 
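+///
+/// For example (illustrative sketch):
+///
+///   ARROW_RETURN_NOT_OK(arrow::io::SetIOThreadPoolCapacity(32));
+///   int capacity = arrow::io::GetIOThreadPoolCapacity();  // now reports 32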
+ARROW_EXPORT Status SetIOThreadPoolCapacity(int threads); + +class FileInterface; +class Seekable; +class Writable; +class Readable; +class OutputStream; +class FileOutputStream; +class InputStream; +class ReadableFile; +class RandomAccessFile; +class MemoryMappedFile; +class WritableFile; +class ReadWriteFileInterface; + +class LatencyGenerator; + +class BufferOutputStream; +class BufferReader; +class CompressedInputStream; +class CompressedOutputStream; +class BufferedInputStream; +class BufferedOutputStream; + +} // namespace io +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/api.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/api.h new file mode 100644 index 0000000000000000000000000000000000000000..b5690aed8da9dfafc4af84e0a713b0c2028ed28e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/api.h @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/ipc/dictionary.h" +#include "arrow/ipc/feather.h" +#include "arrow/ipc/json_simple.h" +#include "arrow/ipc/message.h" +#include "arrow/ipc/reader.h" +#include "arrow/ipc/writer.h" diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/dictionary.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/dictionary.h new file mode 100644 index 0000000000000000000000000000000000000000..e4287cb19747fa60f5d728b6afb2bcab30443bfd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/dictionary.h @@ -0,0 +1,177 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
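// ---------------------------------------------------------------------------
// Editor's note: a short sketch (not part of the vendored headers) showing
// the global I/O thread pool accessors declared in io/type_fwd.h above. Only
// GetIOThreadPoolCapacity()/SetIOThreadPoolCapacity() are assumed; the
// function name GrowIOThreadPool is hypothetical.

#include "arrow/io/type_fwd.h"
#include "arrow/status.h"

arrow::Status GrowIOThreadPool() {
  // The getter reports the target number of workers, not necessarily the
  // number of threads alive at this instant.
  const int current = arrow::io::GetIOThreadPoolCapacity();
  // Ask for twice as many workers for a heavily I/O-bound workload.
  return arrow::io::SetIOThreadPoolCapacity(current * 2);
}
// ---------------------------------------------------------------------------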
+ +// Tools for dictionaries in IPC context + +#pragma once + +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace ipc { + +namespace internal { + +class FieldPosition { + public: + FieldPosition() : parent_(NULLPTR), index_(-1), depth_(0) {} + + FieldPosition child(int index) const { return {this, index}; } + + std::vector path() const { + std::vector path(depth_); + const FieldPosition* cur = this; + for (int i = depth_ - 1; i >= 0; --i) { + path[i] = cur->index_; + cur = cur->parent_; + } + return path; + } + + protected: + FieldPosition(const FieldPosition* parent, int index) + : parent_(parent), index_(index), depth_(parent->depth_ + 1) {} + + const FieldPosition* parent_; + int index_; + int depth_; +}; + +} // namespace internal + +/// \brief Map fields in a schema to dictionary ids +/// +/// The mapping is structural, i.e. the field path (as a vector of indices) +/// is associated to the dictionary id. A dictionary id may be associated +/// to multiple fields. +class ARROW_EXPORT DictionaryFieldMapper { + public: + DictionaryFieldMapper(); + explicit DictionaryFieldMapper(const Schema& schema); + ~DictionaryFieldMapper(); + + Status AddSchemaFields(const Schema& schema); + Status AddField(int64_t id, std::vector field_path); + + Result GetFieldId(std::vector field_path) const; + + int num_fields() const; + + /// \brief Returns number of unique dictionaries, taking into + /// account that different fields can share the same dictionary. + int num_dicts() const; + + private: + struct Impl; + std::unique_ptr impl_; +}; + +using DictionaryVector = std::vector>>; + +/// \brief Memoization data structure for reading dictionaries from IPC streams +/// +/// This structure tracks the following associations: +/// - field position (structural) -> dictionary id +/// - dictionary id -> value type +/// - dictionary id -> dictionary (value) data +/// +/// Together, they allow resolving dictionary data when reading an IPC stream, +/// using metadata recorded in the schema message and data recorded in the +/// dictionary batch messages (see ResolveDictionaries). +/// +/// This structure isn't useful for writing an IPC stream, where only +/// DictionaryFieldMapper is necessary. +class ARROW_EXPORT DictionaryMemo { + public: + DictionaryMemo(); + ~DictionaryMemo(); + + DictionaryFieldMapper& fields(); + const DictionaryFieldMapper& fields() const; + + /// \brief Return current dictionary corresponding to a particular + /// id. Returns KeyError if id not found + Result> GetDictionary(int64_t id, MemoryPool* pool) const; + + /// \brief Return dictionary value type corresponding to a + /// particular dictionary id. + Result> GetDictionaryType(int64_t id) const; + + /// \brief Return true if we have a dictionary for the input id + bool HasDictionary(int64_t id) const; + + /// \brief Add a dictionary value type to the memo with a particular id. + /// Returns KeyError if a different type is already registered with the same id. + Status AddDictionaryType(int64_t id, const std::shared_ptr& type); + + /// \brief Add a dictionary to the memo with a particular id. Returns + /// KeyError if that dictionary already exists + Status AddDictionary(int64_t id, const std::shared_ptr& dictionary); + + /// \brief Append a dictionary delta to the memo with a particular id. 
Returns + /// KeyError if that dictionary does not exist + Status AddDictionaryDelta(int64_t id, const std::shared_ptr<Array>& dictionary); + + /// \brief Add a dictionary to the memo if it does not have one with the id, + /// otherwise, replace the dictionary with the new one. + /// + /// Return true if the dictionary was added, false if replaced. + Result<bool> AddOrReplaceDictionary(int64_t id, + const std::shared_ptr<Array>& dictionary); + + private: + struct Impl; + std::unique_ptr<Impl> impl_; +}; + +// For writing: collect dictionary entries to write to the IPC stream, in order +// (i.e. inner dictionaries before dependent outer dictionaries). +ARROW_EXPORT +Result<DictionaryVector> CollectDictionaries(const RecordBatch& batch, + const DictionaryFieldMapper& mapper); + +// For reading: resolve all dictionaries in columns, according to the field +// mapping and dictionary arrays stored in memo. +// Columns may be sparse, i.e. some entries may be left null +// (e.g. if an inclusion mask was used). +ARROW_EXPORT +Status ResolveDictionaries(const ArrayDataVector& columns, const DictionaryMemo& memo, + MemoryPool* pool); + +namespace internal { + +// Like CollectDictionaries above, but uses the memo's DictionaryFieldMapper +// and all collected dictionaries are added to the memo using AddDictionary. +// +// This is used as a shortcut in some roundtripping tests (to avoid emitting +// any actual dictionary batches). +ARROW_EXPORT +Status CollectDictionaries(const RecordBatch& batch, DictionaryMemo* memo); + +} // namespace internal + +} // namespace ipc +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h new file mode 100644 index 0000000000000000000000000000000000000000..da88ee22f8291f81da3046e3c6e5844a5021be4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/feather.h @@ -0,0 +1,150 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
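// ---------------------------------------------------------------------------
// Editor's note: a write-side sketch (not part of the vendored headers) for
// the dictionary machinery declared in ipc/dictionary.h above: build a
// DictionaryFieldMapper from a schema, then collect a batch's dictionaries
// in emission order. EmitDictionaries is a hypothetical helper name.

#include "arrow/ipc/dictionary.h"
#include "arrow/record_batch.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Status EmitDictionaries(const arrow::RecordBatch& batch) {
  // Structural mapping of dictionary-encoded fields to dictionary ids.
  arrow::ipc::DictionaryFieldMapper mapper(*batch.schema());
  // Dictionaries arrive inner-before-outer, ready to serialize in order.
  ARROW_ASSIGN_OR_RAISE(arrow::ipc::DictionaryVector dictionaries,
                        arrow::ipc::CollectDictionaries(batch, mapper));
  for (const auto& [id, dictionary] : dictionaries) {
    // A real writer would emit a dictionary batch for (id, dictionary) here.
    (void)id;
    (void)dictionary;
  }
  return arrow::Status::OK();
}
// ---------------------------------------------------------------------------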
+ +// Public API for the "Feather" file format, originally created at +// http://github.com/wesm/feather + +#pragma once + +#include +#include +#include +#include + +#include "arrow/ipc/options.h" +#include "arrow/type_fwd.h" +#include "arrow/util/compression.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Schema; +class Status; +class Table; + +namespace io { + +class OutputStream; +class RandomAccessFile; + +} // namespace io + +namespace ipc { +namespace feather { + +static constexpr const int kFeatherV1Version = 2; +static constexpr const int kFeatherV2Version = 3; + +// ---------------------------------------------------------------------- +// Metadata accessor classes + +/// \class Reader +/// \brief An interface for reading columns from Feather files +class ARROW_EXPORT Reader { + public: + virtual ~Reader() = default; + + /// \brief Open a Feather file from a RandomAccessFile interface + /// + /// \param[in] source a RandomAccessFile instance + /// \return the table reader + static Result> Open( + const std::shared_ptr& source); + + /// \brief Open a Feather file from a RandomAccessFile interface + /// with IPC Read options + /// + /// \param[in] source a RandomAccessFile instance + /// \param[in] options IPC Read options + /// \return the table reader + static Result> Open( + const std::shared_ptr& source, const IpcReadOptions& options); + + /// \brief Return the version number of the Feather file + virtual int version() const = 0; + + virtual std::shared_ptr schema() const = 0; + + /// \brief Read all columns from the file as an arrow::Table. + /// + /// \param[out] out the returned table + /// \return Status + /// + /// This function is zero-copy if the file source supports zero-copy reads + virtual Status Read(std::shared_ptr
<Table>* out) = 0; + + /// \brief Read only the specified columns from the file as an arrow::Table. + /// + /// \param[in] indices the column indices to read + /// \param[out] out the returned table + /// \return Status + /// + /// This function is zero-copy if the file source supports zero-copy reads + virtual Status Read(const std::vector<int>& indices, std::shared_ptr<Table>
<Table>* out) = 0; + + /// \brief Read only the specified columns from the file as an arrow::Table. + /// + /// \param[in] names the column names to read + /// \param[out] out the returned table + /// \return Status + /// + /// This function is zero-copy if the file source supports zero-copy reads + virtual Status Read(const std::vector<std::string>& names, + std::shared_ptr<Table>
* out) = 0; +}; + +struct ARROW_EXPORT WriteProperties { + static WriteProperties Defaults(); + + static WriteProperties DefaultsV1() { + WriteProperties props = Defaults(); + props.version = kFeatherV1Version; + return props; + } + + /// Feather file version number + /// + /// version 2: "Feather V1" Apache Arrow <= 0.16.0 + /// version 3: "Feather V2" Apache Arrow > 0.16.0 + int version = kFeatherV2Version; + + // Parameters for Feather V2 only + + /// Number of rows per intra-file chunk. Use smaller chunksize when you need + /// faster random row access + int64_t chunksize = 1LL << 16; + + /// Compression type to use. Only UNCOMPRESSED, LZ4_FRAME, and ZSTD are + /// supported. The default compression returned by Defaults() is LZ4 if the + /// project is built with support for it, otherwise + /// UNCOMPRESSED. UNCOMPRESSED is set as the object default here so that if + /// WriteProperties::Defaults() is not used, the default constructor for + /// WriteProperties will work regardless of the options used to build the C++ + /// project. + Compression::type compression = Compression::UNCOMPRESSED; + + /// Compressor-specific compression level + int compression_level = ::arrow::util::kUseDefaultCompressionLevel; +}; + +ARROW_EXPORT +Status WriteTable(const Table& table, io::OutputStream* dst, + const WriteProperties& properties = WriteProperties::Defaults()); + +} // namespace feather +} // namespace ipc +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/json_simple.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/json_simple.h new file mode 100644 index 0000000000000000000000000000000000000000..3a730ee6a3f1963e2f7a486f8fac3ab4472ddf74 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/json_simple.h @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
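// ---------------------------------------------------------------------------
// Editor's note: a round-trip sketch (not part of the vendored headers) for
// the Feather API declared in ipc/feather.h above. Only WriteProperties,
// WriteTable() and Reader::Open() are assumed; RoundTripFeather and its
// stream arguments are hypothetical inputs supplied by the caller.

#include <memory>

#include "arrow/ipc/feather.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/table.h"

arrow::Status RoundTripFeather(
    const arrow::Table& table, arrow::io::OutputStream* sink,
    std::shared_ptr<arrow::io::RandomAccessFile> source) {
  auto props = arrow::ipc::feather::WriteProperties::Defaults();
  props.chunksize = 1LL << 14;  // smaller chunks => faster random row access
  ARROW_RETURN_NOT_OK(arrow::ipc::feather::WriteTable(table, sink, props));
  ARROW_ASSIGN_OR_RAISE(auto reader, arrow::ipc::feather::Reader::Open(source));
  // Read() is zero-copy when the source supports zero-copy reads.
  std::shared_ptr<arrow::Table> out;
  return reader->Read(&out);
}
// ---------------------------------------------------------------------------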
+ +// Implement a simple JSON representation format for arrays + +#pragma once + +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class DataType; + +namespace ipc { +namespace internal { +namespace json { + +ARROW_EXPORT +Result> ArrayFromJSON(const std::shared_ptr&, + const std::string& json); + +ARROW_EXPORT +Result> ArrayFromJSON(const std::shared_ptr&, + std::string_view json); + +ARROW_EXPORT +Result> ArrayFromJSON(const std::shared_ptr&, + const char* json); + +ARROW_EXPORT +Status ChunkedArrayFromJSON(const std::shared_ptr& type, + const std::vector& json_strings, + std::shared_ptr* out); + +ARROW_EXPORT +Status DictArrayFromJSON(const std::shared_ptr&, std::string_view indices_json, + std::string_view dictionary_json, std::shared_ptr* out); + +ARROW_EXPORT +Status ScalarFromJSON(const std::shared_ptr&, std::string_view json, + std::shared_ptr* out); + +ARROW_EXPORT +Status DictScalarFromJSON(const std::shared_ptr&, std::string_view index_json, + std::string_view dictionary_json, std::shared_ptr* out); + +} // namespace json +} // namespace internal +} // namespace ipc +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/message.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/message.h new file mode 100644 index 0000000000000000000000000000000000000000..1cd72ce993ed28ddfd1f894af35eeefbbdce6050 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/message.h @@ -0,0 +1,565 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// C++ object model and user API for interprocess schema messaging + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/io/type_fwd.h" +#include "arrow/ipc/type_fwd.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace ipc { + +struct IpcWriteOptions; + +// Read interface classes. 
We do not fully deserialize the flatbuffers so that +// individual fields metadata can be retrieved from very large schema without +// + +/// \class Message +/// \brief An IPC message including metadata and body +class ARROW_EXPORT Message { + public: + /// \brief Construct message, but do not validate + /// + /// Use at your own risk; Message::Open has more metadata validation + Message(std::shared_ptr metadata, std::shared_ptr body); + + ~Message(); + + /// \brief Create and validate a Message instance from two buffers + /// + /// \param[in] metadata a buffer containing the Flatbuffer metadata + /// \param[in] body a buffer containing the message body, which may be null + /// \return the created message + static Result> Open(std::shared_ptr metadata, + std::shared_ptr body); + + /// \brief Read message body and create Message given Flatbuffer metadata + /// \param[in] metadata containing a serialized Message flatbuffer + /// \param[in] stream an InputStream + /// \return the created Message + /// + /// \note If stream supports zero-copy, this is zero-copy + static Result> ReadFrom(std::shared_ptr metadata, + io::InputStream* stream); + + /// \brief Read message body from position in file, and create Message given + /// the Flatbuffer metadata + /// \param[in] offset the position in the file where the message body starts. + /// \param[in] metadata containing a serialized Message flatbuffer + /// \param[in] file the seekable file interface to read from + /// \return the created Message + /// + /// \note If file supports zero-copy, this is zero-copy + static Result> ReadFrom(const int64_t offset, + std::shared_ptr metadata, + io::RandomAccessFile* file); + + /// \brief Return true if message type and contents are equal + /// + /// \param other another message + /// \return true if contents equal + bool Equals(const Message& other) const; + + /// \brief the Message metadata + /// + /// \return buffer + std::shared_ptr metadata() const; + + /// \brief Custom metadata serialized in metadata Flatbuffer. Returns nullptr + /// when none set + const std::shared_ptr& custom_metadata() const; + + /// \brief the Message body, if any + /// + /// \return buffer is null if no body + std::shared_ptr body() const; + + /// \brief The expected body length according to the metadata, for + /// verification purposes + int64_t body_length() const; + + /// \brief The Message type + MessageType type() const; + + /// \brief The Message metadata version + MetadataVersion metadata_version() const; + + const void* header() const; + + /// \brief Write length-prefixed metadata and body to output stream + /// + /// \param[in] file output stream to write to + /// \param[in] options IPC writing options including alignment + /// \param[out] output_length the number of bytes written + /// \return Status + Status SerializeTo(io::OutputStream* file, const IpcWriteOptions& options, + int64_t* output_length) const; + + /// \brief Return true if the Message metadata passes Flatbuffer validation + bool Verify() const; + + /// \brief Whether a given message type needs a body. + static bool HasBody(MessageType type) { + return type != MessageType::NONE && type != MessageType::SCHEMA; + } + + private: + // Hide serialization details from user API + class MessageImpl; + std::unique_ptr impl_; + + ARROW_DISALLOW_COPY_AND_ASSIGN(Message); +}; + +ARROW_EXPORT std::string FormatMessageType(MessageType type); + +/// \class MessageDecoderListener +/// \brief An abstract class to listen events from MessageDecoder. 
+/// +/// This API is EXPERIMENTAL. +/// +/// \since 0.17.0 +class ARROW_EXPORT MessageDecoderListener { + public: + virtual ~MessageDecoderListener() = default; + + /// \brief Called when a message is decoded. + /// + /// MessageDecoder calls this method when it decodes a message. This + /// method is called multiple times when the target stream has + /// multiple messages. + /// + /// \param[in] message a decoded message + /// \return Status + virtual Status OnMessageDecoded(std::unique_ptr message) = 0; + + /// \brief Called when the decoder state is changed to + /// MessageDecoder::State::INITIAL. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + virtual Status OnInitial(); + + /// \brief Called when the decoder state is changed to + /// MessageDecoder::State::METADATA_LENGTH. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + virtual Status OnMetadataLength(); + + /// \brief Called when the decoder state is changed to + /// MessageDecoder::State::METADATA. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + virtual Status OnMetadata(); + + /// \brief Called when the decoder state is changed to + /// MessageDecoder::State::BODY. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + virtual Status OnBody(); + + /// \brief Called when the decoder state is changed to + /// MessageDecoder::State::EOS. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + virtual Status OnEOS(); +}; + +/// \class AssignMessageDecoderListener +/// \brief Assign a message decoded by MessageDecoder. +/// +/// This API is EXPERIMENTAL. +/// +/// \since 0.17.0 +class ARROW_EXPORT AssignMessageDecoderListener : public MessageDecoderListener { + public: + /// \brief Construct a listener that assigns a decoded message to the + /// specified location. + /// + /// \param[in] message a location to store the received message + explicit AssignMessageDecoderListener(std::unique_ptr* message) + : message_(message) {} + + virtual ~AssignMessageDecoderListener() = default; + + Status OnMessageDecoded(std::unique_ptr message) override { + *message_ = std::move(message); + return Status::OK(); + } + + private: + std::unique_ptr* message_; + + ARROW_DISALLOW_COPY_AND_ASSIGN(AssignMessageDecoderListener); +}; + +/// \class MessageDecoder +/// \brief Push style message decoder that receives data from user. +/// +/// This API is EXPERIMENTAL. +/// +/// \since 0.17.0 +class ARROW_EXPORT MessageDecoder { + public: + /// \brief State for reading a message + enum State { + /// The initial state. It requires one of the followings as the next data: + /// + /// * int32_t continuation token + /// * int32_t end-of-stream mark (== 0) + /// * int32_t metadata length (backward compatibility for + /// reading old IPC messages produced prior to version 0.15.0 + INITIAL, + + /// It requires int32_t metadata length. + METADATA_LENGTH, + + /// It requires metadata. + METADATA, + + /// It requires message body. + BODY, + + /// The end-of-stream state. No more data is processed. + EOS, + }; + + /// \brief Construct a message decoder. 
+ /// + /// \param[in] listener a MessageDecoderListener that responds events from + /// the decoder + /// \param[in] pool an optional MemoryPool to copy metadata on the + /// \param[in] skip_body if true the body will be skipped even if the message has a body + /// CPU, if required + explicit MessageDecoder(std::shared_ptr listener, + MemoryPool* pool = default_memory_pool(), + bool skip_body = false); + + /// \brief Construct a message decoder with the specified state. + /// + /// This is a construct for advanced users that know how to decode + /// Message. + /// + /// \param[in] listener a MessageDecoderListener that responds events from + /// the decoder + /// \param[in] initial_state an initial state of the decode + /// \param[in] initial_next_required_size the number of bytes needed + /// to run the next action + /// \param[in] pool an optional MemoryPool to copy metadata on the + /// CPU, if required + /// \param[in] skip_body if true the body will be skipped even if the message has a body + MessageDecoder(std::shared_ptr listener, State initial_state, + int64_t initial_next_required_size, + MemoryPool* pool = default_memory_pool(), bool skip_body = false); + + virtual ~MessageDecoder(); + + /// \brief Feed data to the decoder as a raw data. + /// + /// If the decoder can decode one or more messages by the data, the + /// decoder calls listener->OnMessageDecoded() with a decoded + /// message multiple times. + /// + /// If the state of the decoder is changed, corresponding callbacks + /// on listener is called: + /// + /// * MessageDecoder::State::INITIAL: listener->OnInitial() + /// * MessageDecoder::State::METADATA_LENGTH: listener->OnMetadataLength() + /// * MessageDecoder::State::METADATA: listener->OnMetadata() + /// * MessageDecoder::State::BODY: listener->OnBody() + /// * MessageDecoder::State::EOS: listener->OnEOS() + /// + /// \param[in] data a raw data to be processed. This data isn't + /// copied. The passed memory must be kept alive through message + /// processing. + /// \param[in] size raw data size. + /// \return Status + Status Consume(const uint8_t* data, int64_t size); + + /// \brief Feed data to the decoder as a Buffer. + /// + /// If the decoder can decode one or more messages by the Buffer, + /// the decoder calls listener->OnMessageDecoded() with a decoded + /// message multiple times. + /// + /// \param[in] buffer a Buffer to be processed. + /// \return Status + Status Consume(std::shared_ptr buffer); + + /// \brief Return the number of bytes needed to advance the state of + /// the decoder. + /// + /// This method is provided for users who want to optimize performance. + /// Normal users don't need to use this method. + /// + /// Here is an example usage for normal users: + /// + /// ~~~{.cpp} + /// decoder.Consume(buffer1); + /// decoder.Consume(buffer2); + /// decoder.Consume(buffer3); + /// ~~~ + /// + /// Decoder has internal buffer. If consumed data isn't enough to + /// advance the state of the decoder, consumed data is buffered to + /// the internal buffer. It causes performance overhead. + /// + /// If you pass next_required_size() size data to each Consume() + /// call, the decoder doesn't use its internal buffer. It improves + /// performance. 
+ /// + /// Here is an example usage to avoid using internal buffer: + /// + /// ~~~{.cpp} + /// buffer1 = get_data(decoder.next_required_size()); + /// decoder.Consume(buffer1); + /// buffer2 = get_data(decoder.next_required_size()); + /// decoder.Consume(buffer2); + /// ~~~ + /// + /// Users can use this method to avoid creating small + /// chunks. Message body must be contiguous data. If users pass + /// small chunks to the decoder, the decoder needs concatenate small + /// chunks internally. It causes performance overhead. + /// + /// Here is an example usage to reduce small chunks: + /// + /// ~~~{.cpp} + /// buffer = AllocateResizableBuffer(); + /// while ((small_chunk = get_data(&small_chunk_size))) { + /// auto current_buffer_size = buffer->size(); + /// buffer->Resize(current_buffer_size + small_chunk_size); + /// memcpy(buffer->mutable_data() + current_buffer_size, + /// small_chunk, + /// small_chunk_size); + /// if (buffer->size() < decoder.next_required_size()) { + /// continue; + /// } + /// std::shared_ptr chunk(buffer.release()); + /// decoder.Consume(chunk); + /// buffer = AllocateResizableBuffer(); + /// } + /// if (buffer->size() > 0) { + /// std::shared_ptr chunk(buffer.release()); + /// decoder.Consume(chunk); + /// } + /// ~~~ + /// + /// \return the number of bytes needed to advance the state of the + /// decoder + int64_t next_required_size() const; + + /// \brief Return the current state of the decoder. + /// + /// This method is provided for users who want to optimize performance. + /// Normal users don't need to use this method. + /// + /// Decoder doesn't need Buffer to process data on the + /// MessageDecoder::State::INITIAL state and the + /// MessageDecoder::State::METADATA_LENGTH. Creating Buffer has + /// performance overhead. Advanced users can avoid creating Buffer + /// by checking the current state of the decoder: + /// + /// ~~~{.cpp} + /// switch (decoder.state()) { + /// MessageDecoder::State::INITIAL: + /// MessageDecoder::State::METADATA_LENGTH: + /// { + /// uint8_t data[sizeof(int32_t)]; + /// auto data_size = input->Read(decoder.next_required_size(), data); + /// decoder.Consume(data, data_size); + /// } + /// break; + /// default: + /// { + /// auto buffer = input->Read(decoder.next_required_size()); + /// decoder.Consume(buffer); + /// } + /// break; + /// } + /// ~~~ + /// + /// \return the current state + State state() const; + + private: + class MessageDecoderImpl; + std::unique_ptr impl_; + + ARROW_DISALLOW_COPY_AND_ASSIGN(MessageDecoder); +}; + +/// \brief Abstract interface for a sequence of messages +/// \since 0.5.0 +class ARROW_EXPORT MessageReader { + public: + virtual ~MessageReader() = default; + + /// \brief Create MessageReader that reads from InputStream + static std::unique_ptr Open(io::InputStream* stream); + + /// \brief Create MessageReader that reads from owned InputStream + static std::unique_ptr Open( + const std::shared_ptr& owned_stream); + + /// \brief Read next Message from the interface + /// + /// \return an arrow::ipc::Message instance + virtual Result> ReadNextMessage() = 0; +}; + +// the first parameter of the function should be a pointer to metadata (aka. +// org::apache::arrow::flatbuf::RecordBatch*) +using FieldsLoaderFunction = std::function; + +/// \brief Read encapsulated RPC message from position in file +/// +/// Read a length-prefixed message flatbuffer starting at the indicated file +/// offset. 
If the message has a body with non-zero length, it will also be +/// read +/// +/// The metadata_length includes at least the length prefix and the flatbuffer +/// +/// \param[in] offset the position in the file where the message starts. The +/// first 4 bytes after the offset are the message length +/// \param[in] metadata_length the total number of bytes to read from file +/// \param[in] file the seekable file interface to read from +/// \param[in] fields_loader the function for loading subset of fields from the given file +/// \return the message read + +ARROW_EXPORT +Result> ReadMessage( + const int64_t offset, const int32_t metadata_length, io::RandomAccessFile* file, + const FieldsLoaderFunction& fields_loader = {}); + +/// \brief Read encapsulated RPC message from cached buffers +/// +/// The buffers should contain an entire message. Partial reads are not handled. +/// +/// This method can be used to read just the metadata by passing in a nullptr for the +/// body. The body will then be skipped and the body size will not be validated. +/// +/// If the body buffer is provided then it must be the complete body buffer +/// +/// This is similar to Message::Open but performs slightly more validation (e.g. checks +/// to see that the metadata length is correct and that the body is the size the metadata +/// expected) +/// +/// \param metadata The bytes for the metadata +/// \param body The bytes for the body +/// \return The message represented by the buffers +ARROW_EXPORT Result> ReadMessage( + std::shared_ptr metadata, std::shared_ptr body); + +ARROW_EXPORT +Future> ReadMessageAsync( + const int64_t offset, const int32_t metadata_length, const int64_t body_length, + io::RandomAccessFile* file, const io::IOContext& context = io::default_io_context()); + +/// \brief Advance stream to an 8-byte offset if its position is not a multiple +/// of 8 already +/// \param[in] stream an input stream +/// \param[in] alignment the byte multiple for the metadata prefix, usually 8 +/// or 64, to ensure the body starts on a multiple of that alignment +/// \return Status +ARROW_EXPORT +Status AlignStream(io::InputStream* stream, int32_t alignment = 8); + +/// \brief Advance stream to an 8-byte offset if its position is not a multiple +/// of 8 already +/// \param[in] stream an output stream +/// \param[in] alignment the byte multiple for the metadata prefix, usually 8 +/// or 64, to ensure the body starts on a multiple of that alignment +/// \return Status +ARROW_EXPORT +Status AlignStream(io::OutputStream* stream, int32_t alignment = 8); + +/// \brief Return error Status if file position is not a multiple of the +/// indicated alignment +ARROW_EXPORT +Status CheckAligned(io::FileInterface* stream, int32_t alignment = 8); + +/// \brief Read encapsulated IPC message (metadata and body) from InputStream +/// +/// Returns null if there are not enough bytes available or the +/// message length is 0 (e.g. EOS in a stream) +/// +/// \param[in] stream an input stream +/// \param[in] pool an optional MemoryPool to copy metadata on the CPU, if required +/// \return Message +ARROW_EXPORT +Result> ReadMessage(io::InputStream* stream, + MemoryPool* pool = default_memory_pool()); + +/// \brief Feed data from InputStream to MessageDecoder to decode an +/// encapsulated IPC message (metadata and body) +/// +/// This API is EXPERIMENTAL. 
+/// +/// \param[in] decoder a decoder +/// \param[in] stream an input stream +/// \return Status +/// +/// \since 0.17.0 +ARROW_EXPORT +Status DecodeMessage(MessageDecoder* decoder, io::InputStream* stream); + +/// Write encapsulated IPC message Does not make assumptions about +/// whether the stream is aligned already. Can write legacy (pre +/// version 0.15.0) IPC message if option set +/// +/// continuation: 0xFFFFFFFF +/// message_size: int32 +/// message: const void* +/// padding +/// +/// +/// \param[in] message a buffer containing the metadata to write +/// \param[in] options IPC writing options, including alignment and +/// legacy message support +/// \param[in,out] file the OutputStream to write to +/// \param[out] message_length the total size of the payload written including +/// padding +/// \return Status +Status WriteMessage(const Buffer& message, const IpcWriteOptions& options, + io::OutputStream* file, int32_t* message_length); + +} // namespace ipc +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/options.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/options.h new file mode 100644 index 0000000000000000000000000000000000000000..48b6758212bd5370aa2ff48f095080c92f60b086 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/options.h @@ -0,0 +1,178 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/io/caching.h" +#include "arrow/ipc/type_fwd.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/compression.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class MemoryPool; + +namespace ipc { + +// ARROW-109: We set this number arbitrarily to help catch user mistakes. For +// deeply nested schemas, it is expected the user will indicate explicitly the +// maximum allowed recursion depth +constexpr int kMaxNestingDepth = 64; + +/// \brief Options for writing Arrow IPC messages +struct ARROW_EXPORT IpcWriteOptions { + /// \brief If true, allow field lengths that don't fit in a signed 32-bit int. + /// + /// Some implementations may not be able to parse streams created with this option. + bool allow_64bit = false; + + /// \brief The maximum permitted schema nesting depth. + int max_recursion_depth = kMaxNestingDepth; + + /// \brief Write padding after memory buffers up to this multiple of bytes. + int32_t alignment = 8; + + /// \brief Write the pre-0.15.0 IPC message format + /// + /// This legacy format consists of a 4-byte prefix instead of 8-byte. 
+ bool write_legacy_ipc_format = false; + + /// \brief The memory pool to use for allocations made during IPC writing + /// + /// While Arrow IPC is predominantly zero-copy, it may have to allocate + /// memory in some cases (for example if compression is enabled). + MemoryPool* memory_pool = default_memory_pool(); + + /// \brief Compression codec to use for record batch body buffers + /// + /// May only be UNCOMPRESSED, LZ4_FRAME and ZSTD. + std::shared_ptr codec; + + /// \brief Minimum space savings percentage required for compression to be applied + /// + /// Space savings is calculated as (1.0 - compressed_size / uncompressed_size). + /// + /// For example, if min_space_savings = 0.1, a 100-byte body buffer won't undergo + /// compression if its expected compressed size exceeds 90 bytes. If this option is + /// unset, compression will be used indiscriminately. If no codec was supplied, this + /// option is ignored. + /// + /// Values outside of the range [0,1] are handled as errors. + /// + /// Note that enabling this option may result in unreadable data for Arrow C++ versions + /// prior to 12.0.0. + std::optional min_space_savings; + + /// \brief Use global CPU thread pool to parallelize any computational tasks + /// like compression + bool use_threads = true; + + /// \brief Whether to emit dictionary deltas + /// + /// If false, a changed dictionary for a given field will emit a full + /// dictionary replacement. + /// If true, a changed dictionary will be compared against the previous + /// version. If possible, a dictionary delta will be emitted, otherwise + /// a full dictionary replacement. + /// + /// Default is false to maximize stream compatibility. + /// + /// Also, note that if a changed dictionary is a nested dictionary, + /// then a delta is never emitted, for compatibility with the read path. + bool emit_dictionary_deltas = false; + + /// \brief Whether to unify dictionaries for the IPC file format + /// + /// The IPC file format doesn't support dictionary replacements. + /// Therefore, chunks of a column with a dictionary type must have the same + /// dictionary in each record batch (or an extended dictionary + delta). + /// + /// If this option is true, RecordBatchWriter::WriteTable will attempt + /// to unify dictionaries across each table column. If this option is + /// false, incompatible dictionaries across a table column will simply + /// raise an error. + /// + /// Note that enabling this option has a runtime cost. Also, not all types + /// currently support dictionary unification. + /// + /// This option is ignored for IPC streams, which support dictionary replacement + /// and deltas. + bool unify_dictionaries = false; + + /// \brief Format version to use for IPC messages and their metadata. + /// + /// Presently using V5 version (readable by 1.0.0 and later). + /// V4 is also available (readable by 0.8.0 and later). + MetadataVersion metadata_version = MetadataVersion::V5; + + static IpcWriteOptions Defaults(); +}; + +/// \brief Options for reading Arrow IPC messages +struct ARROW_EXPORT IpcReadOptions { + /// \brief The maximum permitted schema nesting depth. + int max_recursion_depth = kMaxNestingDepth; + + /// \brief The memory pool to use for allocations made during IPC reading + /// + /// While Arrow IPC is predominantly zero-copy, it may have to allocate + /// memory in some cases (for example if compression is enabled). + MemoryPool* memory_pool = default_memory_pool(); + + /// \brief Top-level schema fields to include when deserializing RecordBatch. 
+ /// + /// If empty (the default), return all deserialized fields. + /// If non-empty, the values are the indices of fields in the top-level schema. + std::vector included_fields; + + /// \brief Use global CPU thread pool to parallelize any computational tasks + /// like decompression + bool use_threads = true; + + /// \brief Whether to convert incoming data to platform-native endianness + /// + /// If the endianness of the received schema is not equal to platform-native + /// endianness, then all buffers with endian-sensitive data will be byte-swapped. + /// This includes the value buffers of numeric types, temporal types, decimal + /// types, as well as the offset buffers of variable-sized binary and list-like + /// types. + /// + /// Endianness conversion is achieved by the RecordBatchFileReader, + /// RecordBatchStreamReader and StreamDecoder classes. + bool ensure_native_endian = true; + + /// \brief Options to control caching behavior when pre-buffering is requested + /// + /// The lazy property will always be reset to true to deliver the expected behavior + io::CacheOptions pre_buffer_cache_options = io::CacheOptions::LazyDefaults(); + + static IpcReadOptions Defaults(); +}; + +namespace internal { + +Status CheckCompressionSupported(Compression::type codec); + +} // namespace internal +} // namespace ipc +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/reader.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/reader.h new file mode 100644 index 0000000000000000000000000000000000000000..888f59a627771b4591d2eb030483b70a49630999 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/reader.h @@ -0,0 +1,638 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Read Arrow files and streams + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/io/caching.h" +#include "arrow/io/type_fwd.h" +#include "arrow/ipc/message.h" +#include "arrow/ipc/options.h" +#include "arrow/record_batch.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/async_generator.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace ipc { + +class DictionaryMemo; +struct IpcPayload; + +using RecordBatchReader = ::arrow::RecordBatchReader; + +struct ReadStats { + /// Number of IPC messages read. + int64_t num_messages = 0; + /// Number of record batches read. + int64_t num_record_batches = 0; + /// Number of dictionary batches read. + /// + /// Note: num_dictionary_batches >= num_dictionary_deltas + num_replaced_dictionaries + int64_t num_dictionary_batches = 0; + + /// Number of dictionary deltas read. 
+ int64_t num_dictionary_deltas = 0; + /// Number of replaced dictionaries (i.e. where a dictionary batch replaces + /// an existing dictionary with an unrelated new dictionary). + int64_t num_replaced_dictionaries = 0; +}; + +/// \brief Synchronous batch stream reader that reads from io::InputStream +/// +/// This class reads the schema (plus any dictionaries) as the first messages +/// in the stream, followed by record batches. For more granular zero-copy +/// reads see the ReadRecordBatch functions +class ARROW_EXPORT RecordBatchStreamReader : public RecordBatchReader { + public: + /// Create batch reader from generic MessageReader. + /// This will take ownership of the given MessageReader. + /// + /// \param[in] message_reader a MessageReader implementation + /// \param[in] options any IPC reading options (optional) + /// \return the created batch reader + static Result> Open( + std::unique_ptr message_reader, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Record batch stream reader from InputStream + /// + /// \param[in] stream an input stream instance. Must stay alive throughout + /// lifetime of stream reader + /// \param[in] options any IPC reading options (optional) + /// \return the created batch reader + static Result> Open( + io::InputStream* stream, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open stream and retain ownership of stream object + /// \param[in] stream the input stream + /// \param[in] options any IPC reading options (optional) + /// \return the created batch reader + static Result> Open( + const std::shared_ptr& stream, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Return current read statistics + virtual ReadStats stats() const = 0; +}; + +/// \brief Reads the record batch file format +class ARROW_EXPORT RecordBatchFileReader + : public std::enable_shared_from_this { + public: + virtual ~RecordBatchFileReader() = default; + + /// \brief Open a RecordBatchFileReader + /// + /// Open a file-like object that is assumed to be self-contained; i.e., the + /// end of the file interface is the end of the Arrow file. Note that there + /// can be any amount of data preceding the Arrow-formatted data, because we + /// need only locate the end of the Arrow file stream to discover the metadata + /// and then proceed to read the data into memory. + static Result> Open( + io::RandomAccessFile* file, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open a RecordBatchFileReader + /// If the file is embedded within some larger file or memory region, you can + /// pass the absolute memory offset to the end of the file (which contains the + /// metadata footer). 
The metadata must have been written with memory offsets + /// relative to the start of the containing file + /// + /// \param[in] file the data source + /// \param[in] footer_offset the position of the end of the Arrow file + /// \param[in] options options for IPC reading + /// \return the returned reader + static Result> Open( + io::RandomAccessFile* file, int64_t footer_offset, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Version of Open that retains ownership of file + /// + /// \param[in] file the data source + /// \param[in] options options for IPC reading + /// \return the returned reader + static Result> Open( + const std::shared_ptr& file, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Version of Open that retains ownership of file + /// + /// \param[in] file the data source + /// \param[in] footer_offset the position of the end of the Arrow file + /// \param[in] options options for IPC reading + /// \return the returned reader + static Result> Open( + const std::shared_ptr& file, int64_t footer_offset, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open a file asynchronously (owns the file). + static Future> OpenAsync( + const std::shared_ptr& file, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open a file asynchronously (borrows the file). + static Future> OpenAsync( + io::RandomAccessFile* file, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open a file asynchronously (owns the file). + static Future> OpenAsync( + const std::shared_ptr& file, int64_t footer_offset, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief Open a file asynchronously (borrows the file). + static Future> OpenAsync( + io::RandomAccessFile* file, int64_t footer_offset, + const IpcReadOptions& options = IpcReadOptions::Defaults()); + + /// \brief The schema read from the file + virtual std::shared_ptr schema() const = 0; + + /// \brief Returns the number of record batches in the file + virtual int num_record_batches() const = 0; + + /// \brief Return the metadata version from the file metadata + virtual MetadataVersion version() const = 0; + + /// \brief Return the contents of the custom_metadata field from the file's + /// Footer + virtual std::shared_ptr metadata() const = 0; + + /// \brief Read a particular record batch from the file. Does not copy memory + /// if the input source supports zero-copy. + /// + /// \param[in] i the index of the record batch to return + /// \return the read batch + virtual Result> ReadRecordBatch(int i) = 0; + + /// \brief Read a particular record batch along with its custom metadata from the file. + /// Does not copy memory if the input source supports zero-copy. + /// + /// \param[in] i the index of the record batch to return + /// \return a struct containing the read batch and its custom metadata + virtual Result ReadRecordBatchWithCustomMetadata(int i) = 0; + + /// \brief Return current read statistics + virtual ReadStats stats() const = 0; + + /// \brief Computes the total number of rows in the file. + virtual Result CountRows() = 0; + + /// \brief Begin loading metadata for the desired batches into memory. + /// + /// This method will also begin loading all dictionaries messages into memory. + /// + /// For a regular file this will immediately begin disk I/O in the background on a + /// thread on the IOContext's thread pool. 
If the file is memory mapped this will + /// ensure the memory needed for the metadata is paged from disk into memory + /// + /// \param indices Indices of the batches to prefetch + /// If empty then all batches will be prefetched. + virtual Status PreBufferMetadata(const std::vector& indices) = 0; + + /// \brief Get a reentrant generator of record batches. + /// + /// \param[in] coalesce If true, enable I/O coalescing. + /// \param[in] io_context The IOContext to use (controls which thread pool + /// is used for I/O). + /// \param[in] cache_options Options for coalescing (if enabled). + /// \param[in] executor Optionally, an executor to use for decoding record + /// batches. This is generally only a benefit for very wide and/or + /// compressed batches. + virtual Result>> GetRecordBatchGenerator( + const bool coalesce = false, + const io::IOContext& io_context = io::default_io_context(), + const io::CacheOptions cache_options = io::CacheOptions::LazyDefaults(), + arrow::internal::Executor* executor = NULLPTR) = 0; + + /// \brief Collect all batches as a vector of record batches + Result ToRecordBatches(); + + /// \brief Collect all batches and concatenate as arrow::Table + Result> ToTable(); +}; + +/// \brief A general listener class to receive events. +/// +/// You must implement callback methods for interested events. +/// +/// This API is EXPERIMENTAL. +/// +/// \since 0.17.0 +class ARROW_EXPORT Listener { + public: + virtual ~Listener() = default; + + /// \brief Called when end-of-stream is received. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \return Status + /// + /// \see StreamDecoder + virtual Status OnEOS(); + + /// \brief Called when a record batch is decoded and + /// OnRecordBatchWithMetadataDecoded() isn't overridden. + /// + /// The default implementation just returns + /// arrow::Status::NotImplemented(). + /// + /// \param[in] record_batch a record batch decoded + /// \return Status + /// + /// \see StreamDecoder + virtual Status OnRecordBatchDecoded(std::shared_ptr record_batch); + + /// \brief Called when a record batch with custom metadata is decoded. + /// + /// The default implementation just calls OnRecordBatchDecoded() + /// without custom metadata. + /// + /// \param[in] record_batch_with_metadata a record batch with custom + /// metadata decoded + /// \return Status + /// + /// \see StreamDecoder + /// + /// \since 13.0.0 + virtual Status OnRecordBatchWithMetadataDecoded( + RecordBatchWithMetadata record_batch_with_metadata); + + /// \brief Called when a schema is decoded. + /// + /// The default implementation just returns arrow::Status::OK(). + /// + /// \param[in] schema a schema decoded + /// \return Status + /// + /// \see StreamDecoder + virtual Status OnSchemaDecoded(std::shared_ptr schema); + + /// \brief Called when a schema is decoded. + /// + /// The default implementation just calls OnSchemaDecoded(schema) + /// (without filtered_schema) to keep backward compatibility. + /// + /// \param[in] schema a schema decoded + /// \param[in] filtered_schema a filtered schema that only has read fields + /// \return Status + /// + /// \see StreamDecoder + /// + /// \since 13.0.0 + virtual Status OnSchemaDecoded(std::shared_ptr schema, + std::shared_ptr filtered_schema); +}; + +/// \brief Collect schema and record batches decoded by StreamDecoder. +/// +/// This API is EXPERIMENTAL. 
+/// +/// \since 0.17.0 +class ARROW_EXPORT CollectListener : public Listener { + public: + CollectListener() : schema_(), filtered_schema_(), record_batches_(), metadatas_() {} + virtual ~CollectListener() = default; + + Status OnSchemaDecoded(std::shared_ptr schema, + std::shared_ptr filtered_schema) override { + schema_ = std::move(schema); + filtered_schema_ = std::move(filtered_schema); + return Status::OK(); + } + + Status OnRecordBatchWithMetadataDecoded( + RecordBatchWithMetadata record_batch_with_metadata) override { + record_batches_.push_back(std::move(record_batch_with_metadata.batch)); + metadatas_.push_back(std::move(record_batch_with_metadata.custom_metadata)); + return Status::OK(); + } + + /// \return the decoded schema + std::shared_ptr schema() const { return schema_; } + + /// \return the filtered schema + std::shared_ptr filtered_schema() const { return filtered_schema_; } + + /// \return the all decoded record batches + const std::vector>& record_batches() const { + return record_batches_; + } + + /// \return the all decoded metadatas + const std::vector>& metadatas() const { + return metadatas_; + } + + /// \return the number of collected record batches + int64_t num_record_batches() const { return record_batches_.size(); } + + /// \return the last decoded record batch and remove it from + /// record_batches + std::shared_ptr PopRecordBatch() { + auto record_batch_with_metadata = PopRecordBatchWithMetadata(); + return std::move(record_batch_with_metadata.batch); + } + + /// \return the last decoded record batch with custom metadata and + /// remove it from record_batches + RecordBatchWithMetadata PopRecordBatchWithMetadata() { + RecordBatchWithMetadata record_batch_with_metadata; + if (record_batches_.empty()) { + return record_batch_with_metadata; + } + record_batch_with_metadata.batch = std::move(record_batches_.back()); + record_batch_with_metadata.custom_metadata = std::move(metadatas_.back()); + record_batches_.pop_back(); + metadatas_.pop_back(); + return record_batch_with_metadata; + } + + private: + std::shared_ptr schema_; + std::shared_ptr filtered_schema_; + std::vector> record_batches_; + std::vector> metadatas_; +}; + +/// \brief Push style stream decoder that receives data from user. +/// +/// This class decodes the Apache Arrow IPC streaming format data. +/// +/// This API is EXPERIMENTAL. +/// +/// \see https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format +/// +/// \since 0.17.0 +class ARROW_EXPORT StreamDecoder { + public: + /// \brief Construct a stream decoder. + /// + /// \param[in] listener a Listener that must implement + /// Listener::OnRecordBatchDecoded() to receive decoded record batches + /// \param[in] options any IPC reading options (optional) + StreamDecoder(std::shared_ptr listener, + IpcReadOptions options = IpcReadOptions::Defaults()); + + virtual ~StreamDecoder(); + + /// \brief Feed data to the decoder as a raw data. + /// + /// If the decoder can read one or more record batches by the data, + /// the decoder calls listener->OnRecordBatchDecoded() with a + /// decoded record batch multiple times. + /// + /// \param[in] data a raw data to be processed. This data isn't + /// copied. The passed memory must be kept alive through record + /// batch processing. + /// \param[in] size raw data size. + /// \return Status + Status Consume(const uint8_t* data, int64_t size); + + /// \brief Feed data to the decoder as a Buffer. 
+ /// +/// If the decoder can read one or more record batches from the +/// Buffer, it calls listener->OnRecordBatchDecoded() once per decoded +/// record batch. +/// +/// \param[in] buffer a Buffer to be processed. +/// \return Status + Status Consume(std::shared_ptr<Buffer> buffer); + + /// \brief Reset the internal state. + /// + /// You can reuse this decoder for a new stream after calling + /// this. + /// + /// \return Status + Status Reset(); + + /// \return the shared schema of the record batches in the stream + std::shared_ptr<Schema> schema() const; + + /// \brief Return the number of bytes needed to advance the state of + /// the decoder. + /// + /// This method is provided for users who want to optimize performance. + /// Normal users don't need to use this method. + /// + /// Here is an example usage for normal users: + /// + /// ~~~{.cpp} + /// decoder.Consume(buffer1); + /// decoder.Consume(buffer2); + /// decoder.Consume(buffer3); + /// ~~~ + /// + /// The decoder has an internal buffer. If the consumed data isn't enough to + /// advance the state of the decoder, the consumed data is accumulated in + /// the internal buffer, which causes performance overhead. + /// + /// If you pass exactly next_required_size() bytes of data to each Consume() + /// call, the decoder doesn't use its internal buffer, which improves + /// performance. + /// + /// Here is an example usage that avoids the internal buffer: + /// + /// ~~~{.cpp} + /// buffer1 = get_data(decoder.next_required_size()); + /// decoder.Consume(buffer1); + /// buffer2 = get_data(decoder.next_required_size()); + /// decoder.Consume(buffer2); + /// ~~~ + /// + /// Users can also use this method to avoid creating small chunks. Record + /// batch data must be contiguous, so if users pass small chunks to the + /// decoder, it needs to concatenate the small chunks + /// internally, which causes performance overhead.
+ /// +/// Here is an example usage to reduce small chunks: +/// +/// ~~~{.cpp} +/// buffer = AllocateResizableBuffer(); +/// while ((small_chunk = get_data(&small_chunk_size))) { +/// auto current_buffer_size = buffer->size(); +/// buffer->Resize(current_buffer_size + small_chunk_size); +/// memcpy(buffer->mutable_data() + current_buffer_size, +/// small_chunk, +/// small_chunk_size); +/// if (buffer->size() < decoder.next_required_size()) { +/// continue; +/// } +/// std::shared_ptr<arrow::Buffer> chunk(buffer.release()); +/// decoder.Consume(chunk); +/// buffer = AllocateResizableBuffer(); +/// } +/// if (buffer->size() > 0) { +/// std::shared_ptr<arrow::Buffer> chunk(buffer.release()); +/// decoder.Consume(chunk); +/// } +/// ~~~ +/// +/// \return the number of bytes needed to advance the state of the +/// decoder + int64_t next_required_size() const; + + /// \brief Return current read statistics + ReadStats stats() const; + + private: + class StreamDecoderImpl; + std::unique_ptr<StreamDecoderImpl> impl_; + + ARROW_DISALLOW_COPY_AND_ASSIGN(StreamDecoder); +}; + +// Generic read functions; they do not copy data if the input supports zero-copy reads + +/// \brief Read Schema from stream serialized as a single IPC message +/// and populate any dictionary-encoded fields into a DictionaryMemo +/// +/// \param[in] stream an InputStream +/// \param[in] dictionary_memo for recording dictionary-encoded fields +/// \return the output Schema +/// +/// If record batches follow the schema, it is better to use +/// RecordBatchStreamReader +ARROW_EXPORT +Result<std::shared_ptr<Schema>> ReadSchema(io::InputStream* stream, + DictionaryMemo* dictionary_memo); + +/// \brief Read Schema from encapsulated Message +/// +/// \param[in] message the message containing the Schema IPC metadata +/// \param[in] dictionary_memo DictionaryMemo for recording dictionary-encoded +/// fields. Can be nullptr if you are sure there are no +/// dictionary-encoded fields +/// \return the resulting Schema +ARROW_EXPORT +Result<std::shared_ptr<Schema>> ReadSchema(const Message& message, + DictionaryMemo* dictionary_memo); + +/// Read record batch as encapsulated IPC message with metadata size prefix and +/// header +/// +/// \param[in] schema the record batch schema +/// \param[in] dictionary_memo DictionaryMemo which has any +/// dictionaries. Can be nullptr if you are sure there are no +/// dictionary-encoded fields +/// \param[in] options IPC options for reading +/// \param[in] stream the file where the batch is located +/// \return the read record batch +ARROW_EXPORT +Result<std::shared_ptr<RecordBatch>> ReadRecordBatch( + const std::shared_ptr<Schema>& schema, const DictionaryMemo* dictionary_memo, + const IpcReadOptions& options, io::InputStream* stream); + +/// \brief Read record batch from message +/// +/// \param[in] message a Message containing the record batch metadata +/// \param[in] schema the record batch schema +/// \param[in] dictionary_memo DictionaryMemo which has any +/// dictionaries. Can be nullptr if you are sure there are no +/// dictionary-encoded fields +/// \param[in] options IPC options for reading +/// \return the read record batch +ARROW_EXPORT +Result<std::shared_ptr<RecordBatch>> ReadRecordBatch( + const Message& message, const std::shared_ptr<Schema>& schema, + const DictionaryMemo* dictionary_memo, const IpcReadOptions& options); + +/// Read record batch from file given metadata and schema +/// +/// \param[in] metadata a Message containing the record batch metadata +/// \param[in] schema the record batch schema +/// \param[in] dictionary_memo DictionaryMemo which has any +/// dictionaries.
Can be nullptr if you are sure there are no +/// dictionary-encoded fields +/// \param[in] file a random access file +/// \param[in] options options for deserialization +/// \return the read record batch +ARROW_EXPORT +Result<std::shared_ptr<RecordBatch>> ReadRecordBatch( + const Buffer& metadata, const std::shared_ptr<Schema>& schema, + const DictionaryMemo* dictionary_memo, const IpcReadOptions& options, + io::RandomAccessFile* file); + +/// \brief Read arrow::Tensor as encapsulated IPC message in file +/// +/// \param[in] file an InputStream pointed at the start of the message +/// \return the read tensor +ARROW_EXPORT +Result<std::shared_ptr<Tensor>> ReadTensor(io::InputStream* file); + +/// \brief EXPERIMENTAL: Read arrow::Tensor from IPC message +/// +/// \param[in] message a Message containing the tensor metadata and body +/// \return the read tensor +ARROW_EXPORT +Result<std::shared_ptr<Tensor>> ReadTensor(const Message& message); + +/// \brief EXPERIMENTAL: Read arrow::SparseTensor as encapsulated IPC message in file +/// +/// \param[in] file an InputStream pointed at the start of the message +/// \return the read sparse tensor +ARROW_EXPORT +Result<std::shared_ptr<SparseTensor>> ReadSparseTensor(io::InputStream* file); + +/// \brief EXPERIMENTAL: Read arrow::SparseTensor from IPC message +/// +/// \param[in] message a Message containing the tensor metadata and body +/// \return the read sparse tensor +ARROW_EXPORT +Result<std::shared_ptr<SparseTensor>> ReadSparseTensor(const Message& message); + +namespace internal { + +// These internal APIs may change without warning or deprecation + +/// \brief EXPERIMENTAL: Read arrow::SparseTensorFormat::type from a metadata +/// \param[in] metadata a Buffer containing the sparse tensor metadata +/// \return the count of the body buffers +ARROW_EXPORT +Result<size_t> ReadSparseTensorBodyBufferCount(const Buffer& metadata); + +/// \brief EXPERIMENTAL: Read arrow::SparseTensor from an IpcPayload +/// \param[in] payload an IpcPayload containing a serialized SparseTensor +/// \return the read sparse tensor +ARROW_EXPORT +Result<std::shared_ptr<SparseTensor>> ReadSparseTensorPayload(const IpcPayload& payload); + +// For fuzzing targets +ARROW_EXPORT +Status FuzzIpcStream(const uint8_t* data, int64_t size); +ARROW_EXPORT +Status FuzzIpcTensorStream(const uint8_t* data, int64_t size); +ARROW_EXPORT +Status FuzzIpcFile(const uint8_t* data, int64_t size); + +} // namespace internal + +} // namespace ipc +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/test_common.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/test_common.h new file mode 100644 index 0000000000000000000000000000000000000000..db8613cbb1e6a4eca122a3ffc372a0a4c50ad199 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/test_common.h @@ -0,0 +1,189 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +#pragma once + +#include <cstdint> +#include <memory> +#include <vector> + +#include "arrow/array.h" +#include "arrow/record_batch.h" +#include "arrow/status.h" +#include "arrow/testing/visibility.h" +#include "arrow/type.h" + +namespace arrow { +namespace ipc { +namespace test { + +// A typedef used for test parameterization +typedef Status MakeRecordBatch(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +void CompareArraysDetailed(int index, const Array& result, const Array& expected); + +ARROW_TESTING_EXPORT +void CompareBatchColumnsDetailed(const RecordBatch& result, const RecordBatch& expected); + +ARROW_TESTING_EXPORT +Status MakeRandomInt32Array(int64_t length, bool include_nulls, MemoryPool* pool, + std::shared_ptr<Array>* out, uint32_t seed = 0, + int32_t min = 0, int32_t max = 1000); + +ARROW_TESTING_EXPORT +Status MakeRandomInt64Array(int64_t length, bool include_nulls, MemoryPool* pool, + std::shared_ptr<Array>* out, uint32_t seed = 0); + +ARROW_TESTING_EXPORT +Status MakeRandomListArray(const std::shared_ptr<Array>& child_array, int num_lists, + bool include_nulls, MemoryPool* pool, + std::shared_ptr<Array>* out); + +ARROW_TESTING_EXPORT +Status MakeRandomLargeListArray(const std::shared_ptr<Array>& child_array, int num_lists, + bool include_nulls, MemoryPool* pool, + std::shared_ptr<Array>* out); + +ARROW_TESTING_EXPORT +Status MakeRandomBooleanArray(const int length, bool include_nulls, + std::shared_ptr<Array>* out); + +ARROW_TESTING_EXPORT +Status MakeBooleanBatchSized(const int length, std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeBooleanBatch(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeIntBatchSized(int length, std::shared_ptr<RecordBatch>* out, + uint32_t seed = 0); + +ARROW_TESTING_EXPORT +Status MakeIntRecordBatch(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeFloat3264BatchSized(int length, std::shared_ptr<RecordBatch>* out, + uint32_t seed = 0); + +ARROW_TESTING_EXPORT +Status MakeFloat3264Batch(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeFloatBatchSized(int length, std::shared_ptr<RecordBatch>* out, + uint32_t seed = 0); + +ARROW_TESTING_EXPORT +Status MakeFloatBatch(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeRandomStringArray(int64_t length, bool include_nulls, MemoryPool* pool, + std::shared_ptr<Array>* out); + +ARROW_TESTING_EXPORT +Status MakeStringTypesRecordBatch(std::shared_ptr<RecordBatch>* out, + bool with_nulls = true, bool with_view_types = true); + +ARROW_TESTING_EXPORT +Status MakeStringTypesRecordBatchWithNulls(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeNullRecordBatch(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeListRecordBatch(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeListViewRecordBatch(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeFixedSizeListRecordBatch(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeZeroLengthRecordBatch(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeNonNullRecordBatch(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeDeeplyNestedList(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeDeeplyNestedListView(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeStruct(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeRunEndEncoded(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeUnion(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeDictionary(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeDictionaryFlat(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeNestedDictionary(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status
MakeMap(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeMapOfDictionary(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeDates(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeTimestamps(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeIntervals(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeTimes(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeFWBinary(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeDecimal(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeNull(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeUuid(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeComplex128(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeDictExtension(std::shared_ptr<RecordBatch>* out); + +ARROW_TESTING_EXPORT +Status MakeRandomTensor(const std::shared_ptr<DataType>& type, + const std::vector<int64_t>& shape, bool row_major_p, + std::shared_ptr<Tensor>* out, uint32_t seed = 0); + +} // namespace test +} // namespace ipc +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/type_fwd.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..b0d3afa922f789f4f9a8a0b2b435b3ebe0456d42 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/type_fwd.h @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +namespace arrow { +namespace ipc { + +enum class MetadataVersion : char { + /// 0.1.0 + V1, + + /// 0.2.0 + V2, + + /// 0.3.0 to 0.7.1 + V3, + + /// 0.8.0 to 0.17.0 + V4, + + /// >= 1.0.0 + V5 +}; + +class Message; +enum class MessageType { + NONE, + SCHEMA, + DICTIONARY_BATCH, + RECORD_BATCH, + TENSOR, + SPARSE_TENSOR +}; + +struct IpcReadOptions; +struct IpcWriteOptions; + +class MessageReader; + +class RecordBatchStreamReader; +class RecordBatchFileReader; +class RecordBatchWriter; + +class DictionaryFieldMapper; +class DictionaryMemo; + +namespace feather { + +class Reader; + +} // namespace feather +} // namespace ipc +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/util.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/util.h new file mode 100644 index 0000000000000000000000000000000000000000..709fedbf31b0b31585c81b36d5a81db0e5c92754 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/util.h @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include <cstdint> + +namespace arrow { +namespace ipc { + +// Buffers are padded to 64-byte boundaries (for SIMD) +static constexpr int32_t kArrowAlignment = 64; + +// Tensors are padded to 64-byte boundaries +static constexpr int32_t kTensorAlignment = 64; + +// Align on 8-byte boundaries in IPC +static constexpr int32_t kArrowIpcAlignment = 8; + +static constexpr uint8_t kPaddingBytes[kArrowAlignment] = {0}; + +static inline int64_t PaddedLength(int64_t nbytes, int32_t alignment = kArrowAlignment) { + return ((nbytes + alignment - 1) / alignment) * alignment; +} + +} // namespace ipc +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/writer.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/writer.h new file mode 100644 index 0000000000000000000000000000000000000000..aefb59f3136e4c98419799eb31faf9700fc6efd2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/ipc/writer.h @@ -0,0 +1,475 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Implement Arrow streaming binary format + +#pragma once + +#include <cstdint> +#include <memory> +#include <vector> + +#include "arrow/ipc/dictionary.h" // IWYU pragma: export +#include "arrow/ipc/message.h" +#include "arrow/ipc/options.h" +#include "arrow/result.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Array; +class Buffer; +class MemoryManager; +class MemoryPool; +class RecordBatch; +class Schema; +class Status; +class Table; +class Tensor; +class SparseTensor; + +namespace io { + +class OutputStream; + +} // namespace io + +namespace ipc { + +/// \brief Intermediate data structure with metadata header and zero +/// or more buffers for the message body. +struct IpcPayload { + MessageType type = MessageType::NONE; + std::shared_ptr<Buffer> metadata; + std::vector<std::shared_ptr<Buffer>> body_buffers; + std::vector<int64_t> variadic_buffer_counts; + int64_t body_length = 0; // serialized body length (padded, maybe compressed) + int64_t raw_body_length = 0; // initial uncompressed body length +}; + +struct WriteStats { + /// Number of IPC messages written. + int64_t num_messages = 0; + /// Number of record batches written.
+ int64_t num_record_batches = 0; + /// Number of dictionary batches written. + /// + /// Note: num_dictionary_batches >= num_dictionary_deltas + num_replaced_dictionaries + int64_t num_dictionary_batches = 0; + + /// Number of dictionary deltas written. + int64_t num_dictionary_deltas = 0; + /// Number of replaced dictionaries (i.e. where a dictionary batch replaces + /// an existing dictionary with an unrelated new dictionary). + int64_t num_replaced_dictionaries = 0; + + /// Total size in bytes of record batches emitted. + /// The "raw" size counts the original buffer sizes, while the "serialized" size + /// includes padding and (optionally) compression. + int64_t total_raw_body_size = 0; + int64_t total_serialized_body_size = 0; +}; + +/// \class RecordBatchWriter +/// \brief Abstract interface for writing a stream of record batches +class ARROW_EXPORT RecordBatchWriter { + public: + virtual ~RecordBatchWriter(); + + /// \brief Write a record batch to the stream + /// + /// \param[in] batch the record batch to write to the stream + /// \return Status + virtual Status WriteRecordBatch(const RecordBatch& batch) = 0; + + /// \brief Write a record batch with custom metadata to the stream + /// + /// \param[in] batch the record batch to write to the stream + /// \param[in] custom_metadata the record batch's custom metadata to write to the stream + /// \return Status + virtual Status WriteRecordBatch( + const RecordBatch& batch, + const std::shared_ptr<const KeyValueMetadata>& custom_metadata); + + /// \brief Write a possibly-chunked table by creating a sequence of record batches + /// \param[in] table the table to write + /// \return Status + Status WriteTable(const Table& table); + + /// \brief Write a Table with a particular chunksize + /// \param[in] table the table to write + /// \param[in] max_chunksize maximum number of rows for table chunks. To + /// indicate that no maximum should be enforced, pass -1. + /// \return Status + virtual Status WriteTable(const Table& table, int64_t max_chunksize); + + /// \brief Perform any logic necessary to finish the stream + /// + /// \return Status + virtual Status Close() = 0; + + /// \brief Return current write statistics + virtual WriteStats stats() const = 0; +}; + +/// \defgroup record-batch-writer-factories Functions for creating RecordBatchWriter +/// instances +/// +/// @{ + +/// Create a new IPC stream writer from stream sink and schema. The user is +/// responsible for closing the actual OutputStream. +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result<std::shared_ptr<RecordBatchWriter>> +ARROW_EXPORT +Result<std::shared_ptr<RecordBatchWriter>> MakeStreamWriter( + io::OutputStream* sink, const std::shared_ptr<Schema>& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// Create a new IPC stream writer from stream sink and schema. The user is +/// responsible for closing the actual OutputStream.
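+/// +/// For example, a minimal write sequence (a sketch; `sink` is an existing +/// output stream and `batch` an existing record batch, both assumed): +/// +/// ~~~{.cpp} +/// ARROW_ASSIGN_OR_RAISE(auto writer, +/// arrow::ipc::MakeStreamWriter(sink, batch->schema())); +/// ARROW_RETURN_NOT_OK(writer->WriteRecordBatch(*batch)); +/// ARROW_RETURN_NOT_OK(writer->Close()); +/// ~~~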
+/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result<std::shared_ptr<RecordBatchWriter>> +ARROW_EXPORT +Result<std::shared_ptr<RecordBatchWriter>> MakeStreamWriter( + std::shared_ptr<io::OutputStream> sink, const std::shared_ptr<Schema>& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// Create a new IPC file writer from stream sink and schema +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization, optional +/// \param[in] metadata custom metadata for the File Footer, optional +/// \return Result<std::shared_ptr<RecordBatchWriter>> +ARROW_EXPORT +Result<std::shared_ptr<RecordBatchWriter>> MakeFileWriter( + io::OutputStream* sink, const std::shared_ptr<Schema>& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults(), + const std::shared_ptr<const KeyValueMetadata>& metadata = NULLPTR); + +/// Create a new IPC file writer from stream sink and schema +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization, optional +/// \param[in] metadata custom metadata for the File Footer, optional +/// \return Result<std::shared_ptr<RecordBatchWriter>> +ARROW_EXPORT +Result<std::shared_ptr<RecordBatchWriter>> MakeFileWriter( + std::shared_ptr<io::OutputStream> sink, const std::shared_ptr<Schema>& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults(), + const std::shared_ptr<const KeyValueMetadata>& metadata = NULLPTR); + +/// @} + +/// \brief Low-level API for writing a record batch (without schema) +/// to an OutputStream as an encapsulated IPC message. See the Arrow format +/// documentation for more detail. +/// +/// \param[in] batch the record batch to write +/// \param[in] buffer_start_offset the start offset to use in the buffer metadata, +/// generally should be 0 +/// \param[in] dst an OutputStream +/// \param[out] metadata_length the size of the length-prefixed flatbuffer +/// including padding to a 64-byte boundary +/// \param[out] body_length the size of the contiguous buffer block plus +/// padding bytes +/// \param[in] options options for serialization +/// \return Status +ARROW_EXPORT +Status WriteRecordBatch(const RecordBatch& batch, int64_t buffer_start_offset, + io::OutputStream* dst, int32_t* metadata_length, + int64_t* body_length, const IpcWriteOptions& options); + +/// \brief Serialize record batch as encapsulated IPC message in a new buffer +/// +/// \param[in] batch the record batch +/// \param[in] options the IpcWriteOptions to use for serialization +/// \return the serialized message +ARROW_EXPORT +Result<std::shared_ptr<Buffer>> SerializeRecordBatch(const RecordBatch& batch, + const IpcWriteOptions& options); + +/// \brief Serialize record batch as encapsulated IPC message in a new buffer +/// +/// \param[in] batch the record batch +/// \param[in] mm a MemoryManager to allocate memory from +/// \return the serialized message +ARROW_EXPORT +Result<std::shared_ptr<Buffer>> SerializeRecordBatch(const RecordBatch& batch, + std::shared_ptr<MemoryManager> mm); + +/// \brief Write record batch to OutputStream +/// +/// \param[in] batch the record batch to write +/// \param[in] options the IpcWriteOptions to use for serialization +/// \param[in] out the OutputStream to write the output to +/// \return Status +/// +/// If writing to pre-allocated memory, you can use +/// arrow::ipc::GetRecordBatchSize to compute how much space is required +ARROW_EXPORT +Status SerializeRecordBatch(const RecordBatch& batch, const IpcWriteOptions& options, + io::OutputStream* out); + +/// \brief Serialize schema as encapsulated IPC message +/// +/// \param[in] schema the schema to write
+/// \param[in] pool a MemoryPool to allocate memory from +/// \return the serialized schema +ARROW_EXPORT +Result<std::shared_ptr<Buffer>> SerializeSchema(const Schema& schema, + MemoryPool* pool = default_memory_pool()); + +/// \brief Write multiple record batches to OutputStream, including schema +/// \param[in] batches a vector of batches. Must all have the same schema +/// \param[in] options options for serialization +/// \param[out] dst an OutputStream +/// \return Status +ARROW_EXPORT +Status WriteRecordBatchStream(const std::vector<std::shared_ptr<RecordBatch>>& batches, + const IpcWriteOptions& options, io::OutputStream* dst); + +/// \brief Compute the number of bytes needed to write an IPC payload +/// including metadata +/// +/// \param[in] payload the IPC payload to write +/// \param[in] options write options +/// \return the size of the complete encapsulated message +ARROW_EXPORT +int64_t GetPayloadSize(const IpcPayload& payload, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// \brief Compute the number of bytes needed to write a record batch including metadata +/// +/// \param[in] batch the record batch to write +/// \param[out] size the size of the complete encapsulated message +/// \return Status +ARROW_EXPORT +Status GetRecordBatchSize(const RecordBatch& batch, int64_t* size); + +/// \brief Compute the number of bytes needed to write a record batch including metadata +/// +/// \param[in] batch the record batch to write +/// \param[in] options options for serialization +/// \param[out] size the size of the complete encapsulated message +/// \return Status +ARROW_EXPORT +Status GetRecordBatchSize(const RecordBatch& batch, const IpcWriteOptions& options, + int64_t* size); + +/// \brief Compute the number of bytes needed to write a tensor including metadata +/// +/// \param[in] tensor the tensor to write +/// \param[out] size the size of the complete encapsulated message +/// \return Status +ARROW_EXPORT +Status GetTensorSize(const Tensor& tensor, int64_t* size); + +/// \brief EXPERIMENTAL: Convert arrow::Tensor to a Message with minimal memory +/// allocation +/// +/// \param[in] tensor the Tensor to write +/// \param[in] pool MemoryPool to allocate space for metadata +/// \return the resulting Message +ARROW_EXPORT +Result<std::unique_ptr<Message>> GetTensorMessage(const Tensor& tensor, MemoryPool* pool); + +/// \brief Write arrow::Tensor as a contiguous message. +/// +/// The metadata and body are written assuming 64-byte alignment. It is the +/// user's responsibility to ensure that the OutputStream has been aligned +/// to a 64-byte multiple before writing the message.
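+/// +/// For instance, one way to pad the stream to alignment before writing (a +/// sketch; `dst` is an existing arrow::io::OutputStream, assumed): +/// +/// ~~~{.cpp} +/// ARROW_ASSIGN_OR_RAISE(int64_t pos, dst->Tell()); +/// int64_t padded = arrow::ipc::PaddedLength(pos, arrow::ipc::kTensorAlignment); +/// ARROW_RETURN_NOT_OK(dst->Write(arrow::ipc::kPaddingBytes, padded - pos)); +/// ~~~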
+/// +/// The message is written out as follows: +/// \code +/// <metadata size> <metadata> <tensor data> +/// \endcode +/// +/// \param[in] tensor the Tensor to write +/// \param[in] dst the OutputStream to write to +/// \param[out] metadata_length the actual metadata length, including padding +/// \param[out] body_length the actual message body length +/// \return Status +ARROW_EXPORT +Status WriteTensor(const Tensor& tensor, io::OutputStream* dst, int32_t* metadata_length, + int64_t* body_length); + +/// \brief EXPERIMENTAL: Convert arrow::SparseTensor to a Message with minimal memory +/// allocation +/// +/// The message is written out as follows: +/// \code +/// <metadata size> <metadata> <sparse index> <sparse tensor body> +/// \endcode +/// +/// \param[in] sparse_tensor the SparseTensor to write +/// \param[in] pool MemoryPool to allocate space for metadata +/// \return the resulting Message +ARROW_EXPORT +Result<std::unique_ptr<Message>> GetSparseTensorMessage(const SparseTensor& sparse_tensor, + MemoryPool* pool); + +/// \brief EXPERIMENTAL: Write arrow::SparseTensor as a contiguous message. The metadata, +/// sparse index, and body are written assuming 64-byte alignment. It is the +/// user's responsibility to ensure that the OutputStream has been aligned +/// to a 64-byte multiple before writing the message. +/// +/// \param[in] sparse_tensor the SparseTensor to write +/// \param[in] dst the OutputStream to write to +/// \param[out] metadata_length the actual metadata length, including padding +/// \param[out] body_length the actual message body length +/// \return Status +ARROW_EXPORT +Status WriteSparseTensor(const SparseTensor& sparse_tensor, io::OutputStream* dst, + int32_t* metadata_length, int64_t* body_length); + +/// \brief Compute IpcPayload for the given schema +/// \param[in] schema the Schema that is being serialized +/// \param[in] options options for serialization +/// \param[in] mapper object mapping dictionary fields to dictionary ids +/// \param[out] out the returned IpcPayload +/// \return Status +ARROW_EXPORT +Status GetSchemaPayload(const Schema& schema, const IpcWriteOptions& options, + const DictionaryFieldMapper& mapper, IpcPayload* out); + +/// \brief Compute IpcPayload for a dictionary +/// \param[in] id the dictionary id +/// \param[in] dictionary the dictionary values +/// \param[in] options options for serialization +/// \param[out] payload the output IpcPayload +/// \return Status +ARROW_EXPORT +Status GetDictionaryPayload(int64_t id, const std::shared_ptr<Array>& dictionary, + const IpcWriteOptions& options, IpcPayload* payload); + +/// \brief Compute IpcPayload for a dictionary +/// \param[in] id the dictionary id +/// \param[in] is_delta whether the dictionary is a delta dictionary +/// \param[in] dictionary the dictionary values +/// \param[in] options options for serialization +/// \param[out] payload the output IpcPayload +/// \return Status +ARROW_EXPORT +Status GetDictionaryPayload(int64_t id, bool is_delta, + const std::shared_ptr<Array>& dictionary, + const IpcWriteOptions& options, IpcPayload* payload); + +/// \brief Compute IpcPayload for the given record batch +/// \param[in] batch the RecordBatch that is being serialized +/// \param[in] options options for serialization +/// \param[out] out the returned IpcPayload +/// \return Status +ARROW_EXPORT +Status GetRecordBatchPayload(const RecordBatch& batch, const IpcWriteOptions& options, + IpcPayload* out); + +/// \brief Compute IpcPayload for the given record batch and custom metadata +/// \param[in] batch the RecordBatch that is being serialized +/// \param[in] custom_metadata the custom metadata to be serialized
with the record batch +/// \param[in] options options for serialization +/// \param[out] out the returned IpcPayload +/// \return Status +ARROW_EXPORT +Status GetRecordBatchPayload( + const RecordBatch& batch, + const std::shared_ptr<const KeyValueMetadata>& custom_metadata, + const IpcWriteOptions& options, IpcPayload* out); + +/// \brief Write an IPC payload to the given stream. +/// \param[in] payload the payload to write +/// \param[in] options options for serialization +/// \param[in] dst the stream to write the payload to +/// \param[out] metadata_length the length of the serialized metadata +/// \return Status +ARROW_EXPORT +Status WriteIpcPayload(const IpcPayload& payload, const IpcWriteOptions& options, + io::OutputStream* dst, int32_t* metadata_length); + +/// \brief Compute IpcPayload for the given sparse tensor +/// \param[in] sparse_tensor the SparseTensor that is being serialized +/// \param[in,out] pool for any required temporary memory allocations +/// \param[out] out the returned IpcPayload +/// \return Status +ARROW_EXPORT +Status GetSparseTensorPayload(const SparseTensor& sparse_tensor, MemoryPool* pool, + IpcPayload* out); + +namespace internal { + +// These internal APIs may change without warning or deprecation + +class ARROW_EXPORT IpcPayloadWriter { + public: + virtual ~IpcPayloadWriter(); + + // Default implementation is a no-op + virtual Status Start(); + + virtual Status WritePayload(const IpcPayload& payload) = 0; + + virtual Status Close() = 0; +}; + +/// Create a new IPC payload stream writer from the stream sink. The user is +/// responsible for closing the actual OutputStream. +/// +/// \param[in] sink output stream to write to +/// \param[in] options options for serialization +/// \return Result<std::unique_ptr<IpcPayloadWriter>> +ARROW_EXPORT +Result<std::unique_ptr<IpcPayloadWriter>> MakePayloadStreamWriter( + io::OutputStream* sink, const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +/// Create a new IPC payload file writer from the stream sink. +/// +/// \param[in] sink output stream to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization, optional +/// \param[in] metadata custom metadata for the File Footer, optional +/// \return Result<std::unique_ptr<IpcPayloadWriter>> +ARROW_EXPORT +Result<std::unique_ptr<IpcPayloadWriter>> MakePayloadFileWriter( + io::OutputStream* sink, const std::shared_ptr<Schema>& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults(), + const std::shared_ptr<const KeyValueMetadata>& metadata = NULLPTR); + +/// Create a new RecordBatchWriter from an IpcPayloadWriter and schema. +/// +/// The format is implicitly the IPC stream format (allowing dictionary +/// replacement and deltas). +/// +/// \param[in] sink the IpcPayloadWriter to write to +/// \param[in] schema the schema of the record batches to be written +/// \param[in] options options for serialization +/// \return Result<std::unique_ptr<RecordBatchWriter>> +ARROW_EXPORT +Result<std::unique_ptr<RecordBatchWriter>> OpenRecordBatchWriter( + std::unique_ptr<IpcPayloadWriter> sink, const std::shared_ptr<Schema>& schema, + const IpcWriteOptions& options = IpcWriteOptions::Defaults()); + +} // namespace internal +} // namespace ipc +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/tensor/converter.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/tensor/converter.h new file mode 100644 index 0000000000000000000000000000000000000000..408ab22305fff1665956ee8bb831fbc062b9994c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/tensor/converter.h @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements.
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/sparse_tensor.h" // IWYU pragma: export + +#include <memory> + +namespace arrow { +namespace internal { + +struct SparseTensorConverterMixin { + static bool IsNonZero(const uint8_t val) { return val != 0; } + + static void AssignIndex(uint8_t* indices, int64_t val, const int elsize); + + static int64_t GetIndexValue(const uint8_t* value_ptr, const int elsize); +}; + +Status MakeSparseCOOTensorFromTensor(const Tensor& tensor, + const std::shared_ptr<DataType>& index_value_type, + MemoryPool* pool, + std::shared_ptr<SparseIndex>* out_sparse_index, + std::shared_ptr<Buffer>* out_data); + +Status MakeSparseCSXMatrixFromTensor(SparseMatrixCompressedAxis axis, + const Tensor& tensor, + const std::shared_ptr<DataType>& index_value_type, + MemoryPool* pool, + std::shared_ptr<SparseIndex>* out_sparse_index, + std::shared_ptr<Buffer>* out_data); + +Status MakeSparseCSFTensorFromTensor(const Tensor& tensor, + const std::shared_ptr<DataType>& index_value_type, + MemoryPool* pool, + std::shared_ptr<SparseIndex>* out_sparse_index, + std::shared_ptr<Buffer>* out_data); + +Result<std::shared_ptr<Tensor>> MakeTensorFromSparseCOOTensor( + MemoryPool* pool, const SparseCOOTensor* sparse_tensor); + +Result<std::shared_ptr<Tensor>> MakeTensorFromSparseCSRMatrix( + MemoryPool* pool, const SparseCSRMatrix* sparse_tensor); + +Result<std::shared_ptr<Tensor>> MakeTensorFromSparseCSCMatrix( + MemoryPool* pool, const SparseCSCMatrix* sparse_tensor); + +Result<std::shared_ptr<Tensor>> MakeTensorFromSparseCSFTensor( + MemoryPool* pool, const SparseCSFTensor* sparse_tensor); + +} // namespace internal +} // namespace arrow
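+ +// An illustrative usage sketch for the COO conversion above (commented out; +// `tensor` stands for an existing dense arrow::Tensor and is assumed here): +// +// std::shared_ptr<arrow::SparseIndex> sparse_index; +// std::shared_ptr<arrow::Buffer> data; +// ARROW_RETURN_NOT_OK(arrow::internal::MakeSparseCOOTensorFromTensor( +// *tensor, arrow::int64(), arrow::default_memory_pool(), +// &sparse_index, &data));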