diff --git a/ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..cd476749f1c2511cf86257b0f4bee9dac4bccb41 --- /dev/null +++ b/ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de2e4a118778979e61962be182c6a34e109e97b19ed65a8d7ee3c0eeaec1a017 +size 9372 diff --git a/ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..b7ebe9a1dfd577a2fe1f984a0109debb425aaf70 --- /dev/null +++ b/ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cd0beb0156ea8163d8d4560f1b7c52678cc3c1be4418a906291c13e358a6d84 +size 9387 diff --git a/ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..a33b52d5a015af74ca904b594741d936e5287c0e --- /dev/null +++ b/ckpts/universal/global_step80/zero/25.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a5ad245e1625dcbee5a55def4c3422dd0f9a8750a807134eefe00304bd1a9c7 +size 9293 diff --git a/ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..db3bc1368e99303027e5c0a5af2fd3815c9738d5 --- /dev/null +++ b/ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e311bc820a6ada17593a8140ce8f2c6ff3698176cdc0fe3d1e182ff87080844 +size 50332828 diff --git a/ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..712282912c7b8abbb0c3a45e98f8dc8b99c029ee --- /dev/null +++ b/ckpts/universal/global_step80/zero/9.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e04ac1bcf845beed7bf1c5a887de05500b599ff0df45f92f22182b42f9724d3 +size 50332843 diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/api.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/api.h new file mode 100644 index 0000000000000000000000000000000000000000..ac568a00eedc32984758f4675b58ac626c9c947a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/api.h @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Coarse public API while the library is in development + +#pragma once + +#include "arrow/array.h" // IWYU pragma: export +#include "arrow/array/array_run_end.h" // IWYU pragma: export +#include "arrow/array/concatenate.h" // IWYU pragma: export +#include "arrow/buffer.h" // IWYU pragma: export +#include "arrow/builder.h" // IWYU pragma: export +#include "arrow/chunked_array.h" // IWYU pragma: export +#include "arrow/compare.h" // IWYU pragma: export +#include "arrow/config.h" // IWYU pragma: export +#include "arrow/datum.h" // IWYU pragma: export +#include "arrow/extension_type.h" // IWYU pragma: export +#include "arrow/memory_pool.h" // IWYU pragma: export +#include "arrow/pretty_print.h" // IWYU pragma: export +#include "arrow/record_batch.h" // IWYU pragma: export +#include "arrow/result.h" // IWYU pragma: export +#include "arrow/status.h" // IWYU pragma: export +#include "arrow/table.h" // IWYU pragma: export +#include "arrow/table_builder.h" // IWYU pragma: export +#include "arrow/tensor.h" // IWYU pragma: export +#include "arrow/type.h" // IWYU pragma: export +#include "arrow/util/key_value_metadata.h" // IWYU pragma: export +#include "arrow/visit_array_inline.h" // IWYU pragma: export +#include "arrow/visit_scalar_inline.h" // IWYU pragma: export +#include "arrow/visitor.h" // IWYU pragma: export + +/// \brief Top-level namespace for Apache Arrow C++ API +namespace arrow {} diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..a84c98b6b24917faf53a821c5c3e5f62471bb9aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/buffer_builder.h @@ -0,0 +1,484 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/status.h" +#include "arrow/util/bit_util.h" +#include "arrow/util/bitmap_generate.h" +#include "arrow/util/bitmap_ops.h" +#include "arrow/util/macros.h" +#include "arrow/util/ubsan.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +// ---------------------------------------------------------------------- +// Buffer builder classes + +/// \class BufferBuilder +/// \brief A class for incrementally building a contiguous chunk of in-memory +/// data +class ARROW_EXPORT BufferBuilder { + public: + explicit BufferBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : pool_(pool), + data_(/*ensure never null to make ubsan happy and avoid check penalties below*/ + util::MakeNonNull()), + capacity_(0), + size_(0), + alignment_(alignment) {} + + /// \brief Constructs new Builder that will start using + /// the provided buffer until Finish/Reset are called. + /// The buffer is not resized. + explicit BufferBuilder(std::shared_ptr buffer, + MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : buffer_(std::move(buffer)), + pool_(pool), + data_(buffer_->mutable_data()), + capacity_(buffer_->capacity()), + size_(buffer_->size()), + alignment_(alignment) {} + + /// \brief Resize the buffer to the nearest multiple of 64 bytes + /// + /// \param new_capacity the new capacity of the of the builder. Will be + /// rounded up to a multiple of 64 bytes for padding + /// \param shrink_to_fit if new capacity is smaller than the existing, + /// reallocate internal buffer. Set to false to avoid reallocations when + /// shrinking the builder. + /// \return Status + Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) { + if (buffer_ == NULLPTR) { + ARROW_ASSIGN_OR_RAISE(buffer_, + AllocateResizableBuffer(new_capacity, alignment_, pool_)); + } else { + ARROW_RETURN_NOT_OK(buffer_->Resize(new_capacity, shrink_to_fit)); + } + capacity_ = buffer_->capacity(); + data_ = buffer_->mutable_data(); + return Status::OK(); + } + + /// \brief Ensure that builder can accommodate the additional number of bytes + /// without the need to perform allocations + /// + /// \param[in] additional_bytes number of additional bytes to make space for + /// \return Status + Status Reserve(const int64_t additional_bytes) { + auto min_capacity = size_ + additional_bytes; + if (min_capacity <= capacity_) { + return Status::OK(); + } + return Resize(GrowByFactor(capacity_, min_capacity), false); + } + + /// \brief Return a capacity expanded by the desired growth factor + static int64_t GrowByFactor(int64_t current_capacity, int64_t new_capacity) { + // Doubling capacity except for large Reserve requests. 2x growth strategy + // (versus 1.5x) seems to have slightly better performance when using + // jemalloc, but significantly better performance when using the system + // allocator. See ARROW-6450 for further discussion + return std::max(new_capacity, current_capacity * 2); + } + + /// \brief Append the given data to the buffer + /// + /// The buffer is automatically expanded if necessary. 
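+  ///
+  /// A minimal usage sketch (illustrative only; it assumes the surrounding
+  /// function returns arrow::Status so that the ARROW_RETURN_NOT_OK and
+  /// ARROW_ASSIGN_OR_RAISE macros can be used):
+  ///
+  /// ```
+  /// arrow::BufferBuilder builder;
+  /// ARROW_RETURN_NOT_OK(builder.Append("hello ", 6));
+  /// ARROW_RETURN_NOT_OK(builder.Append("world", 5));   // grows as needed
+  /// ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> buffer, builder.Finish());
+  /// ```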
+ Status Append(const void* data, const int64_t length) { + if (ARROW_PREDICT_FALSE(size_ + length > capacity_)) { + ARROW_RETURN_NOT_OK(Resize(GrowByFactor(capacity_, size_ + length), false)); + } + UnsafeAppend(data, length); + return Status::OK(); + } + + /// \brief Append the given data to the buffer + /// + /// The buffer is automatically expanded if necessary. + Status Append(std::string_view v) { return Append(v.data(), v.size()); } + + /// \brief Append copies of a value to the buffer + /// + /// The buffer is automatically expanded if necessary. + Status Append(const int64_t num_copies, uint8_t value) { + ARROW_RETURN_NOT_OK(Reserve(num_copies)); + UnsafeAppend(num_copies, value); + return Status::OK(); + } + + // Advance pointer and zero out memory + Status Advance(const int64_t length) { return Append(length, 0); } + + // Advance pointer, but don't allocate or zero memory + void UnsafeAdvance(const int64_t length) { size_ += length; } + + // Unsafe methods don't check existing size + void UnsafeAppend(const void* data, const int64_t length) { + memcpy(data_ + size_, data, static_cast(length)); + size_ += length; + } + + void UnsafeAppend(std::string_view v) { + UnsafeAppend(v.data(), static_cast(v.size())); + } + + void UnsafeAppend(const int64_t num_copies, uint8_t value) { + memset(data_ + size_, value, static_cast(num_copies)); + size_ += num_copies; + } + + /// \brief Return result of builder as a Buffer object. + /// + /// The builder is reset and can be reused afterwards. + /// + /// \param[out] out the finalized Buffer object + /// \param shrink_to_fit if the buffer size is smaller than its capacity, + /// reallocate to fit more tightly in memory. Set to false to avoid + /// a reallocation, at the expense of potentially more memory consumption. + /// \return Status + Status Finish(std::shared_ptr* out, bool shrink_to_fit = true) { + ARROW_RETURN_NOT_OK(Resize(size_, shrink_to_fit)); + if (size_ != 0) buffer_->ZeroPadding(); + *out = buffer_; + if (*out == NULLPTR) { + ARROW_ASSIGN_OR_RAISE(*out, AllocateBuffer(0, alignment_, pool_)); + } + Reset(); + return Status::OK(); + } + + Result> Finish(bool shrink_to_fit = true) { + std::shared_ptr out; + ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit)); + return out; + } + + /// \brief Like Finish, but override the final buffer size + /// + /// This is useful after writing data directly into the builder memory + /// without calling the Append methods (basically, when using BufferBuilder + /// mostly for memory allocation). + Result> FinishWithLength(int64_t final_length, + bool shrink_to_fit = true) { + size_ = final_length; + return Finish(shrink_to_fit); + } + + void Reset() { + buffer_ = NULLPTR; + capacity_ = size_ = 0; + } + + /// \brief Set size to a smaller value without modifying builder + /// contents. 
For reusable BufferBuilder classes + /// \param[in] position must be non-negative and less than or equal + /// to the current length() + void Rewind(int64_t position) { size_ = position; } + + int64_t capacity() const { return capacity_; } + int64_t length() const { return size_; } + const uint8_t* data() const { return data_; } + uint8_t* mutable_data() { return data_; } + template + const T* data_as() const { + return reinterpret_cast(data_); + } + template + T* mutable_data_as() { + return reinterpret_cast(data_); + } + + private: + std::shared_ptr buffer_; + MemoryPool* pool_; + uint8_t* data_; + int64_t capacity_; + int64_t size_; + int64_t alignment_; +}; + +template +class TypedBufferBuilder; + +/// \brief A BufferBuilder for building a buffer of arithmetic elements +template +class TypedBufferBuilder< + T, typename std::enable_if::value || + std::is_standard_layout::value>::type> { + public: + explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : bytes_builder_(pool, alignment) {} + + explicit TypedBufferBuilder(std::shared_ptr buffer, + MemoryPool* pool = default_memory_pool()) + : bytes_builder_(std::move(buffer), pool) {} + + explicit TypedBufferBuilder(BufferBuilder builder) + : bytes_builder_(std::move(builder)) {} + + BufferBuilder* bytes_builder() { return &bytes_builder_; } + + Status Append(T value) { + return bytes_builder_.Append(reinterpret_cast(&value), sizeof(T)); + } + + Status Append(const T* values, int64_t num_elements) { + return bytes_builder_.Append(reinterpret_cast(values), + num_elements * sizeof(T)); + } + + Status Append(const int64_t num_copies, T value) { + ARROW_RETURN_NOT_OK(Reserve(num_copies + length())); + UnsafeAppend(num_copies, value); + return Status::OK(); + } + + void UnsafeAppend(T value) { + bytes_builder_.UnsafeAppend(reinterpret_cast(&value), sizeof(T)); + } + + void UnsafeAppend(const T* values, int64_t num_elements) { + bytes_builder_.UnsafeAppend(reinterpret_cast(values), + num_elements * sizeof(T)); + } + + template + void UnsafeAppend(Iter values_begin, Iter values_end) { + auto num_elements = static_cast(std::distance(values_begin, values_end)); + auto data = mutable_data() + length(); + bytes_builder_.UnsafeAdvance(num_elements * sizeof(T)); + std::copy(values_begin, values_end, data); + } + + void UnsafeAppend(const int64_t num_copies, T value) { + auto data = mutable_data() + length(); + bytes_builder_.UnsafeAdvance(num_copies * sizeof(T)); + std::fill(data, data + num_copies, value); + } + + Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) { + return bytes_builder_.Resize(new_capacity * sizeof(T), shrink_to_fit); + } + + Status Reserve(const int64_t additional_elements) { + return bytes_builder_.Reserve(additional_elements * sizeof(T)); + } + + Status Advance(const int64_t length) { + return bytes_builder_.Advance(length * sizeof(T)); + } + + Status Finish(std::shared_ptr* out, bool shrink_to_fit = true) { + return bytes_builder_.Finish(out, shrink_to_fit); + } + + Result> Finish(bool shrink_to_fit = true) { + std::shared_ptr out; + ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit)); + return out; + } + + /// \brief Like Finish, but override the final buffer size + /// + /// This is useful after writing data directly into the builder memory + /// without calling the Append methods (basically, when using TypedBufferBuilder + /// only for memory allocation). 
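+  ///
+  /// Illustrative sketch (the element type int32_t and the element count are
+  /// arbitrary; assumes a caller that returns arrow::Status):
+  ///
+  /// ```
+  /// arrow::TypedBufferBuilder<int32_t> builder;
+  /// ARROW_RETURN_NOT_OK(builder.Reserve(3));
+  /// int32_t* values = builder.mutable_data();
+  /// values[0] = 1; values[1] = 2; values[2] = 3;   // written directly, no Append
+  /// ARROW_ASSIGN_OR_RAISE(auto buffer, builder.FinishWithLength(3));
+  /// ```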
+ Result> FinishWithLength(int64_t final_length, + bool shrink_to_fit = true) { + return bytes_builder_.FinishWithLength(final_length * sizeof(T), shrink_to_fit); + } + + void Reset() { bytes_builder_.Reset(); } + + int64_t length() const { return bytes_builder_.length() / sizeof(T); } + int64_t capacity() const { return bytes_builder_.capacity() / sizeof(T); } + const T* data() const { return reinterpret_cast(bytes_builder_.data()); } + T* mutable_data() { return reinterpret_cast(bytes_builder_.mutable_data()); } + + private: + BufferBuilder bytes_builder_; +}; + +/// \brief A BufferBuilder for building a buffer containing a bitmap +template <> +class TypedBufferBuilder { + public: + explicit TypedBufferBuilder(MemoryPool* pool = default_memory_pool(), + int64_t alignment = kDefaultBufferAlignment) + : bytes_builder_(pool, alignment) {} + + explicit TypedBufferBuilder(BufferBuilder builder) + : bytes_builder_(std::move(builder)) {} + + BufferBuilder* bytes_builder() { return &bytes_builder_; } + + Status Append(bool value) { + ARROW_RETURN_NOT_OK(Reserve(1)); + UnsafeAppend(value); + return Status::OK(); + } + + Status Append(const uint8_t* valid_bytes, int64_t num_elements) { + ARROW_RETURN_NOT_OK(Reserve(num_elements)); + UnsafeAppend(valid_bytes, num_elements); + return Status::OK(); + } + + Status Append(const int64_t num_copies, bool value) { + ARROW_RETURN_NOT_OK(Reserve(num_copies)); + UnsafeAppend(num_copies, value); + return Status::OK(); + } + + void UnsafeAppend(bool value) { + bit_util::SetBitTo(mutable_data(), bit_length_, value); + if (!value) { + ++false_count_; + } + ++bit_length_; + } + + /// \brief Append bits from an array of bytes (one value per byte) + void UnsafeAppend(const uint8_t* bytes, int64_t num_elements) { + if (num_elements == 0) return; + int64_t i = 0; + internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] { + bool value = bytes[i++]; + false_count_ += !value; + return value; + }); + bit_length_ += num_elements; + } + + /// \brief Append bits from a packed bitmap + void UnsafeAppend(const uint8_t* bitmap, int64_t offset, int64_t num_elements) { + if (num_elements == 0) return; + internal::CopyBitmap(bitmap, offset, num_elements, mutable_data(), bit_length_); + false_count_ += num_elements - internal::CountSetBits(bitmap, offset, num_elements); + bit_length_ += num_elements; + } + + void UnsafeAppend(const int64_t num_copies, bool value) { + bit_util::SetBitsTo(mutable_data(), bit_length_, num_copies, value); + false_count_ += num_copies * !value; + bit_length_ += num_copies; + } + + template + void UnsafeAppend(const int64_t num_elements, Generator&& gen) { + if (num_elements == 0) return; + + if (count_falses) { + internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, [&] { + bool value = gen(); + false_count_ += !value; + return value; + }); + } else { + internal::GenerateBitsUnrolled(mutable_data(), bit_length_, num_elements, + std::forward(gen)); + } + bit_length_ += num_elements; + } + + Status Resize(const int64_t new_capacity, bool shrink_to_fit = true) { + const int64_t old_byte_capacity = bytes_builder_.capacity(); + ARROW_RETURN_NOT_OK( + bytes_builder_.Resize(bit_util::BytesForBits(new_capacity), shrink_to_fit)); + // Resize() may have chosen a larger capacity (e.g. for padding), + // so ask it again before calling memset(). 
+ const int64_t new_byte_capacity = bytes_builder_.capacity(); + if (new_byte_capacity > old_byte_capacity) { + // The additional buffer space is 0-initialized for convenience, + // so that other methods can simply bump the length. + memset(mutable_data() + old_byte_capacity, 0, + static_cast(new_byte_capacity - old_byte_capacity)); + } + return Status::OK(); + } + + Status Reserve(const int64_t additional_elements) { + return Resize( + BufferBuilder::GrowByFactor(bit_length_, bit_length_ + additional_elements), + false); + } + + Status Advance(const int64_t length) { + ARROW_RETURN_NOT_OK(Reserve(length)); + bit_length_ += length; + false_count_ += length; + return Status::OK(); + } + + Status Finish(std::shared_ptr* out, bool shrink_to_fit = true) { + // set bytes_builder_.size_ == byte size of data + bytes_builder_.UnsafeAdvance(bit_util::BytesForBits(bit_length_) - + bytes_builder_.length()); + bit_length_ = false_count_ = 0; + return bytes_builder_.Finish(out, shrink_to_fit); + } + + Result> Finish(bool shrink_to_fit = true) { + std::shared_ptr out; + ARROW_RETURN_NOT_OK(Finish(&out, shrink_to_fit)); + return out; + } + + /// \brief Like Finish, but override the final buffer size + /// + /// This is useful after writing data directly into the builder memory + /// without calling the Append methods (basically, when using TypedBufferBuilder + /// only for memory allocation). + Result> FinishWithLength(int64_t final_length, + bool shrink_to_fit = true) { + const auto final_byte_length = bit_util::BytesForBits(final_length); + bytes_builder_.UnsafeAdvance(final_byte_length - bytes_builder_.length()); + bit_length_ = false_count_ = 0; + return bytes_builder_.FinishWithLength(final_byte_length, shrink_to_fit); + } + + void Reset() { + bytes_builder_.Reset(); + bit_length_ = false_count_ = 0; + } + + int64_t length() const { return bit_length_; } + int64_t capacity() const { return bytes_builder_.capacity() * 8; } + const uint8_t* data() const { return bytes_builder_.data(); } + uint8_t* mutable_data() { return bytes_builder_.mutable_data(); } + int64_t false_count() const { return false_count_; } + + private: + BufferBuilder bytes_builder_; + int64_t bit_length_ = 0; + int64_t false_count_ = 0; +}; + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h new file mode 100644 index 0000000000000000000000000000000000000000..c5dad1a17b18ee145a3840badd9f9317c9325c72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/chunk_resolver.h @@ -0,0 +1,164 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" + +namespace arrow::internal { + +struct ChunkLocation { + /// \brief Index of the chunk in the array of chunks + /// + /// The value is always in the range `[0, chunks.size()]`. `chunks.size()` is used + /// to represent out-of-bounds locations. + int64_t chunk_index = 0; + + /// \brief Index of the value in the chunk + /// + /// The value is undefined if chunk_index >= chunks.size() + int64_t index_in_chunk = 0; +}; + +/// \brief An utility that incrementally resolves logical indices into +/// physical indices in a chunked array. +struct ARROW_EXPORT ChunkResolver { + private: + /// \brief Array containing `chunks.size() + 1` offsets. + /// + /// `offsets_[i]` is the starting logical index of chunk `i`. `offsets_[0]` is always 0 + /// and `offsets_[chunks.size()]` is the logical length of the chunked array. + std::vector offsets_; + + /// \brief Cache of the index of the last resolved chunk. + /// + /// \invariant `cached_chunk_ in [0, chunks.size()]` + mutable std::atomic cached_chunk_; + + public: + explicit ChunkResolver(const ArrayVector& chunks) noexcept; + explicit ChunkResolver(const std::vector& chunks) noexcept; + explicit ChunkResolver(const RecordBatchVector& batches) noexcept; + + ChunkResolver(ChunkResolver&& other) noexcept; + ChunkResolver& operator=(ChunkResolver&& other) noexcept; + + ChunkResolver(const ChunkResolver& other) noexcept; + ChunkResolver& operator=(const ChunkResolver& other) noexcept; + + /// \brief Resolve a logical index to a ChunkLocation. + /// + /// The returned ChunkLocation contains the chunk index and the within-chunk index + /// equivalent to the logical index. + /// + /// \pre index >= 0 + /// \post location.chunk_index in [0, chunks.size()] + /// \param index The logical index to resolve + /// \return ChunkLocation with a valid chunk_index if index is within + /// bounds, or with chunk_index == chunks.size() if logical index is + /// `>= chunked_array.length()`. + inline ChunkLocation Resolve(int64_t index) const { + const auto cached_chunk = cached_chunk_.load(std::memory_order_relaxed); + const auto chunk_index = + ResolveChunkIndex(index, cached_chunk); + return {chunk_index, index - offsets_[chunk_index]}; + } + + /// \brief Resolve a logical index to a ChunkLocation. + /// + /// The returned ChunkLocation contains the chunk index and the within-chunk index + /// equivalent to the logical index. + /// + /// \pre index >= 0 + /// \post location.chunk_index in [0, chunks.size()] + /// \param index The logical index to resolve + /// \param hint ChunkLocation{} or the last ChunkLocation returned by + /// this ChunkResolver. + /// \return ChunkLocation with a valid chunk_index if index is within + /// bounds, or with chunk_index == chunks.size() if logical index is + /// `>= chunked_array.length()`. + inline ChunkLocation ResolveWithChunkIndexHint(int64_t index, + ChunkLocation hint) const { + assert(hint.chunk_index < static_cast(offsets_.size())); + const auto chunk_index = + ResolveChunkIndex(index, hint.chunk_index); + return {chunk_index, index - offsets_[chunk_index]}; + } + + private: + template + inline int64_t ResolveChunkIndex(int64_t index, int64_t cached_chunk) const { + // It is common for algorithms sequentially processing arrays to make consecutive + // accesses at a relatively small distance from each other, hence often falling in the + // same chunk. 
+ // + // This is guaranteed when merging (assuming each side of the merge uses its + // own resolver), and is the most common case in recursive invocations of + // partitioning. + const auto num_offsets = static_cast(offsets_.size()); + const int64_t* offsets = offsets_.data(); + if (ARROW_PREDICT_TRUE(index >= offsets[cached_chunk]) && + (cached_chunk + 1 == num_offsets || index < offsets[cached_chunk + 1])) { + return cached_chunk; + } + // lo < hi is guaranteed by `num_offsets = chunks.size() + 1` + const auto chunk_index = Bisect(index, offsets, /*lo=*/0, /*hi=*/num_offsets); + if constexpr (StoreCachedChunk) { + assert(chunk_index < static_cast(offsets_.size())); + cached_chunk_.store(chunk_index, std::memory_order_relaxed); + } + return chunk_index; + } + + /// \brief Find the index of the chunk that contains the logical index. + /// + /// Any non-negative index is accepted. When `hi=num_offsets`, the largest + /// possible return value is `num_offsets-1` which is equal to + /// `chunks.size()`. The is returned when the logical index is out-of-bounds. + /// + /// \pre index >= 0 + /// \pre lo < hi + /// \pre lo >= 0 && hi <= offsets_.size() + static inline int64_t Bisect(int64_t index, const int64_t* offsets, int64_t lo, + int64_t hi) { + // Similar to std::upper_bound(), but slightly different as our offsets + // array always starts with 0. + auto n = hi - lo; + // First iteration does not need to check for n > 1 + // (lo < hi is guaranteed by the precondition). + assert(n > 1 && "lo < hi is a precondition of Bisect"); + do { + const int64_t m = n >> 1; + const int64_t mid = lo + m; + if (index >= offsets[mid]) { + lo = mid; + n -= m; + } else { + n = m; + } + } while (n > 1); + return lo; + } +}; + +} // namespace arrow::internal diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..98c6dc3e211b8231586283a2bf54b823eb5cc1ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/memory_pool.h @@ -0,0 +1,296 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +namespace internal { + +/////////////////////////////////////////////////////////////////////// +// Helper tracking memory statistics + +/// \brief Memory pool statistics +/// +/// 64-byte aligned so that all atomic values are on the same cache line. +class alignas(64) MemoryPoolStats { + private: + // All atomics are updated according to Acquire-Release ordering. 
+ // https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering + // + // max_memory_, total_allocated_bytes_, and num_allocs_ only go up (they are + // monotonically increasing) which can allow some optimizations. + std::atomic max_memory_{0}; + std::atomic bytes_allocated_{0}; + std::atomic total_allocated_bytes_{0}; + std::atomic num_allocs_{0}; + + public: + int64_t max_memory() const { return max_memory_.load(std::memory_order_acquire); } + + int64_t bytes_allocated() const { + return bytes_allocated_.load(std::memory_order_acquire); + } + + int64_t total_bytes_allocated() const { + return total_allocated_bytes_.load(std::memory_order_acquire); + } + + int64_t num_allocations() const { return num_allocs_.load(std::memory_order_acquire); } + + inline void DidAllocateBytes(int64_t size) { + // Issue the load before everything else. max_memory_ is monotonically increasing, + // so we can use a relaxed load before the read-modify-write. + auto max_memory = max_memory_.load(std::memory_order_relaxed); + const auto old_bytes_allocated = + bytes_allocated_.fetch_add(size, std::memory_order_acq_rel); + // Issue store operations on values that we don't depend on to proceed + // with execution. When done, max_memory and old_bytes_allocated have + // a higher chance of being available on CPU registers. This also has the + // nice side-effect of putting 3 atomic stores close to each other in the + // instruction stream. + total_allocated_bytes_.fetch_add(size, std::memory_order_acq_rel); + num_allocs_.fetch_add(1, std::memory_order_acq_rel); + + // If other threads are updating max_memory_ concurrently we leave the loop without + // updating knowing that it already reached a value even higher than ours. + const auto allocated = old_bytes_allocated + size; + while (max_memory < allocated && !max_memory_.compare_exchange_weak( + /*expected=*/max_memory, /*desired=*/allocated, + std::memory_order_acq_rel)) { + } + } + + inline void DidReallocateBytes(int64_t old_size, int64_t new_size) { + if (new_size > old_size) { + DidAllocateBytes(new_size - old_size); + } else { + DidFreeBytes(old_size - new_size); + } + } + + inline void DidFreeBytes(int64_t size) { + bytes_allocated_.fetch_sub(size, std::memory_order_acq_rel); + } +}; + +} // namespace internal + +/// Base class for memory allocation on the CPU. +/// +/// Besides tracking the number of allocated bytes, the allocator also should +/// take care of the required 64-byte alignment. +class ARROW_EXPORT MemoryPool { + public: + virtual ~MemoryPool() = default; + + /// \brief EXPERIMENTAL. Create a new instance of the default MemoryPool + static std::unique_ptr CreateDefault(); + + /// Allocate a new memory region of at least size bytes. + /// + /// The allocated region shall be 64-byte aligned. + Status Allocate(int64_t size, uint8_t** out) { + return Allocate(size, kDefaultBufferAlignment, out); + } + + /// Allocate a new memory region of at least size bytes aligned to alignment. + virtual Status Allocate(int64_t size, int64_t alignment, uint8_t** out) = 0; + + /// Resize an already allocated memory section. + /// + /// As by default most default allocators on a platform don't support aligned + /// reallocation, this function can involve a copy of the underlying data. 
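+  ///
+  /// Illustrative sketch (the 64/128 byte sizes are arbitrary; assumes a
+  /// caller that returns arrow::Status):
+  ///
+  /// ```
+  /// arrow::MemoryPool* pool = arrow::default_memory_pool();
+  /// uint8_t* data = nullptr;
+  /// ARROW_RETURN_NOT_OK(pool->Allocate(64, &data));
+  /// ARROW_RETURN_NOT_OK(pool->Reallocate(64, 128, &data));  // may copy the contents
+  /// pool->Free(data, 128);
+  /// ```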
+ virtual Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment, + uint8_t** ptr) = 0; + Status Reallocate(int64_t old_size, int64_t new_size, uint8_t** ptr) { + return Reallocate(old_size, new_size, kDefaultBufferAlignment, ptr); + } + + /// Free an allocated region. + /// + /// @param buffer Pointer to the start of the allocated memory region + /// @param size Allocated size located at buffer. An allocator implementation + /// may use this for tracking the amount of allocated bytes as well as for + /// faster deallocation if supported by its backend. + /// @param alignment The alignment of the allocation. Defaults to 64 bytes. + virtual void Free(uint8_t* buffer, int64_t size, int64_t alignment) = 0; + void Free(uint8_t* buffer, int64_t size) { + Free(buffer, size, kDefaultBufferAlignment); + } + + /// Return unused memory to the OS + /// + /// Only applies to allocators that hold onto unused memory. This will be + /// best effort, a memory pool may not implement this feature or may be + /// unable to fulfill the request due to fragmentation. + virtual void ReleaseUnused() {} + + /// The number of bytes that were allocated and not yet free'd through + /// this allocator. + virtual int64_t bytes_allocated() const = 0; + + /// Return peak memory allocation in this memory pool + /// + /// \return Maximum bytes allocated. If not known (or not implemented), + /// returns -1 + virtual int64_t max_memory() const; + + /// The number of bytes that were allocated. + virtual int64_t total_bytes_allocated() const = 0; + + /// The number of allocations or reallocations that were requested. + virtual int64_t num_allocations() const = 0; + + /// The name of the backend used by this MemoryPool (e.g. "system" or "jemalloc"). + virtual std::string backend_name() const = 0; + + protected: + MemoryPool() = default; +}; + +class ARROW_EXPORT LoggingMemoryPool : public MemoryPool { + public: + explicit LoggingMemoryPool(MemoryPool* pool); + ~LoggingMemoryPool() override = default; + + using MemoryPool::Allocate; + using MemoryPool::Free; + using MemoryPool::Reallocate; + + Status Allocate(int64_t size, int64_t alignment, uint8_t** out) override; + Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment, + uint8_t** ptr) override; + void Free(uint8_t* buffer, int64_t size, int64_t alignment) override; + + int64_t bytes_allocated() const override; + + int64_t max_memory() const override; + + int64_t total_bytes_allocated() const override; + + int64_t num_allocations() const override; + + std::string backend_name() const override; + + private: + MemoryPool* pool_; +}; + +/// Derived class for memory allocation. +/// +/// Tracks the number of bytes and maximum memory allocated through its direct +/// calls. Actual allocation is delegated to MemoryPool class. 
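+///
+/// Illustrative sketch (the 256 byte size is arbitrary; assumes a caller that
+/// returns arrow::Status):
+///
+/// ```
+/// arrow::ProxyMemoryPool scoped_pool(arrow::default_memory_pool());
+/// uint8_t* data = nullptr;
+/// ARROW_RETURN_NOT_OK(scoped_pool.Allocate(256, &data));
+/// int64_t tracked = scoped_pool.bytes_allocated();  // tracks only this pool's calls
+/// scoped_pool.Free(data, 256);
+/// ```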
+class ARROW_EXPORT ProxyMemoryPool : public MemoryPool { + public: + explicit ProxyMemoryPool(MemoryPool* pool); + ~ProxyMemoryPool() override; + + using MemoryPool::Allocate; + using MemoryPool::Free; + using MemoryPool::Reallocate; + + Status Allocate(int64_t size, int64_t alignment, uint8_t** out) override; + Status Reallocate(int64_t old_size, int64_t new_size, int64_t alignment, + uint8_t** ptr) override; + void Free(uint8_t* buffer, int64_t size, int64_t alignment) override; + + int64_t bytes_allocated() const override; + + int64_t max_memory() const override; + + int64_t total_bytes_allocated() const override; + + int64_t num_allocations() const override; + + std::string backend_name() const override; + + private: + class ProxyMemoryPoolImpl; + std::unique_ptr impl_; +}; + +/// \brief Return a process-wide memory pool based on the system allocator. +ARROW_EXPORT MemoryPool* system_memory_pool(); + +/// \brief Return a process-wide memory pool based on jemalloc. +/// +/// May return NotImplemented if jemalloc is not available. +ARROW_EXPORT Status jemalloc_memory_pool(MemoryPool** out); + +/// \brief Set jemalloc memory page purging behavior for future-created arenas +/// to the indicated number of milliseconds. See dirty_decay_ms and +/// muzzy_decay_ms options in jemalloc for a description of what these do. The +/// default is configured to 1000 (1 second) which releases memory more +/// aggressively to the operating system than the jemalloc default of 10 +/// seconds. If you set the value to 0, dirty / muzzy pages will be released +/// immediately rather than with a time decay, but this may reduce application +/// performance. +ARROW_EXPORT +Status jemalloc_set_decay_ms(int ms); + +/// \brief Get basic statistics from jemalloc's mallctl. +/// See the MALLCTL NAMESPACE section in jemalloc project documentation for +/// available stats. +ARROW_EXPORT +Result jemalloc_get_stat(const char* name); + +/// \brief Reset the counter for peak bytes allocated in the calling thread to zero. +/// This affects subsequent calls to thread.peak.read, but not the values returned by +/// thread.allocated or thread.deallocated. +ARROW_EXPORT +Status jemalloc_peak_reset(); + +/// \brief Print summary statistics in human-readable form to stderr. +/// See malloc_stats_print documentation in jemalloc project documentation for +/// available opt flags. +ARROW_EXPORT +Status jemalloc_stats_print(const char* opts = ""); + +/// \brief Print summary statistics in human-readable form using a callback +/// See malloc_stats_print documentation in jemalloc project documentation for +/// available opt flags. +ARROW_EXPORT +Status jemalloc_stats_print(std::function write_cb, + const char* opts = ""); + +/// \brief Get summary statistics in human-readable form. +/// See malloc_stats_print documentation in jemalloc project documentation for +/// available opt flags. +ARROW_EXPORT +Result jemalloc_stats_string(const char* opts = ""); + +/// \brief Return a process-wide memory pool based on mimalloc. +/// +/// May return NotImplemented if mimalloc is not available. +ARROW_EXPORT Status mimalloc_memory_pool(MemoryPool** out); + +/// \brief Return the names of the backends supported by this Arrow build. 
+ARROW_EXPORT std::vector SupportedMemoryBackendNames(); + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h new file mode 100644 index 0000000000000000000000000000000000000000..cd647a88abd972da12a6d84091e7bcc0a679c728 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/record_batch.h @@ -0,0 +1,407 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/compare.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/iterator.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +/// \class RecordBatch +/// \brief Collection of equal-length arrays matching a particular Schema +/// +/// A record batch is table-like data structure that is semantically a sequence +/// of fields, each a contiguous Arrow array +class ARROW_EXPORT RecordBatch { + public: + virtual ~RecordBatch() = default; + + /// \param[in] schema The record batch schema + /// \param[in] num_rows length of fields in the record batch. Each array + /// should have the same length as num_rows + /// \param[in] columns the record batch fields as vector of arrays + static std::shared_ptr Make(std::shared_ptr schema, + int64_t num_rows, + std::vector> columns); + + /// \brief Construct record batch from vector of internal data structures + /// \since 0.5.0 + /// + /// This class is intended for internal use, or advanced users. + /// + /// \param schema the record batch schema + /// \param num_rows the number of semantic rows in the record batch. This + /// should be equal to the length of each field + /// \param columns the data for the batch's columns + static std::shared_ptr Make( + std::shared_ptr schema, int64_t num_rows, + std::vector> columns); + + /// \brief Create an empty RecordBatch of a given schema + /// + /// The output RecordBatch will be created with DataTypes from + /// the given schema. + /// + /// \param[in] schema the schema of the empty RecordBatch + /// \param[in] pool the memory pool to allocate memory from + /// \return the resulting RecordBatch + static Result> MakeEmpty( + std::shared_ptr schema, MemoryPool* pool = default_memory_pool()); + + /// \brief Convert record batch to struct array + /// + /// Create a struct array whose child arrays are the record batch's columns. + /// Note that the record batch's top-level field metadata cannot be reflected + /// in the resulting struct array. 
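+  ///
+  /// Illustrative sketch (assumes `batch` is an existing
+  /// std::shared_ptr<RecordBatch> and the caller returns arrow::Status):
+  ///
+  /// ```
+  /// ARROW_ASSIGN_OR_RAISE(std::shared_ptr<StructArray> as_struct,
+  ///                       batch->ToStructArray());
+  /// // as_struct->length() equals batch->num_rows()
+  /// ```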
+ Result> ToStructArray() const; + + /// \brief Convert record batch with one data type to Tensor + /// + /// Create a Tensor object with shape (number of rows, number of columns) and + /// strides (type size in bytes, type size in bytes * number of rows). + /// Generated Tensor will have column-major layout. + /// + /// \param[in] null_to_nan if true, convert nulls to NaN + /// \param[in] row_major if true, create row-major Tensor else column-major Tensor + /// \param[in] pool the memory pool to allocate the tensor buffer + /// \return the resulting Tensor + Result> ToTensor( + bool null_to_nan = false, bool row_major = true, + MemoryPool* pool = default_memory_pool()) const; + + /// \brief Construct record batch from struct array + /// + /// This constructs a record batch using the child arrays of the given + /// array, which must be a struct array. + /// + /// \param[in] array the source array, must be a StructArray + /// \param[in] pool the memory pool to allocate new validity bitmaps + /// + /// This operation will usually be zero-copy. However, if the struct array has an + /// offset or a validity bitmap then these will need to be pushed into the child arrays. + /// Pushing the offset is zero-copy but pushing the validity bitmap is not. + static Result> FromStructArray( + const std::shared_ptr& array, MemoryPool* pool = default_memory_pool()); + + /// \brief Determine if two record batches are exactly equal + /// + /// \param[in] other the RecordBatch to compare with + /// \param[in] check_metadata if true, check that Schema metadata is the same + /// \param[in] opts the options for equality comparisons + /// \return true if batches are equal + bool Equals(const RecordBatch& other, bool check_metadata = false, + const EqualOptions& opts = EqualOptions::Defaults()) const; + + /// \brief Determine if two record batches are approximately equal + /// + /// \param[in] other the RecordBatch to compare with + /// \param[in] opts the options for equality comparisons + /// \return true if batches are approximately equal + bool ApproxEquals(const RecordBatch& other, + const EqualOptions& opts = EqualOptions::Defaults()) const; + + /// \return the record batch's schema + const std::shared_ptr& schema() const { return schema_; } + + /// \brief Replace the schema with another schema with the same types, but potentially + /// different field names and/or metadata. + Result> ReplaceSchema( + std::shared_ptr schema) const; + + /// \brief Retrieve all columns at once + virtual const std::vector>& columns() const = 0; + + /// \brief Retrieve an array from the record batch + /// \param[in] i field index, does not boundscheck + /// \return an Array object + virtual std::shared_ptr column(int i) const = 0; + + /// \brief Retrieve an array from the record batch + /// \param[in] name field name + /// \return an Array or null if no field was found + std::shared_ptr GetColumnByName(const std::string& name) const; + + /// \brief Retrieve an array's internal data from the record batch + /// \param[in] i field index, does not boundscheck + /// \return an internal ArrayData object + virtual std::shared_ptr column_data(int i) const = 0; + + /// \brief Retrieve all arrays' internal data from the record batch. 
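+  ///
+  /// Illustrative sketch (assumes `batch` is an existing
+  /// std::shared_ptr<RecordBatch>):
+  ///
+  /// ```
+  /// for (const std::shared_ptr<ArrayData>& data : batch->column_data()) {
+  ///   // data->length equals batch->num_rows() for every column
+  /// }
+  /// ```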
+ virtual const ArrayDataVector& column_data() const = 0; + + /// \brief Add column to the record batch, producing a new RecordBatch + /// + /// \param[in] i field index, which will be boundschecked + /// \param[in] field field to be added + /// \param[in] column column to be added + virtual Result> AddColumn( + int i, const std::shared_ptr& field, + const std::shared_ptr& column) const = 0; + + /// \brief Add new nullable column to the record batch, producing a new + /// RecordBatch. + /// + /// For non-nullable columns, use the Field-based version of this method. + /// + /// \param[in] i field index, which will be boundschecked + /// \param[in] field_name name of field to be added + /// \param[in] column column to be added + virtual Result> AddColumn( + int i, std::string field_name, const std::shared_ptr& column) const; + + /// \brief Replace a column in the record batch, producing a new RecordBatch + /// + /// \param[in] i field index, does boundscheck + /// \param[in] field field to be replaced + /// \param[in] column column to be replaced + virtual Result> SetColumn( + int i, const std::shared_ptr& field, + const std::shared_ptr& column) const = 0; + + /// \brief Remove column from the record batch, producing a new RecordBatch + /// + /// \param[in] i field index, does boundscheck + virtual Result> RemoveColumn(int i) const = 0; + + virtual std::shared_ptr ReplaceSchemaMetadata( + const std::shared_ptr& metadata) const = 0; + + /// \brief Name in i-th column + const std::string& column_name(int i) const; + + /// \return the number of columns in the table + int num_columns() const; + + /// \return the number of rows (the corresponding length of each column) + int64_t num_rows() const { return num_rows_; } + + /// \brief Copy the entire RecordBatch to destination MemoryManager + /// + /// This uses Array::CopyTo on each column of the record batch to create + /// a new record batch where all underlying buffers for the columns have + /// been copied to the destination MemoryManager. This uses + /// MemoryManager::CopyBuffer under the hood. + Result> CopyTo( + const std::shared_ptr& to) const; + + /// \brief View or Copy the entire RecordBatch to destination MemoryManager + /// + /// This uses Array::ViewOrCopyTo on each column of the record batch to create + /// a new record batch where all underlying buffers for the columns have + /// been zero-copy viewed on the destination MemoryManager, falling back + /// to performing a copy if it can't be viewed as a zero-copy buffer. This uses + /// Buffer::ViewOrCopy under the hood. 
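+  ///
+  /// Illustrative sketch (assumes `batch` and a destination MemoryManager
+  /// `mm` already exist, and the caller returns arrow::Status):
+  ///
+  /// ```
+  /// ARROW_ASSIGN_OR_RAISE(std::shared_ptr<RecordBatch> viewed,
+  ///                       batch->ViewOrCopyTo(mm));
+  /// ```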
+ Result> ViewOrCopyTo( + const std::shared_ptr& to) const; + + /// \brief Slice each of the arrays in the record batch + /// \param[in] offset the starting offset to slice, through end of batch + /// \return new record batch + virtual std::shared_ptr Slice(int64_t offset) const; + + /// \brief Slice each of the arrays in the record batch + /// \param[in] offset the starting offset to slice + /// \param[in] length the number of elements to slice from offset + /// \return new record batch + virtual std::shared_ptr Slice(int64_t offset, int64_t length) const = 0; + + /// \return PrettyPrint representation suitable for debugging + std::string ToString() const; + + /// \brief Return names of all columns + std::vector ColumnNames() const; + + /// \brief Rename columns with provided names + Result> RenameColumns( + const std::vector& names) const; + + /// \brief Return new record batch with specified columns + Result> SelectColumns( + const std::vector& indices) const; + + /// \brief Perform cheap validation checks to determine obvious inconsistencies + /// within the record batch's schema and internal data. + /// + /// This is O(k) where k is the total number of fields and array descendents. + /// + /// \return Status + virtual Status Validate() const; + + /// \brief Perform extensive validation checks to determine inconsistencies + /// within the record batch's schema and internal data. + /// + /// This is potentially O(k*n) where n is the number of rows. + /// + /// \return Status + virtual Status ValidateFull() const; + + protected: + RecordBatch(const std::shared_ptr& schema, int64_t num_rows); + + std::shared_ptr schema_; + int64_t num_rows_; + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(RecordBatch); +}; + +struct ARROW_EXPORT RecordBatchWithMetadata { + std::shared_ptr batch; + std::shared_ptr custom_metadata; +}; + +/// \brief Abstract interface for reading stream of record batches +class ARROW_EXPORT RecordBatchReader { + public: + using ValueType = std::shared_ptr; + + virtual ~RecordBatchReader(); + + /// \return the shared schema of the record batches in the stream + virtual std::shared_ptr schema() const = 0; + + /// \brief Read the next record batch in the stream. 
Return null for batch + /// when reaching end of stream + /// + /// \param[out] batch the next loaded batch, null at end of stream + /// \return Status + virtual Status ReadNext(std::shared_ptr* batch) = 0; + + virtual Result ReadNext() { + return Status::NotImplemented("ReadNext with custom metadata"); + } + + /// \brief Iterator interface + Result> Next() { + std::shared_ptr batch; + ARROW_RETURN_NOT_OK(ReadNext(&batch)); + return batch; + } + + /// \brief finalize reader + virtual Status Close() { return Status::OK(); } + + class RecordBatchReaderIterator { + public: + using iterator_category = std::input_iterator_tag; + using difference_type = std::ptrdiff_t; + using value_type = std::shared_ptr; + using pointer = value_type const*; + using reference = value_type const&; + + RecordBatchReaderIterator() : batch_(RecordBatchEnd()), reader_(NULLPTR) {} + + explicit RecordBatchReaderIterator(RecordBatchReader* reader) + : batch_(RecordBatchEnd()), reader_(reader) { + Next(); + } + + bool operator==(const RecordBatchReaderIterator& other) const { + return batch_ == other.batch_; + } + + bool operator!=(const RecordBatchReaderIterator& other) const { + return !(*this == other); + } + + Result> operator*() { + ARROW_RETURN_NOT_OK(batch_.status()); + + return batch_; + } + + RecordBatchReaderIterator& operator++() { + Next(); + return *this; + } + + RecordBatchReaderIterator operator++(int) { + RecordBatchReaderIterator tmp(*this); + Next(); + return tmp; + } + + private: + std::shared_ptr RecordBatchEnd() { + return std::shared_ptr(NULLPTR); + } + + void Next() { + if (reader_ == NULLPTR) { + batch_ = RecordBatchEnd(); + return; + } + batch_ = reader_->Next(); + } + + Result> batch_; + RecordBatchReader* reader_; + }; + /// \brief Return an iterator to the first record batch in the stream + RecordBatchReaderIterator begin() { return RecordBatchReaderIterator(this); } + + /// \brief Return an iterator to the end of the stream + RecordBatchReaderIterator end() { return RecordBatchReaderIterator(); } + + /// \brief Consume entire stream as a vector of record batches + Result ToRecordBatches(); + + /// \brief Read all batches and concatenate as arrow::Table + Result> ToTable(); + + /// \brief Create a RecordBatchReader from a vector of RecordBatch. + /// + /// \param[in] batches the vector of RecordBatch to read from + /// \param[in] schema schema to conform to. Will be inferred from the first + /// element if not provided. + static Result> Make( + RecordBatchVector batches, std::shared_ptr schema = NULLPTR); + + /// \brief Create a RecordBatchReader from an Iterator of RecordBatch. + /// + /// \param[in] batches an iterator of RecordBatch to read from. + /// \param[in] schema schema that each record batch in iterator will conform to. + static Result> MakeFromIterator( + Iterator> batches, std::shared_ptr schema); +}; + +/// \brief Concatenate record batches +/// +/// The columns of the new batch are formed by concatenate the same columns of each input +/// batch. Concatenate multiple batches into a new batch requires that the schema must be +/// consistent. It supports merging batches without columns (only length, scenarios such +/// as count(*)). 
+/// +/// \param[in] batches a vector of record batches to be concatenated +/// \param[in] pool memory to store the result will be allocated from this memory pool +/// \return the concatenated record batch +ARROW_EXPORT +Result> ConcatenateRecordBatches( + const RecordBatchVector& batches, MemoryPool* pool = default_memory_pool()); + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/result.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/result.h new file mode 100644 index 0000000000000000000000000000000000000000..6786d2b3fcbfdbb5533d9339bd6e6b38bfa0ca76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/result.h @@ -0,0 +1,508 @@ +// +// Copyright 2017 Asylo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Adapted from Asylo + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/status.h" +#include "arrow/util/aligned_storage.h" +#include "arrow/util/compare.h" + +namespace arrow { + +template +struct EnsureResult; + +namespace internal { + +ARROW_EXPORT void DieWithMessage(const std::string& msg); + +ARROW_EXPORT void InvalidValueOrDie(const Status& st); + +} // namespace internal + +/// A class for representing either a usable value, or an error. +/// +/// A Result object either contains a value of type `T` or a Status object +/// explaining why such a value is not present. The type `T` must be +/// copy-constructible and/or move-constructible. +/// +/// The state of a Result object may be determined by calling ok() or +/// status(). The ok() method returns true if the object contains a valid value. +/// The status() method returns the internal Status object. A Result object +/// that contains a valid value will return an OK Status for a call to status(). +/// +/// A value of type `T` may be extracted from a Result object through a call +/// to ValueOrDie(). This function should only be called if a call to ok() +/// returns true. Sample usage: +/// +/// ``` +/// arrow::Result result = CalculateFoo(); +/// if (result.ok()) { +/// Foo foo = result.ValueOrDie(); +/// foo.DoSomethingCool(); +/// } else { +/// ARROW_LOG(ERROR) << result.status(); +/// } +/// ``` +/// +/// If `T` is a move-only type, like `std::unique_ptr<>`, then the value should +/// only be extracted after invoking `std::move()` on the Result object. +/// Sample usage: +/// +/// ``` +/// arrow::Result> result = CalculateFoo(); +/// if (result.ok()) { +/// std::unique_ptr foo = std::move(result).ValueOrDie(); +/// foo->DoSomethingCool(); +/// } else { +/// ARROW_LOG(ERROR) << result.status(); +/// } +/// ``` +/// +/// Result is provided for the convenience of implementing functions that +/// return some value but may fail during execution. 
For instance, consider a +/// function with the following signature: +/// +/// ``` +/// arrow::Status CalculateFoo(int *output); +/// ``` +/// +/// This function may instead be written as: +/// +/// ``` +/// arrow::Result CalculateFoo(); +/// ``` +template +class [[nodiscard]] Result : public util::EqualityComparable> { + template + friend class Result; + + static_assert(!std::is_same::value, + "this assert indicates you have probably made a metaprogramming error"); + + public: + using ValueType = T; + + /// Constructs a Result object that contains a non-OK status. + /// + /// This constructor is marked `explicit` to prevent attempts to `return {}` + /// from a function with a return type of, for example, + /// `Result>`. While `return {}` seems like it would return + /// an empty vector, it will actually invoke the default constructor of + /// Result. + explicit Result() noexcept // NOLINT(runtime/explicit) + : status_(Status::UnknownError("Uninitialized Result")) {} + + ~Result() noexcept { Destroy(); } + + /// Constructs a Result object with the given non-OK Status object. All + /// calls to ValueOrDie() on this object will abort. The given `status` must + /// not be an OK status, otherwise this constructor will abort. + /// + /// This constructor is not declared explicit so that a function with a return + /// type of `Result` can return a Status object, and the status will be + /// implicitly converted to the appropriate return type as a matter of + /// convenience. + /// + /// \param status The non-OK Status object to initialize to. + Result(const Status& status) noexcept // NOLINT(runtime/explicit) + : status_(status) { + if (ARROW_PREDICT_FALSE(status.ok())) { + internal::DieWithMessage(std::string("Constructed with a non-error status: ") + + status.ToString()); + } + } + + /// Constructs a Result object that contains `value`. The resulting object + /// is considered to have an OK status. The wrapped element can be accessed + /// with ValueOrDie(). + /// + /// This constructor is made implicit so that a function with a return type of + /// `Result` can return an object of type `U &&`, implicitly converting + /// it to a `Result` object. + /// + /// Note that `T` must be implicitly constructible from `U`, and `U` must not + /// be a (cv-qualified) Status or Status-reference type. Due to C++ + /// reference-collapsing rules and perfect-forwarding semantics, this + /// constructor matches invocations that pass `value` either as a const + /// reference or as an rvalue reference. Since Result needs to work for both + /// reference and rvalue-reference types, the constructor uses perfect + /// forwarding to avoid invalidating arguments that were passed by reference. + /// See http://thbecker.net/articles/rvalue_references/section_08.html for + /// additional details. + /// + /// \param value The value to initialize to. + template ::value && std::is_convertible::value && + !std::is_same::type>::type, + Status>::value>::type> + Result(U&& value) noexcept { // NOLINT(runtime/explicit) + ConstructValue(std::forward(value)); + } + + /// Constructs a Result object that contains `value`. The resulting object + /// is considered to have an OK status. The wrapped element can be accessed + /// with ValueOrDie(). + /// + /// This constructor is made implicit so that a function with a return type of + /// `Result` can return an object of type `T`, implicitly converting + /// it to a `Result` object. + /// + /// \param value The value to initialize to. 
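A brief, hedged sketch of the constructors documented so far: a Result can be produced implicitly from a non-OK Status or from a value, and default construction leaves it holding an "Uninitialized Result" error. `Widget` and `MakeWidget` are hypothetical names, not part of Arrow.

```
#include <cassert>

#include "arrow/result.h"
#include "arrow/status.h"

// Illustrative only: Widget and MakeWidget are hypothetical.
struct Widget {
  explicit Widget(int size) : size(size) {}
  int size;
};

arrow::Result<Widget> MakeWidget(int size) {
  if (size <= 0) {
    // Implicit conversion from a non-OK Status.
    return arrow::Status::Invalid("size must be positive");
  }
  // Implicit conversion from a value (the value constructors above).
  return Widget(size);
}

void ConstructorDemo() {
  arrow::Result<Widget> made = MakeWidget(3);
  assert(made.ok() && made.ValueOrDie().size == 3);

  arrow::Result<Widget> failed = MakeWidget(-1);
  assert(!failed.ok());

  // Default construction holds a non-OK "Uninitialized Result" status.
  arrow::Result<Widget> uninit;
  assert(!uninit.ok());
}
```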
+ // NOTE `Result(U&& value)` above should be sufficient, but some compilers + // fail matching it. + Result(T&& value) noexcept { // NOLINT(runtime/explicit) + ConstructValue(std::move(value)); + } + + /// Copy constructor. + /// + /// This constructor needs to be explicitly defined because the presence of + /// the move-assignment operator deletes the default copy constructor. In such + /// a scenario, since the deleted copy constructor has stricter binding rules + /// than the templated copy constructor, the templated constructor cannot act + /// as a copy constructor, and any attempt to copy-construct a `Result` + /// object results in a compilation error. + /// + /// \param other The value to copy from. + Result(const Result& other) noexcept : status_(other.status_) { + if (ARROW_PREDICT_TRUE(status_.ok())) { + ConstructValue(other.ValueUnsafe()); + } + } + + /// Templatized constructor that constructs a `Result` from a const + /// reference to a `Result`. + /// + /// `T` must be implicitly constructible from `const U &`. + /// + /// \param other The value to copy from. + template ::value && + std::is_convertible::value>::type> + Result(const Result& other) noexcept : status_(other.status_) { + if (ARROW_PREDICT_TRUE(status_.ok())) { + ConstructValue(other.ValueUnsafe()); + } + } + + /// Copy-assignment operator. + /// + /// \param other The Result object to copy. + Result& operator=(const Result& other) noexcept { + // Check for self-assignment. + if (ARROW_PREDICT_FALSE(this == &other)) { + return *this; + } + Destroy(); + status_ = other.status_; + if (ARROW_PREDICT_TRUE(status_.ok())) { + ConstructValue(other.ValueUnsafe()); + } + return *this; + } + + /// Templatized constructor which constructs a `Result` by moving the + /// contents of a `Result`. `T` must be implicitly constructible from `U + /// &&`. + /// + /// Sets `other` to contain a non-OK status with a`StatusError::Invalid` + /// error code. + /// + /// \param other The Result object to move from and set to a non-OK status. + template ::value && + std::is_convertible::value>::type> + Result(Result&& other) noexcept { + if (ARROW_PREDICT_TRUE(other.status_.ok())) { + status_ = std::move(other.status_); + ConstructValue(other.MoveValueUnsafe()); + } else { + // If we moved the status, the other status may become ok but the other + // value hasn't been constructed => crash on other destructor. + status_ = other.status_; + } + } + + /// Move-assignment operator. + /// + /// Sets `other` to an invalid state.. + /// + /// \param other The Result object to assign from and set to a non-OK + /// status. + Result& operator=(Result&& other) noexcept { + // Check for self-assignment. + if (ARROW_PREDICT_FALSE(this == &other)) { + return *this; + } + Destroy(); + if (ARROW_PREDICT_TRUE(other.status_.ok())) { + status_ = std::move(other.status_); + ConstructValue(other.MoveValueUnsafe()); + } else { + // If we moved the status, the other status may become ok but the other + // value hasn't been constructed => crash on other destructor. + status_ = other.status_; + } + return *this; + } + + /// Compare to another Result. + bool Equals(const Result& other) const { + if (ARROW_PREDICT_TRUE(status_.ok())) { + return other.status_.ok() && ValueUnsafe() == other.ValueUnsafe(); + } + return status_ == other.status_; + } + + /// Indicates whether the object contains a `T` value. Generally instead + /// of accessing this directly you will want to use ASSIGN_OR_RAISE defined + /// below. 
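A small sketch of the converting copy and move constructors described above, assuming `Base`/`Derived` stand in for any pair of types where `std::shared_ptr<Derived>` converts to `std::shared_ptr<Base>`; neither type is part of Arrow.

```
#include <cassert>
#include <memory>
#include <utility>

#include "arrow/result.h"

// Illustrative only: Base and Derived are hypothetical types.
struct Base { virtual ~Base() = default; };
struct Derived : Base {};

void ConversionDemo() {
  arrow::Result<std::shared_ptr<Derived>> derived = std::make_shared<Derived>();

  // Templated copy conversion: shared_ptr<Base> is constructible from
  // const shared_ptr<Derived>&, so the Result converts as well.
  arrow::Result<std::shared_ptr<Base>> copied = derived;

  // Templated move conversion: `derived` must not be used afterwards,
  // since its stored value has been moved out.
  arrow::Result<std::shared_ptr<Base>> moved = std::move(derived);

  assert(copied.ok() && moved.ok());
}
```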
+ /// + /// \return True if this Result object's status is OK (i.e. a call to ok() + /// returns true). If this function returns true, then it is safe to access + /// the wrapped element through a call to ValueOrDie(). + constexpr bool ok() const { return status_.ok(); } + + /// \brief Equivalent to ok(). + // operator bool() const { return ok(); } + + /// Gets the stored status object, or an OK status if a `T` value is stored. + /// + /// \return The stored non-OK status object, or an OK status if this object + /// has a value. + constexpr const Status& status() const { return status_; } + + /// Gets the stored `T` value. + /// + /// This method should only be called if this Result object's status is OK + /// (i.e. a call to ok() returns true), otherwise this call will abort. + /// + /// \return The stored `T` value. + const T& ValueOrDie() const& { + if (ARROW_PREDICT_FALSE(!ok())) { + internal::InvalidValueOrDie(status_); + } + return ValueUnsafe(); + } + const T& operator*() const& { return ValueOrDie(); } + const T* operator->() const { return &ValueOrDie(); } + + /// Gets a mutable reference to the stored `T` value. + /// + /// This method should only be called if this Result object's status is OK + /// (i.e. a call to ok() returns true), otherwise this call will abort. + /// + /// \return The stored `T` value. + T& ValueOrDie() & { + if (ARROW_PREDICT_FALSE(!ok())) { + internal::InvalidValueOrDie(status_); + } + return ValueUnsafe(); + } + T& operator*() & { return ValueOrDie(); } + T* operator->() { return &ValueOrDie(); } + + /// Moves and returns the internally-stored `T` value. + /// + /// This method should only be called if this Result object's status is OK + /// (i.e. a call to ok() returns true), otherwise this call will abort. The + /// Result object is invalidated after this call and will be updated to + /// contain a non-OK status. + /// + /// \return The stored `T` value. + T ValueOrDie() && { + if (ARROW_PREDICT_FALSE(!ok())) { + internal::InvalidValueOrDie(status_); + } + return MoveValueUnsafe(); + } + T operator*() && { return std::move(*this).ValueOrDie(); } + + /// Helper method for implementing Status returning functions in terms of semantically + /// equivalent Result returning functions. For example: + /// + /// Status GetInt(int *out) { return GetInt().Value(out); } + template ::value>::type> + Status Value(U* out) && { + if (!ok()) { + return status(); + } + *out = U(MoveValueUnsafe()); + return Status::OK(); + } + + /// Move and return the internally stored value or alternative if an error is stored. + T ValueOr(T alternative) && { + if (!ok()) { + return alternative; + } + return MoveValueUnsafe(); + } + + /// Retrieve the value if ok(), falling back to an alternative generated by the provided + /// factory + template + T ValueOrElse(G&& generate_alternative) && { + if (ok()) { + return MoveValueUnsafe(); + } + return std::forward(generate_alternative)(); + } + + /// Apply a function to the internally stored value to produce a new result or propagate + /// the stored error. + template + typename EnsureResult()(std::declval()))>::type Map( + M&& m) && { + if (!ok()) { + return status(); + } + return std::forward(m)(MoveValueUnsafe()); + } + + /// Apply a function to the internally stored value to produce a new result or propagate + /// the stored error. 
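A hedged sketch of the accessor helpers above (`ValueOr`, `Map`, `Value`); `ParsePort` is a hypothetical function introduced only for illustration.

```
#include <exception>
#include <string>

#include "arrow/result.h"
#include "arrow/status.h"

// Hypothetical helper used to demonstrate the accessors.
arrow::Result<int> ParsePort(const std::string& s) {
  try {
    return std::stoi(s);
  } catch (const std::exception&) {
    return arrow::Status::Invalid("not a valid port: ", s);
  }
}

void AccessorDemo(const std::string& s) {
  // ValueOr: fall back to a default when an error is stored.
  int port = ParsePort(s).ValueOr(8080);

  // Map: transform the stored value, or propagate the stored error.
  arrow::Result<std::string> text =
      ParsePort(s).Map([](int p) { return std::to_string(p); });

  // Value: adapt a Result-returning call to a Status-returning signature.
  int out = 0;
  arrow::Status st = ParsePort(s).Value(&out);

  (void)port;
  (void)text;
  (void)st;
}
```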
+ template + typename EnsureResult()(std::declval()))>::type + Map(M&& m) const& { + if (!ok()) { + return status(); + } + return std::forward(m)(ValueUnsafe()); + } + + /// Cast the internally stored value to produce a new result or propagate the stored + /// error. + template ::value>::type> + Result As() && { + if (!ok()) { + return status(); + } + return U(MoveValueUnsafe()); + } + + /// Cast the internally stored value to produce a new result or propagate the stored + /// error. + template ::value>::type> + Result As() const& { + if (!ok()) { + return status(); + } + return U(ValueUnsafe()); + } + + constexpr const T& ValueUnsafe() const& { return *storage_.get(); } + + constexpr T& ValueUnsafe() & { return *storage_.get(); } + + T ValueUnsafe() && { return MoveValueUnsafe(); } + + T MoveValueUnsafe() { return std::move(*storage_.get()); } + + private: + Status status_; // pointer-sized + internal::AlignedStorage storage_; + + template + void ConstructValue(U&& u) noexcept { + storage_.construct(std::forward(u)); + } + + void Destroy() noexcept { + if (ARROW_PREDICT_TRUE(status_.ok())) { + static_assert(offsetof(Result, status_) == 0, + "Status is guaranteed to be at the start of Result<>"); + storage_.destroy(); + } + } +}; + +#define ARROW_ASSIGN_OR_RAISE_IMPL(result_name, lhs, rexpr) \ + auto&& result_name = (rexpr); \ + ARROW_RETURN_IF_(!(result_name).ok(), (result_name).status(), ARROW_STRINGIFY(rexpr)); \ + lhs = std::move(result_name).ValueUnsafe(); + +#define ARROW_ASSIGN_OR_RAISE_NAME(x, y) ARROW_CONCAT(x, y) + +/// \brief Execute an expression that returns a Result, extracting its value +/// into the variable defined by `lhs` (or returning a Status on error). +/// +/// Example: Assigning to a new value: +/// ARROW_ASSIGN_OR_RAISE(auto value, MaybeGetValue(arg)); +/// +/// Example: Assigning to an existing value: +/// ValueType value; +/// ARROW_ASSIGN_OR_RAISE(value, MaybeGetValue(arg)); +/// +/// WARNING: ARROW_ASSIGN_OR_RAISE expands into multiple statements; +/// it cannot be used in a single statement (e.g. as the body of an if +/// statement without {})! +/// +/// WARNING: ARROW_ASSIGN_OR_RAISE `std::move`s its right operand. If you have +/// an lvalue Result which you *don't* want to move out of cast appropriately. +/// +/// WARNING: ARROW_ASSIGN_OR_RAISE is not a single expression; it will not +/// maintain lifetimes of all temporaries in `rexpr` (e.g. +/// `ARROW_ASSIGN_OR_RAISE(auto x, MakeTemp().GetResultRef());` +/// will most likely segfault)! 
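A short usage sketch before the macro definition itself, reusing the hypothetical `MaybeGetValue` from the examples above and assuming it returns `arrow::Result<int>`.

```
// Hypothetical source of values, as in the documentation examples above.
arrow::Result<int> MaybeGetValue(int arg);

arrow::Result<int> DoubleIt(int arg) {
  // If MaybeGetValue(arg) carries an error Status, that Status is returned
  // from DoubleIt right here; otherwise `value` receives the unwrapped int.
  ARROW_ASSIGN_OR_RAISE(int value, MaybeGetValue(arg));
  return value * 2;
}
```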
+#define ARROW_ASSIGN_OR_RAISE(lhs, rexpr) \ + ARROW_ASSIGN_OR_RAISE_IMPL(ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), \ + lhs, rexpr); + +namespace internal { + +template +inline const Status& GenericToStatus(const Result& res) { + return res.status(); +} + +template +inline Status GenericToStatus(Result&& res) { + return std::move(res).status(); +} + +} // namespace internal + +template ::type> +R ToResult(T t) { + return R(std::move(t)); +} + +template +struct EnsureResult { + using type = Result; +}; + +template +struct EnsureResult> { + using type = Result; +}; + +} // namespace arrow diff --git a/venv/lib/python3.10/site-packages/pyarrow/include/arrow/scalar.h b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/scalar.h new file mode 100644 index 0000000000000000000000000000000000000000..65c5ee4df0a0405017fe2e80f84a061933d350f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/include/arrow/scalar.h @@ -0,0 +1,816 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Object model for scalar (non-Array) values. Not intended for use with large +// amounts of data + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/compare.h" +#include "arrow/extension_type.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/compare.h" +#include "arrow/util/decimal.h" +#include "arrow/util/visibility.h" +#include "arrow/visit_type_inline.h" + +namespace arrow { + +class Array; + +/// \brief Base class for scalar values +/// +/// A Scalar represents a single value with a specific DataType. +/// Scalars are useful for passing single value inputs to compute functions, +/// or for representing individual array elements (with a non-trivial +/// wrapping cost, though). +struct ARROW_EXPORT Scalar : public std::enable_shared_from_this, + public util::EqualityComparable { + virtual ~Scalar() = default; + + /// \brief The type of the scalar value + std::shared_ptr type; + + /// \brief Whether the value is valid (not null) or not + bool is_valid = false; + + bool Equals(const Scalar& other, + const EqualOptions& options = EqualOptions::Defaults()) const; + + bool ApproxEquals(const Scalar& other, + const EqualOptions& options = EqualOptions::Defaults()) const; + + struct ARROW_EXPORT Hash { + size_t operator()(const Scalar& scalar) const { return scalar.hash(); } + + size_t operator()(const std::shared_ptr& scalar) const { + return scalar->hash(); + } + }; + + size_t hash() const; + + std::string ToString() const; + + /// \brief Perform cheap validation checks + /// + /// This is O(k) where k is the number of descendents. 
+ /// + /// \return Status + Status Validate() const; + + /// \brief Perform extensive data validation checks + /// + /// This is potentially O(k*n) where k is the number of descendents and n + /// is the length of descendents (if list scalars are involved). + /// + /// \return Status + Status ValidateFull() const; + + static Result> Parse(const std::shared_ptr& type, + std::string_view repr); + + // TODO(bkietz) add compute::CastOptions + Result> CastTo(std::shared_ptr to) const; + + /// \brief Apply the ScalarVisitor::Visit() method specialized to the scalar type + Status Accept(ScalarVisitor* visitor) const; + + /// \brief EXPERIMENTAL Enable obtaining shared_ptr from a const + /// Scalar& context. + std::shared_ptr GetSharedPtr() const { + return const_cast(this)->shared_from_this(); + } + + protected: + Scalar(std::shared_ptr type, bool is_valid) + : type(std::move(type)), is_valid(is_valid) {} +}; + +ARROW_EXPORT void PrintTo(const Scalar& scalar, std::ostream* os); + +/// \defgroup concrete-scalar-classes Concrete Scalar subclasses +/// +/// @{ + +/// \brief A scalar value for NullType. Never valid +struct ARROW_EXPORT NullScalar : public Scalar { + public: + using TypeClass = NullType; + + NullScalar() : Scalar{null(), false} {} +}; + +/// @} + +namespace internal { + +struct ARROW_EXPORT ArraySpanFillFromScalarScratchSpace { + // 16 bytes of scratch space to enable ArraySpan to be a view onto any + // Scalar- including binary scalars where we need to create a buffer + // that looks like two 32-bit or 64-bit offsets. + alignas(int64_t) mutable uint8_t scratch_space_[sizeof(int64_t) * 2]; +}; + +struct ARROW_EXPORT PrimitiveScalarBase : public Scalar { + explicit PrimitiveScalarBase(std::shared_ptr type) + : Scalar(std::move(type), false) {} + + using Scalar::Scalar; + /// \brief Get a const pointer to the value of this scalar. May be null. + virtual const void* data() const = 0; + /// \brief Get a mutable pointer to the value of this scalar. May be null. + virtual void* mutable_data() = 0; + /// \brief Get an immutable view of the value of this scalar as bytes. + virtual std::string_view view() const = 0; +}; + +template +struct ARROW_EXPORT PrimitiveScalar : public PrimitiveScalarBase { + using PrimitiveScalarBase::PrimitiveScalarBase; + using TypeClass = T; + using ValueType = CType; + + // Non-null constructor. 
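A hedged sketch of the base `Scalar` interface declared above (`Parse`, `Equals`, `ToString`, `Validate`); `ScalarDemo` is an illustrative name, and the printed output is what one would expect for an int32 scalar.

```
#include <cassert>
#include <iostream>
#include <memory>

#include "arrow/scalar.h"

arrow::Status ScalarDemo() {
  // Parse a textual representation into a scalar of the requested type.
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Scalar> a,
                        arrow::Scalar::Parse(arrow::int32(), "42"));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Scalar> b,
                        arrow::Scalar::Parse(arrow::int32(), "42"));

  // Two scalars parsed from the same representation should compare equal.
  assert(a->is_valid && a->Equals(*b));
  std::cout << a->ToString() << std::endl;  // expected to print "42"

  // Cheap structural validation of the scalar.
  return a->Validate();
}
```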
+ PrimitiveScalar(ValueType value, std::shared_ptr type) + : PrimitiveScalarBase(std::move(type), true), value(value) {} + + explicit PrimitiveScalar(std::shared_ptr type) + : PrimitiveScalarBase(std::move(type), false) {} + + ValueType value{}; + + const void* data() const override { return &value; } + void* mutable_data() override { return &value; } + std::string_view view() const override { + return std::string_view(reinterpret_cast(&value), sizeof(ValueType)); + }; +}; + +} // namespace internal + +/// \addtogroup concrete-scalar-classes Concrete Scalar subclasses +/// +/// @{ + +struct ARROW_EXPORT BooleanScalar : public internal::PrimitiveScalar { + using Base = internal::PrimitiveScalar; + using Base::Base; + + explicit BooleanScalar(bool value) : Base(value, boolean()) {} + + BooleanScalar() : Base(boolean()) {} +}; + +template +struct NumericScalar : public internal::PrimitiveScalar { + using Base = typename internal::PrimitiveScalar; + using Base::Base; + using TypeClass = typename Base::TypeClass; + using ValueType = typename Base::ValueType; + + explicit NumericScalar(ValueType value) + : Base(value, TypeTraits::type_singleton()) {} + + NumericScalar() : Base(TypeTraits::type_singleton()) {} +}; + +struct ARROW_EXPORT Int8Scalar : public NumericScalar { + using NumericScalar::NumericScalar; +}; + +struct ARROW_EXPORT Int16Scalar : public NumericScalar { + using NumericScalar::NumericScalar; +}; + +struct ARROW_EXPORT Int32Scalar : public NumericScalar { + using NumericScalar::NumericScalar; +}; + +struct ARROW_EXPORT Int64Scalar : public NumericScalar { + using NumericScalar::NumericScalar; +}; + +struct ARROW_EXPORT UInt8Scalar : public NumericScalar { + using NumericScalar::NumericScalar; +}; + +struct ARROW_EXPORT UInt16Scalar : public NumericScalar { + using NumericScalar::NumericScalar; +}; + +struct ARROW_EXPORT UInt32Scalar : public NumericScalar { + using NumericScalar::NumericScalar; +}; + +struct ARROW_EXPORT UInt64Scalar : public NumericScalar { + using NumericScalar::NumericScalar; +}; + +struct ARROW_EXPORT HalfFloatScalar : public NumericScalar { + using NumericScalar::NumericScalar; +}; + +struct ARROW_EXPORT FloatScalar : public NumericScalar { + using NumericScalar::NumericScalar; +}; + +struct ARROW_EXPORT DoubleScalar : public NumericScalar { + using NumericScalar::NumericScalar; +}; + +struct ARROW_EXPORT BaseBinaryScalar + : public internal::PrimitiveScalarBase, + private internal::ArraySpanFillFromScalarScratchSpace { + using internal::PrimitiveScalarBase::PrimitiveScalarBase; + using ValueType = std::shared_ptr; + + std::shared_ptr value; + + const void* data() const override { + return value ? reinterpret_cast(value->data()) : NULLPTR; + } + void* mutable_data() override { + return value ? reinterpret_cast(value->mutable_data()) : NULLPTR; + } + std::string_view view() const override { + return value ? 
std::string_view(*value) : std::string_view(); + } + + BaseBinaryScalar(std::shared_ptr value, std::shared_ptr type) + : internal::PrimitiveScalarBase{std::move(type), true}, value(std::move(value)) {} + + friend ArraySpan; + BaseBinaryScalar(std::string s, std::shared_ptr type); +}; + +struct ARROW_EXPORT BinaryScalar : public BaseBinaryScalar { + using BaseBinaryScalar::BaseBinaryScalar; + using TypeClass = BinaryType; + + explicit BinaryScalar(std::shared_ptr value) + : BinaryScalar(std::move(value), binary()) {} + + explicit BinaryScalar(std::string s) : BaseBinaryScalar(std::move(s), binary()) {} + + BinaryScalar() : BinaryScalar(binary()) {} +}; + +struct ARROW_EXPORT StringScalar : public BinaryScalar { + using BinaryScalar::BinaryScalar; + using TypeClass = StringType; + + explicit StringScalar(std::shared_ptr value) + : StringScalar(std::move(value), utf8()) {} + + explicit StringScalar(std::string s) : BinaryScalar(std::move(s), utf8()) {} + + StringScalar() : StringScalar(utf8()) {} +}; + +struct ARROW_EXPORT BinaryViewScalar : public BaseBinaryScalar { + using BaseBinaryScalar::BaseBinaryScalar; + using TypeClass = BinaryViewType; + + explicit BinaryViewScalar(std::shared_ptr value) + : BinaryViewScalar(std::move(value), binary_view()) {} + + explicit BinaryViewScalar(std::string s) + : BaseBinaryScalar(std::move(s), binary_view()) {} + + BinaryViewScalar() : BinaryViewScalar(binary_view()) {} + + std::string_view view() const override { return std::string_view(*this->value); } +}; + +struct ARROW_EXPORT StringViewScalar : public BinaryViewScalar { + using BinaryViewScalar::BinaryViewScalar; + using TypeClass = StringViewType; + + explicit StringViewScalar(std::shared_ptr value) + : StringViewScalar(std::move(value), utf8_view()) {} + + explicit StringViewScalar(std::string s) + : BinaryViewScalar(std::move(s), utf8_view()) {} + + StringViewScalar() : StringViewScalar(utf8_view()) {} +}; + +struct ARROW_EXPORT LargeBinaryScalar : public BaseBinaryScalar { + using BaseBinaryScalar::BaseBinaryScalar; + using TypeClass = LargeBinaryType; + + LargeBinaryScalar(std::shared_ptr value, std::shared_ptr type) + : BaseBinaryScalar(std::move(value), std::move(type)) {} + + explicit LargeBinaryScalar(std::shared_ptr value) + : LargeBinaryScalar(std::move(value), large_binary()) {} + + explicit LargeBinaryScalar(std::string s) + : BaseBinaryScalar(std::move(s), large_binary()) {} + + LargeBinaryScalar() : LargeBinaryScalar(large_binary()) {} +}; + +struct ARROW_EXPORT LargeStringScalar : public LargeBinaryScalar { + using LargeBinaryScalar::LargeBinaryScalar; + using TypeClass = LargeStringType; + + explicit LargeStringScalar(std::shared_ptr value) + : LargeStringScalar(std::move(value), large_utf8()) {} + + explicit LargeStringScalar(std::string s) + : LargeBinaryScalar(std::move(s), large_utf8()) {} + + LargeStringScalar() : LargeStringScalar(large_utf8()) {} +}; + +struct ARROW_EXPORT FixedSizeBinaryScalar : public BinaryScalar { + using TypeClass = FixedSizeBinaryType; + + FixedSizeBinaryScalar(std::shared_ptr value, std::shared_ptr type, + bool is_valid = true); + + explicit FixedSizeBinaryScalar(const std::shared_ptr& value, + bool is_valid = true); + + explicit FixedSizeBinaryScalar(std::string s, bool is_valid = true); +}; + +template +struct TemporalScalar : internal::PrimitiveScalar { + using internal::PrimitiveScalar::PrimitiveScalar; + using ValueType = typename internal::PrimitiveScalar::ValueType; + + TemporalScalar(ValueType value, std::shared_ptr type) + : 
internal::PrimitiveScalar(std::move(value), type) {} +}; + +template +struct DateScalar : public TemporalScalar { + using TemporalScalar::TemporalScalar; + using ValueType = typename TemporalScalar::ValueType; + + explicit DateScalar(ValueType value) + : TemporalScalar(std::move(value), TypeTraits::type_singleton()) {} + DateScalar() : TemporalScalar(TypeTraits::type_singleton()) {} +}; + +struct ARROW_EXPORT Date32Scalar : public DateScalar { + using DateScalar::DateScalar; +}; + +struct ARROW_EXPORT Date64Scalar : public DateScalar { + using DateScalar::DateScalar; +}; + +template +struct ARROW_EXPORT TimeScalar : public TemporalScalar { + using TemporalScalar::TemporalScalar; + + TimeScalar(typename TemporalScalar::ValueType value, TimeUnit::type unit) + : TimeScalar(std::move(value), std::make_shared(unit)) {} +}; + +struct ARROW_EXPORT Time32Scalar : public TimeScalar { + using TimeScalar::TimeScalar; +}; + +struct ARROW_EXPORT Time64Scalar : public TimeScalar { + using TimeScalar::TimeScalar; +}; + +struct ARROW_EXPORT TimestampScalar : public TemporalScalar { + using TemporalScalar::TemporalScalar; + + TimestampScalar(typename TemporalScalar::ValueType value, + TimeUnit::type unit, std::string tz = "") + : TimestampScalar(std::move(value), timestamp(unit, std::move(tz))) {} + + static Result FromISO8601(std::string_view iso8601, + TimeUnit::type unit); +}; + +template +struct IntervalScalar : public TemporalScalar { + using TemporalScalar::TemporalScalar; + using ValueType = typename TemporalScalar::ValueType; + + explicit IntervalScalar(ValueType value) + : TemporalScalar(value, TypeTraits::type_singleton()) {} + IntervalScalar() : TemporalScalar(TypeTraits::type_singleton()) {} +}; + +struct ARROW_EXPORT MonthIntervalScalar : public IntervalScalar { + using IntervalScalar::IntervalScalar; +}; + +struct ARROW_EXPORT DayTimeIntervalScalar : public IntervalScalar { + using IntervalScalar::IntervalScalar; +}; + +struct ARROW_EXPORT MonthDayNanoIntervalScalar + : public IntervalScalar { + using IntervalScalar::IntervalScalar; +}; + +struct ARROW_EXPORT DurationScalar : public TemporalScalar { + using TemporalScalar::TemporalScalar; + + DurationScalar(typename TemporalScalar::ValueType value, + TimeUnit::type unit) + : DurationScalar(std::move(value), duration(unit)) {} + + // Convenience constructors for a DurationScalar from std::chrono::nanoseconds + template