diff --git a/.gitattributes b/.gitattributes index 429c5e02f18ff7c8c0b60ee80bb4c8415aeb40ad..2d202d418aa2a619cf9a0c7acc16499bec273cb3 100644 --- a/.gitattributes +++ b/.gitattributes @@ -72,3 +72,7 @@ llmeval-env/lib/python3.10/site-packages/triton/third_party/cuda/bin/nvdisasm fi llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1600 filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..add68a230e7e072ed71d77af1229d3242ab07f7a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d69b13bb38af740d932b3333f923b1c11be3a42727fe327902cd8384dc6b3874 +size 1291200 diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h new file mode 100644 index 0000000000000000000000000000000000000000..4af1835cd709d43e0abe3b39b46531cae9a047fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/api.h @@ -0,0 +1,22 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/csv/options.h" +#include "arrow/csv/reader.h" +#include "arrow/csv/writer.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..07279db313e92d2daeb93be12d0ab307d0c25201 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_builder.h @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace csv { + +class BlockParser; +struct ConvertOptions; + +class ARROW_EXPORT ColumnBuilder { + public: + virtual ~ColumnBuilder() = default; + + /// Spawn a task that will try to convert and append the given CSV block. + /// All calls to Append() should happen on the same thread, otherwise + /// call Insert() instead. + virtual void Append(const std::shared_ptr& parser) = 0; + + /// Spawn a task that will try to convert and insert the given CSV block + virtual void Insert(int64_t block_index, + const std::shared_ptr& parser) = 0; + + /// Return the final chunked array. The TaskGroup _must_ have finished! + virtual Result> Finish() = 0; + + std::shared_ptr task_group() { return task_group_; } + + /// Construct a strictly-typed ColumnBuilder. + static Result> Make( + MemoryPool* pool, const std::shared_ptr& type, int32_t col_index, + const ConvertOptions& options, + const std::shared_ptr& task_group); + + /// Construct a type-inferring ColumnBuilder. + static Result> Make( + MemoryPool* pool, int32_t col_index, const ConvertOptions& options, + const std::shared_ptr& task_group); + + /// Construct a ColumnBuilder for a column of nulls + /// (i.e. not present in the CSV file). + static Result> MakeNull( + MemoryPool* pool, const std::shared_ptr& type, + const std::shared_ptr& task_group); + + protected: + explicit ColumnBuilder(std::shared_ptr task_group) + : task_group_(std::move(task_group)) {} + + std::shared_ptr task_group_; +}; + +} // namespace csv +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h new file mode 100644 index 0000000000000000000000000000000000000000..5fbbd5df58b1c588b88e16b68da50b9399211abc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/column_decoder.h @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace csv { + +class BlockParser; +struct ConvertOptions; + +class ARROW_EXPORT ColumnDecoder { + public: + virtual ~ColumnDecoder() = default; + + /// Spawn a task that will try to convert and insert the given CSV block + virtual Future> Decode( + const std::shared_ptr& parser) = 0; + + /// Construct a strictly-typed ColumnDecoder. + static Result> Make(MemoryPool* pool, + std::shared_ptr type, + int32_t col_index, + const ConvertOptions& options); + + /// Construct a type-inferring ColumnDecoder. + /// Inference will run only on the first block, the type will be frozen afterwards. + static Result> Make(MemoryPool* pool, int32_t col_index, + const ConvertOptions& options); + + /// Construct a ColumnDecoder for a column of nulls + /// (i.e. not present in the CSV file). + static Result> MakeNull(MemoryPool* pool, + std::shared_ptr type); + + protected: + ColumnDecoder() = default; +}; + +} // namespace csv +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h new file mode 100644 index 0000000000000000000000000000000000000000..c73e52ce831ed95b4abe83084b483c15660bae7e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/parser.h @@ -0,0 +1,228 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/csv/options.h" +#include "arrow/csv/type_fwd.h" +#include "arrow/status.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class MemoryPool; + +namespace csv { + +/// Skip at most num_rows from the given input. The input pointer is updated +/// and the number of actually skipped rows is returns (may be less than +/// requested if the input is too short). 
+ARROW_EXPORT +int32_t SkipRows(const uint8_t* data, uint32_t size, int32_t num_rows, + const uint8_t** out_data); + +class BlockParserImpl; + +namespace detail { + +struct ParsedValueDesc { + uint32_t offset : 31; + bool quoted : 1; +}; + +class ARROW_EXPORT DataBatch { + public: + explicit DataBatch(int32_t num_cols) : num_cols_(num_cols) {} + + /// \brief Return the number of parsed rows (not skipped) + int32_t num_rows() const { return num_rows_; } + /// \brief Return the number of parsed columns + int32_t num_cols() const { return num_cols_; } + /// \brief Return the total size in bytes of parsed data + uint32_t num_bytes() const { return parsed_size_; } + /// \brief Return the number of skipped rows + int32_t num_skipped_rows() const { return static_cast(skipped_rows_.size()); } + + template + Status VisitColumn(int32_t col_index, int64_t first_row, Visitor&& visit) const { + using detail::ParsedValueDesc; + + int32_t batch_row = 0; + for (size_t buf_index = 0; buf_index < values_buffers_.size(); ++buf_index) { + const auto& values_buffer = values_buffers_[buf_index]; + const auto values = reinterpret_cast(values_buffer->data()); + const auto max_pos = + static_cast(values_buffer->size() / sizeof(ParsedValueDesc)) - 1; + for (int32_t pos = col_index; pos < max_pos; pos += num_cols_, ++batch_row) { + auto start = values[pos].offset; + auto stop = values[pos + 1].offset; + auto quoted = values[pos + 1].quoted; + Status status = visit(parsed_ + start, stop - start, quoted); + if (ARROW_PREDICT_FALSE(!status.ok())) { + return DecorateWithRowNumber(std::move(status), first_row, batch_row); + } + } + } + return Status::OK(); + } + + template + Status VisitLastRow(Visitor&& visit) const { + using detail::ParsedValueDesc; + + const auto& values_buffer = values_buffers_.back(); + const auto values = reinterpret_cast(values_buffer->data()); + const auto start_pos = + static_cast(values_buffer->size() / sizeof(ParsedValueDesc)) - + num_cols_ - 1; + for (int32_t col_index = 0; col_index < num_cols_; ++col_index) { + auto start = values[start_pos + col_index].offset; + auto stop = values[start_pos + col_index + 1].offset; + auto quoted = values[start_pos + col_index + 1].quoted; + ARROW_RETURN_NOT_OK(visit(parsed_ + start, stop - start, quoted)); + } + return Status::OK(); + } + + protected: + Status DecorateWithRowNumber(Status&& status, int64_t first_row, + int32_t batch_row) const { + if (first_row >= 0) { + // `skipped_rows_` is in ascending order by construction, so use bisection + // to find out how many rows were skipped before `batch_row`. + const auto skips_before = + std::upper_bound(skipped_rows_.begin(), skipped_rows_.end(), batch_row) - + skipped_rows_.begin(); + status = status.WithMessage("Row #", batch_row + skips_before + first_row, ": ", + status.message()); + } + // Use return_if so that when extra context is enabled it will be added + ARROW_RETURN_IF_(true, std::move(status), ARROW_STRINGIFY(status)); + return std::move(status); + } + + // The number of rows in this batch (not including any skipped ones) + int32_t num_rows_ = 0; + // The number of columns + int32_t num_cols_ = 0; + + // XXX should we ensure the parsed buffer is padded with 8 or 16 excess zero bytes? + // It may help with null parsing... 
+ std::vector> values_buffers_; + std::shared_ptr parsed_buffer_; + const uint8_t* parsed_ = NULLPTR; + int32_t parsed_size_ = 0; + + // Record the current num_rows_ each time a row is skipped + std::vector skipped_rows_; + + friend class ::arrow::csv::BlockParserImpl; +}; + +} // namespace detail + +constexpr int32_t kMaxParserNumRows = 100000; + +/// \class BlockParser +/// \brief A reusable block-based parser for CSV data +/// +/// The parser takes a block of CSV data and delimits rows and fields, +/// unquoting and unescaping them on the fly. Parsed data is own by the +/// parser, so the original buffer can be discarded after Parse() returns. +/// +/// If the block is truncated (i.e. not all data can be parsed), it is up +/// to the caller to arrange the next block to start with the trailing data. +/// Also, if the previous block ends with CR (0x0d) and a new block starts +/// with LF (0x0a), the parser will consider the leading newline as an empty +/// line; the caller should therefore strip it. +class ARROW_EXPORT BlockParser { + public: + explicit BlockParser(ParseOptions options, int32_t num_cols = -1, + int64_t first_row = -1, int32_t max_num_rows = kMaxParserNumRows); + explicit BlockParser(MemoryPool* pool, ParseOptions options, int32_t num_cols = -1, + int64_t first_row = -1, int32_t max_num_rows = kMaxParserNumRows); + ~BlockParser(); + + /// \brief Parse a block of data + /// + /// Parse a block of CSV data, ingesting up to max_num_rows rows. + /// The number of bytes actually parsed is returned in out_size. + Status Parse(std::string_view data, uint32_t* out_size); + + /// \brief Parse sequential blocks of data + /// + /// Only the last block is allowed to be truncated. + Status Parse(const std::vector& data, uint32_t* out_size); + + /// \brief Parse the final block of data + /// + /// Like Parse(), but called with the final block in a file. + /// The last row may lack a trailing line separator. + Status ParseFinal(std::string_view data, uint32_t* out_size); + + /// \brief Parse the final sequential blocks of data + /// + /// Only the last block is allowed to be truncated. 
+ Status ParseFinal(const std::vector& data, uint32_t* out_size); + + /// \brief Return the number of parsed rows + int32_t num_rows() const { return parsed_batch().num_rows(); } + /// \brief Return the number of parsed columns + int32_t num_cols() const { return parsed_batch().num_cols(); } + /// \brief Return the total size in bytes of parsed data + uint32_t num_bytes() const { return parsed_batch().num_bytes(); } + + /// \brief Return the total number of rows including rows which were skipped + int32_t total_num_rows() const { + return parsed_batch().num_rows() + parsed_batch().num_skipped_rows(); + } + + /// \brief Return the row number of the first row in the block or -1 if unsupported + int64_t first_row_num() const; + + /// \brief Visit parsed values in a column + /// + /// The signature of the visitor is + /// Status(const uint8_t* data, uint32_t size, bool quoted) + template + Status VisitColumn(int32_t col_index, Visitor&& visit) const { + return parsed_batch().VisitColumn(col_index, first_row_num(), + std::forward(visit)); + } + + template + Status VisitLastRow(Visitor&& visit) const { + return parsed_batch().VisitLastRow(std::forward(visit)); + } + + protected: + std::unique_ptr impl_; + + const detail::DataBatch& parsed_batch() const; +}; + +} // namespace csv +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/type_fwd.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..c0a53847a90ddb82067e0c9ac955cf4222c61742 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/csv/type_fwd.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +namespace arrow { +namespace csv { + +class TableReader; +struct ConvertOptions; +struct ReadOptions; +struct ParseOptions; +struct WriteOptions; + +} // namespace csv +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/api.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/api.h new file mode 100644 index 0000000000000000000000000000000000000000..6c94e13032307a7a954ce800fca99ca5a53fd15f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/api.h @@ -0,0 +1,22 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include "arrow/engine/substrait/api.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/pch.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/pch.h new file mode 100644 index 0000000000000000000000000000000000000000..ddb4c120f2a877ffb794b8443f8af1f7707d2cf6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/pch.h @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Often-used headers, for precompiling. +// If updating this header, please make sure you check compilation speed +// before checking in. Adding headers which are not used extremely often +// may incur a slowdown, since it makes the precompiled header heavier to load. + +#include "arrow/pch.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/api.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/api.h new file mode 100644 index 0000000000000000000000000000000000000000..8161f21712974ad6bb6a58ed451807e5a2e8e829 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/api.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. 
+ +#pragma once + +#include "arrow/engine/substrait/extension_set.h" +#include "arrow/engine/substrait/extension_types.h" +#include "arrow/engine/substrait/options.h" +#include "arrow/engine/substrait/relation.h" +#include "arrow/engine/substrait/serde.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_set.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_set.h new file mode 100644 index 0000000000000000000000000000000000000000..c18e0cf77aae5586665e5f8ad583ab5d3e2710ac --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_set.h @@ -0,0 +1,481 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/compute/api_aggregate.h" +#include "arrow/compute/expression.h" +#include "arrow/engine/substrait/type_fwd.h" +#include "arrow/engine/substrait/visibility.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace engine { + +constexpr const char* kSubstraitArithmeticFunctionsUri = + "https://github.com/substrait-io/substrait/blob/main/extensions/" + "functions_arithmetic.yaml"; +constexpr const char* kSubstraitBooleanFunctionsUri = + "https://github.com/substrait-io/substrait/blob/main/extensions/" + "functions_boolean.yaml"; +constexpr const char* kSubstraitComparisonFunctionsUri = + "https://github.com/substrait-io/substrait/blob/main/extensions/" + "functions_comparison.yaml"; +constexpr const char* kSubstraitDatetimeFunctionsUri = + "https://github.com/substrait-io/substrait/blob/main/extensions/" + "functions_datetime.yaml"; +constexpr const char* kSubstraitLogarithmicFunctionsUri = + "https://github.com/substrait-io/substrait/blob/main/extensions/" + "functions_logarithmic.yaml"; +constexpr const char* kSubstraitRoundingFunctionsUri = + "https://github.com/substrait-io/substrait/blob/main/extensions/" + "functions_rounding.yaml"; +constexpr const char* kSubstraitStringFunctionsUri = + "https://github.com/substrait-io/substrait/blob/main/extensions/" + "functions_string.yaml"; +constexpr const char* kSubstraitAggregateGenericFunctionsUri = + "https://github.com/substrait-io/substrait/blob/main/extensions/" + "functions_aggregate_generic.yaml"; + +/// If a function call contains this URI then the function is looked up +/// in the registry directly, all arguments are mapped as value arguments, +/// and any options are ignored. 
+constexpr const char* kArrowSimpleExtensionFunctionsUri = + "urn:arrow:substrait_simple_extension_function"; + +struct ARROW_ENGINE_EXPORT Id { + std::string_view uri, name; + bool empty() const { return uri.empty() && name.empty(); } + std::string ToString() const; +}; +struct ARROW_ENGINE_EXPORT IdHashEq { + size_t operator()(Id id) const; + bool operator()(Id l, Id r) const; +}; + +/// \brief Owning storage for ids +/// +/// Substrait plans may reuse URIs and names in many places. For convenience +/// and performance Substrait ids are typically passed around as views. As we +/// convert a plan from Substrait to Arrow we need to copy these strings out of +/// the Substrait buffer and into owned storage. This class serves as that owned +/// storage. +class ARROW_ENGINE_EXPORT IdStorage { + public: + virtual ~IdStorage() = default; + /// \brief Get an equivalent id pointing into this storage + /// + /// This operation will copy the ids into storage if they do not already exist + virtual Id Emplace(Id id) = 0; + /// \brief Get an equivalent view pointing into this storage for a URI + /// + /// If no URI is found then the uri will be copied into storage + virtual std::string_view EmplaceUri(std::string_view uri) = 0; + /// \brief Get an equivalent id pointing into this storage + /// + /// If no id is found then nullopt will be returned + virtual std::optional Find(Id id) const = 0; + /// \brief Get an equivalent view pointing into this storage for a URI + /// + /// If no URI is found then nullopt will be returned + virtual std::optional FindUri(std::string_view uri) const = 0; + + static std::unique_ptr Make(); +}; + +/// \brief Describes a Substrait call +/// +/// Substrait call expressions contain a list of arguments which can either +/// be enum arguments (which are serialized as strings), value arguments (which) +/// are Arrow expressions, or type arguments (not yet implemented) +class ARROW_ENGINE_EXPORT SubstraitCall { + public: + SubstraitCall(Id id, std::shared_ptr output_type, bool output_nullable, + bool is_hash = false) + : id_(id), + output_type_(std::move(output_type)), + output_nullable_(output_nullable), + is_hash_(is_hash) {} + + const Id& id() const { return id_; } + const std::shared_ptr& output_type() const { return output_type_; } + bool output_nullable() const { return output_nullable_; } + bool is_hash() const { return is_hash_; } + const std::unordered_map>& options() const { + return options_; + } + + bool HasEnumArg(int index) const; + Result GetEnumArg(int index) const; + void SetEnumArg(int index, std::string enum_arg); + Result GetValueArg(int index) const; + bool HasValueArg(int index) const; + void SetValueArg(int index, compute::Expression value_arg); + std::optional const*> GetOption( + std::string_view option_name) const; + void SetOption(std::string_view option_name, + const std::vector& option_preferences); + bool HasOptions() const; + int size() const { return size_; } + + private: + Id id_; + std::shared_ptr output_type_; + bool output_nullable_; + // Only needed when converting from Substrait -> Arrow aggregates. The + // Arrow function name depends on whether or not there are any groups + bool is_hash_; + std::unordered_map enum_args_; + std::unordered_map value_args_; + std::unordered_map> options_; + int size_ = 0; +}; + +/// Substrait identifies functions and custom data types using a (uri, name) pair. 
+/// +/// This registry is a bidirectional mapping between Substrait IDs and their +/// corresponding Arrow counterparts (arrow::DataType and function names in a function +/// registry) +/// +/// Substrait extension types and variations must be registered with their +/// corresponding arrow::DataType before they can be used! +/// +/// Conceptually this can be thought of as two pairs of `unordered_map`s. One pair to +/// go back and forth between Substrait ID and arrow::DataType and another pair to go +/// back and forth between Substrait ID and Arrow function names. +/// +/// Unlike an ExtensionSet this registry is not created automatically when consuming +/// Substrait plans and must be configured ahead of time (although there is a default +/// instance). +class ARROW_ENGINE_EXPORT ExtensionIdRegistry { + public: + using ArrowToSubstraitCall = + std::function(const arrow::compute::Expression::Call&)>; + using SubstraitCallToArrow = + std::function(const SubstraitCall&)>; + using ArrowToSubstraitAggregate = + std::function(const arrow::compute::Aggregate&)>; + using SubstraitAggregateToArrow = + std::function(const SubstraitCall&)>; + + /// \brief A mapping between a Substrait ID and an arrow::DataType + struct TypeRecord { + Id id; + const std::shared_ptr& type; + }; + + /// \brief Return a uri view owned by this registry + /// + /// If the URI has never been emplaced it will return nullopt + virtual std::optional FindUri(std::string_view uri) const = 0; + /// \brief Return a id view owned by this registry + /// + /// If the id has never been emplaced it will return nullopt + virtual std::optional FindId(Id id) const = 0; + virtual std::optional GetType(const DataType&) const = 0; + virtual std::optional GetType(Id) const = 0; + virtual Status CanRegisterType(Id, const std::shared_ptr& type) const = 0; + virtual Status RegisterType(Id, std::shared_ptr) = 0; + /// \brief Register a converter that converts an Arrow call to a Substrait call + /// + /// Note that there may not be 1:1 parity between ArrowToSubstraitCall and + /// SubstraitCallToArrow because some standard functions (e.g. add) may map to + /// multiple Arrow functions (e.g. 
add, add_checked) + virtual Status AddArrowToSubstraitCall(std::string arrow_function_name, + ArrowToSubstraitCall conversion_func) = 0; + /// \brief Check to see if a converter can be registered + /// + /// \return Status::OK if there are no conflicts, otherwise an error is returned + virtual Status CanAddArrowToSubstraitCall( + const std::string& arrow_function_name) const = 0; + + /// \brief Register a converter that converts an Arrow aggregate to a Substrait + /// aggregate + virtual Status AddArrowToSubstraitAggregate( + std::string arrow_function_name, ArrowToSubstraitAggregate conversion_func) = 0; + /// \brief Check to see if a converter can be registered + /// + /// \return Status::OK if there are no conflicts, otherwise an error is returned + virtual Status CanAddArrowToSubstraitAggregate( + const std::string& arrow_function_name) const = 0; + + /// \brief Register a converter that converts a Substrait call to an Arrow call + virtual Status AddSubstraitCallToArrow(Id substrait_function_id, + SubstraitCallToArrow conversion_func) = 0; + /// \brief Check to see if a converter can be registered + /// + /// \return Status::OK if there are no conflicts, otherwise an error is returned + virtual Status CanAddSubstraitCallToArrow(Id substrait_function_id) const = 0; + /// \brief Register a simple mapping function + /// + /// All calls to the function must pass only value arguments. The arguments + /// will be converted to expressions and passed to the Arrow function + virtual Status AddSubstraitCallToArrow(Id substrait_function_id, + std::string arrow_function_name) = 0; + + /// \brief Register a converter that converts a Substrait aggregate to an Arrow + /// aggregate + virtual Status AddSubstraitAggregateToArrow( + Id substrait_function_id, SubstraitAggregateToArrow conversion_func) = 0; + /// \brief Check to see if a converter can be registered + /// + /// \return Status::OK if there are no conflicts, otherwise an error is returned + virtual Status CanAddSubstraitAggregateToArrow(Id substrait_function_id) const = 0; + + /// \brief Return a list of Substrait functions that have a converter + /// + /// The function ids are encoded as strings using the pattern {uri}#{name} + virtual std::vector GetSupportedSubstraitFunctions() const = 0; + + /// \brief Find a converter to map Arrow calls to Substrait calls + /// \return A converter function or an invalid status if no converter is registered + virtual Result GetArrowToSubstraitCall( + const std::string& arrow_function_name) const = 0; + + /// \brief Find a converter to map Arrow aggregates to Substrait aggregates + /// \return A converter function or an invalid status if no converter is registered + virtual Result GetArrowToSubstraitAggregate( + const std::string& arrow_function_name) const = 0; + + /// \brief Find a converter to map a Substrait aggregate to an Arrow aggregate + /// \return A converter function or an invalid status if no converter is registered + virtual Result GetSubstraitAggregateToArrow( + Id substrait_function_id) const = 0; + + /// \brief Find a converter to map a Substrait call to an Arrow call + /// \return A converter function or an invalid status if no converter is registered + virtual Result GetSubstraitCallToArrow( + Id substrait_function_id) const = 0; + + /// \brief Similar to \see GetSubstraitCallToArrow but only uses the name + /// + /// There may be multiple functions with the same name and this will return + /// the first. 
This is slower than GetSubstraitCallToArrow and should only + /// be used when the plan does not include a URI (or the URI is "/") + virtual Result GetSubstraitCallToArrowFallback( + std::string_view function_name) const = 0; + + /// \brief Similar to \see GetSubstraitAggregateToArrow but only uses the name + /// + /// \see GetSubstraitCallToArrowFallback for details on the fallback behavior + virtual Result GetSubstraitAggregateToArrowFallback( + std::string_view function_name) const = 0; +}; + +constexpr std::string_view kArrowExtTypesUri = + "https://github.com/apache/arrow/blob/main/format/substrait/" + "extension_types.yaml"; +// Extension types that don't match 1:1 with a data type (or the data type is +// parameterized) +constexpr std::string_view kTimeNanosTypeName = "time_nanos"; +constexpr Id kTimeNanosId = {kArrowExtTypesUri, kTimeNanosTypeName}; + +/// A default registry with all supported functions and data types registered +/// +/// Note: Function support is currently very minimal, see ARROW-15538 +ARROW_ENGINE_EXPORT ExtensionIdRegistry* default_extension_id_registry(); + +/// \brief Make a nested registry with a given parent. +/// +/// A nested registry supports registering types and functions other and on top of those +/// already registered in its parent registry. No conflicts in IDs and names used for +/// lookup are allowed. Normally, the given parent is the default registry. +/// +/// One use case for a nested registry is for dynamic registration of functions defined +/// within a Substrait plan while keeping these registrations specific to the plan. When +/// the Substrait plan is disposed of, normally after its execution, the nested registry +/// can be disposed of as well. +ARROW_ENGINE_EXPORT std::shared_ptr nested_extension_id_registry( + const ExtensionIdRegistry* parent); + +/// \brief A set of extensions used within a plan +/// +/// Each time an extension is used within a Substrait plan the extension +/// must be included in an extension set that is defined at the root of the +/// plan. +/// +/// The plan refers to a specific extension using an "anchor" which is an +/// arbitrary integer invented by the producer that has no meaning beyond a +/// plan but which should be consistent within a plan. +/// +/// To support serialization and deserialization this type serves as a +/// bidirectional map between Substrait ID and "anchor"s. +/// +/// When deserializing a Substrait plan the extension set should be extracted +/// after the plan has been converted from Protobuf and before the plan +/// is converted to an execution plan. +/// +/// The extension set can be kept and reused during serialization if a perfect +/// round trip is required. If serialization is not needed or round tripping +/// is not required then the extension set can be safely discarded after the +/// plan has been converted into an execution plan. +/// +/// When converting an execution plan into a Substrait plan an extension set +/// can be automatically generated or a previously generated extension set can +/// be used. +/// +/// ExtensionSet does not own strings; it only refers to strings in an +/// ExtensionIdRegistry. +class ARROW_ENGINE_EXPORT ExtensionSet { + public: + struct FunctionRecord { + Id id; + std::string_view name; + }; + + struct TypeRecord { + Id id; + std::shared_ptr type; + }; + + /// Construct an empty ExtensionSet to be populated during serialization. 
+ explicit ExtensionSet(const ExtensionIdRegistry* = default_extension_id_registry()); + ARROW_DEFAULT_MOVE_AND_ASSIGN(ExtensionSet); + + /// Construct an ExtensionSet with explicit extension ids for efficient referencing + /// during deserialization. Note that input vectors need not be densely packed; an empty + /// (default constructed) Id may be used as a placeholder to indicate an unused + /// _anchor/_reference. This factory will be used to wrap the extensions declared in a + /// substrait::Plan before deserializing the plan's relations. + /// + /// Views will be replaced with equivalent views pointing to memory owned by the + /// registry. + /// + /// Note: This is an advanced operation. The order of the ids, types, and functions + /// must match the anchor numbers chosen for a plan. + /// + /// An extension set should instead be created using + /// arrow::engine::GetExtensionSetFromPlan + static Result Make( + std::unordered_map uris, + std::unordered_map type_ids, + std::unordered_map function_ids, + const ConversionOptions& conversion_options, + const ExtensionIdRegistry* = default_extension_id_registry()); + + const std::unordered_map& uris() const { return uris_; } + + /// \brief Returns a data type given an anchor + /// + /// This is used when converting a Substrait plan to an Arrow execution plan. + /// + /// If the anchor does not exist in this extension set an error will be returned. + Result DecodeType(uint32_t anchor) const; + + /// \brief Returns the number of custom type records in this extension set + /// + /// Note: the types are currently stored as a sparse vector, so this may return a value + /// larger than the actual number of types. This behavior may change in the future; see + /// ARROW-15583. + std::size_t num_types() const { return types_.size(); } + + /// \brief Lookup the anchor for a given type + /// + /// This operation is used when converting an Arrow execution plan to a Substrait plan. + /// If the type has been previously encoded then the same anchor value will returned. + /// + /// If the type has not been previously encoded then a new anchor value will be created. + /// + /// If the type does not exist in the extension id registry then an error will be + /// returned. + /// + /// \return An anchor that can be used to refer to the type within a plan + Result EncodeType(const DataType& type); + + /// \brief Return a function id given an anchor + /// + /// This is used when converting a Substrait plan to an Arrow execution plan. + /// + /// If the anchor does not exist in this extension set an error will be returned. + Result DecodeFunction(uint32_t anchor) const; + + /// \brief Lookup the anchor for a given function + /// + /// This operation is used when converting an Arrow execution plan to a Substrait plan. + /// If the function has been previously encoded then the same anchor value will be + /// returned. + /// + /// If the function has not been previously encoded then a new anchor value will be + /// created. + /// + /// If the function name is not in the extension id registry then an error will be + /// returned. + /// + /// \return An anchor that can be used to refer to the function within a plan + Result EncodeFunction(Id function_id); + + /// \brief Stores a plan-specific id that is not known to the registry + /// + /// This is used when converting an Arrow execution plan to a Substrait plan. 
+ /// + /// If the function is a UDF, something that wasn't known to the registry, + /// then we need long term storage of the function name (the ids are just + /// views) + Id RegisterPlanSpecificId(Id id); + + /// \brief Return the number of custom functions in this extension set + std::size_t num_functions() const { return functions_.size(); } + + const ExtensionIdRegistry* registry() const { return registry_; } + + private: + const ExtensionIdRegistry* registry_; + // If the registry is not aware of an id then we probably can't do anything + // with it. However, in some cases, these may represent extensions or features + // that we can safely ignore. For example, we can usually safely ignore + // extension type variations if we assume the plan is valid. These ignorable + // ids are stored here. + std::unique_ptr plan_specific_ids_ = IdStorage::Make(); + + // Map from anchor values to URI values referenced by this extension set + std::unordered_map uris_; + // Map from anchor values to type definitions, used during Substrait->Arrow + // and populated from the Substrait extension set + std::unordered_map types_; + // Map from anchor values to function ids, used during Substrait->Arrow + // and populated from the Substrait extension set + std::unordered_map functions_; + // Map from type names to anchor values. Used during Arrow->Substrait + // and built as the plan is created. + std::unordered_map types_map_; + // Map from function names to anchor values. Used during Arrow->Substrait + // and built as the plan is created. + std::unordered_map functions_map_; + + Status CheckHasUri(std::string_view uri); + void AddUri(std::pair uri); + Status AddUri(Id id); +}; + +} // namespace engine +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_types.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_types.h new file mode 100644 index 0000000000000000000000000000000000000000..ae71ad83f7e5425adeae28d88d031667fe2ce9ce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_types.h @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include +#include +#include + +#include "arrow/engine/substrait/visibility.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace engine { + +// arrow::ExtensionTypes are provided to wrap uuid, fixed_char, varchar, interval_year, +// and interval_day which are first-class types in substrait but do not appear in +// the arrow type system. 
+// +// Note that these are not automatically registered with arrow::RegisterExtensionType(), +// which means among other things that serialization of these types to IPC would fail. + +/// fixed_size_binary(16) for storing Universally Unique IDentifiers +ARROW_ENGINE_EXPORT +std::shared_ptr uuid(); + +/// fixed_size_binary(length) constrained to contain only valid UTF-8 +ARROW_ENGINE_EXPORT +std::shared_ptr fixed_char(int32_t length); + +/// utf8() constrained to be shorter than `length` +ARROW_ENGINE_EXPORT +std::shared_ptr varchar(int32_t length); + +/// fixed_size_list(int32(), 2) storing a number of [years, months] +ARROW_ENGINE_EXPORT +std::shared_ptr interval_year(); + +/// fixed_size_list(int32(), 2) storing a number of [days, seconds] +ARROW_ENGINE_EXPORT +std::shared_ptr interval_day(); + +/// constructs the appropriate timestamp type given the precision +/// no time zone +ARROW_ENGINE_EXPORT +Result> precision_timestamp(int precision); + +/// constructs the appropriate timestamp type given the precision +/// and the UTC time zone +ARROW_ENGINE_EXPORT +Result> precision_timestamp_tz(int precision); + +/// Return true if t is Uuid, otherwise false +ARROW_ENGINE_EXPORT +bool UnwrapUuid(const DataType&); + +/// Return FixedChar length if t is FixedChar, otherwise nullopt +ARROW_ENGINE_EXPORT +std::optional UnwrapFixedChar(const DataType&); + +/// Return Varchar (max) length if t is VarChar, otherwise nullopt +ARROW_ENGINE_EXPORT +std::optional UnwrapVarChar(const DataType& t); + +/// Return true if t is IntervalYear, otherwise false +ARROW_ENGINE_EXPORT +bool UnwrapIntervalYear(const DataType&); + +/// Return true if t is IntervalDay, otherwise false +ARROW_ENGINE_EXPORT +bool UnwrapIntervalDay(const DataType&); + +} // namespace engine +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/options.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/options.h new file mode 100644 index 0000000000000000000000000000000000000000..1e6f6efb2c751a97e3f0cd9de3eb55c0bb87772c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/options.h @@ -0,0 +1,135 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include +#include +#include + +#include "arrow/acero/exec_plan.h" +#include "arrow/acero/options.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/engine/substrait/type_fwd.h" +#include "arrow/engine/substrait/visibility.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace engine { + +/// How strictly to adhere to the input structure when converting between Substrait and +/// Acero representations of a plan. 
This allows the user to trade conversion accuracy +/// for performance and lenience. +enum class ARROW_ENGINE_EXPORT ConversionStrictness { + /// When a primitive is used at the input that doesn't have an exact match at the + /// output, reject the conversion. This effectively asserts that there is no (known) + /// information loss in the conversion, and that plans should either round-trip back and + /// forth exactly or not at all. This option is primarily intended for testing and + /// debugging. + EXACT_ROUNDTRIP, + + /// When a primitive is used at the input that doesn't have an exact match at the + /// output, attempt to model it with some collection of primitives at the output. This + /// means that even if the incoming plan is completely optimal by some metric, the + /// returned plan is fairly likely to not be optimal anymore, and round-trips back and + /// forth may make the plan increasingly suboptimal. However, every primitive at the + /// output can be (manually) traced back to exactly one primitive at the input, which + /// may be useful when debugging. + PRESERVE_STRUCTURE, + + /// Behaves like PRESERVE_STRUCTURE, but prefers performance over structural accuracy. + /// Basic optimizations *may* be applied, in order to attempt to not regress in terms of + /// plan performance: if the incoming plan was already aggressively optimized, the goal + /// is for the output plan to not be less performant. In practical use cases, this is + /// probably the option you want. + /// + /// Note that no guarantees are made on top of PRESERVE_STRUCTURE. Past and future + /// versions of Arrow may even ignore this option entirely and treat it exactly like + /// PRESERVE_STRUCTURE. + BEST_EFFORT, +}; + +using NamedTableProvider = std::function( + const std::vector&, const Schema&)>; +static NamedTableProvider kDefaultNamedTableProvider; + +using NamedTapProvider = std::function( + const std::string&, std::vector, const std::string&, + std::shared_ptr)>; + +class ARROW_ENGINE_EXPORT ExtensionDetails { + public: + virtual ~ExtensionDetails() = default; +}; + +class ARROW_ENGINE_EXPORT ExtensionProvider { + public: + virtual ~ExtensionProvider() = default; + virtual Result MakeRel(const ConversionOptions& conv_opts, + const std::vector& inputs, + const ExtensionDetails& ext_details, + const ExtensionSet& ext_set) = 0; +}; + +/// \brief Get the default extension provider +ARROW_ENGINE_EXPORT std::shared_ptr default_extension_provider(); +/// \brief Set the default extension provider +/// +/// \param[in] provider the new provider to be set as default +ARROW_ENGINE_EXPORT void set_default_extension_provider( + const std::shared_ptr& provider); + +ARROW_ENGINE_EXPORT NamedTapProvider default_named_tap_provider(); + +ARROW_ENGINE_EXPORT void set_default_named_tap_provider(NamedTapProvider provider); + +/// Options that control the conversion between Substrait and Acero representations of a +/// plan. +struct ARROW_ENGINE_EXPORT ConversionOptions { + ConversionOptions() + : strictness(ConversionStrictness::BEST_EFFORT), + named_table_provider(kDefaultNamedTableProvider), + named_tap_provider(default_named_tap_provider()), + extension_provider(default_extension_provider()), + allow_arrow_extensions(false) {} + + /// \brief How strictly the converter should adhere to the structure of the input. + ConversionStrictness strictness; + /// \brief A custom strategy to be used for providing named tables + /// + /// The default behavior will return an invalid status if the plan has any + /// named table relations. 
+ NamedTableProvider named_table_provider; + /// \brief A custom strategy to be used for obtaining a tap declaration + /// + /// The default provider returns an error + NamedTapProvider named_tap_provider; + /// \brief A custom strategy to be used for providing relation infos. + /// + /// The default behavior will provide for relations known to Arrow. + std::shared_ptr extension_provider; + /// \brief If true then Arrow-specific types and functions will be allowed + /// + /// Set to false to create plans that are more likely to be compatible with non-Arrow + /// engines + bool allow_arrow_extensions; +}; + +} // namespace engine +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/relation.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/relation.h new file mode 100644 index 0000000000000000000000000000000000000000..d0913b9ae029bf790fe1d348eb82911f8a912079 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/relation.h @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/acero/exec_plan.h" +#include "arrow/compute/api_aggregate.h" +#include "arrow/engine/substrait/visibility.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace engine { + +/// Execution information resulting from converting a Substrait relation. +struct ARROW_ENGINE_EXPORT DeclarationInfo { + /// The compute declaration produced thus far. + acero::Declaration declaration; + + std::shared_ptr output_schema; +}; + +/// Information resulting from converting a Substrait plan +struct ARROW_ENGINE_EXPORT PlanInfo { + /// The root declaration. + /// + /// Only plans containing a single top-level relation are supported and so this will + /// represent that relation. + /// + /// This should technically be a RelRoot but some producers use a simple Rel here and so + /// Acero currently supports that case. 
+ DeclarationInfo root; + /// The names of the output fields + /// + /// If `root` was created from a simple Rel then this will be empty + std::vector names; +}; + +/// An expression whose output has a name +struct ARROW_ENGINE_EXPORT NamedExpression { + /// An expression + compute::Expression expression; + // An optional name to assign to the output, may be the empty string + std::string name; +}; + +/// A collection of expressions bound to a common schema +struct ARROW_ENGINE_EXPORT BoundExpressions { + /// The expressions + std::vector named_expressions; + /// The schema that all the expressions are bound to + std::shared_ptr schema; +}; + +} // namespace engine +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/serde.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/serde.h new file mode 100644 index 0000000000000000000000000000000000000000..ab749f4a64b0513a1838c8e049c2abcd24181016 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/serde.h @@ -0,0 +1,331 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/compute/type_fwd.h" +#include "arrow/dataset/type_fwd.h" +#include "arrow/engine/substrait/options.h" +#include "arrow/engine/substrait/relation.h" +#include "arrow/engine/substrait/type_fwd.h" +#include "arrow/engine/substrait/visibility.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace engine { + +/// \brief Serialize an Acero Plan to a binary protobuf Substrait message +/// +/// \param[in] declaration the Acero declaration to serialize. +/// This declaration is the sink relation of the Acero plan. +/// \param[in,out] ext_set the extension mapping to use; may be updated to add +/// \param[in] conversion_options options to control how the conversion is done +/// +/// \return a buffer containing the protobuf serialization of the Acero relation +ARROW_ENGINE_EXPORT +Result> SerializePlan( + const acero::Declaration& declaration, ExtensionSet* ext_set, + const ConversionOptions& conversion_options = {}); + +/// \brief Serialize expressions to a Substrait message +/// +/// \param[in] bound_expressions the expressions to serialize. +/// \param[in] conversion_options options to control how the conversion is done +/// \param[in,out] ext_set the extension mapping to use, optional, only needed +/// if you want to control the value of function anchors +/// to mirror a previous serialization / deserialization. 
+/// Will be updated if new functions are encountered +ARROW_ENGINE_EXPORT +Result> SerializeExpressions( + const BoundExpressions& bound_expressions, + const ConversionOptions& conversion_options = {}, ExtensionSet* ext_set = NULLPTR); + +/// Factory function type for generating the node that consumes the batches produced by +/// each toplevel Substrait relation when deserializing a Substrait Plan. +using ConsumerFactory = std::function()>; + +/// \brief Deserializes a Substrait Plan message to a list of ExecNode declarations +/// +/// The output of each top-level Substrait relation will be sent to a caller supplied +/// consumer function provided by consumer_factory +/// +/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan +/// message +/// \param[in] consumer_factory factory function for generating the node that consumes +/// the batches produced by each toplevel Substrait relation +/// \param[in] registry an extension-id-registry to use, or null for the default one. +/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait +/// Plan is returned here. +/// \param[in] conversion_options options to control how the conversion is to be done. +/// \return a vector of ExecNode declarations, one for each toplevel relation in the +/// Substrait Plan +ARROW_ENGINE_EXPORT Result> DeserializePlans( + const Buffer& buf, const ConsumerFactory& consumer_factory, + const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR, + const ConversionOptions& conversion_options = {}); + +/// \brief Deserializes a single-relation Substrait Plan message to an execution plan +/// +/// The output of each top-level Substrait relation will be sent to a caller supplied +/// consumer function provided by consumer_factory +/// +/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan +/// message +/// \param[in] consumer node that consumes the batches produced by each toplevel Substrait +/// relation +/// \param[in] registry an extension-id-registry to use, or null for the default one. +/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait +/// \param[in] conversion_options options to control how the conversion is to be done. +/// Plan is returned here. +/// \return an ExecPlan for the Substrait Plan +ARROW_ENGINE_EXPORT Result> DeserializePlan( + const Buffer& buf, const std::shared_ptr& consumer, + const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR, + const ConversionOptions& conversion_options = {}); + +/// Factory function type for generating the write options of a node consuming the batches +/// produced by each toplevel Substrait relation when deserializing a Substrait Plan. +using WriteOptionsFactory = std::function()>; + +/// \brief Deserializes a Substrait Plan message to a list of ExecNode declarations +/// +/// The output of each top-level Substrait relation will be written to a filesystem. +/// `write_options_factory` can be used to control write behavior. +/// +/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan +/// message +/// \param[in] write_options_factory factory function for generating the write options of +/// a node consuming the batches produced by each toplevel Substrait relation +/// \param[in] registry an extension-id-registry to use, or null for the default one. +/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait +/// Plan is returned here. 
+/// \param[in] conversion_options options to control how the conversion is to be done. +/// \return a vector of ExecNode declarations, one for each toplevel relation in the +/// Substrait Plan +ARROW_ENGINE_EXPORT Result> DeserializePlans( + const Buffer& buf, const WriteOptionsFactory& write_options_factory, + const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR, + const ConversionOptions& conversion_options = {}); + +/// \brief Deserializes a single-relation Substrait Plan message to an execution plan +/// +/// The output of the single Substrait relation will be written to a filesystem. +/// `write_options_factory` can be used to control write behavior. +/// +/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan +/// message +/// \param[in] write_options write options of a node consuming the batches produced by +/// each toplevel Substrait relation +/// \param[in] registry an extension-id-registry to use, or null for the default one. +/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait +/// Plan is returned here. +/// \param[in] conversion_options options to control how the conversion is to be done. +/// \return an ExecPlan for the Substrait Plan +ARROW_ENGINE_EXPORT Result> DeserializePlan( + const Buffer& buf, const std::shared_ptr& write_options, + const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR, + const ConversionOptions& conversion_options = {}); + +/// \brief Deserializes a Substrait Plan message to a Declaration +/// +/// The plan will not contain any sink nodes and will be suitable for use in any +/// of the arrow::compute::DeclarationToXyz methods. +/// +/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan +/// message +/// \param[in] registry an extension-id-registry to use, or null for the default one. +/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait +/// Plan is returned here. +/// \param[in] conversion_options options to control how the conversion is to be done. +/// \return A declaration representing the Substrait plan +ARROW_ENGINE_EXPORT Result DeserializePlan( + const Buffer& buf, const ExtensionIdRegistry* registry = NULLPTR, + ExtensionSet* ext_set_out = NULLPTR, + const ConversionOptions& conversion_options = {}); + +/// \brief Deserialize a Substrait ExtendedExpression message to the corresponding Arrow +/// type +/// +/// \param[in] buf a buffer containing the protobuf serialization of a collection of bound +/// expressions +/// \param[in] registry an extension-id-registry to use, or null for the default one +/// \param[in] conversion_options options to control how the conversion is done +/// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait +/// message is returned here. 
+/// \return A collection of expressions and a common input schema they are bound to +ARROW_ENGINE_EXPORT Result DeserializeExpressions( + const Buffer& buf, const ExtensionIdRegistry* registry = NULLPTR, + const ConversionOptions& conversion_options = {}, + ExtensionSet* ext_set_out = NULLPTR); + +/// \brief Deserializes a Substrait Type message to the corresponding Arrow type +/// +/// \param[in] buf a buffer containing the protobuf serialization of a Substrait Type +/// message +/// \param[in] ext_set the extension mapping to use, normally provided by the +/// surrounding Plan message +/// \param[in] conversion_options options to control how the conversion is to be done. +/// \return the corresponding Arrow data type +ARROW_ENGINE_EXPORT +Result> DeserializeType( + const Buffer& buf, const ExtensionSet& ext_set, + const ConversionOptions& conversion_options = {}); + +/// \brief Serializes an Arrow type to a Substrait Type message +/// +/// \param[in] type the Arrow data type to serialize +/// \param[in,out] ext_set the extension mapping to use; may be updated to add a +/// mapping for the given type +/// \param[in] conversion_options options to control how the conversion is to be done. +/// \return a buffer containing the protobuf serialization of the corresponding Substrait +/// Type message +ARROW_ENGINE_EXPORT +Result> SerializeType( + const DataType& type, ExtensionSet* ext_set, + const ConversionOptions& conversion_options = {}); + +/// \brief Deserializes a Substrait NamedStruct message to an Arrow schema +/// +/// \param[in] buf a buffer containing the protobuf serialization of a Substrait +/// NamedStruct message +/// \param[in] ext_set the extension mapping to use, normally provided by the +/// surrounding Plan message +/// \param[in] conversion_options options to control how the conversion is to be done. +/// \return the corresponding Arrow schema +ARROW_ENGINE_EXPORT +Result> DeserializeSchema( + const Buffer& buf, const ExtensionSet& ext_set, + const ConversionOptions& conversion_options = {}); + +/// \brief Serializes an Arrow schema to a Substrait NamedStruct message +/// +/// \param[in] schema the Arrow schema to serialize +/// \param[in,out] ext_set the extension mapping to use; may be updated to add +/// mappings for the types used in the schema +/// \param[in] conversion_options options to control how the conversion is to be done. +/// \return a buffer containing the protobuf serialization of the corresponding Substrait +/// NamedStruct message +ARROW_ENGINE_EXPORT +Result> SerializeSchema( + const Schema& schema, ExtensionSet* ext_set, + const ConversionOptions& conversion_options = {}); + +/// \brief Deserializes a Substrait Expression message to a compute expression +/// +/// \param[in] buf a buffer containing the protobuf serialization of a Substrait +/// Expression message +/// \param[in] ext_set the extension mapping to use, normally provided by the +/// surrounding Plan message +/// \param[in] conversion_options options to control how the conversion is to be done. 
+/// \return the corresponding Arrow compute expression +ARROW_ENGINE_EXPORT +Result DeserializeExpression( + const Buffer& buf, const ExtensionSet& ext_set, + const ConversionOptions& conversion_options = {}); + +/// \brief Serializes an Arrow compute expression to a Substrait Expression message +/// +/// \param[in] expr the Arrow compute expression to serialize +/// \param[in,out] ext_set the extension mapping to use; may be updated to add +/// mappings for the types used in the expression +/// \param[in] conversion_options options to control how the conversion is to be done. +/// \return a buffer containing the protobuf serialization of the corresponding Substrait +/// Expression message +ARROW_ENGINE_EXPORT +Result> SerializeExpression( + const compute::Expression& expr, ExtensionSet* ext_set, + const ConversionOptions& conversion_options = {}); + +/// \brief Serialize an Acero Declaration to a binary protobuf Substrait message +/// +/// \param[in] declaration the Acero declaration to serialize +/// \param[in,out] ext_set the extension mapping to use; may be updated to add +/// \param[in] conversion_options options to control how the conversion is done +/// +/// \return a buffer containing the protobuf serialization of the Acero relation +ARROW_ENGINE_EXPORT Result> SerializeRelation( + const acero::Declaration& declaration, ExtensionSet* ext_set, + const ConversionOptions& conversion_options = {}); + +/// \brief Deserializes a Substrait Rel (relation) message to an ExecNode declaration +/// +/// \param[in] buf a buffer containing the protobuf serialization of a Substrait +/// Rel message +/// \param[in] ext_set the extension mapping to use, normally provided by the +/// surrounding Plan message +/// \param[in] conversion_options options to control how the conversion is to be done. +/// \return the corresponding ExecNode declaration +ARROW_ENGINE_EXPORT Result DeserializeRelation( + const Buffer& buf, const ExtensionSet& ext_set, + const ConversionOptions& conversion_options = {}); + +namespace internal { + +/// \brief Checks whether two protobuf serializations of a particular Substrait message +/// type are equivalent +/// +/// Note that a binary comparison of the two buffers is insufficient. One reason for this +/// is that the fields of a message can be specified in any order in the serialization. 
+/// +/// \param[in] message_name the name of the Substrait message type to check +/// \param[in] l_buf buffer containing the first protobuf serialization to compare +/// \param[in] r_buf buffer containing the second protobuf serialization to compare +/// \return success if equivalent, failure if not +ARROW_ENGINE_EXPORT +Status CheckMessagesEquivalent(std::string_view message_name, const Buffer& l_buf, + const Buffer& r_buf); + +/// \brief Utility function to convert a JSON serialization of a Substrait message to +/// its binary serialization +/// +/// \param[in] type_name the name of the Substrait message type to convert +/// \param[in] json the JSON string to convert +/// \param[in] ignore_unknown_fields if true then unknown fields will be ignored and +/// will not cause an error +/// +/// This should generally be true to allow consumption of plans from newer +/// producers but setting to false can be useful if you are testing +/// conformance to a specific Substrait version +/// \return a buffer filled with the binary protobuf serialization of message +ARROW_ENGINE_EXPORT +Result> SubstraitFromJSON(std::string_view type_name, + std::string_view json, + bool ignore_unknown_fields = true); + +/// \brief Utility function to convert a binary protobuf serialization of a Substrait +/// message to JSON +/// +/// \param[in] type_name the name of the Substrait message type to convert +/// \param[in] buf the buffer containing the binary protobuf serialization of the message +/// \return a JSON string representing the message +ARROW_ENGINE_EXPORT +Result SubstraitToJSON(std::string_view type_name, const Buffer& buf); + +} // namespace internal +} // namespace engine +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_plan_builder.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_plan_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..94c03daaa7a6957a2f8d5db77b7def1f8394d301 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_plan_builder.h @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// These utilities are for internal / unit test use only. +// They allow for the construction of simple Substrait plans +// programmatically without first requiring the construction +// of an ExecPlan + +// These utilities have to be here, and not in a test_util.cc +// file (or in a unit test) because only one .so is allowed +// to include each .pb.h file or else protobuf will encounter +// global namespace conflicts. 
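// Illustrative sketch (not part of the vendored header above): a minimal use
// of the serde API declared in serde.h, assuming the caller already holds a
// Substrait plan in `plan_buf`. The function names and the schema round trip
// are placeholders invented for this sketch.
#include <memory>

#include "arrow/buffer.h"
#include "arrow/engine/substrait/extension_set.h"
#include "arrow/engine/substrait/serde.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/type.h"

namespace sketch {

// Serialize an Arrow schema to a Substrait NamedStruct message and back.
arrow::Status RoundTripSchema(const arrow::Schema& schema) {
  arrow::engine::ExtensionSet ext_set;
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> encoded,
                        arrow::engine::SerializeSchema(schema, &ext_set));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Schema> decoded,
                        arrow::engine::DeserializeSchema(*encoded, ext_set));
  return decoded->Equals(schema) ? arrow::Status::OK()
                                 : arrow::Status::Invalid("schema round trip mismatch");
}

// Decode a serialized Substrait plan into a sink-less declaration that can be
// fed to the DeclarationToXyz helpers.
arrow::Result<arrow::engine::PlanInfo> DecodePlan(const arrow::Buffer& plan_buf) {
  return arrow::engine::DeserializePlan(plan_buf);
}

}  // namespace sketch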
+ +#include +#include +#include +#include + +#include "arrow/engine/substrait/visibility.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace engine { + +struct Id; + +namespace internal { + +/// \brief Create a scan->project->sink plan for tests +/// +/// The plan will project one additional column using the function +/// defined by `function_id`, `arguments`, and data_types. `arguments` +/// and `data_types` should have the same length but only one of each +/// should be defined at each index. +/// +/// If `data_types` is defined at an index then the plan will create a +/// direct reference (starting at index 0 and increasing by 1 for each +/// argument of this type). +/// +/// If `arguments` is defined at an index then the plan will create an +/// enum argument with that value. +ARROW_ENGINE_EXPORT Result> CreateScanProjectSubstrait( + Id function_id, const std::shared_ptr& input_table, + const std::vector& arguments, + const std::unordered_map>& options, + const std::vector>& data_types, + const DataType& output_type); + +/// \brief Create a scan->aggregate->sink plan for tests +/// +/// The plan will create an aggregate with one grouping set (defined by +/// key_idxs) and one measure. The measure will be a function +/// defined by `function_id` and direct references to `arg_idxs`. +ARROW_ENGINE_EXPORT Result> CreateScanAggSubstrait( + Id function_id, const std::shared_ptr
& input_table, + const std::vector& key_idxs, const std::vector& arg_idxs, + const DataType& output_type); + +} // namespace internal +} // namespace engine +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..a1db4b255ed8ee6a0ae7bb4a7a57f5a1aadb27cf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_util.h @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/testing/gtest_util.h" +#include "arrow/util/vector.h" + +#include +#include +#include +#include +#include + +#include "arrow/acero/exec_plan.h" +#include "arrow/compute/exec.h" +#include "arrow/compute/kernel.h" +#include "arrow/testing/visibility.h" +#include "arrow/util/async_generator.h" +#include "arrow/util/pcg_random.h" + +namespace arrow { +namespace engine { + +Result> SortTableOnAllFields(const std::shared_ptr
& tab); + +void AssertTablesEqualIgnoringOrder(const std::shared_ptr
& exp, + const std::shared_ptr
& act); + +} // namespace engine +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/type_fwd.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..6089d3f747a82cdc68b738b9ce6abbbb60e6811c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/type_fwd.h @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This API is EXPERIMENTAL. + +#pragma once + +namespace arrow { +namespace engine { + +class ExtensionIdRegistry; +class ExtensionSet; + +struct ConversionOptions; +struct DeclarationInfo; + +} // namespace engine +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/util.h new file mode 100644 index 0000000000000000000000000000000000000000..bef2a6c7e1823e5a661a36c96a94eac81b5462f4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/util.h @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/compute/type_fwd.h" +#include "arrow/engine/substrait/options.h" +#include "arrow/engine/substrait/type_fwd.h" +#include "arrow/engine/substrait/visibility.h" +#include "arrow/result.h" +#include "arrow/type_fwd.h" +#include "arrow/util/iterator.h" +#include "arrow/util/macros.h" + +namespace arrow { + +namespace engine { + +using PythonTableProvider = + std::function>(const std::vector&)>; + +/// \brief Utility method to run a Substrait plan +/// \param substrait_buffer The plan to run, must be in binary protobuf format +/// \param registry A registry of extension functions to make available to the plan +/// If null then the default registry will be used. 
+/// \param memory_pool The memory pool the plan should use to make allocations. +/// \param func_registry A registry of functions used for execution expressions. +/// `registry` maps from Substrait function IDs to "names". These +/// names will be provided to `func_registry` to get the actual +/// kernel. +/// \param conversion_options Options to control plan deserialization +/// \param use_threads If True then the CPU thread pool will be used for CPU work. If +/// False then all work will be done on the calling thread. +/// \return A record batch reader that will read out the results +ARROW_ENGINE_EXPORT Result> ExecuteSerializedPlan( + const Buffer& substrait_buffer, const ExtensionIdRegistry* registry = NULLPTR, + compute::FunctionRegistry* func_registry = NULLPTR, + const ConversionOptions& conversion_options = {}, bool use_threads = true, + MemoryPool* memory_pool = default_memory_pool()); + +/// \brief Get a Serialized Plan from a Substrait JSON plan. +/// This is a helper method for Python tests. +ARROW_ENGINE_EXPORT Result> SerializeJsonPlan( + const std::string& substrait_json); + +/// \brief Make a nested registry with the default registry as parent. +/// See arrow::engine::nested_extension_id_registry for details. +ARROW_ENGINE_EXPORT std::shared_ptr MakeExtensionIdRegistry(); + +ARROW_ENGINE_EXPORT const std::string& default_extension_types_uri(); + +// TODO(ARROW-18145) Populate these from cmake files +constexpr uint32_t kSubstraitMajorVersion = 0; +constexpr uint32_t kSubstraitMinorVersion = 44; +constexpr uint32_t kSubstraitPatchVersion = 0; + +constexpr uint32_t kSubstraitMinimumMajorVersion = 0; +constexpr uint32_t kSubstraitMinimumMinorVersion = 20; + +Status CheckVersion(uint32_t major_version, uint32_t minor_version); + +} // namespace engine + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/visibility.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..d81d202ee65673b1540836063d6aa5f88da9fe9c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/visibility.h @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// TODO(westonpace): Once we have a proper engine module this file +// should be renamed arrow/engine/visibility.h +// This API is EXPERIMENTAL. 
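// Illustrative sketch (not part of the vendored header above): running a
// Substrait plan end to end with the helpers declared in util.h. The JSON
// plan contents and the function name `RunJsonPlan` are placeholders.
#include <memory>
#include <string>

#include "arrow/buffer.h"
#include "arrow/engine/substrait/util.h"
#include "arrow/record_batch.h"
#include "arrow/result.h"
#include "arrow/table.h"

arrow::Result<std::shared_ptr<arrow::Table>> RunJsonPlan(
    const std::string& substrait_json) {
  // Convert the JSON form of the plan into its binary protobuf serialization.
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> plan_buf,
                        arrow::engine::SerializeJsonPlan(substrait_json));
  // Execute with the defaults: default extension registry, default function
  // registry, CPU thread pool enabled, default memory pool.
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::RecordBatchReader> reader,
                        arrow::engine::ExecuteSerializedPlan(*plan_buf));
  // Drain the reader into a Table for convenience.
  return reader->ToTable();
}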
+ +#pragma once + +#if defined(_WIN32) || defined(__CYGWIN__) +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable : 4251) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#ifdef ARROW_ENGINE_STATIC +#define ARROW_ENGINE_EXPORT +#elif defined(ARROW_ENGINE_EXPORTING) +#define ARROW_ENGINE_EXPORT __declspec(dllexport) +#else +#define ARROW_ENGINE_EXPORT __declspec(dllimport) +#endif + +#define ARROW_ENGINE_NO_EXPORT +#else // Not Windows +#ifndef ARROW_ENGINE_EXPORT +#define ARROW_ENGINE_EXPORT __attribute__((visibility("default"))) +#endif +#ifndef ARROW_ENGINE_NO_EXPORT +#define ARROW_ENGINE_NO_EXPORT __attribute__((visibility("hidden"))) +#endif +#endif // Non-Windows + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/extension/fixed_shape_tensor.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/extension/fixed_shape_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..3fec79b5c2a3c75ac2aa68e8d3d88312b56a06a8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/extension/fixed_shape_tensor.h @@ -0,0 +1,128 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "arrow/extension_type.h" + +namespace arrow { +namespace extension { + +class ARROW_EXPORT FixedShapeTensorArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; + + /// \brief Create a FixedShapeTensorArray from a Tensor + /// + /// This method will create a FixedShapeTensorArray from a Tensor, taking its first + /// dimension as the number of elements in the resulting array and the remaining + /// dimensions as the shape of the individual tensors. If Tensor provides strides, + /// they will be used to determine dimension permutation. Otherwise, row-major layout + /// (i.e. no permutation) will be assumed. + /// + /// \param[in] tensor The Tensor to convert to a FixedShapeTensorArray + static Result> FromTensor( + const std::shared_ptr& tensor); + + /// \brief Create a Tensor from FixedShapeTensorArray + /// + /// This method will create a Tensor from a FixedShapeTensorArray, setting its first + /// dimension as length equal to the FixedShapeTensorArray's length and the remaining + /// dimensions as the FixedShapeTensorType's shape. Shape and dim_names will be + /// permuted according to permutation stored in the FixedShapeTensorType metadata. + const Result> ToTensor() const; +}; + +/// \brief Concrete type class for constant-size Tensor data. +/// This is a canonical arrow extension type. 
+/// See: https://arrow.apache.org/docs/format/CanonicalExtensions.html +class ARROW_EXPORT FixedShapeTensorType : public ExtensionType { + public: + FixedShapeTensorType(const std::shared_ptr& value_type, const int32_t& size, + const std::vector& shape, + const std::vector& permutation = {}, + const std::vector& dim_names = {}) + : ExtensionType(fixed_size_list(value_type, size)), + value_type_(value_type), + shape_(shape), + permutation_(permutation), + dim_names_(dim_names) {} + + std::string extension_name() const override { return "arrow.fixed_shape_tensor"; } + std::string ToString(bool show_metadata = false) const override; + + /// Number of dimensions of tensor elements + size_t ndim() const { return shape_.size(); } + + /// Shape of tensor elements + const std::vector shape() const { return shape_; } + + /// Value type of tensor elements + const std::shared_ptr value_type() const { return value_type_; } + + /// Strides of tensor elements. Strides state offset in bytes between adjacent + /// elements along each dimension. In case permutation is non-empty strides are + /// computed from permuted tensor element's shape. + const std::vector& strides(); + + /// Permutation mapping from logical to physical memory layout of tensor elements + const std::vector& permutation() const { return permutation_; } + + /// Dimension names of tensor elements. Dimensions are ordered physically. + const std::vector& dim_names() const { return dim_names_; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::string Serialize() const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized_data) const override; + + /// Create a FixedShapeTensorArray from ArrayData + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + /// \brief Create a Tensor from an ExtensionScalar from a FixedShapeTensorArray + /// + /// This method will return a Tensor from ExtensionScalar with strides + /// derived from shape and permutation of FixedShapeTensorType. Shape and + /// dim_names will be permuted according to permutation stored in the + /// FixedShapeTensorType metadata. + static Result> MakeTensor( + const std::shared_ptr& scalar); + + /// \brief Create a FixedShapeTensorType instance + static Result> Make( + const std::shared_ptr& value_type, const std::vector& shape, + const std::vector& permutation = {}, + const std::vector& dim_names = {}); + + private: + std::shared_ptr storage_type_; + std::shared_ptr value_type_; + std::vector shape_; + std::vector strides_; + std::vector permutation_; + std::vector dim_names_; +}; + +/// \brief Return a FixedShapeTensorType instance. +ARROW_EXPORT std::shared_ptr fixed_shape_tensor( + const std::shared_ptr& storage_type, const std::vector& shape, + const std::vector& permutation = {}, + const std::vector& dim_names = {}); + +} // namespace extension +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/api.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/api.h new file mode 100644 index 0000000000000000000000000000000000000000..d55b2c2d55a8afc1a84fb204b2356e93503def42 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/api.h @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
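// Illustrative sketch (not part of the vendored header above): creating the
// fixed-shape tensor extension type and converting a Tensor into a
// FixedShapeTensorArray. Shapes, values and the name `TensorExample` are
// placeholders.
#include <memory>
#include <vector>

#include "arrow/buffer.h"
#include "arrow/extension/fixed_shape_tensor.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/tensor.h"
#include "arrow/type.h"

arrow::Status TensorExample() {
  // Extension type describing 3x4 float32 tensors stored per array element.
  std::shared_ptr<arrow::DataType> tensor_type =
      arrow::extension::fixed_shape_tensor(arrow::float32(), {3, 4});

  // A 2x3x4 Tensor becomes a length-2 FixedShapeTensorArray: the first
  // dimension is the array length, the remaining dimensions the element shape.
  std::vector<float> values(2 * 3 * 4, 1.0f);
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Tensor> tensor,
                        arrow::Tensor::Make(arrow::float32(),
                                            arrow::Buffer::Wrap(values), {2, 3, 4}));
  ARROW_ASSIGN_OR_RAISE(auto tensor_array,
                        arrow::extension::FixedShapeTensorArray::FromTensor(tensor));
  (void)tensor_type;
  (void)tensor_array;
  return arrow::Status::OK();
}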
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/io/buffered.h" +#include "arrow/io/compressed.h" +#include "arrow/io/file.h" +#include "arrow/io/hdfs.h" +#include "arrow/io/interfaces.h" +#include "arrow/io/memory.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/buffered.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/buffered.h new file mode 100644 index 0000000000000000000000000000000000000000..01c0a016daba06c6b635cd02fcc5912d975bf924 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/buffered.h @@ -0,0 +1,167 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Buffered stream implementations + +#pragma once + +#include +#include +#include + +#include "arrow/io/concurrency.h" +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; +class MemoryPool; +class Status; + +namespace io { + +class ARROW_EXPORT BufferedOutputStream : public OutputStream { + public: + ~BufferedOutputStream() override; + + /// \brief Create a buffered output stream wrapping the given output stream. + /// \param[in] buffer_size the size of the temporary write buffer + /// \param[in] pool a MemoryPool to use for allocations + /// \param[in] raw another OutputStream + /// \return the created BufferedOutputStream + static Result> Create( + int64_t buffer_size, MemoryPool* pool, std::shared_ptr raw); + + /// \brief Resize internal buffer + /// \param[in] new_buffer_size the new buffer size + /// \return Status + Status SetBufferSize(int64_t new_buffer_size); + + /// \brief Return the current size of the internal buffer + int64_t buffer_size() const; + + /// \brief Return the number of remaining bytes that have not been flushed to + /// the raw OutputStream + int64_t bytes_buffered() const; + + /// \brief Flush any buffered writes and release the raw + /// OutputStream. Further operations on this object are invalid + /// \return the underlying OutputStream + Result> Detach(); + + // OutputStream interface + + /// \brief Close the buffered output stream. This implicitly closes the + /// underlying raw output stream. 
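// Illustrative sketch (not part of the vendored header above): coalescing many
// small writes through a BufferedOutputStream in front of a FileOutputStream.
// The path, buffer size and the name `WriteBuffered` are placeholders.
#include <memory>
#include <string>

#include "arrow/io/buffered.h"
#include "arrow/io/file.h"
#include "arrow/memory_pool.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Status WriteBuffered(const std::string& path) {
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::io::FileOutputStream> raw,
                        arrow::io::FileOutputStream::Open(path));
  // 64 KiB of buffering between the caller and the raw file stream.
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::io::BufferedOutputStream> out,
                        arrow::io::BufferedOutputStream::Create(
                            64 * 1024, arrow::default_memory_pool(), raw));
  for (int i = 0; i < 1000; ++i) {
    ARROW_RETURN_NOT_OK(out->Write("x", 1));  // small writes land in the buffer
  }
  ARROW_RETURN_NOT_OK(out->Flush());
  return out->Close();  // implicitly closes the underlying FileOutputStream
}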
+ Status Close() override; + Status Abort() override; + bool closed() const override; + + Result Tell() const override; + // Write bytes to the stream. Thread-safe + Status Write(const void* data, int64_t nbytes) override; + Status Write(const std::shared_ptr& data) override; + + Status Flush() override; + + /// \brief Return the underlying raw output stream. + std::shared_ptr raw() const; + + private: + explicit BufferedOutputStream(std::shared_ptr raw, MemoryPool* pool); + + class ARROW_NO_EXPORT Impl; + std::unique_ptr impl_; +}; + +/// \class BufferedInputStream +/// \brief An InputStream that performs buffered reads from an unbuffered +/// InputStream, which can mitigate the overhead of many small reads in some +/// cases +class ARROW_EXPORT BufferedInputStream + : public internal::InputStreamConcurrencyWrapper { + public: + ~BufferedInputStream() override; + + /// \brief Create a BufferedInputStream from a raw InputStream + /// \param[in] buffer_size the size of the temporary read buffer + /// \param[in] pool a MemoryPool to use for allocations + /// \param[in] raw a raw InputStream + /// \param[in] raw_read_bound a bound on the maximum number of bytes + /// to read from the raw input stream. The default -1 indicates that + /// it is unbounded + /// \return the created BufferedInputStream + static Result> Create( + int64_t buffer_size, MemoryPool* pool, std::shared_ptr raw, + int64_t raw_read_bound = -1); + + /// \brief Resize internal read buffer; calls to Read(...) will read at least + /// \param[in] new_buffer_size the new read buffer size + /// \return Status + Status SetBufferSize(int64_t new_buffer_size); + + /// \brief Return the number of remaining bytes in the read buffer + int64_t bytes_buffered() const; + + /// \brief Return the current size of the internal buffer + int64_t buffer_size() const; + + /// \brief Release the raw InputStream. Any data buffered will be + /// discarded. Further operations on this object are invalid + /// \return raw the underlying InputStream + std::shared_ptr Detach(); + + /// \brief Return the unbuffered InputStream + std::shared_ptr raw() const; + + // InputStream APIs + + bool closed() const override; + Result> ReadMetadata() override; + Future> ReadMetadataAsync( + const IOContext& io_context) override; + + private: + friend InputStreamConcurrencyWrapper; + + explicit BufferedInputStream(std::shared_ptr raw, MemoryPool* pool, + int64_t raw_total_bytes_bound); + + Status DoClose(); + Status DoAbort() override; + + /// \brief Returns the position of the buffered stream, though the position + /// of the unbuffered stream may be further advanced. + Result DoTell() const; + + Result DoRead(int64_t nbytes, void* out); + + /// \brief Read into buffer. + Result> DoRead(int64_t nbytes); + + /// \brief Return a zero-copy string view referencing buffered data, + /// but do not advance the position of the stream. 
Buffers data and + /// expands the buffer size if necessary + Result DoPeek(int64_t nbytes) override; + + class ARROW_NO_EXPORT Impl; + std::unique_ptr impl_; +}; + +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/caching.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/caching.h new file mode 100644 index 0000000000000000000000000000000000000000..e2b911fafdbbc2ec95d0de4233b6bbb663ffa44e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/caching.h @@ -0,0 +1,157 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/io/interfaces.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { + +struct ARROW_EXPORT CacheOptions { + static constexpr double kDefaultIdealBandwidthUtilizationFrac = 0.9; + static constexpr int64_t kDefaultMaxIdealRequestSizeMib = 64; + + /// \brief The maximum distance in bytes between two consecutive + /// ranges; beyond this value, ranges are not combined + int64_t hole_size_limit; + /// \brief The maximum size in bytes of a combined range; if + /// combining two consecutive ranges would produce a range of a + /// size greater than this, they are not combined + int64_t range_size_limit; + /// \brief A lazy cache does not perform any I/O until requested. + /// lazy = false: request all byte ranges when PreBuffer or WillNeed is called. + /// lazy = True, prefetch_limit = 0: request merged byte ranges only after the reader + /// needs them. + /// lazy = True, prefetch_limit = k: prefetch up to k merged byte ranges ahead of the + /// range that is currently being read. + bool lazy; + /// \brief The maximum number of ranges to be prefetched. This is only used + /// for lazy cache to asynchronously read some ranges after reading the target range. + int64_t prefetch_limit = 0; + + bool operator==(const CacheOptions& other) const { + return hole_size_limit == other.hole_size_limit && + range_size_limit == other.range_size_limit && lazy == other.lazy && + prefetch_limit == other.prefetch_limit; + } + + /// \brief Construct CacheOptions from network storage metrics (e.g. S3). + /// + /// \param[in] time_to_first_byte_millis Seek-time or Time-To-First-Byte (TTFB) in + /// milliseconds, also called call setup latency of a new read request. + /// The value is a positive integer. + /// \param[in] transfer_bandwidth_mib_per_sec Data transfer Bandwidth (BW) in MiB/sec + /// (per connection). + /// The value is a positive integer. + /// \param[in] ideal_bandwidth_utilization_frac Transfer bandwidth utilization fraction + /// (per connection) to maximize the net data load. 
+ /// The value is a positive double precision number less than 1. + /// \param[in] max_ideal_request_size_mib The maximum single data request size (in MiB) + /// to maximize the net data load. + /// The value is a positive integer. + /// \return A new instance of CacheOptions. + static CacheOptions MakeFromNetworkMetrics( + int64_t time_to_first_byte_millis, int64_t transfer_bandwidth_mib_per_sec, + double ideal_bandwidth_utilization_frac = kDefaultIdealBandwidthUtilizationFrac, + int64_t max_ideal_request_size_mib = kDefaultMaxIdealRequestSizeMib); + + static CacheOptions Defaults(); + static CacheOptions LazyDefaults(); +}; + +namespace internal { + +/// \brief A read cache designed to hide IO latencies when reading. +/// +/// This class takes multiple byte ranges that an application expects to read, and +/// coalesces them into fewer, larger read requests, which benefits performance on some +/// filesystems, particularly remote ones like Amazon S3. By default, it also issues +/// these read requests in parallel up front. +/// +/// To use: +/// 1. Cache() the ranges you expect to read in the future. Ideally, these ranges have +/// the exact offset and length that will later be read. The cache will combine those +/// ranges according to parameters (see constructor). +/// +/// By default, the cache will also start fetching the combined ranges in parallel in +/// the background, unless CacheOptions.lazy is set. +/// +/// 2. Call WaitFor() to be notified when the given ranges have been read. If +/// CacheOptions.lazy is set, I/O will be triggered in the background here instead. +/// This can be done in parallel (e.g. if parsing a file, call WaitFor() for each +/// chunk of the file that can be parsed in parallel). +/// +/// 3. Call Read() to retrieve the actual data for the given ranges. +/// A synchronous application may skip WaitFor() and just call Read() - it will still +/// benefit from coalescing and parallel fetching. +class ARROW_EXPORT ReadRangeCache { + public: + static constexpr int64_t kDefaultHoleSizeLimit = 8192; + static constexpr int64_t kDefaultRangeSizeLimit = 32 * 1024 * 1024; + + /// Construct a read cache with default + explicit ReadRangeCache(std::shared_ptr file, IOContext ctx) + : ReadRangeCache(file, file.get(), std::move(ctx), CacheOptions::Defaults()) {} + + /// Construct a read cache with given options + explicit ReadRangeCache(std::shared_ptr file, IOContext ctx, + CacheOptions options) + : ReadRangeCache(file, file.get(), std::move(ctx), options) {} + + /// Construct a read cache with an unowned file + ReadRangeCache(RandomAccessFile* file, IOContext ctx, CacheOptions options) + : ReadRangeCache(NULLPTR, file, std::move(ctx), options) {} + + ~ReadRangeCache(); + + /// \brief Cache the given ranges in the background. + /// + /// The caller must ensure that the ranges do not overlap with each other, + /// nor with previously cached ranges. Otherwise, behaviour will be undefined. + Status Cache(std::vector ranges); + + /// \brief Read a range previously given to Cache(). + Result> Read(ReadRange range); + + /// \brief Wait until all ranges added so far have been cached. + Future<> Wait(); + + /// \brief Wait until all given ranges have been cached. 
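// Illustrative sketch (not part of the vendored header above): prefetching a
// set of byte ranges with the internal ReadRangeCache. The ranges and the
// name `PrefetchRanges` are placeholders; real ranges typically come from
// file metadata such as Parquet column chunk locations.
#include <memory>
#include <vector>

#include "arrow/buffer.h"
#include "arrow/io/caching.h"
#include "arrow/io/interfaces.h"
#include "arrow/result.h"
#include "arrow/status.h"

arrow::Status PrefetchRanges(
    const std::shared_ptr<arrow::io::RandomAccessFile>& file) {
  arrow::io::internal::ReadRangeCache cache(file, arrow::io::default_io_context(),
                                            arrow::io::CacheOptions::Defaults());
  // Ranges we expect to read later; adjacent ones may be merged per CacheOptions.
  std::vector<arrow::io::ReadRange> ranges = {{0, 4096}, {8192, 4096}};
  ARROW_RETURN_NOT_OK(cache.Cache(ranges));              // start background reads
  ARROW_RETURN_NOT_OK(cache.WaitFor(ranges).status());   // block until cached
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> first, cache.Read(ranges[0]));
  (void)first;
  return arrow::Status::OK();
}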
+ Future<> WaitFor(std::vector ranges); + + protected: + struct Impl; + struct LazyImpl; + + ReadRangeCache(std::shared_ptr owned_file, RandomAccessFile* file, + IOContext ctx, CacheOptions options); + + std::unique_ptr impl_; +}; + +} // namespace internal +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/compressed.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/compressed.h new file mode 100644 index 0000000000000000000000000000000000000000..6b4e7ab4d7248829e26bc4bbef9cb3e628f5f906 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/compressed.h @@ -0,0 +1,124 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Compressed stream implementations + +#pragma once + +#include +#include + +#include "arrow/io/concurrency.h" +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class MemoryPool; +class Status; + +namespace util { + +class Codec; + +} // namespace util + +namespace io { + +class ARROW_EXPORT CompressedOutputStream : public OutputStream { + public: + ~CompressedOutputStream() override; + + /// \brief Create a compressed output stream wrapping the given output stream. + /// + /// The codec must be capable of streaming compression. Some codecs, + /// like Snappy, are not able to do so. + static Result> Make( + util::Codec* codec, const std::shared_ptr& raw, + MemoryPool* pool = default_memory_pool()); + + // OutputStream interface + + /// \brief Close the compressed output stream. This implicitly closes the + /// underlying raw output stream. + Status Close() override; + Status Abort() override; + bool closed() const override; + + Result Tell() const override; + + Status Write(const void* data, int64_t nbytes) override; + /// \cond FALSE + using Writable::Write; + /// \endcond + Status Flush() override; + + /// \brief Return the underlying raw output stream. + std::shared_ptr raw() const; + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(CompressedOutputStream); + + CompressedOutputStream() = default; + + class ARROW_NO_EXPORT Impl; + std::unique_ptr impl_; +}; + +class ARROW_EXPORT CompressedInputStream + : public internal::InputStreamConcurrencyWrapper { + public: + ~CompressedInputStream() override; + + /// \brief Create a compressed input stream wrapping the given input stream. + /// + /// The codec must be capable of streaming decompression. Some codecs, + /// like Snappy, are not able to do so. 
+ static Result> Make( + util::Codec* codec, const std::shared_ptr& raw, + MemoryPool* pool = default_memory_pool()); + + // InputStream interface + + bool closed() const override; + Result> ReadMetadata() override; + Future> ReadMetadataAsync( + const IOContext& io_context) override; + + /// \brief Return the underlying raw input stream. + std::shared_ptr raw() const; + + private: + friend InputStreamConcurrencyWrapper; + ARROW_DISALLOW_COPY_AND_ASSIGN(CompressedInputStream); + + CompressedInputStream() = default; + + /// \brief Close the compressed input stream. This implicitly closes the + /// underlying raw input stream. + Status DoClose(); + Status DoAbort() override; + Result DoTell() const; + Result DoRead(int64_t nbytes, void* out); + Result> DoRead(int64_t nbytes); + + class ARROW_NO_EXPORT Impl; + std::unique_ptr impl_; +}; + +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/concurrency.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/concurrency.h new file mode 100644 index 0000000000000000000000000000000000000000..43ceb8debcecb24e0f859b8636057cacfc090bac --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/concurrency.h @@ -0,0 +1,263 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/io/interfaces.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { +namespace internal { + +template +class SharedLockGuard { + public: + explicit SharedLockGuard(LockType* lock) : lock_(lock) { lock_->LockShared(); } + + ~SharedLockGuard() { lock_->UnlockShared(); } + + protected: + LockType* lock_; +}; + +template +class ExclusiveLockGuard { + public: + explicit ExclusiveLockGuard(LockType* lock) : lock_(lock) { lock_->LockExclusive(); } + + ~ExclusiveLockGuard() { lock_->UnlockExclusive(); } + + protected: + LockType* lock_; +}; + +// Debug concurrency checker that marks "shared" and "exclusive" code sections, +// aborting if the concurrency rules get violated. Does nothing in release mode. +// Note that we intentionally use the same class declaration in debug and +// release builds in order to avoid runtime failures when e.g. loading a +// release-built DLL with a debug-built application, or the reverse. 
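// Illustrative sketch (not part of the vendored header above): streaming gzip
// compression over an arbitrary OutputStream via the Make() factories above.
// The payload and the name `WriteCompressed` are placeholders; any codec that
// supports streaming compression can be used.
#include <memory>

#include "arrow/io/compressed.h"
#include "arrow/io/interfaces.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/util/compression.h"

arrow::Status WriteCompressed(const std::shared_ptr<arrow::io::OutputStream>& raw) {
  ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::util::Codec> codec,
                        arrow::util::Codec::Create(arrow::Compression::GZIP));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::io::CompressedOutputStream> out,
                        arrow::io::CompressedOutputStream::Make(codec.get(), raw));
  ARROW_RETURN_NOT_OK(out->Write("hello", 5));
  // Close() flushes any pending compressed data and closes the raw stream;
  // the codec must stay alive until then.
  return out->Close();
}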
+ +class ARROW_EXPORT SharedExclusiveChecker { + public: + SharedExclusiveChecker(); + void LockShared(); + void UnlockShared(); + void LockExclusive(); + void UnlockExclusive(); + + SharedLockGuard shared_guard() { + return SharedLockGuard(this); + } + + ExclusiveLockGuard exclusive_guard() { + return ExclusiveLockGuard(this); + } + + protected: + struct Impl; + std::shared_ptr impl_; +}; + +// Concurrency wrappers for IO classes that check the correctness of +// concurrent calls to various methods. It is not necessary to wrap all +// IO classes with these, only a few core classes that get used in tests. +// +// We're not using virtual inheritance here as virtual bases have poorly +// understood semantic overhead which we'd be passing on to implementers +// and users of these interfaces. Instead, we just duplicate the method +// wrappers between those two classes. + +template +class ARROW_EXPORT InputStreamConcurrencyWrapper : public InputStream { + public: + Status Close() final { + auto guard = lock_.exclusive_guard(); + return derived()->DoClose(); + } + + Status Abort() final { + auto guard = lock_.exclusive_guard(); + return derived()->DoAbort(); + } + + Result Tell() const final { + auto guard = lock_.exclusive_guard(); + return derived()->DoTell(); + } + + Result Read(int64_t nbytes, void* out) final { + auto guard = lock_.exclusive_guard(); + return derived()->DoRead(nbytes, out); + } + + Result> Read(int64_t nbytes) final { + auto guard = lock_.exclusive_guard(); + return derived()->DoRead(nbytes); + } + + Result Peek(int64_t nbytes) final { + auto guard = lock_.exclusive_guard(); + return derived()->DoPeek(nbytes); + } + + /* + Methods to implement in derived class: + + Status DoClose(); + Result DoTell() const; + Result DoRead(int64_t nbytes, void* out); + Result> DoRead(int64_t nbytes); + + And optionally: + + Status DoAbort() override; + Result DoPeek(int64_t nbytes) override; + + These methods should be protected in the derived class and + InputStreamConcurrencyWrapper declared as a friend with + + friend InputStreamConcurrencyWrapper; + */ + + protected: + // Default implementations. They are virtual because the derived class may + // have derived classes itself. 
+ virtual Status DoAbort() { return derived()->DoClose(); } + + virtual Result DoPeek(int64_t ARROW_ARG_UNUSED(nbytes)) { + return Status::NotImplemented("Peek not implemented"); + } + + Derived* derived() { return ::arrow::internal::checked_cast(this); } + + const Derived* derived() const { + return ::arrow::internal::checked_cast(this); + } + + mutable SharedExclusiveChecker lock_; +}; + +template +class ARROW_EXPORT RandomAccessFileConcurrencyWrapper : public RandomAccessFile { + public: + Status Close() final { + auto guard = lock_.exclusive_guard(); + return derived()->DoClose(); + } + + Status Abort() final { + auto guard = lock_.exclusive_guard(); + return derived()->DoAbort(); + } + + Result Tell() const final { + auto guard = lock_.exclusive_guard(); + return derived()->DoTell(); + } + + Result Read(int64_t nbytes, void* out) final { + auto guard = lock_.exclusive_guard(); + return derived()->DoRead(nbytes, out); + } + + Result> Read(int64_t nbytes) final { + auto guard = lock_.exclusive_guard(); + return derived()->DoRead(nbytes); + } + + Result Peek(int64_t nbytes) final { + auto guard = lock_.exclusive_guard(); + return derived()->DoPeek(nbytes); + } + + Status Seek(int64_t position) final { + auto guard = lock_.exclusive_guard(); + return derived()->DoSeek(position); + } + + Result GetSize() final { + auto guard = lock_.shared_guard(); + return derived()->DoGetSize(); + } + + // NOTE: ReadAt doesn't use stream pointer, but it is allowed to update it + // (it's the case on Windows when using ReadFileEx). + // So any method that relies on the current position (even if it doesn't + // update it, such as Peek) cannot run in parallel with ReadAt and has + // to use the exclusive_guard. + + Result ReadAt(int64_t position, int64_t nbytes, void* out) final { + auto guard = lock_.shared_guard(); + return derived()->DoReadAt(position, nbytes, out); + } + + Result> ReadAt(int64_t position, int64_t nbytes) final { + auto guard = lock_.shared_guard(); + return derived()->DoReadAt(position, nbytes); + } + + /* + Methods to implement in derived class: + + Status DoClose(); + Result DoTell() const; + Result DoRead(int64_t nbytes, void* out); + Result> DoRead(int64_t nbytes); + Status DoSeek(int64_t position); + Result DoGetSize() + Result DoReadAt(int64_t position, int64_t nbytes, void* out); + Result> DoReadAt(int64_t position, int64_t nbytes); + + And optionally: + + Status DoAbort() override; + Result DoPeek(int64_t nbytes) override; + + These methods should be protected in the derived class and + RandomAccessFileConcurrencyWrapper declared as a friend with + + friend RandomAccessFileConcurrencyWrapper; + */ + + protected: + // Default implementations. They are virtual because the derived class may + // have derived classes itself. 
+ virtual Status DoAbort() { return derived()->DoClose(); } + + virtual Result DoPeek(int64_t ARROW_ARG_UNUSED(nbytes)) { + return Status::NotImplemented("Peek not implemented"); + } + + Derived* derived() { return ::arrow::internal::checked_cast(this); } + + const Derived* derived() const { + return ::arrow::internal::checked_cast(this); + } + + mutable SharedExclusiveChecker lock_; +}; + +} // namespace internal +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/file.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/file.h new file mode 100644 index 0000000000000000000000000000000000000000..50d4f2c4dfc90f8ffb8061f68125b24ae82bb7ed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/file.h @@ -0,0 +1,221 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// IO interface implementations for OS files + +#pragma once + +#include +#include +#include +#include + +#include "arrow/io/concurrency.h" +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; +class MemoryPool; +class Status; + +namespace io { + +/// \brief An operating system file open in write-only mode. +class ARROW_EXPORT FileOutputStream : public OutputStream { + public: + ~FileOutputStream() override; + + /// \brief Open a local file for writing, truncating any existing file + /// \param[in] path with UTF8 encoding + /// \param[in] append append to existing file, otherwise truncate to 0 bytes + /// \return an open FileOutputStream + /// + /// When opening a new file, any existing file with the indicated path is + /// truncated to 0 bytes, deleting any existing data + static Result> Open(const std::string& path, + bool append = false); + + /// \brief Open a file descriptor for writing. The underlying file isn't + /// truncated. + /// \param[in] fd file descriptor + /// \return an open FileOutputStream + /// + /// The file descriptor becomes owned by the OutputStream, and will be closed + /// on Close() or destruction. + static Result> Open(int fd); + + // OutputStream interface + Status Close() override; + bool closed() const override; + Result Tell() const override; + + // Write bytes to the stream. Thread-safe + Status Write(const void* data, int64_t nbytes) override; + /// \cond FALSE + using Writable::Write; + /// \endcond + + int file_descriptor() const; + + private: + FileOutputStream(); + + class ARROW_NO_EXPORT FileOutputStreamImpl; + std::unique_ptr impl_; +}; + +/// \brief An operating system file open in read-only mode. +/// +/// Reads through this implementation are unbuffered. If many small reads +/// need to be issued, it is recommended to use a buffering layer for good +/// performance. 
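The comment block above lists the `Do*` methods a derived class must supply when building on `InputStreamConcurrencyWrapper`. Below is a minimal sketch (not part of Arrow) of such a derived class serving bytes from an in-memory string; `StringInputStream` and its members are hypothetical names for illustration only, and the friend declaration follows the pattern the header itself documents.

```cpp
#include <algorithm>
#include <cstring>
#include <memory>
#include <string>

#include "arrow/buffer.h"
#include "arrow/io/concurrency.h"
#include "arrow/result.h"
#include "arrow/status.h"

// Hypothetical example class: an InputStream over a std::string that gets the
// Close/Tell/Read locking boilerplate from InputStreamConcurrencyWrapper.
class StringInputStream
    : public arrow::io::internal::InputStreamConcurrencyWrapper<StringInputStream> {
 public:
  explicit StringInputStream(std::string data) : data_(std::move(data)) {}

  bool closed() const override { return closed_; }

 protected:
  // Grant the CRTP wrapper access to the Do* implementations, as advised above.
  friend ::arrow::io::internal::InputStreamConcurrencyWrapper<StringInputStream>;

  arrow::Status DoClose() {
    closed_ = true;
    return arrow::Status::OK();
  }

  arrow::Result<int64_t> DoTell() const { return position_; }

  arrow::Result<int64_t> DoRead(int64_t nbytes, void* out) {
    const int64_t avail = static_cast<int64_t>(data_.size()) - position_;
    const int64_t n = std::min(nbytes, avail);
    std::memcpy(out, data_.data() + position_, static_cast<size_t>(n));
    position_ += n;
    return n;
  }

  arrow::Result<std::shared_ptr<arrow::Buffer>> DoRead(int64_t nbytes) {
    const int64_t avail = static_cast<int64_t>(data_.size()) - position_;
    const int64_t n = std::min(nbytes, avail);
    auto buf = arrow::Buffer::FromString(
        data_.substr(static_cast<size_t>(position_), static_cast<size_t>(n)));
    position_ += n;
    return buf;
  }

 private:
  std::string data_;
  int64_t position_ = 0;
  bool closed_ = false;
};
```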
+class ARROW_EXPORT ReadableFile + : public internal::RandomAccessFileConcurrencyWrapper { + public: + ~ReadableFile() override; + + /// \brief Open a local file for reading + /// \param[in] path with UTF8 encoding + /// \param[in] pool a MemoryPool for memory allocations + /// \return ReadableFile instance + static Result> Open( + const std::string& path, MemoryPool* pool = default_memory_pool()); + + /// \brief Open a local file for reading + /// \param[in] fd file descriptor + /// \param[in] pool a MemoryPool for memory allocations + /// \return ReadableFile instance + /// + /// The file descriptor becomes owned by the ReadableFile, and will be closed + /// on Close() or destruction. + static Result> Open( + int fd, MemoryPool* pool = default_memory_pool()); + + bool closed() const override; + + int file_descriptor() const; + + Status WillNeed(const std::vector& ranges) override; + + private: + friend RandomAccessFileConcurrencyWrapper; + + explicit ReadableFile(MemoryPool* pool); + + Status DoClose(); + Result DoTell() const; + Result DoRead(int64_t nbytes, void* buffer); + Result> DoRead(int64_t nbytes); + + /// \brief Thread-safe implementation of ReadAt + Result DoReadAt(int64_t position, int64_t nbytes, void* out); + + /// \brief Thread-safe implementation of ReadAt + Result> DoReadAt(int64_t position, int64_t nbytes); + + Result DoGetSize(); + Status DoSeek(int64_t position); + + class ARROW_NO_EXPORT ReadableFileImpl; + std::unique_ptr impl_; +}; + +/// \brief A file interface that uses memory-mapped files for memory interactions +/// +/// This implementation supports zero-copy reads. The same class is used +/// for both reading and writing. +/// +/// If opening a file in a writable mode, it is not truncated first as with +/// FileOutputStream. +class ARROW_EXPORT MemoryMappedFile : public ReadWriteFileInterface { + public: + ~MemoryMappedFile() override; + + /// Create new file with indicated size, return in read/write mode + static Result> Create(const std::string& path, + int64_t size); + + // mmap() with whole file + static Result> Open(const std::string& path, + FileMode::type mode); + + // mmap() with a region of file, the offset must be a multiple of the page size + static Result> Open(const std::string& path, + FileMode::type mode, + const int64_t offset, + const int64_t length); + + Status Close() override; + + bool closed() const override; + + Result Tell() const override; + + Status Seek(int64_t position) override; + + // Required by RandomAccessFile, copies memory into out. Not thread-safe + Result Read(int64_t nbytes, void* out) override; + + // Zero copy read, moves position pointer. Not thread-safe + Result> Read(int64_t nbytes) override; + + // Zero-copy read, leaves position unchanged. Acquires a reader lock + // for the duration of slice creation (typically very short). Is thread-safe. + Result> ReadAt(int64_t position, int64_t nbytes) override; + + // Raw copy of the memory at specified position. Thread-safe, but + // locks out other readers for the duration of memcpy. Prefer the + // zero copy method + Result ReadAt(int64_t position, int64_t nbytes, void* out) override; + + // Synchronous ReadAsync override + Future> ReadAsync(const IOContext&, int64_t position, + int64_t nbytes) override; + + Status WillNeed(const std::vector& ranges) override; + + bool supports_zero_copy() const override; + + /// Write data at the current position in the file. 
Thread-safe + Status Write(const void* data, int64_t nbytes) override; + /// \cond FALSE + using Writable::Write; + /// \endcond + + /// Set the size of the map to new_size. + Status Resize(int64_t new_size); + + /// Write data at a particular position in the file. Thread-safe + Status WriteAt(int64_t position, const void* data, int64_t nbytes) override; + + Result GetSize() override; + + int file_descriptor() const; + + private: + MemoryMappedFile(); + + Status WriteInternal(const void* data, int64_t nbytes); + + class ARROW_NO_EXPORT MemoryMap; + std::shared_ptr memory_map_; +}; + +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/hdfs.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/hdfs.h new file mode 100644 index 0000000000000000000000000000000000000000..46038070ae4edae9dc59760004079b596adfec51 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/hdfs.h @@ -0,0 +1,284 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/io/interfaces.h" +#include "arrow/util/macros.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; +class MemoryPool; +class Status; + +namespace io { + +class HdfsReadableFile; +class HdfsOutputStream; + +/// DEPRECATED. Use the FileSystem API in arrow::fs instead. +struct ObjectType { + enum type { FILE, DIRECTORY }; +}; + +/// DEPRECATED. Use the FileSystem API in arrow::fs instead. 
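As a hedged sketch of the local-file classes declared in file.h, the snippet below writes a few bytes with `FileOutputStream` and reads them back with `ReadableFile`. The path is a placeholder, and error propagation uses the `ARROW_RETURN_NOT_OK` / `ARROW_ASSIGN_OR_RAISE` helpers from arrow/status.h and arrow/result.h.

```cpp
#include <iostream>
#include <memory>
#include <string>

#include "arrow/buffer.h"
#include "arrow/io/file.h"
#include "arrow/result.h"
#include "arrow/status.h"

// Write a file, then read it back (path is a placeholder).
arrow::Status RoundTrip(const std::string& path) {
  // Open (and truncate) the file for writing.
  ARROW_ASSIGN_OR_RAISE(auto out, arrow::io::FileOutputStream::Open(path));
  ARROW_RETURN_NOT_OK(out->Write("hello arrow", 11));
  ARROW_RETURN_NOT_OK(out->Close());

  // Re-open it for (unbuffered) reading.
  ARROW_ASSIGN_OR_RAISE(auto in, arrow::io::ReadableFile::Open(path));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> buf, in->Read(11));
  std::cout << buf->ToString() << std::endl;
  return in->Close();
}

int main() {
  arrow::Status st = RoundTrip("/tmp/arrow_io_example.bin");
  if (!st.ok()) {
    std::cerr << st.ToString() << std::endl;
    return 1;
  }
  return 0;
}
```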
+struct ARROW_EXPORT FileStatistics { + /// Size of file, -1 if finding length is unsupported + int64_t size; + ObjectType::type kind; +}; + +class ARROW_EXPORT FileSystem { + public: + virtual ~FileSystem() = default; + + virtual Status MakeDirectory(const std::string& path) = 0; + + virtual Status DeleteDirectory(const std::string& path) = 0; + + virtual Status GetChildren(const std::string& path, + std::vector* listing) = 0; + + virtual Status Rename(const std::string& src, const std::string& dst) = 0; + + virtual Status Stat(const std::string& path, FileStatistics* stat) = 0; +}; + +struct HdfsPathInfo { + ObjectType::type kind; + + std::string name; + std::string owner; + std::string group; + + // Access times in UNIX timestamps (seconds) + int64_t size; + int64_t block_size; + + int32_t last_modified_time; + int32_t last_access_time; + + int16_t replication; + int16_t permissions; +}; + +struct HdfsConnectionConfig { + std::string host; + int port; + std::string user; + std::string kerb_ticket; + std::unordered_map extra_conf; +}; + +class ARROW_EXPORT HadoopFileSystem : public FileSystem { + public: + ~HadoopFileSystem() override; + + // Connect to an HDFS cluster given a configuration + // + // @param config (in): configuration for connecting + // @param fs (out): the created client + // @returns Status + static Status Connect(const HdfsConnectionConfig* config, + std::shared_ptr* fs); + + // Create directory and all parents + // + // @param path (in): absolute HDFS path + // @returns Status + Status MakeDirectory(const std::string& path) override; + + // Delete file or directory + // @param path absolute path to data + // @param recursive if path is a directory, delete contents as well + // @returns error status on failure + Status Delete(const std::string& path, bool recursive = false); + + Status DeleteDirectory(const std::string& path) override; + + // Disconnect from cluster + // + // @returns Status + Status Disconnect(); + + // @param path (in): absolute HDFS path + // @returns bool, true if the path exists, false if not (or on error) + bool Exists(const std::string& path); + + // @param path (in): absolute HDFS path + // @param info (out) + // @returns Status + Status GetPathInfo(const std::string& path, HdfsPathInfo* info); + + // @param nbytes (out): total capacity of the filesystem + // @returns Status + Status GetCapacity(int64_t* nbytes); + + // @param nbytes (out): total bytes used of the filesystem + // @returns Status + Status GetUsed(int64_t* nbytes); + + Status GetChildren(const std::string& path, std::vector* listing) override; + + /// List directory contents + /// + /// If path is a relative path, returned values will be absolute paths or URIs + /// starting from the current working directory. + Status ListDirectory(const std::string& path, std::vector* listing); + + /// Return the filesystem's current working directory. + /// + /// The working directory is the base path for all relative paths given to + /// other APIs. + /// NOTE: this actually returns a URI. 
+ Status GetWorkingDirectory(std::string* out); + + /// Change + /// + /// @param path file path to change + /// @param owner pass null for no change + /// @param group pass null for no change + Status Chown(const std::string& path, const char* owner, const char* group); + + /// Change path permissions + /// + /// \param path Absolute path in file system + /// \param mode Mode bitset + /// \return Status + Status Chmod(const std::string& path, int mode); + + // Move file or directory from source path to destination path within the + // current filesystem + Status Rename(const std::string& src, const std::string& dst) override; + + Status Copy(const std::string& src, const std::string& dst); + + Status Move(const std::string& src, const std::string& dst); + + Status Stat(const std::string& path, FileStatistics* stat) override; + + // TODO(wesm): GetWorkingDirectory, SetWorkingDirectory + + // Open an HDFS file in READ mode. Returns error + // status if the file is not found. + // + // @param path complete file path + Status OpenReadable(const std::string& path, int32_t buffer_size, + std::shared_ptr* file); + + Status OpenReadable(const std::string& path, int32_t buffer_size, + const io::IOContext& io_context, + std::shared_ptr* file); + + Status OpenReadable(const std::string& path, std::shared_ptr* file); + + Status OpenReadable(const std::string& path, const io::IOContext& io_context, + std::shared_ptr* file); + + // FileMode::WRITE options + // @param path complete file path + // @param buffer_size 0 by default + // @param replication 0 by default + // @param default_block_size 0 by default + Status OpenWritable(const std::string& path, bool append, int32_t buffer_size, + int16_t replication, int64_t default_block_size, + std::shared_ptr* file); + + Status OpenWritable(const std::string& path, bool append, + std::shared_ptr* file); + + private: + friend class HdfsReadableFile; + friend class HdfsOutputStream; + + class ARROW_NO_EXPORT HadoopFileSystemImpl; + std::unique_ptr impl_; + + HadoopFileSystem(); + ARROW_DISALLOW_COPY_AND_ASSIGN(HadoopFileSystem); +}; + +class ARROW_EXPORT HdfsReadableFile : public RandomAccessFile { + public: + ~HdfsReadableFile() override; + + Status Close() override; + + bool closed() const override; + + // NOTE: If you wish to read a particular range of a file in a multithreaded + // context, you may prefer to use ReadAt to avoid locking issues + Result Read(int64_t nbytes, void* out) override; + Result> Read(int64_t nbytes) override; + Result ReadAt(int64_t position, int64_t nbytes, void* out) override; + Result> ReadAt(int64_t position, int64_t nbytes) override; + + Status Seek(int64_t position) override; + Result Tell() const override; + Result GetSize() override; + + private: + explicit HdfsReadableFile(const io::IOContext&); + + class ARROW_NO_EXPORT HdfsReadableFileImpl; + std::unique_ptr impl_; + + friend class HadoopFileSystem::HadoopFileSystemImpl; + + ARROW_DISALLOW_COPY_AND_ASSIGN(HdfsReadableFile); +}; + +// Naming this file OutputStream because it does not support seeking (like the +// WritableFile interface) +class ARROW_EXPORT HdfsOutputStream : public OutputStream { + public: + ~HdfsOutputStream() override; + + Status Close() override; + + bool closed() const override; + + using OutputStream::Write; + Status Write(const void* buffer, int64_t nbytes) override; + + Status Flush() override; + + Result Tell() const override; + + private: + class ARROW_NO_EXPORT HdfsOutputStreamImpl; + std::unique_ptr impl_; + + friend class 
HadoopFileSystem::HadoopFileSystemImpl; + + HdfsOutputStream(); + + ARROW_DISALLOW_COPY_AND_ASSIGN(HdfsOutputStream); +}; + +ARROW_EXPORT Status HaveLibHdfs(); + +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/interfaces.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/interfaces.h new file mode 100644 index 0000000000000000000000000000000000000000..b36c38c6d48688a793c2588477f97648a8b550c6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/interfaces.h @@ -0,0 +1,362 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/io/type_fwd.h" +#include "arrow/type_fwd.h" +#include "arrow/util/cancel.h" +#include "arrow/util/macros.h" +#include "arrow/util/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { + +struct ReadRange { + int64_t offset; + int64_t length; + + friend bool operator==(const ReadRange& left, const ReadRange& right) { + return (left.offset == right.offset && left.length == right.length); + } + friend bool operator!=(const ReadRange& left, const ReadRange& right) { + return !(left == right); + } + + bool Contains(const ReadRange& other) const { + return (offset <= other.offset && offset + length >= other.offset + other.length); + } +}; + +/// EXPERIMENTAL: options provider for IO tasks +/// +/// Includes an Executor (which will be used to execute asynchronous reads), +/// a MemoryPool (which will be used to allocate buffers when zero copy reads +/// are not possible), and an external id (in case the executor receives tasks from +/// multiple sources and must distinguish tasks associated with this IOContext). 
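A minimal sketch of the deprecated HDFS client declared in hdfs.h: connect, stat a file, and read it. Host, port, user, and path are placeholders, and as the header itself advises, new code should prefer the `arrow::fs` filesystem layer instead.

```cpp
#include <iostream>
#include <memory>

#include "arrow/buffer.h"
#include "arrow/io/hdfs.h"
#include "arrow/result.h"
#include "arrow/status.h"

// Connect to HDFS, stat a file, and read it (all identifiers are placeholders).
arrow::Status ReadFromHdfs() {
  arrow::io::HdfsConnectionConfig config;
  config.host = "namenode.example.org";  // placeholder
  config.port = 8020;                    // placeholder
  config.user = "analyst";               // placeholder

  std::shared_ptr<arrow::io::HadoopFileSystem> fs;
  ARROW_RETURN_NOT_OK(arrow::io::HadoopFileSystem::Connect(&config, &fs));

  arrow::io::FileStatistics stat;
  ARROW_RETURN_NOT_OK(fs->Stat("/data/example.bin", &stat));
  std::cout << "size: " << stat.size << std::endl;

  std::shared_ptr<arrow::io::HdfsReadableFile> file;
  ARROW_RETURN_NOT_OK(fs->OpenReadable("/data/example.bin", &file));
  ARROW_ASSIGN_OR_RAISE(auto buf, file->Read(stat.size));
  std::cout << "read " << buf->size() << " bytes" << std::endl;
  ARROW_RETURN_NOT_OK(file->Close());
  return fs->Disconnect();
}
```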
+struct ARROW_EXPORT IOContext { + // No specified executor: will use a global IO thread pool + IOContext() : IOContext(default_memory_pool(), StopToken::Unstoppable()) {} + + explicit IOContext(StopToken stop_token) + : IOContext(default_memory_pool(), std::move(stop_token)) {} + + explicit IOContext(MemoryPool* pool, StopToken stop_token = StopToken::Unstoppable()); + + explicit IOContext(MemoryPool* pool, ::arrow::internal::Executor* executor, + StopToken stop_token = StopToken::Unstoppable(), + int64_t external_id = -1) + : pool_(pool), + executor_(executor), + external_id_(external_id), + stop_token_(std::move(stop_token)) {} + + explicit IOContext(::arrow::internal::Executor* executor, + StopToken stop_token = StopToken::Unstoppable(), + int64_t external_id = -1) + : pool_(default_memory_pool()), + executor_(executor), + external_id_(external_id), + stop_token_(std::move(stop_token)) {} + + MemoryPool* pool() const { return pool_; } + + ::arrow::internal::Executor* executor() const { return executor_; } + + // An application-specific ID, forwarded to executor task submissions + int64_t external_id() const { return external_id_; } + + StopToken stop_token() const { return stop_token_; } + + private: + MemoryPool* pool_; + ::arrow::internal::Executor* executor_; + int64_t external_id_; + StopToken stop_token_; +}; + +class ARROW_EXPORT FileInterface : public std::enable_shared_from_this { + public: + virtual ~FileInterface() = 0; + + /// \brief Close the stream cleanly + /// + /// For writable streams, this will attempt to flush any pending data + /// before releasing the underlying resource. + /// + /// After Close() is called, closed() returns true and the stream is not + /// available for further operations. + virtual Status Close() = 0; + + /// \brief Close the stream asynchronously + /// + /// By default, this will just submit the synchronous Close() to the + /// default I/O thread pool. Subclasses may implement this in a more + /// efficient manner. + virtual Future<> CloseAsync(); + + /// \brief Close the stream abruptly + /// + /// This method does not guarantee that any pending data is flushed. + /// It merely releases any underlying resource used by the stream for + /// its operation. + /// + /// After Abort() is called, closed() returns true and the stream is not + /// available for further operations. + virtual Status Abort(); + + /// \brief Return the position in this stream + virtual Result Tell() const = 0; + + /// \brief Return whether the stream is closed + virtual bool closed() const = 0; + + FileMode::type mode() const { return mode_; } + + protected: + FileInterface() : mode_(FileMode::READ) {} + FileMode::type mode_; + void set_mode(FileMode::type mode) { mode_ = mode; } + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(FileInterface); +}; + +class ARROW_EXPORT Seekable { + public: + virtual ~Seekable() = default; + virtual Status Seek(int64_t position) = 0; +}; + +class ARROW_EXPORT Writable { + public: + virtual ~Writable() = default; + + /// \brief Write the given data to the stream + /// + /// This method always processes the bytes in full. Depending on the + /// semantics of the stream, the data may be written out immediately, + /// held in a buffer, or written asynchronously. In the case where + /// the stream buffers the data, it will be copied. To avoid potentially + /// large copies, use the Write variant that takes an owned Buffer. 
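A short, hedged sketch of constructing the `IOContext` declared above with a custom memory pool and a cancellation token. It assumes `arrow::StopSource` from arrow/util/cancel.h; the asynchronous read that would eventually consume this context is not shown.

```cpp
#include "arrow/io/interfaces.h"
#include "arrow/memory_pool.h"
#include "arrow/util/cancel.h"

// Build an IOContext whose async reads can be interrupted by calling
// stop_source->RequestStop() from another thread.
arrow::io::IOContext MakeCancellableContext(arrow::StopSource* stop_source) {
  return arrow::io::IOContext(arrow::default_memory_pool(), stop_source->token());
}
```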
+ virtual Status Write(const void* data, int64_t nbytes) = 0; + + /// \brief Write the given data to the stream + /// + /// Since the Buffer owns its memory, this method can avoid a copy if + /// buffering is required. See Write(const void*, int64_t) for details. + virtual Status Write(const std::shared_ptr& data); + + /// \brief Flush buffered bytes, if any + virtual Status Flush(); + + Status Write(std::string_view data); +}; + +class ARROW_EXPORT Readable { + public: + virtual ~Readable() = default; + + /// \brief Read data from current file position. + /// + /// Read at most `nbytes` from the current file position into `out`. + /// The number of bytes read is returned. + virtual Result Read(int64_t nbytes, void* out) = 0; + + /// \brief Read data from current file position. + /// + /// Read at most `nbytes` from the current file position. Less bytes may + /// be read if EOF is reached. This method updates the current file position. + /// + /// In some cases (e.g. a memory-mapped file), this method may avoid a + /// memory copy. + virtual Result> Read(int64_t nbytes) = 0; + + /// EXPERIMENTAL: The IOContext associated with this file. + /// + /// By default, this is the same as default_io_context(), but it may be + /// overridden by subclasses. + virtual const IOContext& io_context() const; +}; + +class ARROW_EXPORT OutputStream : virtual public FileInterface, public Writable { + protected: + OutputStream() = default; +}; + +class ARROW_EXPORT InputStream : virtual public FileInterface, virtual public Readable { + public: + /// \brief Advance or skip stream indicated number of bytes + /// \param[in] nbytes the number to move forward + /// \return Status + Status Advance(int64_t nbytes); + + /// \brief Return zero-copy string_view to upcoming bytes. + /// + /// Do not modify the stream position. The view becomes invalid after + /// any operation on the stream. May trigger buffering if the requested + /// size is larger than the number of buffered bytes. + /// + /// May return NotImplemented on streams that don't support it. + /// + /// \param[in] nbytes the maximum number of bytes to see + virtual Result Peek(int64_t nbytes); + + /// \brief Return true if InputStream is capable of zero copy Buffer reads + /// + /// Zero copy reads imply the use of Buffer-returning Read() overloads. + virtual bool supports_zero_copy() const; + + /// \brief Read and return stream metadata + /// + /// If the stream implementation doesn't support metadata, empty metadata + /// is returned. Note that it is allowed to return a null pointer rather + /// than an allocated empty metadata. + virtual Result> ReadMetadata(); + + /// \brief Read stream metadata asynchronously + virtual Future> ReadMetadataAsync( + const IOContext& io_context); + Future> ReadMetadataAsync(); + + protected: + InputStream() = default; +}; + +class ARROW_EXPORT RandomAccessFile : public InputStream, public Seekable { + public: + /// Necessary because we hold a std::unique_ptr + ~RandomAccessFile() override; + + /// \brief Create an isolated InputStream that reads a segment of a + /// RandomAccessFile. Multiple such stream can be created and used + /// independently without interference + /// \param[in] file a file instance + /// \param[in] file_offset the starting position in the file + /// \param[in] nbytes the extent of bytes to read. The file should have + /// sufficient bytes available + static Result> GetStream( + std::shared_ptr file, int64_t file_offset, int64_t nbytes); + + /// \brief Return the total file size in bytes. 
+ /// + /// This method does not read or move the current file position, so is safe + /// to call concurrently with e.g. ReadAt(). + virtual Result GetSize() = 0; + + /// \brief Read data from given file position. + /// + /// At most `nbytes` bytes are read. The number of bytes read is returned + /// (it can be less than `nbytes` if EOF is reached). + /// + /// This method can be safely called from multiple threads concurrently. + /// It is unspecified whether this method updates the file position or not. + /// + /// The default RandomAccessFile-provided implementation uses Seek() and Read(), + /// but subclasses may override it with a more efficient implementation + /// that doesn't depend on implicit file positioning. + /// + /// \param[in] position Where to read bytes from + /// \param[in] nbytes The number of bytes to read + /// \param[out] out The buffer to read bytes into + /// \return The number of bytes read, or an error + virtual Result ReadAt(int64_t position, int64_t nbytes, void* out); + + /// \brief Read data from given file position. + /// + /// At most `nbytes` bytes are read, but it can be less if EOF is reached. + /// + /// \param[in] position Where to read bytes from + /// \param[in] nbytes The number of bytes to read + /// \return A buffer containing the bytes read, or an error + virtual Result> ReadAt(int64_t position, int64_t nbytes); + + /// EXPERIMENTAL: Read data asynchronously. + virtual Future> ReadAsync(const IOContext&, int64_t position, + int64_t nbytes); + + /// EXPERIMENTAL: Read data asynchronously, using the file's IOContext. + Future> ReadAsync(int64_t position, int64_t nbytes); + + /// EXPERIMENTAL: Explicit multi-read. + /// \brief Request multiple reads at once + /// + /// The underlying filesystem may optimize these reads by coalescing small reads into + /// large reads or by breaking up large reads into multiple parallel smaller reads. The + /// reads should be issued in parallel if it makes sense for the filesystem. + /// + /// One future will be returned for each input read range. Multiple returned futures + /// may correspond to a single read. Or, a single returned future may be a combined + /// result of several individual reads. + /// + /// \param[in] ranges The ranges to read + /// \return A future that will complete with the data from the requested range is + /// available + virtual std::vector>> ReadManyAsync( + const IOContext&, const std::vector& ranges); + + /// EXPERIMENTAL: Explicit multi-read, using the file's IOContext. + std::vector>> ReadManyAsync( + const std::vector& ranges); + + /// EXPERIMENTAL: Inform that the given ranges may be read soon. + /// + /// Some implementations might arrange to prefetch some of the data. + /// However, no guarantee is made and the default implementation does nothing. + /// For robust prefetching, use ReadAt() or ReadAsync(). 
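The positional-read API sketched below follows the declarations above: `ReadAt()` does not require exclusive access to the file position, and `GetStream()` carves out an independent `InputStream` over a byte range. The file argument is assumed to be already open; sizes are illustrative.

```cpp
#include <memory>

#include "arrow/buffer.h"
#include "arrow/io/interfaces.h"
#include "arrow/result.h"
#include "arrow/status.h"

// Read a header slice and an independent tail stream from an open file.
arrow::Status ReadSlices(std::shared_ptr<arrow::io::RandomAccessFile> file) {
  ARROW_ASSIGN_OR_RAISE(int64_t size, file->GetSize());

  // Positional read: safe to call from several threads at once.
  ARROW_ASSIGN_OR_RAISE(auto header, file->ReadAt(/*position=*/0, /*nbytes=*/64));

  // Isolated stream over the second half of the file.
  ARROW_ASSIGN_OR_RAISE(
      auto tail_stream,
      arrow::io::RandomAccessFile::GetStream(file, size / 2, size - size / 2));
  ARROW_ASSIGN_OR_RAISE(auto tail, tail_stream->Read(size - size / 2));

  (void)header;
  (void)tail;
  return arrow::Status::OK();
}
```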
+ virtual Status WillNeed(const std::vector& ranges); + + protected: + RandomAccessFile(); + + private: + struct ARROW_NO_EXPORT Impl; + std::unique_ptr interface_impl_; +}; + +class ARROW_EXPORT WritableFile : public OutputStream, public Seekable { + public: + virtual Status WriteAt(int64_t position, const void* data, int64_t nbytes) = 0; + + protected: + WritableFile() = default; +}; + +class ARROW_EXPORT ReadWriteFileInterface : public RandomAccessFile, public WritableFile { + protected: + ReadWriteFileInterface() { RandomAccessFile::set_mode(FileMode::READWRITE); } +}; + +/// \brief Return an iterator on an input stream +/// +/// The iterator yields a fixed-size block on each Next() call, except the +/// last block in the stream which may be smaller. +/// Once the end of stream is reached, Next() returns nullptr +/// (unlike InputStream::Read() which returns an empty buffer). +ARROW_EXPORT +Result>> MakeInputStreamIterator( + std::shared_ptr stream, int64_t block_size); + +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/memory.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..5b760a2b5a9cfe1feca6066edb9a594467bc06fb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/memory.h @@ -0,0 +1,213 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Public API for different memory sharing / IO mechanisms + +#pragma once + +#include +#include +#include +#include + +#include "arrow/io/concurrency.h" +#include "arrow/io/interfaces.h" +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Status; + +namespace io { + +/// \brief An output stream that writes to a resizable buffer +class ARROW_EXPORT BufferOutputStream : public OutputStream { + public: + explicit BufferOutputStream(const std::shared_ptr& buffer); + + /// \brief Create in-memory output stream with indicated capacity using a + /// memory pool + /// \param[in] initial_capacity the initial allocated internal capacity of + /// the OutputStream + /// \param[in,out] pool a MemoryPool to use for allocations + /// \return the created stream + static Result> Create( + int64_t initial_capacity = 4096, MemoryPool* pool = default_memory_pool()); + + ~BufferOutputStream() override; + + // Implement the OutputStream interface + + /// Close the stream, preserving the buffer (retrieve it with Finish()). 
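A hedged in-memory round trip combining the pieces above and below: write into a `BufferOutputStream`, wrap the finished buffer in a `BufferReader` (declared further down in memory.h), and walk it in fixed-size blocks with `MakeInputStreamIterator`. Per the iterator's documented contract, a null buffer signals end of stream.

```cpp
#include <iostream>
#include <memory>

#include "arrow/buffer.h"
#include "arrow/io/interfaces.h"
#include "arrow/io/memory.h"
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/util/iterator.h"

// Write 10 bytes, then iterate over them in blocks of 4.
arrow::Status InMemoryRoundTrip() {
  ARROW_ASSIGN_OR_RAISE(auto sink,
                        arrow::io::BufferOutputStream::Create(/*initial_capacity=*/64));
  ARROW_RETURN_NOT_OK(sink->Write("0123456789", 10));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> buffer, sink->Finish());

  auto reader = std::make_shared<arrow::io::BufferReader>(buffer);
  ARROW_ASSIGN_OR_RAISE(auto it,
                        arrow::io::MakeInputStreamIterator(reader, /*block_size=*/4));
  while (true) {
    ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> block, it.Next());
    if (block == nullptr) break;                   // end of stream
    std::cout << block->ToString() << std::endl;   // "0123", "4567", "89"
  }
  return arrow::Status::OK();
}
```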
+ Status Close() override; + bool closed() const override; + Result Tell() const override; + Status Write(const void* data, int64_t nbytes) override; + + /// \cond FALSE + using OutputStream::Write; + /// \endcond + + /// Close the stream and return the buffer + Result> Finish(); + + /// \brief Initialize state of OutputStream with newly allocated memory and + /// set position to 0 + /// \param[in] initial_capacity the starting allocated capacity + /// \param[in,out] pool the memory pool to use for allocations + /// \return Status + Status Reset(int64_t initial_capacity = 1024, MemoryPool* pool = default_memory_pool()); + + int64_t capacity() const { return capacity_; } + + private: + BufferOutputStream(); + + // Ensures there is sufficient space available to write nbytes + Status Reserve(int64_t nbytes); + + std::shared_ptr buffer_; + bool is_open_; + int64_t capacity_; + int64_t position_; + uint8_t* mutable_data_; +}; + +/// \brief A helper class to track the size of allocations +/// +/// Writes to this stream do not copy or retain any data, they just bump +/// a size counter that can be later used to know exactly which data size +/// needs to be allocated for actual writing. +class ARROW_EXPORT MockOutputStream : public OutputStream { + public: + MockOutputStream() : extent_bytes_written_(0), is_open_(true) {} + + // Implement the OutputStream interface + Status Close() override; + bool closed() const override; + Result Tell() const override; + Status Write(const void* data, int64_t nbytes) override; + /// \cond FALSE + using Writable::Write; + /// \endcond + + int64_t GetExtentBytesWritten() const { return extent_bytes_written_; } + + private: + int64_t extent_bytes_written_; + bool is_open_; +}; + +/// \brief An output stream that writes into a fixed-size mutable buffer +class ARROW_EXPORT FixedSizeBufferWriter : public WritableFile { + public: + /// Input buffer must be mutable, will abort if not + explicit FixedSizeBufferWriter(const std::shared_ptr& buffer); + ~FixedSizeBufferWriter() override; + + Status Close() override; + bool closed() const override; + Status Seek(int64_t position) override; + Result Tell() const override; + Status Write(const void* data, int64_t nbytes) override; + /// \cond FALSE + using Writable::Write; + /// \endcond + + Status WriteAt(int64_t position, const void* data, int64_t nbytes) override; + + void set_memcopy_threads(int num_threads); + void set_memcopy_blocksize(int64_t blocksize); + void set_memcopy_threshold(int64_t threshold); + + protected: + class FixedSizeBufferWriterImpl; + std::unique_ptr impl_; +}; + +/// \class BufferReader +/// \brief Random access zero-copy reads on an arrow::Buffer +class ARROW_EXPORT BufferReader + : public internal::RandomAccessFileConcurrencyWrapper { + public: + /// \brief Instantiate from std::shared_ptr. + /// + /// This is a zero-copy constructor. + explicit BufferReader(std::shared_ptr buffer); + ARROW_DEPRECATED( + "Deprecated in 14.0.0. Use FromString or BufferReader(std::shared_ptr " + "buffer) instead.") + explicit BufferReader(const Buffer& buffer); + ARROW_DEPRECATED( + "Deprecated in 14.0.0. Use FromString or BufferReader(std::shared_ptr " + "buffer) instead.") + BufferReader(const uint8_t* data, int64_t size); + + /// \brief Instantiate from std::string_view. Does not own data + /// \deprecated Deprecated in 14.0.0. Use FromString or + /// BufferReader(std::shared_ptr buffer) instead. + ARROW_DEPRECATED( + "Deprecated in 14.0.0. 
Use FromString or BufferReader(std::shared_ptr " + "buffer) instead.") + explicit BufferReader(std::string_view data); + + /// \brief Instantiate from std::string. Owns data. + static std::unique_ptr FromString(std::string data); + + bool closed() const override; + + bool supports_zero_copy() const override; + + std::shared_ptr buffer() const { return buffer_; } + + // Synchronous ReadAsync override + Future> ReadAsync(const IOContext&, int64_t position, + int64_t nbytes) override; + Status WillNeed(const std::vector& ranges) override; + + protected: + friend RandomAccessFileConcurrencyWrapper; + + Status DoClose(); + + Result DoRead(int64_t nbytes, void* buffer); + Result> DoRead(int64_t nbytes); + Result DoReadAt(int64_t position, int64_t nbytes, void* out); + Result> DoReadAt(int64_t position, int64_t nbytes); + Result DoPeek(int64_t nbytes) override; + + Result DoTell() const; + Status DoSeek(int64_t position); + Result DoGetSize(); + + Status CheckClosed() const { + if (!is_open_) { + return Status::Invalid("Operation forbidden on closed BufferReader"); + } + return Status::OK(); + } + + std::shared_ptr buffer_; + const uint8_t* data_; + int64_t size_; + int64_t position_; + bool is_open_; +}; + +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/mman.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/mman.h new file mode 100644 index 0000000000000000000000000000000000000000..9b06ac8e7b5cade78d78c5dab763d532fa65091b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/mman.h @@ -0,0 +1,169 @@ +// Copyright https://code.google.com/p/mman-win32/ +// +// Licensed under the MIT License; +// You may obtain a copy of the License at +// +// https://opensource.org/licenses/MIT + +#pragma once + +#include "arrow/util/windows_compatibility.h" + +#include +#include +#include + +#include + +#define PROT_NONE 0 +#define PROT_READ 1 +#define PROT_WRITE 2 +#define PROT_EXEC 4 + +#define MAP_FILE 0 +#define MAP_SHARED 1 +#define MAP_PRIVATE 2 +#define MAP_TYPE 0xf +#define MAP_FIXED 0x10 +#define MAP_ANONYMOUS 0x20 +#define MAP_ANON MAP_ANONYMOUS + +#define MAP_FAILED ((void*)-1) + +/* Flags for msync. */ +#define MS_ASYNC 1 +#define MS_SYNC 2 +#define MS_INVALIDATE 4 + +#ifndef FILE_MAP_EXECUTE +#define FILE_MAP_EXECUTE 0x0020 +#endif + +static inline int __map_mman_error(const DWORD err, const int deferr) { + if (err == 0) return 0; + // TODO: implement + return err; +} + +static inline DWORD __map_mmap_prot_page(const int prot) { + DWORD protect = 0; + + if (prot == PROT_NONE) return protect; + + if ((prot & PROT_EXEC) != 0) { + protect = ((prot & PROT_WRITE) != 0) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ; + } else { + protect = ((prot & PROT_WRITE) != 0) ? 
PAGE_READWRITE : PAGE_READONLY; + } + + return protect; +} + +static inline DWORD __map_mmap_prot_file(const int prot) { + DWORD desiredAccess = 0; + + if (prot == PROT_NONE) return desiredAccess; + + if ((prot & PROT_READ) != 0) desiredAccess |= FILE_MAP_READ; + if ((prot & PROT_WRITE) != 0) desiredAccess |= FILE_MAP_WRITE; + if ((prot & PROT_EXEC) != 0) desiredAccess |= FILE_MAP_EXECUTE; + + return desiredAccess; +} + +static inline void* mmap(void* addr, size_t len, int prot, int flags, int fildes, + off_t off) { + HANDLE fm, h; + + void* map = MAP_FAILED; + const uint64_t off64 = static_cast(off); + const uint64_t maxSize = off64 + len; + + const DWORD dwFileOffsetLow = static_cast(off64 & 0xFFFFFFFFUL); + const DWORD dwFileOffsetHigh = static_cast((off64 >> 32) & 0xFFFFFFFFUL); + const DWORD dwMaxSizeLow = static_cast(maxSize & 0xFFFFFFFFUL); + const DWORD dwMaxSizeHigh = static_cast((maxSize >> 32) & 0xFFFFFFFFUL); + + const DWORD protect = __map_mmap_prot_page(prot); + const DWORD desiredAccess = __map_mmap_prot_file(prot); + + errno = 0; + + if (len == 0 + /* Unsupported flag combinations */ + || (flags & MAP_FIXED) != 0 + /* Unsupported protection combinations */ + || prot == PROT_EXEC) { + errno = EINVAL; + return MAP_FAILED; + } + + h = ((flags & MAP_ANONYMOUS) == 0) ? (HANDLE)_get_osfhandle(fildes) + : INVALID_HANDLE_VALUE; + + if ((flags & MAP_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE) { + errno = EBADF; + return MAP_FAILED; + } + + fm = CreateFileMapping(h, NULL, protect, dwMaxSizeHigh, dwMaxSizeLow, NULL); + + if (fm == NULL) { + errno = __map_mman_error(GetLastError(), EPERM); + return MAP_FAILED; + } + + map = MapViewOfFile(fm, desiredAccess, dwFileOffsetHigh, dwFileOffsetLow, len); + + CloseHandle(fm); + + if (map == NULL) { + errno = __map_mman_error(GetLastError(), EPERM); + return MAP_FAILED; + } + + return map; +} + +static inline int munmap(void* addr, size_t len) { + if (UnmapViewOfFile(addr)) return 0; + + errno = __map_mman_error(GetLastError(), EPERM); + + return -1; +} + +static inline int mprotect(void* addr, size_t len, int prot) { + DWORD newProtect = __map_mmap_prot_page(prot); + DWORD oldProtect = 0; + + if (VirtualProtect(addr, len, newProtect, &oldProtect)) return 0; + + errno = __map_mman_error(GetLastError(), EPERM); + + return -1; +} + +static inline int msync(void* addr, size_t len, int flags) { + if (FlushViewOfFile(addr, len)) return 0; + + errno = __map_mman_error(GetLastError(), EPERM); + + return -1; +} + +static inline int mlock(const void* addr, size_t len) { + if (VirtualLock((LPVOID)addr, len)) return 0; + + errno = __map_mman_error(GetLastError(), EPERM); + + return -1; +} + +static inline int munlock(const void* addr, size_t len) { + if (VirtualUnlock((LPVOID)addr, len)) return 0; + + errno = __map_mman_error(GetLastError(), EPERM); + + return -1; +} diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/slow.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/slow.h new file mode 100644 index 0000000000000000000000000000000000000000..fdcc56dfa6af622fcfd9fd10984c1d0a87414149 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/slow.h @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Slow stream implementations, mainly for testing and benchmarking + +#pragma once + +#include +#include +#include + +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { + +class Buffer; +class Status; + +namespace io { + +class ARROW_EXPORT LatencyGenerator { + public: + virtual ~LatencyGenerator(); + + void Sleep(); + + virtual double NextLatency() = 0; + + static std::shared_ptr Make(double average_latency); + static std::shared_ptr Make(double average_latency, int32_t seed); +}; + +// XXX use ConcurrencyWrapper? It could increase chances of finding a race. + +template +class SlowInputStreamBase : public StreamType { + public: + SlowInputStreamBase(std::shared_ptr stream, + std::shared_ptr latencies) + : stream_(std::move(stream)), latencies_(std::move(latencies)) {} + + SlowInputStreamBase(std::shared_ptr stream, double average_latency) + : stream_(std::move(stream)), latencies_(LatencyGenerator::Make(average_latency)) {} + + SlowInputStreamBase(std::shared_ptr stream, double average_latency, + int32_t seed) + : stream_(std::move(stream)), + latencies_(LatencyGenerator::Make(average_latency, seed)) {} + + protected: + std::shared_ptr stream_; + std::shared_ptr latencies_; +}; + +/// \brief An InputStream wrapper that makes reads slower. +/// +/// Read() calls are made slower by an average latency (in seconds). +/// Actual latencies form a normal distribution closely centered +/// on the average latency. +/// Other calls are forwarded directly. +class ARROW_EXPORT SlowInputStream : public SlowInputStreamBase { + public: + ~SlowInputStream() override; + + using SlowInputStreamBase::SlowInputStreamBase; + + Status Close() override; + Status Abort() override; + bool closed() const override; + + Result Read(int64_t nbytes, void* out) override; + Result> Read(int64_t nbytes) override; + Result Peek(int64_t nbytes) override; + + Result Tell() const override; +}; + +/// \brief A RandomAccessFile wrapper that makes reads slower. +/// +/// Similar to SlowInputStream, but allows random access and seeking. 
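A small, hedged sketch of the latency wrapper described above: wrap an existing `InputStream` in a `SlowInputStream` so each read pays an artificial average latency, which can be useful when benchmarking readahead or coalescing logic. The 5 ms figure is arbitrary.

```cpp
#include <memory>
#include <utility>

#include "arrow/io/interfaces.h"
#include "arrow/io/slow.h"

// Wrap a stream so that reads incur ~5 ms of simulated latency on average.
std::shared_ptr<arrow::io::InputStream> MakeSlow(
    std::shared_ptr<arrow::io::InputStream> stream) {
  return std::make_shared<arrow::io::SlowInputStream>(std::move(stream), 0.005);
}
```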
+class ARROW_EXPORT SlowRandomAccessFile : public SlowInputStreamBase { + public: + ~SlowRandomAccessFile() override; + + using SlowInputStreamBase::SlowInputStreamBase; + + Status Close() override; + Status Abort() override; + bool closed() const override; + + Result Read(int64_t nbytes, void* out) override; + Result> Read(int64_t nbytes) override; + Result ReadAt(int64_t position, int64_t nbytes, void* out) override; + Result> ReadAt(int64_t position, int64_t nbytes) override; + Result Peek(int64_t nbytes) override; + + Result GetSize() override; + Status Seek(int64_t position) override; + Result Tell() const override; +}; + +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/stdio.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/stdio.h new file mode 100644 index 0000000000000000000000000000000000000000..9484ac7712427733862ecbc7d9ee932c5dfc0907 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/stdio.h @@ -0,0 +1,82 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { + +// Output stream that just writes to stdout. +class ARROW_EXPORT StdoutStream : public OutputStream { + public: + StdoutStream(); + ~StdoutStream() override {} + + Status Close() override; + bool closed() const override; + + Result Tell() const override; + + Status Write(const void* data, int64_t nbytes) override; + + private: + int64_t pos_; +}; + +// Output stream that just writes to stderr. +class ARROW_EXPORT StderrStream : public OutputStream { + public: + StderrStream(); + ~StderrStream() override {} + + Status Close() override; + bool closed() const override; + + Result Tell() const override; + + Status Write(const void* data, int64_t nbytes) override; + + private: + int64_t pos_; +}; + +// Input stream that just reads from stdin. 
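As a hedged sketch, `StdoutStream` above lets any Arrow API that expects an `OutputStream` write straight to standard output (for example, piping an IPC stream to another process). The greeting text below is illustrative only.

```cpp
#include <memory>

#include "arrow/io/stdio.h"
#include "arrow/status.h"

// Write a short message to stdout through the OutputStream interface.
arrow::Status WriteGreeting() {
  auto out = std::make_shared<arrow::io::StdoutStream>();
  ARROW_RETURN_NOT_OK(out->Write("hello from arrow::io\n", 21));
  return out->Close();
}
```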
+class ARROW_EXPORT StdinStream : public InputStream { + public: + StdinStream(); + ~StdinStream() override {} + + Status Close() override; + bool closed() const override; + + Result Tell() const override; + + Result Read(int64_t nbytes, void* out) override; + + Result> Read(int64_t nbytes) override; + + private: + int64_t pos_; +}; + +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/test_common.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/test_common.h new file mode 100644 index 0000000000000000000000000000000000000000..9abaef1a665366b841d78788f7736257716dfe31 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/test_common.h @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/io/interfaces.h" +#include "arrow/testing/visibility.h" +#include "arrow/type_fwd.h" + +namespace arrow { +namespace io { + +class MemoryMappedFile; + +ARROW_TESTING_EXPORT +void AssertFileContents(const std::string& path, const std::string& contents); + +ARROW_TESTING_EXPORT bool FileExists(const std::string& path); + +ARROW_TESTING_EXPORT Status PurgeLocalFileFromOsCache(const std::string& path); + +ARROW_TESTING_EXPORT +Status ZeroMemoryMap(MemoryMappedFile* file); + +class ARROW_TESTING_EXPORT MemoryMapFixture { + public: + void TearDown(); + + void CreateFile(const std::string& path, int64_t size); + + Result> InitMemoryMap(int64_t size, + const std::string& path); + + void AppendFile(const std::string& path); + + private: + std::vector tmp_files_; +}; + +class ARROW_TESTING_EXPORT TrackedRandomAccessFile : public io::RandomAccessFile { + public: + virtual int64_t num_reads() const = 0; + virtual int64_t bytes_read() const = 0; + virtual const std::vector& get_read_ranges() const = 0; + static std::unique_ptr Make(io::RandomAccessFile* target); +}; + +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/transform.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/transform.h new file mode 100644 index 0000000000000000000000000000000000000000..7afe29b10194efa39fec8e3b2008e16e5a3ee8e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/transform.h @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Transform stream implementations + +#pragma once + +#include +#include +#include +#include + +#include "arrow/io/interfaces.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { + +class ARROW_EXPORT TransformInputStream : public InputStream { + public: + using TransformFunc = + std::function>(const std::shared_ptr&)>; + + TransformInputStream(std::shared_ptr wrapped, TransformFunc transform); + ~TransformInputStream() override; + + Status Close() override; + Status Abort() override; + bool closed() const override; + + Result Read(int64_t nbytes, void* out) override; + Result> Read(int64_t nbytes) override; + + Result> ReadMetadata() override; + Future> ReadMetadataAsync( + const IOContext& io_context) override; + + Result Tell() const override; + + protected: + struct Impl; + std::unique_ptr impl_; +}; + +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/type_fwd.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..a1b9e626bba289a030d87d0a14bfa2f1fb2dc29d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/io/type_fwd.h @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/type_fwd.h" +#include "arrow/util/visibility.h" + +namespace arrow { +namespace io { + +struct FileMode { + enum type { READ, WRITE, READWRITE }; +}; + +struct IOContext; +struct CacheOptions; + +/// EXPERIMENTAL: convenience global singleton for default IOContext settings +ARROW_EXPORT +const IOContext& default_io_context(); + +/// \brief Get the capacity of the global I/O thread pool +/// +/// Return the number of worker threads in the thread pool to which +/// Arrow dispatches various I/O-bound tasks. This is an ideal number, +/// not necessarily the exact number of threads at a given point in time. +/// +/// You can change this number using SetIOThreadPoolCapacity(). +ARROW_EXPORT int GetIOThreadPoolCapacity(); + +/// \brief Set the capacity of the global I/O thread pool +/// +/// Set the number of worker threads in the thread pool to which +/// Arrow dispatches various I/O-bound tasks. +/// +/// The current number is returned by GetIOThreadPoolCapacity(). 
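A hedged sketch of the `TransformInputStream` declared above: every block read from the wrapped stream is passed through a user-supplied function before being returned to the caller. The upper-casing transform here is purely illustrative (it is byte-wise, so it is safe across chunk boundaries).

```cpp
#include <cctype>
#include <memory>
#include <string>
#include <utility>

#include "arrow/buffer.h"
#include "arrow/io/transform.h"
#include "arrow/result.h"

// Wrap a stream so that all bytes read through it are upper-cased.
std::shared_ptr<arrow::io::InputStream> MakeUpperCaseStream(
    std::shared_ptr<arrow::io::InputStream> wrapped) {
  auto transform = [](const std::shared_ptr<arrow::Buffer>& buf)
      -> arrow::Result<std::shared_ptr<arrow::Buffer>> {
    std::string data = buf->ToString();
    for (char& c : data) {
      c = static_cast<char>(std::toupper(static_cast<unsigned char>(c)));
    }
    return arrow::Buffer::FromString(std::move(data));
  };
  return std::make_shared<arrow::io::TransformInputStream>(std::move(wrapped),
                                                           std::move(transform));
}
```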
+ARROW_EXPORT Status SetIOThreadPoolCapacity(int threads); + +class FileInterface; +class Seekable; +class Writable; +class Readable; +class OutputStream; +class FileOutputStream; +class InputStream; +class ReadableFile; +class RandomAccessFile; +class MemoryMappedFile; +class WritableFile; +class ReadWriteFileInterface; + +class LatencyGenerator; + +class BufferOutputStream; +class BufferReader; +class CompressedInputStream; +class CompressedOutputStream; +class BufferedInputStream; +class BufferedOutputStream; + +} // namespace io +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/api.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/api.h new file mode 100644 index 0000000000000000000000000000000000000000..a0b13d6d13013cfd0f5f0af9c6a6dcea6ceeaafd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/api.h @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/arrow_to_pandas.h" +#include "arrow/python/common.h" +#include "arrow/python/datetime.h" +#include "arrow/python/deserialize.h" +#include "arrow/python/helpers.h" +#include "arrow/python/inference.h" +#include "arrow/python/io.h" +#include "arrow/python/numpy_convert.h" +#include "arrow/python/numpy_to_arrow.h" +#include "arrow/python/python_to_arrow.h" +#include "arrow/python/serialize.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h new file mode 100644 index 0000000000000000000000000000000000000000..82e0a600513d4abd9bb956053a2a7e94a1033f39 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h @@ -0,0 +1,146 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
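A brief, hedged sketch using the global I/O thread-pool knobs declared in io/type_fwd.h above: report the current capacity, then raise it before issuing a burst of asynchronous reads. The capacity of 16 is an arbitrary example value.

```cpp
#include <iostream>

#include "arrow/io/type_fwd.h"
#include "arrow/status.h"

// Inspect and adjust the global I/O thread pool used for async I/O tasks.
arrow::Status ConfigureIoPool() {
  std::cout << "current capacity: " << arrow::io::GetIOThreadPoolCapacity()
            << std::endl;
  return arrow::io::SetIOThreadPoolCapacity(16);
}
```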
+ +// Functions for converting between pandas's NumPy-based data representation +// and Arrow data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include +#include +#include + +#include "arrow/memory_pool.h" +#include "arrow/python/visibility.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class Column; +class DataType; +class MemoryPool; +class Status; +class Table; + +namespace py { + +enum class MapConversionType { + DEFAULT, // convert arrow maps to assoc lists (list of kev-value tuples) in Pandas + LOSSY, // report warnings when lossiness is encountered due to duplicate keys + STRICT_, // raise a Python exception when lossiness is encountered due to duplicate + // keys +}; + +struct PandasOptions { + /// arrow::MemoryPool to use for memory allocations + MemoryPool* pool = default_memory_pool(); + + /// If true, we will convert all string columns to categoricals + bool strings_to_categorical = false; + bool zero_copy_only = false; + bool integer_object_nulls = false; + bool date_as_object = false; + bool timestamp_as_object = false; + bool use_threads = false; + + /// Coerce all date and timestamp to datetime64[ns] + bool coerce_temporal_nanoseconds = false; + + /// Used to maintain backwards compatibility for + /// timezone bugs (see ARROW-9528). Should be removed + /// after Arrow 2.0 release. + bool ignore_timezone = false; + + /// \brief If true, do not create duplicate PyObject versions of equal + /// objects. This only applies to immutable objects like strings or datetime + /// objects + bool deduplicate_objects = false; + + /// \brief For certain data types, a cast is needed in order to store the + /// data in a pandas DataFrame or Series (e.g. timestamps are always stored + /// as nanoseconds in pandas). This option controls whether it is a safe + /// cast or not. + bool safe_cast = true; + + /// \brief If true, create one block per column rather than consolidated + /// blocks (1 per data type). Do zero-copy wrapping when there are no + /// nulls. pandas currently will consolidate the blocks on its own, causing + /// increased memory use, so keep this in mind if you are working on a + /// memory-constrained situation. + bool split_blocks = false; + + /// \brief If true, allow non-writable zero-copy views to be created for + /// single column blocks. This option is also used to provide zero copy for + /// Series data + bool allow_zero_copy_blocks = false; + + /// \brief If true, attempt to deallocate buffers in passed Arrow object if + /// it is the only remaining shared_ptr copy of it. See ARROW-3789 for + /// original context for this feature. Only currently implemented for Table + /// conversions + bool self_destruct = false; + + /// \brief The default behavior (DEFAULT), is to convert Arrow Map arrays to + /// Python association lists (list-of-tuples) in the same order as the Arrow + /// Map, as in [(key1, value1), (key2, value2), ...] + /// If LOSSY or STRICT, convert Arrow Map arrays to native Python dicts. + /// This can change the ordering of (key, value) pairs, and will deduplicate + /// multiple keys, resulting in a possible loss of data. + /// If 'lossy', this key deduplication results in a warning printed + /// when detected. If 'strict', this instead results in an exception + /// being raised when detected. + MapConversionType maps_as_pydicts = MapConversionType::DEFAULT; + + // Used internally for nested arrays. 
+ bool decode_dictionaries = false; + + // Columns that should be casted to categorical + std::unordered_set categorical_columns; + + // Columns that should be passed through to be converted to + // ExtensionArray/Block + std::unordered_set extension_columns; + + // Used internally to decipher between to_numpy() and to_pandas() when + // the expected output differs + bool to_numpy = false; +}; + +ARROW_PYTHON_EXPORT +Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr arr, + PyObject* py_ref, PyObject** out); + +ARROW_PYTHON_EXPORT +Status ConvertChunkedArrayToPandas(const PandasOptions& options, + std::shared_ptr col, PyObject* py_ref, + PyObject** out); + +// Convert a whole table as efficiently as possible to a pandas.DataFrame. +// +// The returned Python object is a list of tuples consisting of the exact 2D +// BlockManager structure of the pandas.DataFrame used as of pandas 0.19.x. +// +// tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2]) +ARROW_PYTHON_EXPORT +Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr
table, + PyObject** out); + +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/async.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/async.h new file mode 100644 index 0000000000000000000000000000000000000000..1568d21938e6e79e724d957120e68a7576ba9c2a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/async.h @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/python/common.h" +#include "arrow/status.h" +#include "arrow/util/future.h" + +namespace arrow::py { + +/// \brief Bind a Python callback to an arrow::Future. +/// +/// If the Future finishes successfully, py_wrapper is called with its +/// result value and should return a PyObject*. If py_wrapper is successful, +/// py_cb is called with its return value. +/// +/// If either the Future or py_wrapper fails, py_cb is called with the +/// associated Python exception. +/// +/// \param future The future to bind to. +/// \param py_cb The Python callback function. Will be passed the result of +/// py_wrapper, or a Python exception if the future failed or one was +/// raised by py_wrapper. +/// \param py_wrapper A function (likely defined in Cython) to convert the C++ +/// result of the future to a Python object. +template +void BindFuture(Future future, PyObject* py_cb, PyWrapper py_wrapper) { + Py_INCREF(py_cb); + OwnedRefNoGIL cb_ref(py_cb); + + auto future_cb = [cb_ref = std::move(cb_ref), + py_wrapper = std::move(py_wrapper)](Result result) { + SafeCallIntoPythonVoid([&]() { + OwnedRef py_value_or_exc{WrapResult(std::move(result), std::move(py_wrapper))}; + Py_XDECREF( + PyObject_CallFunctionObjArgs(cb_ref.obj(), py_value_or_exc.obj(), NULLPTR)); + ARROW_WARN_NOT_OK(CheckPyError(), "Internal error in async call"); + }); + }; + future.AddCallback(std::move(future_cb)); +} + +} // namespace arrow::py diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h new file mode 100644 index 0000000000000000000000000000000000000000..8060dd33722a08eb0935687ea5cb306dbd38a9f0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/benchmark.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/platform.h" + +#include "arrow/python/visibility.h" + +namespace arrow { +namespace py { +namespace benchmark { + +// Micro-benchmark routines for use from ASV + +// Run PandasObjectIsNull() once over every object in *list* +ARROW_PYTHON_EXPORT +void Benchmark_PandasObjectIsNull(PyObject* list); + +} // namespace benchmark +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/csv.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/csv.h new file mode 100644 index 0000000000000000000000000000000000000000..34302e93667394d616692a6a4603e6d0be67d211 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/csv.h @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/csv/options.h" +#include "arrow/python/common.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace py { +namespace csv { + +using PyInvalidRowCallback = std::function<::arrow::csv::InvalidRowResult( + PyObject*, const ::arrow::csv::InvalidRow&)>; + +ARROW_PYTHON_EXPORT +::arrow::csv::InvalidRowHandler MakeInvalidRowHandler(PyInvalidRowCallback, + PyObject* handler); + +} // namespace csv +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/decimal.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/decimal.h new file mode 100644 index 0000000000000000000000000000000000000000..1187037aed29e2cc5910e156c260fc9d9d81bff5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/decimal.h @@ -0,0 +1,128 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/python/visibility.h" +#include "arrow/type.h" + +namespace arrow { + +class Decimal128; +class Decimal256; + +namespace py { + +class OwnedRef; + +// +// Python Decimal support +// + +namespace internal { + +// \brief Import the Python Decimal type +ARROW_PYTHON_EXPORT +Status ImportDecimalType(OwnedRef* decimal_type); + +// \brief Convert a Python Decimal object to a C++ string +// \param[in] python_decimal A Python decimal.Decimal instance +// \param[out] The string representation of the Python Decimal instance +// \return The status of the operation +ARROW_PYTHON_EXPORT +Status PythonDecimalToString(PyObject* python_decimal, std::string* out); + +// \brief Convert a C++ std::string to a Python Decimal instance +// \param[in] decimal_constructor The decimal type object +// \param[in] decimal_string A decimal string +// \return An instance of decimal.Decimal +ARROW_PYTHON_EXPORT +PyObject* DecimalFromString(PyObject* decimal_constructor, + const std::string& decimal_string); + +// \brief Convert a Python decimal to an Arrow Decimal128 object +// \param[in] python_decimal A Python decimal.Decimal instance +// \param[in] arrow_type An instance of arrow::DecimalType +// \param[out] out A pointer to a Decimal128 +// \return The status of the operation +ARROW_PYTHON_EXPORT +Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type, + Decimal128* out); + +// \brief Convert a Python object to an Arrow Decimal128 object +// \param[in] python_decimal A Python int or decimal.Decimal instance +// \param[in] arrow_type An instance of arrow::DecimalType +// \param[out] out A pointer to a Decimal128 +// \return The status of the operation +ARROW_PYTHON_EXPORT +Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal128* out); + +// \brief Convert a Python decimal to an Arrow Decimal256 object +// \param[in] python_decimal A Python decimal.Decimal instance +// \param[in] arrow_type An instance of arrow::DecimalType +// \param[out] out A pointer to a Decimal256 +// \return The status of the operation +ARROW_PYTHON_EXPORT +Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type, + Decimal256* out); + +// \brief Convert a Python object to an Arrow Decimal256 object +// \param[in] python_decimal A Python int or decimal.Decimal instance +// \param[in] arrow_type An instance of arrow::DecimalType +// \param[out] out A pointer to a Decimal256 +// \return The status of the operation +ARROW_PYTHON_EXPORT +Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal256* out); + +// \brief Check whether obj is an instance of Decimal +ARROW_PYTHON_EXPORT +bool PyDecimal_Check(PyObject* obj); + +// \brief Check whether obj is nan. 
This function will abort the program if the argument +// is not a Decimal instance +ARROW_PYTHON_EXPORT +bool PyDecimal_ISNAN(PyObject* obj); + +// \brief Helper class to track and update the precision and scale of a decimal +class ARROW_PYTHON_EXPORT DecimalMetadata { + public: + DecimalMetadata(); + DecimalMetadata(int32_t precision, int32_t scale); + + // \brief Adjust the precision and scale of a decimal type given a new precision and a + // new scale \param[in] suggested_precision A candidate precision \param[in] + // suggested_scale A candidate scale \return The status of the operation + Status Update(int32_t suggested_precision, int32_t suggested_scale); + + // \brief A convenient interface for updating the precision and scale based on a Python + // Decimal object \param object A Python Decimal object \return The status of the + // operation + Status Update(PyObject* object); + + int32_t precision() const { return precision_; } + int32_t scale() const { return scale_; } + + private: + int32_t precision_; + int32_t scale_; +}; + +} // namespace internal +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h new file mode 100644 index 0000000000000000000000000000000000000000..41b6a13a38875cf56abf8102d90526b66af3f9ab --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/deserialize.h @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/python/serialize.h" +#include "arrow/python/visibility.h" +#include "arrow/status.h" + +namespace arrow { + +class RecordBatch; +class Tensor; + +namespace io { + +class RandomAccessFile; + +} // namespace io + +namespace py { + +struct ARROW_PYTHON_EXPORT SparseTensorCounts { + int coo; + int csr; + int csc; + int csf; + int ndim_csf; + + int num_total_tensors() const { return coo + csr + csc + csf; } + int num_total_buffers() const { + return coo * 3 + csr * 4 + csc * 4 + 2 * ndim_csf + csf; + } +}; + +/// \brief Read serialized Python sequence from file interface using Arrow IPC +/// \param[in] src a RandomAccessFile +/// \param[out] out the reconstructed data +/// \return Status +ARROW_PYTHON_EXPORT +Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out); + +/// \brief Reconstruct SerializedPyObject from representation produced by +/// SerializedPyObject::GetComponents. 
+/// +/// \param[in] num_tensors number of tensors in the object +/// \param[in] num_sparse_tensors number of sparse tensors in the object +/// \param[in] num_ndarrays number of numpy Ndarrays in the object +/// \param[in] num_buffers number of buffers in the object +/// \param[in] data a list containing pyarrow.Buffer instances. It must be 1 + +/// num_tensors * 2 + num_coo_tensors * 3 + num_csr_tensors * 4 + num_csc_tensors * 4 + +/// num_csf_tensors * (2 * ndim_csf + 3) + num_buffers in length +/// \param[out] out the reconstructed object +/// \return Status +ARROW_PYTHON_EXPORT +Status GetSerializedFromComponents(int num_tensors, + const SparseTensorCounts& num_sparse_tensors, + int num_ndarrays, int num_buffers, PyObject* data, + SerializedPyObject* out); + +/// \brief Reconstruct Python object from Arrow-serialized representation +/// \param[in] context Serialization context which contains custom serialization +/// and deserialization callbacks. Can be any Python object with a +/// _serialize_callback method for serialization and a _deserialize_callback +/// method for deserialization. If context is None, no custom serialization +/// will be attempted. +/// \param[in] object Object to deserialize +/// \param[in] base a Python object holding the underlying data that any NumPy +/// arrays will reference, to avoid premature deallocation +/// \param[out] out The returned object +/// \return Status +/// This acquires the GIL +ARROW_PYTHON_EXPORT +Status DeserializeObject(PyObject* context, const SerializedPyObject& object, + PyObject* base, PyObject** out); + +/// \brief Reconstruct Ndarray from Arrow-serialized representation +/// \param[in] object Object to deserialize +/// \param[out] out The deserialized tensor +/// \return Status +ARROW_PYTHON_EXPORT +Status DeserializeNdarray(const SerializedPyObject& object, std::shared_ptr* out); + +ARROW_PYTHON_EXPORT +Status NdarrayFromBuffer(std::shared_ptr src, std::shared_ptr* out); + +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h new file mode 100644 index 0000000000000000000000000000000000000000..e6523824eb9634c18b87e4e3e5c827d8be43f8a8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
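A hedged sketch of the deserialization entry points declared above in arrow/python/deserialize.h, not part of the vendored header. The helper name LoadPyObject is invented, and passing Py_None for both the context and base arguments is an assumption made for brevity; the header only states that a None context disables custom callbacks.

#include "arrow/python/platform.h"  // brings in Python.h the Arrow-sanctioned way

#include <memory>

#include "arrow/io/interfaces.h"
#include "arrow/python/deserialize.h"
#include "arrow/status.h"

// Read an Arrow-IPC-serialized Python object back into a PyObject*.
arrow::Status LoadPyObject(const std::shared_ptr<arrow::io::RandomAccessFile>& file,
                           PyObject** out) {
  arrow::py::SerializedPyObject serialized;
  // Rebuild the stored components (record batches, tensors, buffers) from the file.
  ARROW_RETURN_NOT_OK(arrow::py::ReadSerializedObject(file.get(), &serialized));
  // Py_None here means: no custom (de)serialization callbacks and no base object
  // keeping NumPy-backed buffers alive; real callers may need a proper base.
  // DeserializeObject acquires the GIL internally.
  return arrow::py::DeserializeObject(/*context=*/Py_None, serialized,
                                      /*base=*/Py_None, out);
}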
+ +#pragma once + +#include +#include + +#include "arrow/extension_type.h" +#include "arrow/python/common.h" +#include "arrow/python/visibility.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace py { + +class ARROW_PYTHON_EXPORT PyExtensionType : public ExtensionType { + public: + // Implement extensionType API + std::string extension_name() const override { return extension_name_; } + + std::string ToString(bool show_metadata = false) const override; + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override; + + // For use from Cython + // Assumes that `typ` is borrowed + static Status FromClass(const std::shared_ptr storage_type, + const std::string extension_name, PyObject* typ, + std::shared_ptr* out); + + // Return new ref + PyObject* GetInstance() const; + Status SetInstance(PyObject*) const; + + protected: + PyExtensionType(std::shared_ptr storage_type, PyObject* typ, + PyObject* inst = NULLPTR); + PyExtensionType(std::shared_ptr storage_type, std::string extension_name, + PyObject* typ, PyObject* inst = NULLPTR); + + std::string extension_name_; + + // These fields are mutable because of two-step initialization. + mutable OwnedRefNoGIL type_class_; + // A weakref or null. Storing a strong reference to the Python extension type + // instance would create an unreclaimable reference cycle between Python and C++ + // (the Python instance has to keep a strong reference to the C++ ExtensionType + // in other direction). Instead, we store a weakref to the instance. + // If the weakref is dead, we reconstruct the instance from its serialized form. + mutable OwnedRefNoGIL type_instance_; + // Empty if type_instance_ is null + mutable std::string serialized_; +}; + +ARROW_PYTHON_EXPORT std::string PyExtensionName(); + +ARROW_PYTHON_EXPORT Status RegisterPyExtensionType(const std::shared_ptr&); + +ARROW_PYTHON_EXPORT Status UnregisterPyExtensionType(const std::string& type_name); + +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/filesystem.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/filesystem.h new file mode 100644 index 0000000000000000000000000000000000000000..194b226ac5c35d4b3518c2e9fa9443c2ba1007ae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/filesystem.h @@ -0,0 +1,130 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
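A hedged sketch of how the PyExtensionType helpers above are meant to fit together: FromClass wraps a Python class object as an Arrow ExtensionType, which RegisterPyExtensionType then makes known to Arrow. The int64 storage type, the name "example.ext", and the wrapper RegisterFromPython are placeholders, not anything prescribed by the header.

#include "arrow/python/platform.h"
#include "arrow/python/extension_type.h"
#include "arrow/status.h"
#include "arrow/type.h"

// Wrap a Python extension-type class and register it with Arrow.
arrow::Status RegisterFromPython(PyObject* py_type_class) {
  std::shared_ptr<arrow::ExtensionType> ext;
  // Storage type and extension name are made up for this sketch.
  ARROW_RETURN_NOT_OK(arrow::py::PyExtensionType::FromClass(
      arrow::int64(), /*extension_name=*/"example.ext", py_type_class, &ext));
  // The type can later be removed again with UnregisterPyExtensionType("example.ext").
  return arrow::py::RegisterPyExtensionType(ext);
}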
+ +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/python/common.h" +#include "arrow/python/visibility.h" +#include "arrow/util/macros.h" + +namespace arrow::py::fs { + +class ARROW_PYTHON_EXPORT PyFileSystemVtable { + public: + std::function get_type_name; + std::function equals; + + std::function + get_file_info; + std::function& paths, + std::vector* out)> + get_file_info_vector; + std::function* out)> + get_file_info_selector; + + std::function create_dir; + std::function delete_dir; + std::function delete_dir_contents; + std::function delete_root_dir_contents; + std::function delete_file; + std::function move; + std::function + copy_file; + + std::function* out)> + open_input_stream; + std::function* out)> + open_input_file; + std::function&, + std::shared_ptr* out)> + open_output_stream; + std::function&, + std::shared_ptr* out)> + open_append_stream; + + std::function + normalize_path; +}; + +class ARROW_PYTHON_EXPORT PyFileSystem : public arrow::fs::FileSystem { + public: + PyFileSystem(PyObject* handler, PyFileSystemVtable vtable); + ~PyFileSystem() override; + + static std::shared_ptr Make(PyObject* handler, PyFileSystemVtable vtable); + + std::string type_name() const override; + + bool Equals(const FileSystem& other) const override; + + /// \cond FALSE + using FileSystem::CreateDir; + using FileSystem::DeleteDirContents; + using FileSystem::GetFileInfo; + using FileSystem::OpenAppendStream; + using FileSystem::OpenOutputStream; + /// \endcond + + Result GetFileInfo(const std::string& path) override; + Result> GetFileInfo( + const std::vector& paths) override; + Result> GetFileInfo( + const arrow::fs::FileSelector& select) override; + + Status CreateDir(const std::string& path, bool recursive) override; + + Status DeleteDir(const std::string& path) override; + Status DeleteDirContents(const std::string& path, bool missing_dir_ok) override; + Status DeleteRootDirContents() override; + + Status DeleteFile(const std::string& path) override; + + Status Move(const std::string& src, const std::string& dest) override; + + Status CopyFile(const std::string& src, const std::string& dest) override; + + Result> OpenInputStream( + const std::string& path) override; + Result> OpenInputFile( + const std::string& path) override; + Result> OpenOutputStream( + const std::string& path, + const std::shared_ptr& metadata) override; + Result> OpenAppendStream( + const std::string& path, + const std::shared_ptr& metadata) override; + + Result NormalizePath(std::string path) override; + + PyObject* handler() const { return handler_.obj(); } + + private: + OwnedRefNoGIL handler_; + PyFileSystemVtable vtable_; +}; + +} // namespace arrow::py::fs diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/flight.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/flight.h new file mode 100644 index 0000000000000000000000000000000000000000..82d93711e55fb51a929767a7d50ecdffa0dc5e6d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/flight.h @@ -0,0 +1,350 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/flight/api.h" +#include "arrow/ipc/dictionary.h" +#include "arrow/python/common.h" + +#if defined(_WIN32) || defined(__CYGWIN__) // Windows +#if defined(_MSC_VER) +#pragma warning(disable : 4251) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#ifdef ARROW_PYTHON_STATIC +#define ARROW_PYFLIGHT_EXPORT +#elif defined(ARROW_PYFLIGHT_EXPORTING) +#define ARROW_PYFLIGHT_EXPORT __declspec(dllexport) +#else +#define ARROW_PYFLIGHT_EXPORT __declspec(dllimport) +#endif + +#else // Not Windows +#ifndef ARROW_PYFLIGHT_EXPORT +#define ARROW_PYFLIGHT_EXPORT __attribute__((visibility("default"))) +#endif +#endif // Non-Windows + +namespace arrow { + +namespace py { + +namespace flight { + +ARROW_PYFLIGHT_EXPORT +extern const char* kPyServerMiddlewareName; + +/// \brief A table of function pointers for calling from C++ into +/// Python. +class ARROW_PYFLIGHT_EXPORT PyFlightServerVtable { + public: + std::function*)> + list_flights; + std::function*)> + get_flight_info; + std::function*)> + get_schema; + std::function*)> + do_get; + std::function, + std::unique_ptr)> + do_put; + std::function, + std::unique_ptr)> + do_exchange; + std::function*)> + do_action; + std::function*)> + list_actions; +}; + +class ARROW_PYFLIGHT_EXPORT PyServerAuthHandlerVtable { + public: + std::function + authenticate; + std::function is_valid; +}; + +class ARROW_PYFLIGHT_EXPORT PyClientAuthHandlerVtable { + public: + std::function + authenticate; + std::function get_token; +}; + +/// \brief A helper to implement an auth mechanism in Python. +class ARROW_PYFLIGHT_EXPORT PyServerAuthHandler + : public arrow::flight::ServerAuthHandler { + public: + explicit PyServerAuthHandler(PyObject* handler, + const PyServerAuthHandlerVtable& vtable); + Status Authenticate(arrow::flight::ServerAuthSender* outgoing, + arrow::flight::ServerAuthReader* incoming) override; + Status IsValid(const std::string& token, std::string* peer_identity) override; + + private: + OwnedRefNoGIL handler_; + PyServerAuthHandlerVtable vtable_; +}; + +/// \brief A helper to implement an auth mechanism in Python. +class ARROW_PYFLIGHT_EXPORT PyClientAuthHandler + : public arrow::flight::ClientAuthHandler { + public: + explicit PyClientAuthHandler(PyObject* handler, + const PyClientAuthHandlerVtable& vtable); + Status Authenticate(arrow::flight::ClientAuthSender* outgoing, + arrow::flight::ClientAuthReader* incoming) override; + Status GetToken(std::string* token) override; + + private: + OwnedRefNoGIL handler_; + PyClientAuthHandlerVtable vtable_; +}; + +class ARROW_PYFLIGHT_EXPORT PyFlightServer : public arrow::flight::FlightServerBase { + public: + explicit PyFlightServer(PyObject* server, const PyFlightServerVtable& vtable); + + // Like Serve(), but set up signals and invoke Python signal handlers + // if necessary. This function may return with a Python exception set. 
+ Status ServeWithSignals(); + + Status ListFlights(const arrow::flight::ServerCallContext& context, + const arrow::flight::Criteria* criteria, + std::unique_ptr* listings) override; + Status GetFlightInfo(const arrow::flight::ServerCallContext& context, + const arrow::flight::FlightDescriptor& request, + std::unique_ptr* info) override; + Status GetSchema(const arrow::flight::ServerCallContext& context, + const arrow::flight::FlightDescriptor& request, + std::unique_ptr* result) override; + Status DoGet(const arrow::flight::ServerCallContext& context, + const arrow::flight::Ticket& request, + std::unique_ptr* stream) override; + Status DoPut(const arrow::flight::ServerCallContext& context, + std::unique_ptr reader, + std::unique_ptr writer) override; + Status DoExchange(const arrow::flight::ServerCallContext& context, + std::unique_ptr reader, + std::unique_ptr writer) override; + Status DoAction(const arrow::flight::ServerCallContext& context, + const arrow::flight::Action& action, + std::unique_ptr* result) override; + Status ListActions(const arrow::flight::ServerCallContext& context, + std::vector* actions) override; + + private: + OwnedRefNoGIL server_; + PyFlightServerVtable vtable_; +}; + +/// \brief A callback that obtains the next result from a Flight action. +typedef std::function*)> + PyFlightResultStreamCallback; + +/// \brief A ResultStream built around a Python callback. +class ARROW_PYFLIGHT_EXPORT PyFlightResultStream : public arrow::flight::ResultStream { + public: + /// \brief Construct a FlightResultStream from a Python object and callback. + /// Must only be called while holding the GIL. + explicit PyFlightResultStream(PyObject* generator, + PyFlightResultStreamCallback callback); + arrow::Result> Next() override; + + private: + OwnedRefNoGIL generator_; + PyFlightResultStreamCallback callback_; +}; + +/// \brief A wrapper around a FlightDataStream that keeps alive a +/// Python object backing it. +class ARROW_PYFLIGHT_EXPORT PyFlightDataStream : public arrow::flight::FlightDataStream { + public: + /// \brief Construct a FlightDataStream from a Python object and underlying stream. + /// Must only be called while holding the GIL. + explicit PyFlightDataStream(PyObject* data_source, + std::unique_ptr stream); + + std::shared_ptr schema() override; + arrow::Result GetSchemaPayload() override; + arrow::Result Next() override; + + private: + OwnedRefNoGIL data_source_; + std::unique_ptr stream_; +}; + +class ARROW_PYFLIGHT_EXPORT PyServerMiddlewareFactory + : public arrow::flight::ServerMiddlewareFactory { + public: + /// \brief A callback to create the middleware instance in Python + typedef std::function* middleware)> + StartCallCallback; + + /// \brief Must only be called while holding the GIL. + explicit PyServerMiddlewareFactory(PyObject* factory, StartCallCallback start_call); + + Status StartCall(const arrow::flight::CallInfo& info, + const arrow::flight::CallHeaders& incoming_headers, + std::shared_ptr* middleware) override; + + private: + OwnedRefNoGIL factory_; + StartCallCallback start_call_; +}; + +class ARROW_PYFLIGHT_EXPORT PyServerMiddleware : public arrow::flight::ServerMiddleware { + public: + typedef std::function + SendingHeadersCallback; + typedef std::function CallCompletedCallback; + + struct Vtable { + SendingHeadersCallback sending_headers; + CallCompletedCallback call_completed; + }; + + /// \brief Must only be called while holding the GIL. 
+ explicit PyServerMiddleware(PyObject* middleware, Vtable vtable); + + void SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) override; + void CallCompleted(const Status& status) override; + std::string name() const override; + /// \brief Get the underlying Python object. + PyObject* py_object() const; + + private: + OwnedRefNoGIL middleware_; + Vtable vtable_; +}; + +class ARROW_PYFLIGHT_EXPORT PyClientMiddlewareFactory + : public arrow::flight::ClientMiddlewareFactory { + public: + /// \brief A callback to create the middleware instance in Python + typedef std::function* middleware)> + StartCallCallback; + + /// \brief Must only be called while holding the GIL. + explicit PyClientMiddlewareFactory(PyObject* factory, StartCallCallback start_call); + + void StartCall(const arrow::flight::CallInfo& info, + std::unique_ptr* middleware) override; + + private: + OwnedRefNoGIL factory_; + StartCallCallback start_call_; +}; + +class ARROW_PYFLIGHT_EXPORT PyClientMiddleware : public arrow::flight::ClientMiddleware { + public: + typedef std::function + SendingHeadersCallback; + typedef std::function + ReceivedHeadersCallback; + typedef std::function CallCompletedCallback; + + struct Vtable { + SendingHeadersCallback sending_headers; + ReceivedHeadersCallback received_headers; + CallCompletedCallback call_completed; + }; + + /// \brief Must only be called while holding the GIL. + explicit PyClientMiddleware(PyObject* factory, Vtable vtable); + + void SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) override; + void ReceivedHeaders(const arrow::flight::CallHeaders& incoming_headers) override; + void CallCompleted(const Status& status) override; + + private: + OwnedRefNoGIL middleware_; + Vtable vtable_; +}; + +/// \brief A callback that obtains the next payload from a Flight result stream. +typedef std::function + PyGeneratorFlightDataStreamCallback; + +/// \brief A FlightDataStream built around a Python callback. +class ARROW_PYFLIGHT_EXPORT PyGeneratorFlightDataStream + : public arrow::flight::FlightDataStream { + public: + /// \brief Construct a FlightDataStream from a Python object and underlying stream. + /// Must only be called while holding the GIL. + explicit PyGeneratorFlightDataStream(PyObject* generator, + std::shared_ptr schema, + PyGeneratorFlightDataStreamCallback callback, + const ipc::IpcWriteOptions& options); + std::shared_ptr schema() override; + arrow::Result GetSchemaPayload() override; + arrow::Result Next() override; + + private: + OwnedRefNoGIL generator_; + std::shared_ptr schema_; + ipc::DictionaryFieldMapper mapper_; + ipc::IpcWriteOptions options_; + PyGeneratorFlightDataStreamCallback callback_; +}; + +ARROW_PYFLIGHT_EXPORT +Status CreateFlightInfo(const std::shared_ptr& schema, + const arrow::flight::FlightDescriptor& descriptor, + const std::vector& endpoints, + int64_t total_records, int64_t total_bytes, + std::unique_ptr* out); + +/// \brief Create a SchemaResult from schema. 
+ARROW_PYFLIGHT_EXPORT +Status CreateSchemaResult(const std::shared_ptr& schema, + std::unique_ptr* out); + +} // namespace flight +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/gdb.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/gdb.h new file mode 100644 index 0000000000000000000000000000000000000000..1ddcbb51f6e0b70c1b16dc9a9ce6caf79fb2369e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/gdb.h @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/visibility.h" + +namespace arrow { +namespace gdb { + +ARROW_PYTHON_EXPORT +void TestSession(); + +} // namespace gdb +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h new file mode 100644 index 0000000000000000000000000000000000000000..983384db118a16141e49a679388b83c75d1d77d6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/inference.h @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Functions for converting between CPython built-in data structures and Arrow +// data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include + +#include "arrow/python/visibility.h" +#include "arrow/type.h" +#include "arrow/util/macros.h" + +#include "common.h" + +namespace arrow { + +class Array; +class Status; + +namespace py { + +// These functions take a sequence input, not arbitrary iterables + +/// \brief Infer Arrow type from a Python sequence +/// \param[in] obj the sequence of values +/// \param[in] mask an optional mask where True values are null. 
May +/// be nullptr +/// \param[in] pandas_null_sentinels use pandas's null value markers +ARROW_PYTHON_EXPORT +Result> InferArrowType(PyObject* obj, PyObject* mask, + bool pandas_null_sentinels); + +/// Checks whether the passed Python object is a boolean scalar +ARROW_PYTHON_EXPORT +bool IsPyBool(PyObject* obj); + +/// Checks whether the passed Python object is an integer scalar +ARROW_PYTHON_EXPORT +bool IsPyInt(PyObject* obj); + +/// Checks whether the passed Python object is a float scalar +ARROW_PYTHON_EXPORT +bool IsPyFloat(PyObject* obj); + +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h new file mode 100644 index 0000000000000000000000000000000000000000..2e6c954862bd92af369baf04bf10a76e0c076fb5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/platform.h" +#include "arrow/python/visibility.h" + +extern "C" { +ARROW_PYTHON_EXPORT +int arrow_init_numpy(); +} diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/ipc.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/ipc.h new file mode 100644 index 0000000000000000000000000000000000000000..2c16d8c967ff0bffc52e7803d4d894adb72b1215 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/ipc.h @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
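A short sketch of the type-inference entry point declared above in arrow/python/inference.h, not part of the vendored header. GuessType is an invented wrapper; the early IsPyFloat() check is only there to exercise one of the scalar predicates.

#include "arrow/python/platform.h"
#include "arrow/python/inference.h"
#include "arrow/result.h"

// Infer an Arrow DataType from a Python sequence, with no null mask.
arrow::Result<std::shared_ptr<arrow::DataType>> GuessType(PyObject* sequence) {
  if (arrow::py::IsPyFloat(sequence)) {
    // InferArrowType expects a sequence, so reject scalar input in this toy helper.
    return arrow::Status::TypeError("expected a sequence, got a float scalar");
  }
  // A null mask is allowed; pandas null sentinels (NaN/NaT) are not treated as nulls here.
  return arrow::py::InferArrowType(sequence, /*mask=*/nullptr,
                                   /*pandas_null_sentinels=*/false);
}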
+ +#pragma once + +#include + +#include "arrow/python/common.h" +#include "arrow/python/visibility.h" +#include "arrow/record_batch.h" +#include "arrow/result.h" +#include "arrow/util/macros.h" + +namespace arrow { +namespace py { + +class ARROW_PYTHON_EXPORT PyRecordBatchReader : public RecordBatchReader { + public: + std::shared_ptr schema() const override; + + Status ReadNext(std::shared_ptr* batch) override; + + // For use from Cython + // Assumes that `iterable` is borrowed + static Result> Make(std::shared_ptr, + PyObject* iterable); + + protected: + PyRecordBatchReader(); + + Status Init(std::shared_ptr, PyObject* iterable); + + std::shared_ptr schema_; + OwnedRefNoGIL iterator_; +}; + +class ARROW_PYTHON_EXPORT CastingRecordBatchReader : public RecordBatchReader { + public: + std::shared_ptr schema() const override; + + Status ReadNext(std::shared_ptr* batch) override; + + static Result> Make( + std::shared_ptr parent, std::shared_ptr schema); + + Status Close() override; + + protected: + CastingRecordBatchReader(); + + Status Init(std::shared_ptr parent, std::shared_ptr schema); + + std::shared_ptr parent_; + std::shared_ptr schema_; +}; + +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/iterators.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/iterators.h new file mode 100644 index 0000000000000000000000000000000000000000..7b31962dac5b8065b5667dc4306dfa0a0d1389ce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/iterators.h @@ -0,0 +1,194 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/array/array_primitive.h" + +#include "arrow/python/common.h" +#include "arrow/python/numpy_internal.h" + +namespace arrow { +namespace py { +namespace internal { + +using arrow::internal::checked_cast; + +// Visit the Python sequence, calling the given callable on each element. If +// the callable returns a non-OK status, iteration stops and the status is +// returned. 
+// +// The call signature for Visitor must be +// +// Visit(PyObject* obj, int64_t index, bool* keep_going) +// +// If keep_going is set to false, the iteration terminates +template +inline Status VisitSequenceGeneric(PyObject* obj, int64_t offset, VisitorFunc&& func) { + // VisitorFunc may set to false to terminate iteration + bool keep_going = true; + + if (PyArray_Check(obj)) { + PyArrayObject* arr_obj = reinterpret_cast(obj); + if (PyArray_NDIM(arr_obj) != 1) { + return Status::Invalid("Only 1D arrays accepted"); + } + + if (PyArray_DESCR(arr_obj)->type_num == NPY_OBJECT) { + // It's an array object, we can fetch object pointers directly + const Ndarray1DIndexer objects(arr_obj); + for (int64_t i = offset; keep_going && i < objects.size(); ++i) { + RETURN_NOT_OK(func(objects[i], i, &keep_going)); + } + return Status::OK(); + } + // It's a non-object array, fall back on regular sequence access. + // (note PyArray_GETITEM() is slightly different: it returns standard + // Python types, not Numpy scalar types) + // This code path is inefficient: callers should implement dedicated + // logic for non-object arrays. + } + if (PySequence_Check(obj)) { + if (PyList_Check(obj) || PyTuple_Check(obj)) { + // Use fast item access + const Py_ssize_t size = PySequence_Fast_GET_SIZE(obj); + for (Py_ssize_t i = offset; keep_going && i < size; ++i) { + PyObject* value = PySequence_Fast_GET_ITEM(obj, i); + RETURN_NOT_OK(func(value, static_cast(i), &keep_going)); + } + } else { + // Regular sequence: avoid making a potentially large copy + const Py_ssize_t size = PySequence_Size(obj); + RETURN_IF_PYERROR(); + for (Py_ssize_t i = offset; keep_going && i < size; ++i) { + OwnedRef value_ref(PySequence_ITEM(obj, i)); + RETURN_IF_PYERROR(); + RETURN_NOT_OK(func(value_ref.obj(), static_cast(i), &keep_going)); + } + } + } else { + return Status::TypeError("Object is not a sequence"); + } + return Status::OK(); +} + +// Visit sequence with no null mask +template +inline Status VisitSequence(PyObject* obj, int64_t offset, VisitorFunc&& func) { + return VisitSequenceGeneric( + obj, offset, [&func](PyObject* value, int64_t i /* unused */, bool* keep_going) { + return func(value, keep_going); + }); +} + +/// Visit sequence with null mask +template +inline Status VisitSequenceMasked(PyObject* obj, PyObject* mo, int64_t offset, + VisitorFunc&& func) { + if (PyArray_Check(mo)) { + PyArrayObject* mask = reinterpret_cast(mo); + if (PyArray_NDIM(mask) != 1) { + return Status::Invalid("Mask must be 1D array"); + } + if (PyArray_SIZE(mask) != static_cast(PySequence_Size(obj))) { + return Status::Invalid("Mask was a different length from sequence being converted"); + } + + const int dtype = fix_numpy_type_num(PyArray_DESCR(mask)->type_num); + if (dtype == NPY_BOOL) { + Ndarray1DIndexer mask_values(mask); + + return VisitSequenceGeneric( + obj, offset, + [&func, &mask_values](PyObject* value, int64_t i, bool* keep_going) { + return func(value, mask_values[i], keep_going); + }); + } else { + return Status::TypeError("Mask must be boolean dtype"); + } + } else if (py::is_array(mo)) { + auto unwrap_mask_result = unwrap_array(mo); + ARROW_RETURN_NOT_OK(unwrap_mask_result); + std::shared_ptr mask_ = unwrap_mask_result.ValueOrDie(); + if (mask_->type_id() != Type::type::BOOL) { + return Status::TypeError("Mask must be an array of booleans"); + } + + if (mask_->length() != PySequence_Size(obj)) { + return Status::Invalid("Mask was a different length from sequence being converted"); + } + + if (mask_->null_count() != 0) { + return 
Status::TypeError("Mask must be an array of booleans"); + } + + BooleanArray* boolmask = checked_cast(mask_.get()); + return VisitSequenceGeneric( + obj, offset, [&func, &boolmask](PyObject* value, int64_t i, bool* keep_going) { + return func(value, boolmask->Value(i), keep_going); + }); + } else if (PySequence_Check(mo)) { + if (PySequence_Size(mo) != PySequence_Size(obj)) { + return Status::Invalid("Mask was a different length from sequence being converted"); + } + RETURN_IF_PYERROR(); + + return VisitSequenceGeneric( + obj, offset, [&func, &mo](PyObject* value, int64_t i, bool* keep_going) { + OwnedRef value_ref(PySequence_ITEM(mo, i)); + if (!PyBool_Check(value_ref.obj())) + return Status::TypeError("Mask must be a sequence of booleans"); + return func(value, value_ref.obj() == Py_True, keep_going); + }); + } else { + return Status::Invalid("Null mask must be a NumPy array, Arrow array or a Sequence"); + } + + return Status::OK(); +} + +// Like IterateSequence, but accepts any generic iterable (including +// non-restartable iterators, e.g. generators). +// +// The call signature for VisitorFunc must be Visit(PyObject*, bool* +// keep_going). If keep_going is set to false, the iteration terminates +template +inline Status VisitIterable(PyObject* obj, VisitorFunc&& func) { + if (PySequence_Check(obj)) { + // Numpy arrays fall here as well + return VisitSequence(obj, /*offset=*/0, std::forward(func)); + } + // Fall back on the iterator protocol + OwnedRef iter_ref(PyObject_GetIter(obj)); + PyObject* iter = iter_ref.obj(); + RETURN_IF_PYERROR(); + PyObject* value; + + bool keep_going = true; + while (keep_going && (value = PyIter_Next(iter))) { + OwnedRef value_ref(value); + RETURN_NOT_OK(func(value_ref.obj(), &keep_going)); + } + RETURN_IF_PYERROR(); // __next__() might have raised + return Status::OK(); +} + +} // namespace internal +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib.h new file mode 100644 index 0000000000000000000000000000000000000000..f32cbbe7cd6b8cc13f97b3839e68e54c69bea447 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/lib.h @@ -0,0 +1,83 @@ +/* Generated by Cython 3.0.10 */ + +#ifndef __PYX_HAVE__pyarrow__lib +#define __PYX_HAVE__pyarrow__lib + +#include "Python.h" + +#ifndef __PYX_HAVE_API__pyarrow__lib + +#ifdef CYTHON_EXTERN_C + #undef __PYX_EXTERN_C + #define __PYX_EXTERN_C CYTHON_EXTERN_C +#elif defined(__PYX_EXTERN_C) + #ifdef _MSC_VER + #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.") + #else + #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead. 
+ #endif +#else + #define __PYX_EXTERN_C extern "C++" +#endif + +#ifndef DL_IMPORT + #define DL_IMPORT(_T) _T +#endif + +__PYX_EXTERN_C PyObject *pyarrow_wrap_buffer(std::shared_ptr< arrow::Buffer> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_resizable_buffer(std::shared_ptr< arrow::ResizableBuffer> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_data_type(std::shared_ptr< arrow::DataType> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_field(std::shared_ptr< arrow::Field> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_schema(std::shared_ptr< arrow::Schema> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_scalar(std::shared_ptr< arrow::Scalar> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_array(std::shared_ptr< arrow::Array> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_chunked_array(std::shared_ptr< arrow::ChunkedArray> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_coo_tensor(std::shared_ptr< arrow::SparseCOOTensor> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csc_matrix(std::shared_ptr< arrow::SparseCSCMatrix> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csf_tensor(std::shared_ptr< arrow::SparseCSFTensor> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_sparse_csr_matrix(std::shared_ptr< arrow::SparseCSRMatrix> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_tensor(std::shared_ptr< arrow::Tensor> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_batch(std::shared_ptr< arrow::RecordBatch> const &); +__PYX_EXTERN_C PyObject *pyarrow_wrap_table(std::shared_ptr< arrow::Table> const &); +__PYX_EXTERN_C std::shared_ptr< arrow::Buffer> pyarrow_unwrap_buffer(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::DataType> pyarrow_unwrap_data_type(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Field> pyarrow_unwrap_field(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Schema> pyarrow_unwrap_schema(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Scalar> pyarrow_unwrap_scalar(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Array> pyarrow_unwrap_array(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::ChunkedArray> pyarrow_unwrap_chunked_array(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCOOTensor> pyarrow_unwrap_sparse_coo_tensor(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSCMatrix> pyarrow_unwrap_sparse_csc_matrix(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSFTensor> pyarrow_unwrap_sparse_csf_tensor(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::SparseCSRMatrix> pyarrow_unwrap_sparse_csr_matrix(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Tensor> pyarrow_unwrap_tensor(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::RecordBatch> pyarrow_unwrap_batch(PyObject *); +__PYX_EXTERN_C std::shared_ptr< arrow::Table> pyarrow_unwrap_table(PyObject *); + +#endif /* !__PYX_HAVE_API__pyarrow__lib */ + +/* WARNING: the interface of the module init function changed in CPython 3.5. */ +/* It now returns a PyModuleDef instance instead of a PyModule instance. 
*/ + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC initlib(void); +#else +/* WARNING: Use PyImport_AppendInittab("lib", PyInit_lib) instead of calling PyInit_lib directly from Python 3.5 */ +PyMODINIT_FUNC PyInit_lib(void); + +#if PY_VERSION_HEX >= 0x03050000 && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 201402L)) +#if defined(__cplusplus) && __cplusplus >= 201402L +[[deprecated("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly.")]] inline +#elif defined(__GNUC__) || defined(__clang__) +__attribute__ ((__deprecated__("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly."), __unused__)) __inline__ +#elif defined(_MSC_VER) +__declspec(deprecated("Use PyImport_AppendInittab(\"lib\", PyInit_lib) instead of calling PyInit_lib directly.")) __inline +#endif +static PyObject* __PYX_WARN_IF_PyInit_lib_INIT_CALLED(PyObject* res) { + return res; +} +#define PyInit_lib() __PYX_WARN_IF_PyInit_lib_INIT_CALLED(PyInit_lib()) +#endif +#endif + +#endif /* !__PYX_HAVE__pyarrow__lib */ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h new file mode 100644 index 0000000000000000000000000000000000000000..7ea7d6e16f5285f4b2dbec7c575c80e1e029b6f8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_interop.h @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/platform.h" // IWYU pragma: export + +#include // IWYU pragma: export + +// Don't use the deprecated Numpy functions +#ifdef NPY_1_7_API_VERSION +#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION +#else +#define NPY_ARRAY_NOTSWAPPED NPY_NOTSWAPPED +#define NPY_ARRAY_ALIGNED NPY_ALIGNED +#define NPY_ARRAY_WRITEABLE NPY_WRITEABLE +#define NPY_ARRAY_UPDATEIFCOPY NPY_UPDATEIFCOPY +#endif + +// This is required to be able to access the NumPy C API properly in C++ files +// other than init.cc. +#define PY_ARRAY_UNIQUE_SYMBOL arrow_ARRAY_API +#ifndef NUMPY_IMPORT_ARRAY +#define NO_IMPORT_ARRAY +#endif + +#include // IWYU pragma: export +#include // IWYU pragma: export +#include // IWYU pragma: export + +// A bit subtle. Numpy has 5 canonical integer types: +// (or, rather, type pairs: signed and unsigned) +// NPY_BYTE, NPY_SHORT, NPY_INT, NPY_LONG, NPY_LONGLONG +// It also has 4 fixed-width integer aliases. +// When mapping Arrow integer types to these 4 fixed-width aliases, +// we always miss one of the canonical types (even though it may +// have the same width as one of the aliases). +// Which one depends on the platform... 
+// On a LP64 system, NPY_INT64 maps to NPY_LONG and +// NPY_LONGLONG needs to be handled separately. +// On a LLP64 system, NPY_INT32 maps to NPY_LONG and +// NPY_INT needs to be handled separately. + +#if NPY_BITSOF_LONG == 32 && NPY_BITSOF_LONGLONG == 64 +#define NPY_INT64_IS_LONG_LONG 1 +#else +#define NPY_INT64_IS_LONG_LONG 0 +#endif + +#if NPY_BITSOF_INT == 32 && NPY_BITSOF_LONG == 64 +#define NPY_INT32_IS_INT 1 +#else +#define NPY_INT32_IS_INT 0 +#endif + +// Backported NumPy 2 API (can be removed if numpy 2 is required) +#if NPY_ABI_VERSION < 0x02000000 +#define PyDataType_ELSIZE(descr) ((descr)->elsize) +#define PyDataType_C_METADATA(descr) ((descr)->c_metadata) +#define PyDataType_FIELDS(descr) ((descr)->fields) +#endif + +namespace arrow { +namespace py { + +inline int import_numpy() { +#ifdef NUMPY_IMPORT_ARRAY + import_array1(-1); + import_umath1(-1); +#endif + + return 0; +} + +// See above about the missing Numpy integer type numbers +inline int fix_numpy_type_num(int type_num) { +#if !NPY_INT32_IS_INT && NPY_BITSOF_INT == 32 + if (type_num == NPY_INT) return NPY_INT32; + if (type_num == NPY_UINT) return NPY_UINT32; +#endif +#if !NPY_INT64_IS_LONG_LONG && NPY_BITSOF_LONGLONG == 64 + if (type_num == NPY_LONGLONG) return NPY_INT64; + if (type_num == NPY_ULONGLONG) return NPY_UINT64; +#endif + return type_num; +} + +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h new file mode 100644 index 0000000000000000000000000000000000000000..b6cd093e5542008cf173f43de311e40c418e7c8d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_to_arrow.h @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Converting from pandas memory representation to Arrow data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include + +#include "arrow/compute/api.h" +#include "arrow/python/visibility.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class DataType; +class MemoryPool; +class Status; + +namespace py { + +/// Convert NumPy arrays to Arrow. 
If target data type is not known, pass a +/// type with null +/// +/// \param[in] pool Memory pool for any memory allocations +/// \param[in] ao an ndarray with the array data +/// \param[in] mo an ndarray with a null mask (True is null), optional +/// \param[in] from_pandas If true, use pandas's null sentinels to determine +/// whether values are null +/// \param[in] type a specific type to cast to, may be null +/// \param[in] cast_options casting options +/// \param[out] out a ChunkedArray, to accommodate chunked output +ARROW_PYTHON_EXPORT +Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas, + const std::shared_ptr& type, + const compute::CastOptions& cast_options, + std::shared_ptr* out); + +/// Safely convert NumPy arrays to Arrow. If target data type is not known, +/// pass a type with null. +/// +/// \param[in] pool Memory pool for any memory allocations +/// \param[in] ao an ndarray with the array data +/// \param[in] mo an ndarray with a null mask (True is null), optional +/// \param[in] from_pandas If true, use pandas's null sentinels to determine +/// whether values are null +/// \param[in] type a specific type to cast to, may be null +/// \param[out] out a ChunkedArray, to accommodate chunked output +ARROW_PYTHON_EXPORT +Status NdarrayToArrow(MemoryPool* pool, PyObject* ao, PyObject* mo, bool from_pandas, + const std::shared_ptr& type, + std::shared_ptr* out); + +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h new file mode 100644 index 0000000000000000000000000000000000000000..a1aaa30e260f5042c98f96bf081b4a49245ea656 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/parquet_encryption.h @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
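// ---- Illustrative sketch (editor's addition, not part of the Arrow headers) ----
// A minimal usage sketch for the NdarrayToArrow overloads declared in
// numpy_to_arrow.h above. It assumes an initialized Python interpreter with the
// pyarrow/NumPy C APIs already imported; `ndarray` and `mask` are PyObject*
// handles supplied by the caller, and passing a null `type` asks the converter
// to infer the Arrow type. `ConvertNdarray` is an illustrative wrapper name,
// not an Arrow API.
#include <memory>

#include "arrow/memory_pool.h"
#include "arrow/python/numpy_to_arrow.h"
#include "arrow/status.h"

arrow::Status ConvertNdarray(PyObject* ndarray, PyObject* mask,
                             std::shared_ptr<arrow::ChunkedArray>* out) {
  // Use the "safe" overload (default casting rules); from_pandas=true honours
  // pandas's null sentinels when building the validity bitmap.
  return arrow::py::NdarrayToArrow(arrow::default_memory_pool(), ndarray, mask,
                                   /*from_pandas=*/true, /*type=*/nullptr, out);
}
// --------------------------------------------------------------------------------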
+ +#pragma once + +#include + +#include "arrow/python/common.h" +#include "arrow/python/visibility.h" +#include "arrow/util/macros.h" +#include "parquet/encryption/crypto_factory.h" +#include "parquet/encryption/kms_client.h" +#include "parquet/encryption/kms_client_factory.h" + +#if defined(_WIN32) || defined(__CYGWIN__) // Windows +#if defined(_MSC_VER) +#pragma warning(disable : 4251) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#ifdef ARROW_PYTHON_STATIC +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT +#elif defined(ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORTING) +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllexport) +#else +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __declspec(dllimport) +#endif + +#else // Not Windows +#ifndef ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT +#define ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT __attribute__((visibility("default"))) +#endif +#endif // Non-Windows + +namespace arrow { +namespace py { +namespace parquet { +namespace encryption { + +/// \brief A table of function pointers for calling from C++ into +/// Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientVtable { + public: + std::function + wrap_key; + std::function + unwrap_key; +}; + +/// \brief A helper for KmsClient implementation in Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClient + : public ::parquet::encryption::KmsClient { + public: + PyKmsClient(PyObject* handler, PyKmsClientVtable vtable); + ~PyKmsClient() override; + + std::string WrapKey(const std::string& key_bytes, + const std::string& master_key_identifier) override; + + std::string UnwrapKey(const std::string& wrapped_key, + const std::string& master_key_identifier) override; + + private: + OwnedRefNoGIL handler_; + PyKmsClientVtable vtable_; +}; + +/// \brief A table of function pointers for calling from C++ into +/// Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactoryVtable { + public: + std::function* out)> + create_kms_client; +}; + +/// \brief A helper for KmsClientFactory implementation in Python. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyKmsClientFactory + : public ::parquet::encryption::KmsClientFactory { + public: + PyKmsClientFactory(PyObject* handler, PyKmsClientFactoryVtable vtable); + ~PyKmsClientFactory() override; + + std::shared_ptr<::parquet::encryption::KmsClient> CreateKmsClient( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config) override; + + private: + OwnedRefNoGIL handler_; + PyKmsClientFactoryVtable vtable_; +}; + +/// \brief A CryptoFactory that returns Results instead of throwing exceptions. +class ARROW_PYTHON_PARQUET_ENCRYPTION_EXPORT PyCryptoFactory + : public ::parquet::encryption::CryptoFactory { + public: + arrow::Result> + SafeGetFileEncryptionProperties( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config, + const ::parquet::encryption::EncryptionConfiguration& encryption_config); + + /// The returned FileDecryptionProperties object will use the cache inside this + /// CryptoFactory object, so please keep this + /// CryptoFactory object alive along with the returned + /// FileDecryptionProperties object. 
+ arrow::Result> + SafeGetFileDecryptionProperties( + const ::parquet::encryption::KmsConnectionConfig& kms_connection_config, + const ::parquet::encryption::DecryptionConfiguration& decryption_config); +}; + +} // namespace encryption +} // namespace parquet +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pch.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pch.h new file mode 100644 index 0000000000000000000000000000000000000000..d1d688b4f17c4d0461ebd66105676083ebcb5b41 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pch.h @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Often-used headers, for precompiling. +// If updating this header, please make sure you check compilation speed +// before checking in. Adding headers which are not used extremely often +// may incur a slowdown, since it makes the precompiled header heavier to load. + +#include "arrow/pch.h" +#include "arrow/python/platform.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/platform.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/platform.h new file mode 100644 index 0000000000000000000000000000000000000000..e71c7ac85399e4e3f7c93d4814fd7fdad774dc13 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/platform.h @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
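// ---- Illustrative sketch (editor's addition, not part of the Arrow headers) ----
// The PyKmsClient / PyKmsClientFactory classes above follow a "vtable of
// std::function" pattern: Cython fills the function slots, and the C++ class
// only keeps an owned Python handler plus the vtable, trampolining each virtual
// call back into Python. The exact slot signatures were garbled in this copy of
// the header, so the sketch below is hypothetical and Arrow-free; it only shows
// the shape of the pattern.
#include <functional>
#include <string>
#include <utility>

struct ExampleKmsVtable {
  // Hypothetical slot: ask the Python handler to wrap `key`, writing into *out.
  std::function<void(void* handler, const std::string& key,
                     const std::string& master_key_id, std::string* out)>
      wrap_key;
};

class ExampleKmsClient {
 public:
  ExampleKmsClient(void* handler, ExampleKmsVtable vtable)
      : handler_(handler), vtable_(std::move(vtable)) {}

  std::string WrapKey(const std::string& key, const std::string& master_key_id) {
    std::string out;
    vtable_.wrap_key(handler_, key, master_key_id, &out);  // call into Python
    return out;
  }

 private:
  void* handler_;
  ExampleKmsVtable vtable_;
};
// --------------------------------------------------------------------------------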
+ +// Functions for converting between pandas's NumPy-based data representation +// and Arrow data structures + +#pragma once + +// If PY_SSIZE_T_CLEAN is defined, argument parsing functions treat #-specifier +// to mean Py_ssize_t (defining this to suppress deprecation warning) +#define PY_SSIZE_T_CLEAN + +#include // IWYU pragma: export +#include + +// Work around C2528 error +#ifdef _MSC_VER +#if _MSC_VER >= 1900 +#undef timezone +#endif + +// https://bugs.python.org/issue36020 +// TODO(wjones127): Can remove once we drop support for CPython 3.9 +#ifdef snprintf +#undef snprintf +#endif +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h new file mode 100644 index 0000000000000000000000000000000000000000..113035500c0053dbb9dde5a99216aec1aefd1140 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow.h @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/platform.h" + +#include + +#include "arrow/python/visibility.h" + +#include "arrow/sparse_tensor.h" + +// Work around ARROW-2317 (C linkage warning from Cython) +extern "C++" { + +namespace arrow { + +class Array; +class Buffer; +class DataType; +class Field; +class RecordBatch; +class Schema; +class Status; +class Table; +class Tensor; + +namespace py { + +// Returns 0 on success, -1 on error. +ARROW_PYTHON_EXPORT int import_pyarrow(); + +#define DECLARE_WRAP_FUNCTIONS(FUNC_SUFFIX, TYPE_NAME) \ + ARROW_PYTHON_EXPORT bool is_##FUNC_SUFFIX(PyObject*); \ + ARROW_PYTHON_EXPORT Result> unwrap_##FUNC_SUFFIX( \ + PyObject*); \ + ARROW_PYTHON_EXPORT PyObject* wrap_##FUNC_SUFFIX(const std::shared_ptr&); + +DECLARE_WRAP_FUNCTIONS(buffer, Buffer) + +DECLARE_WRAP_FUNCTIONS(data_type, DataType) +DECLARE_WRAP_FUNCTIONS(field, Field) +DECLARE_WRAP_FUNCTIONS(schema, Schema) + +DECLARE_WRAP_FUNCTIONS(scalar, Scalar) + +DECLARE_WRAP_FUNCTIONS(array, Array) +DECLARE_WRAP_FUNCTIONS(chunked_array, ChunkedArray) + +DECLARE_WRAP_FUNCTIONS(sparse_coo_tensor, SparseCOOTensor) +DECLARE_WRAP_FUNCTIONS(sparse_csc_matrix, SparseCSCMatrix) +DECLARE_WRAP_FUNCTIONS(sparse_csf_tensor, SparseCSFTensor) +DECLARE_WRAP_FUNCTIONS(sparse_csr_matrix, SparseCSRMatrix) +DECLARE_WRAP_FUNCTIONS(tensor, Tensor) + +DECLARE_WRAP_FUNCTIONS(batch, RecordBatch) +DECLARE_WRAP_FUNCTIONS(table, Table) + +#undef DECLARE_WRAP_FUNCTIONS + +namespace internal { + +// If status is ok, return 0. +// If status is not ok, set Python error indicator and return -1. +ARROW_PYTHON_EXPORT int check_status(const Status& status); + +// Convert status to a Python exception object. Status must not be ok. 
+ARROW_PYTHON_EXPORT PyObject* convert_status(const Status& status); + +} // namespace internal +} // namespace py +} // namespace arrow + +} // extern "C++" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_api.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_api.h new file mode 100644 index 0000000000000000000000000000000000000000..a476e55a2a111332ed8594ace0fd29e2987046cb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_api.h @@ -0,0 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// For backward compatibility. +#include "arrow/python/lib_api.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h new file mode 100644 index 0000000000000000000000000000000000000000..e509593c254468a62216e0e4a7ea073ad9a3f1d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/pyarrow_lib.h @@ -0,0 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// For backward compatibility. +#include "arrow/python/lib.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_test.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_test.h new file mode 100644 index 0000000000000000000000000000000000000000..c2eb62fc29accb670f5d53e326381d68a6534335 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_test.h @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/status.h" + +#include "arrow/python/visibility.h" + +namespace arrow { +namespace py { +namespace testing { + +struct TestCase { + std::string name; + std::function func; +}; + +ARROW_PYTHON_EXPORT +std::vector GetCppTestCases(); + +} // namespace testing +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/serialize.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/serialize.h new file mode 100644 index 0000000000000000000000000000000000000000..fd207d3e069039351793f3252c5a8eb5d9009cdb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/serialize.h @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/ipc/options.h" +#include "arrow/python/visibility.h" +#include "arrow/sparse_tensor.h" +#include "arrow/status.h" + +// Forward declaring PyObject, see +// https://mail.python.org/pipermail/python-dev/2003-August/037601.html +#ifndef PyObject_HEAD +struct _object; +typedef _object PyObject; +#endif + +namespace arrow { + +class Buffer; +class DataType; +class MemoryPool; +class RecordBatch; +class Tensor; + +namespace io { + +class OutputStream; + +} // namespace io + +namespace py { + +struct ARROW_PYTHON_EXPORT SerializedPyObject { + std::shared_ptr batch; + std::vector> tensors; + std::vector> sparse_tensors; + std::vector> ndarrays; + std::vector> buffers; + ipc::IpcWriteOptions ipc_options; + + SerializedPyObject(); + + /// \brief Write serialized Python object to OutputStream + /// \param[in,out] dst an OutputStream + /// \return Status + Status WriteTo(io::OutputStream* dst); + + /// \brief Convert SerializedPyObject to a dict containing the message + /// components as Buffer instances with minimal memory allocation + /// + /// { + /// 'num_tensors': M, + /// 'num_sparse_tensors': N, + /// 'num_buffers': K, + /// 'data': [Buffer] + /// } + /// + /// Each tensor is written as two buffers, one for the metadata and one for + /// the body. 
Therefore, the number of buffers in 'data' is 2 * M + 2 * N + K + 1, + /// with the first buffer containing the serialized record batch containing + /// the UnionArray that describes the whole object + Status GetComponents(MemoryPool* pool, PyObject** out); +}; + +/// \brief Serialize Python sequence as a SerializedPyObject. +/// \param[in] context Serialization context which contains custom serialization +/// and deserialization callbacks. Can be any Python object with a +/// _serialize_callback method for serialization and a _deserialize_callback +/// method for deserialization. If context is None, no custom serialization +/// will be attempted. +/// \param[in] sequence A Python sequence object to serialize to Arrow data +/// structures +/// \param[out] out The serialized representation +/// \return Status +/// +/// Release GIL before calling +ARROW_PYTHON_EXPORT +Status SerializeObject(PyObject* context, PyObject* sequence, SerializedPyObject* out); + +/// \brief Serialize an Arrow Tensor as a SerializedPyObject. +/// \param[in] tensor Tensor to be serialized +/// \param[out] out The serialized representation +/// \return Status +ARROW_PYTHON_EXPORT +Status SerializeTensor(std::shared_ptr tensor, py::SerializedPyObject* out); + +/// \brief Write the Tensor metadata header to an OutputStream. +/// \param[in] dtype DataType of the Tensor +/// \param[in] shape The shape of the tensor +/// \param[in] tensor_num_bytes The length of the Tensor data in bytes +/// \param[in] dst The OutputStream to write the Tensor header to +/// \return Status +ARROW_PYTHON_EXPORT +Status WriteNdarrayHeader(std::shared_ptr dtype, + const std::vector& shape, int64_t tensor_num_bytes, + io::OutputStream* dst); + +struct PythonType { + enum type { + NONE, + BOOL, + INT, + PY2INT, // Kept for compatibility + BYTES, + STRING, + HALF_FLOAT, + FLOAT, + DOUBLE, + DATE64, + LIST, + DICT, + TUPLE, + SET, + TENSOR, + NDARRAY, + BUFFER, + SPARSECOOTENSOR, + SPARSECSRMATRIX, + SPARSECSCMATRIX, + SPARSECSFTENSOR, + NUM_PYTHON_TYPES + }; +}; + +} // namespace py + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/type_traits.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/type_traits.h new file mode 100644 index 0000000000000000000000000000000000000000..a941577f765583e3ac54ea163452342b5c07f309 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/type_traits.h @@ -0,0 +1,350 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
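// ---- Illustrative sketch (editor's addition, not part of the Arrow headers) ----
// A worked instance of the buffer-count rule documented for
// SerializedPyObject::GetComponents above: every tensor and sparse tensor
// contributes a metadata buffer plus a body buffer, each plain buffer
// contributes one, and one extra buffer carries the record batch describing the
// whole object. `NumComponentBuffers` is an illustrative helper, not an Arrow API.
#include <cstdint>

constexpr int64_t NumComponentBuffers(int64_t num_tensors, int64_t num_sparse_tensors,
                                      int64_t num_buffers) {
  return 2 * num_tensors + 2 * num_sparse_tensors + num_buffers + 1;
}

// 2 tensors, 1 sparse tensor and 3 buffers serialize to 10 component buffers.
static_assert(NumComponentBuffers(2, 1, 3) == 10, "2*2 + 2*1 + 3 + 1 == 10");
// --------------------------------------------------------------------------------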
+ +// Internal header + +#pragma once + +#include "arrow/python/platform.h" + +#include +#include + +#include "arrow/python/numpy_interop.h" + +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/logging.h" + +namespace arrow { +namespace py { + +static constexpr int64_t kPandasTimestampNull = std::numeric_limits::min(); +constexpr int64_t kNanosecondsInDay = 86400000000000LL; + +namespace internal { + +// +// Type traits for Numpy -> Arrow equivalence +// +template +struct npy_traits {}; + +template <> +struct npy_traits { + typedef uint8_t value_type; + using TypeClass = BooleanType; + using BuilderClass = BooleanBuilder; + + static constexpr bool supports_nulls = false; + static inline bool isnull(uint8_t v) { return false; } +}; + +#define NPY_INT_DECL(TYPE, CapType, T) \ + template <> \ + struct npy_traits { \ + typedef T value_type; \ + using TypeClass = CapType##Type; \ + using BuilderClass = CapType##Builder; \ + \ + static constexpr bool supports_nulls = false; \ + static inline bool isnull(T v) { return false; } \ + }; + +NPY_INT_DECL(INT8, Int8, int8_t); +NPY_INT_DECL(INT16, Int16, int16_t); +NPY_INT_DECL(INT32, Int32, int32_t); +NPY_INT_DECL(INT64, Int64, int64_t); + +NPY_INT_DECL(UINT8, UInt8, uint8_t); +NPY_INT_DECL(UINT16, UInt16, uint16_t); +NPY_INT_DECL(UINT32, UInt32, uint32_t); +NPY_INT_DECL(UINT64, UInt64, uint64_t); + +#if !NPY_INT32_IS_INT && NPY_BITSOF_INT == 32 +NPY_INT_DECL(INT, Int32, int32_t); +NPY_INT_DECL(UINT, UInt32, uint32_t); +#endif +#if !NPY_INT64_IS_LONG_LONG && NPY_BITSOF_LONGLONG == 64 +NPY_INT_DECL(LONGLONG, Int64, int64_t); +NPY_INT_DECL(ULONGLONG, UInt64, uint64_t); +#endif + +template <> +struct npy_traits { + typedef npy_half value_type; + using TypeClass = HalfFloatType; + using BuilderClass = HalfFloatBuilder; + + static constexpr npy_half na_sentinel = NPY_HALF_NAN; + + static constexpr bool supports_nulls = true; + + static inline bool isnull(npy_half v) { return v == NPY_HALF_NAN; } +}; + +template <> +struct npy_traits { + typedef float value_type; + using TypeClass = FloatType; + using BuilderClass = FloatBuilder; + + // We need to use quiet_NaN here instead of the NAN macro as on Windows + // the NAN macro leads to "division-by-zero" compile-time error with clang. 
+ static constexpr float na_sentinel = std::numeric_limits::quiet_NaN(); + + static constexpr bool supports_nulls = true; + + static inline bool isnull(float v) { return v != v; } +}; + +template <> +struct npy_traits { + typedef double value_type; + using TypeClass = DoubleType; + using BuilderClass = DoubleBuilder; + + static constexpr double na_sentinel = std::numeric_limits::quiet_NaN(); + + static constexpr bool supports_nulls = true; + + static inline bool isnull(double v) { return v != v; } +}; + +template <> +struct npy_traits { + typedef int64_t value_type; + using TypeClass = TimestampType; + using BuilderClass = TimestampBuilder; + + static constexpr bool supports_nulls = true; + + static inline bool isnull(int64_t v) { + // NaT = -2**63 + // = -0x8000000000000000 + // = -9223372036854775808; + // = std::numeric_limits::min() + return v == std::numeric_limits::min(); + } +}; + +template <> +struct npy_traits { + typedef int64_t value_type; + using TypeClass = DurationType; + using BuilderClass = DurationBuilder; + + static constexpr bool supports_nulls = true; + + static inline bool isnull(int64_t v) { + // NaT = -2**63 = std::numeric_limits::min() + return v == std::numeric_limits::min(); + } +}; + +template <> +struct npy_traits { + typedef PyObject* value_type; + static constexpr bool supports_nulls = true; + + static inline bool isnull(PyObject* v) { return v == Py_None; } +}; + +// +// Type traits for Arrow -> Numpy equivalence +// Note *supports_nulls* means the equivalent Numpy type support nulls +// +template +struct arrow_traits {}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_BOOL; + static constexpr bool supports_nulls = false; + typedef typename npy_traits::value_type T; +}; + +#define INT_DECL(TYPE) \ + template <> \ + struct arrow_traits { \ + static constexpr int npy_type = NPY_##TYPE; \ + static constexpr bool supports_nulls = false; \ + static constexpr double na_value = std::numeric_limits::quiet_NaN(); \ + typedef typename npy_traits::value_type T; \ + }; + +INT_DECL(INT8); +INT_DECL(INT16); +INT_DECL(INT32); +INT_DECL(INT64); +INT_DECL(UINT8); +INT_DECL(UINT16); +INT_DECL(UINT32); +INT_DECL(UINT64); + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_FLOAT16; + static constexpr bool supports_nulls = true; + static constexpr uint16_t na_value = NPY_HALF_NAN; + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_FLOAT32; + static constexpr bool supports_nulls = true; + static constexpr float na_value = std::numeric_limits::quiet_NaN(); + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_FLOAT64; + static constexpr bool supports_nulls = true; + static constexpr double na_value = std::numeric_limits::quiet_NaN(); + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_DATETIME; + static constexpr int64_t npy_shift = 1; + + static constexpr bool supports_nulls = true; + static constexpr int64_t na_value = kPandasTimestampNull; + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_TIMEDELTA; + static constexpr int64_t npy_shift = 1; + + static constexpr bool supports_nulls = true; + static constexpr int64_t na_value = kPandasTimestampNull; + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + // 
Data stores as FR_D day unit + static constexpr int npy_type = NPY_DATETIME; + static constexpr int64_t npy_shift = 1; + + static constexpr bool supports_nulls = true; + typedef typename npy_traits::value_type T; + + static constexpr int64_t na_value = kPandasTimestampNull; + static inline bool isnull(int64_t v) { return npy_traits::isnull(v); } +}; + +template <> +struct arrow_traits { + // Data stores as FR_D day unit + static constexpr int npy_type = NPY_DATETIME; + + // There are 1000 * 60 * 60 * 24 = 86400000ms in a day + static constexpr int64_t npy_shift = 86400000; + + static constexpr bool supports_nulls = true; + typedef typename npy_traits::value_type T; + + static constexpr int64_t na_value = kPandasTimestampNull; + static inline bool isnull(int64_t v) { return npy_traits::isnull(v); } +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_OBJECT; + static constexpr bool supports_nulls = true; + static constexpr int64_t na_value = kPandasTimestampNull; + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_OBJECT; + static constexpr bool supports_nulls = true; + typedef typename npy_traits::value_type T; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_OBJECT; + static constexpr bool supports_nulls = true; +}; + +template <> +struct arrow_traits { + static constexpr int npy_type = NPY_OBJECT; + static constexpr bool supports_nulls = true; +}; + +static inline NPY_DATETIMEUNIT NumPyFrequency(TimeUnit::type unit) { + switch (unit) { + case TimestampType::Unit::SECOND: + return NPY_FR_s; + case TimestampType::Unit::MILLI: + return NPY_FR_ms; + break; + case TimestampType::Unit::MICRO: + return NPY_FR_us; + default: + // NANO + return NPY_FR_ns; + } +} + +static inline int NumPyTypeSize(int npy_type) { + npy_type = fix_numpy_type_num(npy_type); + + switch (npy_type) { + case NPY_BOOL: + case NPY_INT8: + case NPY_UINT8: + return 1; + case NPY_INT16: + case NPY_UINT16: + return 2; + case NPY_INT32: + case NPY_UINT32: + return 4; + case NPY_INT64: + case NPY_UINT64: + return 8; + case NPY_FLOAT16: + return 2; + case NPY_FLOAT32: + return 4; + case NPY_FLOAT64: + return 8; + case NPY_DATETIME: + return 8; + case NPY_OBJECT: + return sizeof(void*); + default: + ARROW_CHECK(false) << "unhandled numpy type"; + break; + } + return -1; +} + +} // namespace internal +} // namespace py +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..dd43b32fd43ff46e195d0057cf3198b926b9fdd0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#if defined(_WIN32) || defined(__CYGWIN__) // Windows +#if defined(_MSC_VER) +#pragma warning(disable : 4251) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#ifdef ARROW_PYTHON_STATIC +#define ARROW_PYTHON_EXPORT +#elif defined(ARROW_PYTHON_EXPORTING) +#define ARROW_PYTHON_EXPORT __declspec(dllexport) +#else +#define ARROW_PYTHON_EXPORT __declspec(dllimport) +#endif + +#else // Not Windows +#ifndef ARROW_PYTHON_EXPORT +#define ARROW_PYTHON_EXPORT __attribute__((visibility("default"))) +#endif +#endif // Non-Windows diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/tensor/converter.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/tensor/converter.h new file mode 100644 index 0000000000000000000000000000000000000000..408ab22305fff1665956ee8bb831fbc062b9994c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/tensor/converter.h @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
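// ---- Illustrative sketch (editor's addition, not part of the Arrow headers) ----
// The npy_traits<> / arrow_traits<> tables in type_traits.h above map
// compile-time type ids to value types, builder types and null sentinels, so
// generic conversion code can be written once against the trait interface. A
// self-contained miniature of the same idiom (all names here are illustrative,
// not Arrow's):
#include <cmath>
#include <cstdint>

enum class ExampleTypeId { kInt64, kDouble };

template <ExampleTypeId>
struct example_traits {};

template <>
struct example_traits<ExampleTypeId::kInt64> {
  using value_type = int64_t;
  static constexpr bool supports_nulls = false;
  static bool isnull(int64_t) { return false; }  // integers have no NaN slot
};

template <>
struct example_traits<ExampleTypeId::kDouble> {
  using value_type = double;
  static constexpr bool supports_nulls = true;
  static bool isnull(double v) { return std::isnan(v); }  // NaN as null sentinel
};

// Generic code written once against the trait interface:
template <ExampleTypeId ID>
int CountNulls(const typename example_traits<ID>::value_type* values, int n) {
  int nulls = 0;
  for (int i = 0; i < n; ++i) nulls += example_traits<ID>::isnull(values[i]) ? 1 : 0;
  return nulls;
}
// --------------------------------------------------------------------------------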
+ +#pragma once + +#include "arrow/sparse_tensor.h" // IWYU pragma: export + +#include + +namespace arrow { +namespace internal { + +struct SparseTensorConverterMixin { + static bool IsNonZero(const uint8_t val) { return val != 0; } + + static void AssignIndex(uint8_t* indices, int64_t val, const int elsize); + + static int64_t GetIndexValue(const uint8_t* value_ptr, const int elsize); +}; + +Status MakeSparseCOOTensorFromTensor(const Tensor& tensor, + const std::shared_ptr& index_value_type, + MemoryPool* pool, + std::shared_ptr* out_sparse_index, + std::shared_ptr* out_data); + +Status MakeSparseCSXMatrixFromTensor(SparseMatrixCompressedAxis axis, + const Tensor& tensor, + const std::shared_ptr& index_value_type, + MemoryPool* pool, + std::shared_ptr* out_sparse_index, + std::shared_ptr* out_data); + +Status MakeSparseCSFTensorFromTensor(const Tensor& tensor, + const std::shared_ptr& index_value_type, + MemoryPool* pool, + std::shared_ptr* out_sparse_index, + std::shared_ptr* out_data); + +Result> MakeTensorFromSparseCOOTensor( + MemoryPool* pool, const SparseCOOTensor* sparse_tensor); + +Result> MakeTensorFromSparseCSRMatrix( + MemoryPool* pool, const SparseCSRMatrix* sparse_tensor); + +Result> MakeTensorFromSparseCSCMatrix( + MemoryPool* pool, const SparseCSCMatrix* sparse_tensor); + +Result> MakeTensorFromSparseCSFTensor( + MemoryPool* pool, const SparseCSFTensor* sparse_tensor); + +} // namespace internal +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/async_test_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/async_test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..7066bbb63d2a5775454d5cffc82df7faf0056db8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/async_test_util.h @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
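// ---- Illustrative sketch (editor's addition, not part of the Arrow headers) ----
// Conceptually, MakeSparseCOOTensorFromTensor in tensor/converter.h above walks
// a dense tensor and keeps only the non-zero cells as (coordinates, value)
// pairs. A minimal, Arrow-free sketch of that idea for a row-major 2-D buffer
// (names are illustrative only):
#include <cstdint>
#include <vector>

struct CooEntry {
  int64_t row;
  int64_t col;
  double value;
};

std::vector<CooEntry> DenseToCoo(const double* data, int64_t nrows, int64_t ncols) {
  std::vector<CooEntry> out;
  for (int64_t r = 0; r < nrows; ++r) {
    for (int64_t c = 0; c < ncols; ++c) {
      double v = data[r * ncols + c];
      if (v != 0.0) out.push_back({r, c, v});  // IsNonZero-style test
    }
  }
  return out;
}
// --------------------------------------------------------------------------------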
+ +#pragma once + +#include +#include + +#include "arrow/testing/gtest_util.h" +#include "arrow/util/async_generator.h" +#include "arrow/util/future.h" + +namespace arrow { +namespace util { + +template +AsyncGenerator AsyncVectorIt(std::vector v) { + return MakeVectorGenerator(std::move(v)); +} + +template +AsyncGenerator FailAt(AsyncGenerator src, int failing_index) { + auto index = std::make_shared>(0); + return [src, index, failing_index]() { + auto idx = index->fetch_add(1); + if (idx >= failing_index) { + return Future::MakeFinished(Status::Invalid("XYZ")); + } + return src(); + }; +} + +template +AsyncGenerator SlowdownABit(AsyncGenerator source) { + return MakeMappedGenerator(std::move(source), [](const T& res) { + return SleepABitAsync().Then([res]() { return res; }); + }); +} + +template +class TrackingGenerator { + public: + explicit TrackingGenerator(AsyncGenerator source) + : state_(std::make_shared(std::move(source))) {} + + Future operator()() { + state_->num_read++; + return state_->source(); + } + + int num_read() { return state_->num_read.load(); } + + private: + struct State { + explicit State(AsyncGenerator source) : source(std::move(source)), num_read(0) {} + + AsyncGenerator source; + std::atomic num_read; + }; + + std::shared_ptr state_; +}; + +} // namespace util +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/builder.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/builder.h new file mode 100644 index 0000000000000000000000000000000000000000..09e8f49dea9eb1023ce7db9813b240eed1cc0563 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/builder.h @@ -0,0 +1,231 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
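// ---- Illustrative sketch (editor's addition, not part of the Arrow headers) ----
// TrackingGenerator in async_test_util.h above is a decorator: it wraps a
// source generator and counts how many times it has been pulled, sharing the
// counter through a shared_ptr so copies of the wrapper stay in sync. The same
// idiom with a plain std::function, free of Arrow's Future machinery (names
// illustrative):
#include <atomic>
#include <functional>
#include <memory>
#include <utility>

template <typename T>
class CountingSource {
 public:
  explicit CountingSource(std::function<T()> source)
      : state_(std::make_shared<State>(std::move(source))) {}

  T operator()() {
    state_->num_read.fetch_add(1);  // record the pull before delegating
    return state_->source();
  }

  int num_read() const { return state_->num_read.load(); }

 private:
  struct State {
    explicit State(std::function<T()> s) : source(std::move(s)), num_read(0) {}
    std::function<T()> source;
    std::atomic<int> num_read;
  };
  std::shared_ptr<State> state_;
};
// --------------------------------------------------------------------------------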
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/array/builder_binary.h" +#include "arrow/array/builder_primitive.h" +#include "arrow/array/builder_time.h" +#include "arrow/buffer.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/type_fwd.h" +#include "arrow/util/bit_util.h" +#include "arrow/visit_type_inline.h" + +namespace arrow { + +// ArrayFromVector: construct an Array from vectors of C values + +template +void ArrayFromVector(const std::shared_ptr& type, + const std::vector& is_valid, const std::vector& values, + std::shared_ptr* out) { + auto type_id = TYPE::type_id; + ASSERT_EQ(type_id, type->id()) + << "template parameter and concrete DataType instance don't agree"; + + std::unique_ptr builder_ptr; + ASSERT_OK(MakeBuilder(default_memory_pool(), type, &builder_ptr)); + // Get the concrete builder class to access its Append() specializations + auto& builder = dynamic_cast::BuilderType&>(*builder_ptr); + + for (size_t i = 0; i < values.size(); ++i) { + if (is_valid[i]) { + ASSERT_OK(builder.Append(values[i])); + } else { + ASSERT_OK(builder.AppendNull()); + } + } + ASSERT_OK(builder.Finish(out)); +} + +template +void ArrayFromVector(const std::shared_ptr& type, + const std::vector& values, std::shared_ptr* out) { + auto type_id = TYPE::type_id; + ASSERT_EQ(type_id, type->id()) + << "template parameter and concrete DataType instance don't agree"; + + std::unique_ptr builder_ptr; + ASSERT_OK(MakeBuilder(default_memory_pool(), type, &builder_ptr)); + // Get the concrete builder class to access its Append() specializations + auto& builder = dynamic_cast::BuilderType&>(*builder_ptr); + + for (size_t i = 0; i < values.size(); ++i) { + ASSERT_OK(builder.Append(values[i])); + } + ASSERT_OK(builder.Finish(out)); +} + +// Overloads without a DataType argument, for parameterless types + +template +void ArrayFromVector(const std::vector& is_valid, const std::vector& values, + std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + ArrayFromVector(type, is_valid, values, out); +} + +template +void ArrayFromVector(const std::vector& values, std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + ArrayFromVector(type, values, out); +} + +// ChunkedArrayFromVector: construct a ChunkedArray from vectors of C values + +template +void ChunkedArrayFromVector(const std::shared_ptr& type, + const std::vector>& is_valid, + const std::vector>& values, + std::shared_ptr* out) { + ArrayVector chunks; + ASSERT_EQ(is_valid.size(), values.size()); + for (size_t i = 0; i < values.size(); ++i) { + std::shared_ptr array; + ArrayFromVector(type, is_valid[i], values[i], &array); + chunks.push_back(array); + } + *out = std::make_shared(chunks); +} + +template +void ChunkedArrayFromVector(const std::shared_ptr& type, + const std::vector>& values, + std::shared_ptr* out) { + ArrayVector chunks; + for (size_t i = 0; i < values.size(); ++i) { + std::shared_ptr array; + ArrayFromVector(type, values[i], &array); + chunks.push_back(array); + } + *out = std::make_shared(chunks); +} + +// Overloads without a DataType argument, for parameterless types + +template +void ChunkedArrayFromVector(const std::vector>& is_valid, + const std::vector>& values, + std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + ChunkedArrayFromVector(type, is_valid, values, out); +} + +template +void ChunkedArrayFromVector(const std::vector>& values, + std::shared_ptr* out) { + auto type = TypeTraits::type_singleton(); + 
ChunkedArrayFromVector(type, values, out); +} + +template +void FinishAndCheckPadding(BuilderType* builder, std::shared_ptr* out) { + ASSERT_OK_AND_ASSIGN(*out, builder->Finish()); + AssertZeroPadded(**out); + TestInitialized(**out); +} + +template +Status MakeArray(const std::vector& valid_bytes, const std::vector& values, + int64_t size, Builder* builder, std::shared_ptr* out) { + // Append the first 1000 + for (int64_t i = 0; i < size; ++i) { + if (valid_bytes[i] > 0) { + RETURN_NOT_OK(builder->Append(values[i])); + } else { + RETURN_NOT_OK(builder->AppendNull()); + } + } + return builder->Finish(out); +} + +template +struct VisitBuilder { + template ::BuilderType, + // need to let SFINAE drop this Visit when it would result in + // [](NullBuilder*){}(double_builder) + typename = decltype(std::declval()(std::declval()))> + Status Visit(const T&, ArrayBuilder* builder, Fn&& fn) { + fn(internal::checked_cast(builder)); + return Status::OK(); + } + + Status Visit(const DataType& t, ArrayBuilder* builder, Fn&& fn) { + return Status::NotImplemented("visiting builders of type ", t); + } +}; + +template +Result> ArrayFromBuilderVisitor( + const std::shared_ptr& type, int64_t initial_capacity, + int64_t visitor_repetitions, Fn&& fn) { + std::unique_ptr builder; + RETURN_NOT_OK(MakeBuilder(default_memory_pool(), type, &builder)); + + if (initial_capacity != 0) { + RETURN_NOT_OK(builder->Resize(initial_capacity)); + } + + VisitBuilder visitor; + for (int64_t i = 0; i < visitor_repetitions; ++i) { + RETURN_NOT_OK( + VisitTypeInline(*builder->type(), &visitor, builder.get(), std::forward(fn))); + } + + std::shared_ptr out; + RETURN_NOT_OK(builder->Finish(&out)); + return std::move(out); +} + +template +Result> ArrayFromBuilderVisitor( + const std::shared_ptr& type, int64_t length, Fn&& fn) { + return ArrayFromBuilderVisitor(type, length, length, std::forward(fn)); +} + +template +static inline Status GetBitmapFromVector(const std::vector& is_valid, + std::shared_ptr* result) { + size_t length = is_valid.size(); + + ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateEmptyBitmap(length)); + + uint8_t* bitmap = buffer->mutable_data(); + for (size_t i = 0; i < static_cast(length); ++i) { + if (is_valid[i]) { + bit_util::SetBit(bitmap, i); + } + } + + *result = buffer; + return Status::OK(); +} + +template +inline void BitmapFromVector(const std::vector& is_valid, + std::shared_ptr* out) { + ASSERT_OK(GetBitmapFromVector(is_valid, out)); +} + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/extension_type.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/extension_type.h new file mode 100644 index 0000000000000000000000000000000000000000..846e3c7a1657850fe9f8ca91dfca31360dbf067d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/extension_type.h @@ -0,0 +1,211 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/extension_type.h" +#include "arrow/testing/visibility.h" +#include "arrow/util/macros.h" + +namespace arrow { + +class ARROW_TESTING_EXPORT UuidArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT UuidType : public ExtensionType { + public: + UuidType() : ExtensionType(fixed_size_binary(16)) {} + + std::string extension_name() const override { return "uuid"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "uuid-serialized"; } +}; + +class ARROW_TESTING_EXPORT SmallintArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT TinyintArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT ListExtensionArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT SmallintType : public ExtensionType { + public: + SmallintType() : ExtensionType(int16()) {} + + std::string extension_name() const override { return "smallint"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "smallint"; } +}; + +class ARROW_TESTING_EXPORT TinyintType : public ExtensionType { + public: + TinyintType() : ExtensionType(int8()) {} + + std::string extension_name() const override { return "tinyint"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "tinyint"; } +}; + +class ARROW_TESTING_EXPORT ListExtensionType : public ExtensionType { + public: + ListExtensionType() : ExtensionType(list(int32())) {} + + std::string extension_name() const override { return "list-ext"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "list-ext"; } +}; + +class ARROW_TESTING_EXPORT DictExtensionType : public ExtensionType { + public: + DictExtensionType() : ExtensionType(dictionary(int8(), utf8())) {} + + std::string extension_name() const override { return "dict-extension"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr 
storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "dict-extension-serialized"; } +}; + +class ARROW_TESTING_EXPORT Complex128Array : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; +}; + +class ARROW_TESTING_EXPORT Complex128Type : public ExtensionType { + public: + Complex128Type() + : ExtensionType(struct_({::arrow::field("real", float64(), /*nullable=*/false), + ::arrow::field("imag", float64(), /*nullable=*/false)})) {} + + std::string extension_name() const override { return "complex128"; } + + bool ExtensionEquals(const ExtensionType& other) const override; + + std::shared_ptr MakeArray(std::shared_ptr data) const override; + + Result> Deserialize( + std::shared_ptr storage_type, + const std::string& serialized) const override; + + std::string Serialize() const override { return "complex128-serialized"; } +}; + +ARROW_TESTING_EXPORT +std::shared_ptr uuid(); + +ARROW_TESTING_EXPORT +std::shared_ptr smallint(); + +ARROW_TESTING_EXPORT +std::shared_ptr tinyint(); + +ARROW_TESTING_EXPORT +std::shared_ptr list_extension_type(); + +ARROW_TESTING_EXPORT +std::shared_ptr dict_extension_type(); + +ARROW_TESTING_EXPORT +std::shared_ptr complex128(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleUuid(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleSmallint(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleTinyint(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleDictExtension(); + +ARROW_TESTING_EXPORT +std::shared_ptr ExampleComplex128(); + +ARROW_TESTING_EXPORT +std::shared_ptr MakeComplex128(const std::shared_ptr& real, + const std::shared_ptr& imag); + +// A RAII class that registers an extension type on construction +// and unregisters it on destruction. +class ARROW_TESTING_EXPORT ExtensionTypeGuard { + public: + explicit ExtensionTypeGuard(const std::shared_ptr& type); + explicit ExtensionTypeGuard(const DataTypeVector& types); + ~ExtensionTypeGuard(); + ARROW_DEFAULT_MOVE_AND_ASSIGN(ExtensionTypeGuard); + + protected: + ARROW_DISALLOW_COPY_AND_ASSIGN(ExtensionTypeGuard); + + std::vector extension_names_; +}; + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/future_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/future_util.h new file mode 100644 index 0000000000000000000000000000000000000000..2ca70d05402f92c71d8f86441eeccec1ebc6d156 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/future_util.h @@ -0,0 +1,142 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
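// ---- Illustrative sketch (editor's addition, not part of the Arrow headers) ----
// ExtensionTypeGuard in extension_type.h above is a scope guard: it registers
// extension types on construction and unregisters them on destruction, so a
// test cannot leak a registered type. The same RAII shape in miniature, with a
// hypothetical unregister callback instead of Arrow's registry:
#include <functional>
#include <utility>

class ScopedRegistration {
 public:
  // `unregister` runs exactly once, when the guard leaves scope.
  explicit ScopedRegistration(std::function<void()> unregister)
      : unregister_(std::move(unregister)) {}

  ScopedRegistration(const ScopedRegistration&) = delete;
  ScopedRegistration& operator=(const ScopedRegistration&) = delete;

  ~ScopedRegistration() {
    if (unregister_) unregister_();
  }

 private:
  std::function<void()> unregister_;
};

// Usage: { ScopedRegistration guard([] { /* unregister the type */ }); ... }
// --------------------------------------------------------------------------------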
+ +#pragma once + +#include "arrow/testing/gtest_util.h" +#include "arrow/util/future.h" + +// This macro should be called by futures that are expected to +// complete pretty quickly. arrow::kDefaultAssertFinishesWaitSeconds is the +// default max wait here. Anything longer than that and it's a questionable unit test +// anyways. +#define ASSERT_FINISHES_IMPL(fut) \ + do { \ + ASSERT_TRUE(fut.Wait(::arrow::kDefaultAssertFinishesWaitSeconds)); \ + if (!fut.is_finished()) { \ + FAIL() << "Future did not finish in a timely fashion"; \ + } \ + } while (false) + +#define ASSERT_FINISHES_OK(expr) \ + do { \ + auto&& _fut = (expr); \ + ASSERT_TRUE(_fut.Wait(::arrow::kDefaultAssertFinishesWaitSeconds)); \ + if (!_fut.is_finished()) { \ + FAIL() << "Future did not finish in a timely fashion"; \ + } \ + auto& _st = _fut.status(); \ + if (!_st.ok()) { \ + FAIL() << "'" ARROW_STRINGIFY(expr) "' failed with " << _st.ToString(); \ + } \ + } while (false) + +#define ASSERT_FINISHES_AND_RAISES(ENUM, expr) \ + do { \ + auto&& _fut = (expr); \ + ASSERT_FINISHES_IMPL(_fut); \ + ASSERT_RAISES(ENUM, _fut.status()); \ + } while (false) + +#define EXPECT_FINISHES_AND_RAISES_WITH_MESSAGE_THAT(ENUM, matcher, expr) \ + do { \ + auto&& fut = (expr); \ + ASSERT_FINISHES_IMPL(fut); \ + EXPECT_RAISES_WITH_MESSAGE_THAT(ENUM, matcher, fut.status()); \ + } while (false) + +#define ASSERT_FINISHES_OK_AND_ASSIGN_IMPL(lhs, rexpr, _future_name) \ + auto _future_name = (rexpr); \ + ASSERT_FINISHES_IMPL(_future_name); \ + ASSERT_OK_AND_ASSIGN(lhs, _future_name.result()); + +#define ASSERT_FINISHES_OK_AND_ASSIGN(lhs, rexpr) \ + ASSERT_FINISHES_OK_AND_ASSIGN_IMPL(lhs, rexpr, \ + ARROW_ASSIGN_OR_RAISE_NAME(_fut, __COUNTER__)) + +#define ASSERT_FINISHES_OK_AND_EQ(expected, expr) \ + do { \ + ASSERT_FINISHES_OK_AND_ASSIGN(auto _actual, (expr)); \ + ASSERT_EQ(expected, _actual); \ + } while (0) + +#define EXPECT_FINISHES_IMPL(fut) \ + do { \ + EXPECT_TRUE(fut.Wait(::arrow::kDefaultAssertFinishesWaitSeconds)); \ + if (!fut.is_finished()) { \ + ADD_FAILURE() << "Future did not finish in a timely fashion"; \ + } \ + } while (false) + +#define ON_FINISH_ASSIGN_OR_HANDLE_ERROR_IMPL(handle_error, future_name, lhs, rexpr) \ + auto future_name = (rexpr); \ + EXPECT_FINISHES_IMPL(future_name); \ + handle_error(future_name.status()); \ + EXPECT_OK_AND_ASSIGN(lhs, future_name.result()); + +#define EXPECT_FINISHES(expr) \ + do { \ + EXPECT_FINISHES_IMPL(expr); \ + } while (0) + +#define EXPECT_FINISHES_OK_AND_ASSIGN(lhs, rexpr) \ + ON_FINISH_ASSIGN_OR_HANDLE_ERROR_IMPL( \ + ARROW_EXPECT_OK, ARROW_ASSIGN_OR_RAISE_NAME(_fut, __COUNTER__), lhs, rexpr); + +#define EXPECT_FINISHES_OK_AND_EQ(expected, expr) \ + do { \ + EXPECT_FINISHES_OK_AND_ASSIGN(auto _actual, (expr)); \ + EXPECT_EQ(expected, _actual); \ + } while (0) + +namespace arrow { + +constexpr double kDefaultAssertFinishesWaitSeconds = 64; + +template +void AssertNotFinished(const Future& fut) { + ASSERT_FALSE(IsFutureFinished(fut.state())); +} + +template +void AssertFinished(const Future& fut) { + ASSERT_TRUE(IsFutureFinished(fut.state())); +} + +// Assert the future is successful *now* +template +void AssertSuccessful(const Future& fut) { + if (IsFutureFinished(fut.state())) { + ASSERT_EQ(fut.state(), FutureState::SUCCESS); + ASSERT_OK(fut.status()); + } else { + FAIL() << "Expected future to be completed successfully but it was still pending"; + } +} + +// Assert the future is failed *now* +template +void AssertFailed(const Future& fut) { + if (IsFutureFinished(fut.state())) { + 
ASSERT_EQ(fut.state(), FutureState::FAILURE); + ASSERT_FALSE(fut.status().ok()); + } else { + FAIL() << "Expected future to have failed but it was still pending"; + } +} + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/generator.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/generator.h new file mode 100644 index 0000000000000000000000000000000000000000..4ec8845864b72807d7e68230b25256ba53127f17 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/generator.h @@ -0,0 +1,321 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/array/array_base.h" +#include "arrow/compute/type_fwd.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/testing/visibility.h" +#include "arrow/type_fwd.h" + +namespace arrow { + +class ARROW_TESTING_EXPORT ConstantArrayGenerator { + public: + /// \brief Generates a constant BooleanArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr Boolean(int64_t size, bool value = false); + + /// \brief Generates a constant UInt8Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr UInt8(int64_t size, uint8_t value = 0); + + /// \brief Generates a constant Int8Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr Int8(int64_t size, int8_t value = 0); + + /// \brief Generates a constant UInt16Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr UInt16(int64_t size, uint16_t value = 0); + + /// \brief Generates a constant UInt16Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr Int16(int64_t size, int16_t value = 0); + + /// \brief Generates a constant UInt32Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr UInt32(int64_t size, uint32_t value = 0); + + /// \brief Generates a constant UInt32Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr Int32(int64_t size, int32_t value = 0); + + /// \brief Generates a constant UInt64Array + /// + /// \param[in] size the size of 
the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr UInt64(int64_t size, uint64_t value = 0); + + /// \brief Generates a constant UInt64Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr Int64(int64_t size, int64_t value = 0); + + /// \brief Generates a constant Float32Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr Float32(int64_t size, float value = 0); + + /// \brief Generates a constant Float64Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr Float64(int64_t size, double value = 0); + + /// \brief Generates a constant StringArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] value to repeat + /// + /// \return a generated Array + static std::shared_ptr String(int64_t size, std::string value = ""); + + template + static std::shared_ptr Numeric(int64_t size, CType value = 0) { + switch (ArrowType::type_id) { + case Type::BOOL: + return Boolean(size, static_cast(value)); + case Type::UINT8: + return UInt8(size, static_cast(value)); + case Type::INT8: + return Int8(size, static_cast(value)); + case Type::UINT16: + return UInt16(size, static_cast(value)); + case Type::INT16: + return Int16(size, static_cast(value)); + case Type::UINT32: + return UInt32(size, static_cast(value)); + case Type::INT32: + return Int32(size, static_cast(value)); + case Type::UINT64: + return UInt64(size, static_cast(value)); + case Type::INT64: + return Int64(size, static_cast(value)); + case Type::FLOAT: + return Float32(size, static_cast(value)); + case Type::DOUBLE: + return Float64(size, static_cast(value)); + case Type::INTERVAL_DAY_TIME: + case Type::DATE32: { + EXPECT_OK_AND_ASSIGN(auto viewed, + Int32(size, static_cast(value))->View(date32())); + return viewed; + } + case Type::INTERVAL_MONTHS: { + EXPECT_OK_AND_ASSIGN(auto viewed, + Int32(size, static_cast(value)) + ->View(std::make_shared())); + return viewed; + } + case Type::TIME32: { + EXPECT_OK_AND_ASSIGN(auto viewed, + Int32(size, static_cast(value)) + ->View(std::make_shared(TimeUnit::SECOND))); + return viewed; + } + case Type::TIME64: { + EXPECT_OK_AND_ASSIGN(auto viewed, Int64(size, static_cast(value)) + ->View(std::make_shared())); + return viewed; + } + case Type::DATE64: { + EXPECT_OK_AND_ASSIGN(auto viewed, + Int64(size, static_cast(value))->View(date64())); + return viewed; + } + case Type::TIMESTAMP: { + EXPECT_OK_AND_ASSIGN( + auto viewed, Int64(size, static_cast(value)) + ->View(std::make_shared(TimeUnit::SECOND))); + return viewed; + } + default: + return nullptr; + } + } + + /// \brief Generates a constant Array of zeroes + /// + /// \param[in] size the size of the array to generate + /// \param[in] type the type of the Array + /// + /// \return a generated Array + static std::shared_ptr Zeroes(int64_t size, + const std::shared_ptr& type); + + /// \brief Generates a RecordBatch of zeroes + /// + /// \param[in] size the size of the array to generate + /// \param[in] schema to conform to + /// + /// This function is handy to return of RecordBatch of a desired shape. 
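  ///
  /// Illustrative usage (an editorial sketch added to this excerpt, not part of the
  /// upstream header; the two-column schema below is a made-up example):
  ///
  ///   auto schema = ::arrow::schema({field("i32", int32()), field("f64", float64())});
  ///   auto batch = ConstantArrayGenerator::Zeroes(/*size=*/1024, schema);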
+ /// + /// \return a generated RecordBatch + static std::shared_ptr Zeroes(int64_t size, + const std::shared_ptr& schema); + + /// \brief Generates a RecordBatchReader by repeating a RecordBatch + /// + /// \param[in] n_batch the number of times it repeats batch + /// \param[in] batch the RecordBatch to repeat + /// + /// \return a generated RecordBatchReader + static std::shared_ptr Repeat( + int64_t n_batch, const std::shared_ptr batch); + + /// \brief Generates a RecordBatchReader of zeroes batches + /// + /// \param[in] n_batch the number of RecordBatch + /// \param[in] batch_size the size of each RecordBatch + /// \param[in] schema to conform to + /// + /// \return a generated RecordBatchReader + static std::shared_ptr Zeroes(int64_t n_batch, int64_t batch_size, + const std::shared_ptr& schema); +}; + +ARROW_TESTING_EXPORT +Result> ScalarVectorToArray(const ScalarVector& scalars); + +namespace gen { + +class ARROW_TESTING_EXPORT ArrayGenerator { + public: + virtual ~ArrayGenerator() = default; + virtual Result> Generate(int64_t num_rows) = 0; + virtual std::shared_ptr type() const = 0; +}; + +// Same as DataGenerator below but instead of returning Result an ok status is EXPECT'd +class ARROW_TESTING_EXPORT GTestDataGenerator { + public: + virtual ~GTestDataGenerator() = default; + virtual std::shared_ptr<::arrow::RecordBatch> RecordBatch(int64_t num_rows) = 0; + virtual std::vector> RecordBatches( + int64_t rows_per_batch, int num_batches) = 0; + + virtual ::arrow::compute::ExecBatch ExecBatch(int64_t num_rows) = 0; + virtual std::vector<::arrow::compute::ExecBatch> ExecBatches(int64_t rows_per_batch, + int num_batches) = 0; + + virtual std::shared_ptr<::arrow::Table> Table(int64_t rows_per_chunk, + int num_chunks = 1) = 0; + virtual std::shared_ptr<::arrow::Schema> Schema() = 0; +}; + +class ARROW_TESTING_EXPORT DataGenerator { + public: + virtual ~DataGenerator() = default; + virtual Result> RecordBatch(int64_t num_rows) = 0; + virtual Result>> RecordBatches( + int64_t rows_per_batch, int num_batches) = 0; + + virtual Result<::arrow::compute::ExecBatch> ExecBatch(int64_t num_rows) = 0; + virtual Result> ExecBatches( + int64_t rows_per_batch, int num_batches) = 0; + + virtual Result> Table(int64_t rows_per_chunk, + int num_chunks = 1) = 0; + virtual std::shared_ptr<::arrow::Schema> Schema() = 0; + /// @brief Converts this generator to a variant that fails (in a googletest sense) + /// if any error is encountered. + virtual std::unique_ptr FailOnError() = 0; +}; + +/// @brief A potentially named field +/// +/// If name is not specified then a name will be generated automatically (e.g. 
f0, f1) +struct ARROW_TESTING_EXPORT GeneratorField { + public: + GeneratorField(std::shared_ptr gen) // NOLINT implicit conversion + : name(), gen(std::move(gen)) {} + GeneratorField(std::string name, std::shared_ptr gen) + : name(std::move(name)), gen(std::move(gen)) {} + + std::optional name; + std::shared_ptr gen; +}; + +/// Create a table generator with the given fields +ARROW_TESTING_EXPORT std::shared_ptr Gen( + std::vector column_gens); + +/// make a generator that returns a constant value +ARROW_TESTING_EXPORT std::shared_ptr Constant( + std::shared_ptr value); +/// make a generator that returns an incrementing value +/// +/// Note: overflow is not prevented standard unsigned integer overflow applies +ARROW_TESTING_EXPORT std::shared_ptr Step(uint32_t start = 0, + uint32_t step = 1, + bool signed_int = false); +/// make a generator that returns a random value +ARROW_TESTING_EXPORT std::shared_ptr Random( + std::shared_ptr type); +/// TODO(if-needed) could add a repeat-scalars generator, e.g. Repeat({1, 2, 3}) for +/// 1,2,3,1,2,3,1 +/// +/// TODO(if-needed) could add a repeat-from-json generator e.g. Repeat(int32(), "[1, 2, +/// 3]")), same behavior as repeat-scalars + +} // namespace gen + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_compat.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..c934dd279389068d5ebf60695284d403b2e2dcd7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/gtest_compat.h @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +// GTest < 1.11 +#ifndef GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST +#define GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(A) +#endif +// GTest < 1.10 +#ifndef TYPED_TEST_SUITE +#define TYPED_TEST_SUITE TYPED_TEST_CASE +#define TYPED_TEST_SUITE_P TYPED_TEST_CASE_P +#define INSTANTIATE_TEST_SUITE_P INSTANTIATE_TEST_CASE_P +#define REGISTER_TYPED_TEST_SUITE_P REGISTER_TYPED_TEST_CASE_P +#define INSTANTIATE_TYPED_TEST_SUITE_P INSTANTIATE_TYPED_TEST_CASE_P +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/matchers.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/matchers.h new file mode 100644 index 0000000000000000000000000000000000000000..b4625b3922e86bc044e30c63c15ea5b1dbaca469 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/matchers.h @@ -0,0 +1,467 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include + +#include "arrow/datum.h" +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/stl_iterator.h" +#include "arrow/testing/future_util.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/util/future.h" +#include "arrow/util/unreachable.h" + +namespace arrow { + +class PointeesEqualMatcher { + public: + template + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + void DescribeTo(::std::ostream* os) const override { *os << "pointees are equal"; } + + void DescribeNegationTo(::std::ostream* os) const override { + *os << "pointees are not equal"; + } + + bool MatchAndExplain(const PtrPair& pair, + testing::MatchResultListener* listener) const override { + const auto& first = *std::get<0>(pair); + const auto& second = *std::get<1>(pair); + const bool match = first.Equals(second); + *listener << "whose pointees " << testing::PrintToString(first) << " and " + << testing::PrintToString(second) + << (match ? " are equal" : " are not equal"); + return match; + } + }; + + return testing::Matcher(new Impl()); + } +}; + +// A matcher that checks that the values pointed to are Equals(). +// Useful in conjunction with other googletest matchers. 
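// Illustrative usage (an editorial sketch added to this excerpt, not part of the
// upstream header; `actual` and `expected` stand for containers of
// std::shared_ptr<Array> assumed to exist in the test body):
//
//   EXPECT_THAT(actual, testing::Pointwise(PointeesEqual(), expected));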
+inline PointeesEqualMatcher PointeesEqual() { return {}; } + +class AnyOfJSONMatcher { + public: + AnyOfJSONMatcher(std::shared_ptr type, std::string array_json) + : type_(std::move(type)), array_json_(std::move(array_json)) {} + + template + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + static_assert(std::is_same>(), + "AnyOfJSON only supported for std::shared_ptr"); + Impl(std::shared_ptr type, std::string array_json) + : type_(std::move(type)), array_json_(std::move(array_json)) { + array = ArrayFromJSON(type_, array_json_); + } + void DescribeTo(std::ostream* os) const override { + *os << "matches at least one scalar from "; + *os << array->ToString(); + } + void DescribeNegationTo(::std::ostream* os) const override { + *os << "matches no scalar from "; + *os << array->ToString(); + } + bool MatchAndExplain( + const arg_type& arg, + ::testing::MatchResultListener* result_listener) const override { + for (int64_t i = 0; i < array->length(); ++i) { + std::shared_ptr scalar; + auto maybe_scalar = array->GetScalar(i); + if (maybe_scalar.ok()) { + scalar = maybe_scalar.ValueOrDie(); + } else { + *result_listener << "GetScalar() had status " + << maybe_scalar.status().ToString() << "at index " << i + << " in the input JSON Array"; + return false; + } + + if (scalar->Equals(*arg)) return true; + } + *result_listener << "Argument scalar: '" << arg->ToString() + << "' matches no scalar from " << array->ToString(); + return false; + } + const std::shared_ptr type_; + const std::string array_json_; + std::shared_ptr array; + }; + + return testing::Matcher(new Impl(type_, array_json_)); + } + + private: + const std::shared_ptr type_; + const std::string array_json_; +}; + +inline AnyOfJSONMatcher AnyOfJSON(std::shared_ptr type, + std::string array_json) { + return {std::move(type), std::move(array_json)}; +} + +template +class FutureMatcher { + public: + explicit FutureMatcher(ResultMatcher result_matcher, double wait_seconds) + : result_matcher_(std::move(result_matcher)), wait_seconds_(wait_seconds) {} + + template ::type::ValueType> + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + explicit Impl(const ResultMatcher& result_matcher, double wait_seconds) + : result_matcher_(testing::MatcherCast>(result_matcher)), + wait_seconds_(wait_seconds) {} + + void DescribeTo(::std::ostream* os) const override { + *os << "value "; + result_matcher_.DescribeTo(os); + } + + void DescribeNegationTo(::std::ostream* os) const override { + *os << "value "; + result_matcher_.DescribeNegationTo(os); + } + + bool MatchAndExplain(const Fut& fut, + testing::MatchResultListener* listener) const override { + if (!fut.Wait(wait_seconds_)) { + *listener << "which didn't finish within " << wait_seconds_ << " seconds"; + return false; + } + return result_matcher_.MatchAndExplain(fut.result(), listener); + } + + const testing::Matcher> result_matcher_; + const double wait_seconds_; + }; + + return testing::Matcher(new Impl(result_matcher_, wait_seconds_)); + } + + private: + const ResultMatcher result_matcher_; + const double wait_seconds_; +}; + +template +class ResultMatcher { + public: + explicit ResultMatcher(ValueMatcher value_matcher) + : value_matcher_(std::move(value_matcher)) {} + + template ::type::ValueType> + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + explicit Impl(const ValueMatcher& value_matcher) + : 
value_matcher_(testing::MatcherCast(value_matcher)) {} + + void DescribeTo(::std::ostream* os) const override { + *os << "value "; + value_matcher_.DescribeTo(os); + } + + void DescribeNegationTo(::std::ostream* os) const override { + *os << "value "; + value_matcher_.DescribeNegationTo(os); + } + + bool MatchAndExplain(const Res& maybe_value, + testing::MatchResultListener* listener) const override { + if (!maybe_value.status().ok()) { + *listener << "whose error " + << testing::PrintToString(maybe_value.status().ToString()) + << " doesn't match"; + return false; + } + const ValueType& value = maybe_value.ValueOrDie(); + testing::StringMatchResultListener value_listener; + const bool match = value_matcher_.MatchAndExplain(value, &value_listener); + *listener << "whose value " << testing::PrintToString(value) + << (match ? " matches" : " doesn't match"); + testing::internal::PrintIfNotEmpty(value_listener.str(), listener->stream()); + return match; + } + + const testing::Matcher value_matcher_; + }; + + return testing::Matcher(new Impl(value_matcher_)); + } + + private: + const ValueMatcher value_matcher_; +}; + +class ErrorMatcher { + public: + explicit ErrorMatcher(StatusCode code, + std::optional> message_matcher) + : code_(code), message_matcher_(std::move(message_matcher)) {} + + template + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + explicit Impl(StatusCode code, + std::optional> message_matcher) + : code_(code), message_matcher_(std::move(message_matcher)) {} + + void DescribeTo(::std::ostream* os) const override { + *os << "raises StatusCode::" << Status::CodeAsString(code_); + if (message_matcher_) { + *os << " and message "; + message_matcher_->DescribeTo(os); + } + } + + void DescribeNegationTo(::std::ostream* os) const override { + *os << "does not raise StatusCode::" << Status::CodeAsString(code_); + if (message_matcher_) { + *os << " or message "; + message_matcher_->DescribeNegationTo(os); + } + } + + bool MatchAndExplain(const Res& maybe_value, + testing::MatchResultListener* listener) const override { + const Status& status = internal::GenericToStatus(maybe_value); + testing::StringMatchResultListener value_listener; + + bool match = status.code() == code_; + if (message_matcher_) { + match = match && + message_matcher_->MatchAndExplain(status.message(), &value_listener); + } + + if (match) { + *listener << "whose error matches"; + } else if (status.ok()) { + *listener << "whose non-error doesn't match"; + } else { + *listener << "whose error doesn't match"; + } + + testing::internal::PrintIfNotEmpty(value_listener.str(), listener->stream()); + return match; + } + + const StatusCode code_; + const std::optional> message_matcher_; + }; + + return testing::Matcher(new Impl(code_, message_matcher_)); + } + + private: + const StatusCode code_; + const std::optional> message_matcher_; +}; + +class OkMatcher { + public: + template + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + void DescribeTo(::std::ostream* os) const override { *os << "is ok"; } + + void DescribeNegationTo(::std::ostream* os) const override { *os << "is not ok"; } + + bool MatchAndExplain(const Res& maybe_value, + testing::MatchResultListener* listener) const override { + const Status& status = internal::GenericToStatus(maybe_value); + + const bool match = status.ok(); + *listener << "whose " << (match ? 
"non-error matches" : "error doesn't match"); + return match; + } + }; + + return testing::Matcher(new Impl()); + } +}; + +// Returns a matcher that waits on a Future (by default for 16 seconds) +// then applies a matcher to the result. +template +FutureMatcher Finishes( + const ResultMatcher& result_matcher, + double wait_seconds = kDefaultAssertFinishesWaitSeconds) { + return FutureMatcher(result_matcher, wait_seconds); +} + +// Returns a matcher that matches the value of a successful Result. +template +ResultMatcher ResultWith(const ValueMatcher& value_matcher) { + return ResultMatcher(value_matcher); +} + +// Returns a matcher that matches an ok Status or Result. +inline OkMatcher Ok() { return {}; } + +// Returns a matcher that matches the StatusCode of a Status or Result. +// Do not use Raises(StatusCode::OK) to match a non error code. +inline ErrorMatcher Raises(StatusCode code) { return ErrorMatcher(code, std::nullopt); } + +// Returns a matcher that matches the StatusCode and message of a Status or Result. +template +ErrorMatcher Raises(StatusCode code, const MessageMatcher& message_matcher) { + return ErrorMatcher(code, testing::MatcherCast(message_matcher)); +} + +class DataEqMatcher { + public: + // TODO(bkietz) support EqualOptions, ApproxEquals, etc + // Probably it's better to use something like config-through-key_value_metadata + // as with the random generators to decouple this from EqualOptions etc. + explicit DataEqMatcher(Datum expected) : expected_(std::move(expected)) {} + + template + operator testing::Matcher() const { // NOLINT runtime/explicit + struct Impl : testing::MatcherInterface { + explicit Impl(Datum expected) : expected_(std::move(expected)) {} + + void DescribeTo(::std::ostream* os) const override { + *os << "has data "; + PrintTo(expected_, os); + } + + void DescribeNegationTo(::std::ostream* os) const override { + *os << "doesn't have data "; + PrintTo(expected_, os); + } + + bool MatchAndExplain(const Data& data, + testing::MatchResultListener* listener) const override { + Datum boxed(data); + + if (boxed.kind() != expected_.kind()) { + *listener << "whose Datum::kind " << boxed.ToString() << " doesn't match " + << expected_.ToString(); + return false; + } + + if (const auto& boxed_type = boxed.type()) { + if (*boxed_type != *expected_.type()) { + *listener << "whose DataType " << boxed_type->ToString() << " doesn't match " + << expected_.type()->ToString(); + return false; + } + } else if (const auto& boxed_schema = boxed.schema()) { + if (*boxed_schema != *expected_.schema()) { + *listener << "whose Schema " << boxed_schema->ToString() << " doesn't match " + << expected_.schema()->ToString(); + return false; + } + } else { + Unreachable(); + } + + if (boxed == expected_) { + *listener << "whose value matches"; + return true; + } + + if (listener->IsInterested() && boxed.kind() == Datum::ARRAY) { + *listener << "whose value differs from the expected value by " + << boxed.make_array()->Diff(*expected_.make_array()); + } else { + *listener << "whose value doesn't match"; + } + return false; + } + + Datum expected_; + }; + + return testing::Matcher(new Impl(expected_)); + } + + private: + Datum expected_; +}; + +/// Constructs a datum against which arguments are matched +template +DataEqMatcher DataEq(Data&& dat) { + return DataEqMatcher(Datum(std::forward(dat))); +} + +/// Constructs an array with ArrayFromJSON against which arguments are matched +inline DataEqMatcher DataEqArray(const std::shared_ptr& type, + std::string_view json) { + return 
DataEq(ArrayFromJSON(type, json)); +} + +/// Constructs an array from a vector of optionals against which arguments are matched +template ::ArrayType, + typename BuilderType = typename TypeTraits::BuilderType, + typename ValueType = + typename ::arrow::stl::detail::DefaultValueAccessor::ValueType> +DataEqMatcher DataEqArray(T type, const std::vector>& values) { + // FIXME(bkietz) broken until DataType is move constructible + BuilderType builder(std::make_shared(std::move(type)), default_memory_pool()); + DCHECK_OK(builder.Reserve(static_cast(values.size()))); + + // pseudo constexpr: + static const bool need_safe_append = !is_fixed_width(T::type_id); + + for (auto value : values) { + if (need_safe_append) { + DCHECK_OK(builder.AppendOrNull(value)); + } else { + builder.UnsafeAppendOrNull(value); + } + } + + return DataEq(builder.Finish().ValueOrDie()); +} + +/// Constructs a scalar with ScalarFromJSON against which arguments are matched +inline DataEqMatcher DataEqScalar(const std::shared_ptr& type, + std::string_view json) { + return DataEq(ScalarFromJSON(type, json)); +} + +/// Constructs a scalar against which arguments are matched +template ::ScalarType, + typename ValueType = typename ScalarType::ValueType> +DataEqMatcher DataEqScalar(T type, std::optional value) { + ScalarType expected(std::make_shared(std::move(type))); + + if (value) { + expected.is_valid = true; + expected.value = std::move(*value); + } + + return DataEq(std::move(expected)); +} + +// HasType, HasSchema matchers + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/pch.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/pch.h new file mode 100644 index 0000000000000000000000000000000000000000..e544ad806adc992691600b90ddd7174fb0447c4e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/pch.h @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Often-used headers, for precompiling. +// If updating this header, please make sure you check compilation speed +// before checking in. Adding headers which are not used extremely often +// may incur a slowdown, since it makes the precompiled header heavier to load. 
+ +#include "arrow/pch.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/testing/util.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/random.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/random.h new file mode 100644 index 0000000000000000000000000000000000000000..1d97a3ada724aec082180aea39a37386adca273c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/random.h @@ -0,0 +1,698 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/testing/uniform_real.h" +#include "arrow/testing/visibility.h" +#include "arrow/type.h" + +namespace arrow { + +class Array; + +namespace random { + +using SeedType = int32_t; +constexpr SeedType kSeedMax = std::numeric_limits::max(); + +class ARROW_TESTING_EXPORT RandomArrayGenerator { + public: + explicit RandomArrayGenerator(SeedType seed) + : seed_distribution_(static_cast(1), kSeedMax), seed_rng_(seed) {} + + /// \brief Generate a null bitmap + /// + /// \param[in] size the size of the bitmap to generate + /// \param[in] null_probability the probability of a bit being zero + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Buffer + std::shared_ptr NullBitmap(int64_t size, double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random BooleanArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] true_probability the probability of a value being 1 / bit-set + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Boolean(int64_t size, double true_probability, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + /// \brief Generate a random UInt8Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt8(int64_t size, uint8_t min, uint8_t max, + double null_probability = 0, + 
int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int8Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int8(int64_t size, int8_t min, int8_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random UInt16Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt16(int64_t size, uint16_t min, uint16_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int16Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int16(int64_t size, int16_t min, int16_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random UInt32Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt32(int64_t size, uint32_t min, uint32_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int32Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int32(int64_t size, int32_t min, int32_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random UInt64Array + /// + /// \param[in] size 
the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr UInt64(int64_t size, uint64_t min, uint64_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Int64Array + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Int64(int64_t size, int64_t min, int64_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random HalfFloatArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the distribution + /// \param[in] max the upper bound of the distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Float16(int64_t size, int16_t min, int16_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random FloatArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] nan_probability the probability of a value being NaN + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Float32(int64_t size, float min, float max, + double null_probability = 0, double nan_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random DoubleArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] nan_probability the probability of a value being NaN + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Float64(int64_t size, double min, double max, + double null_probability = 0, double nan_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Date64Array + /// + /// 
\param[in] size the size of the array to generate + /// \param[in] min the lower bound of the uniform distribution + /// \param[in] max the upper bound of the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Date64(int64_t size, int64_t min, int64_t max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + template + std::shared_ptr Numeric(int64_t size, CType min, CType max, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()) { + switch (ArrowType::type_id) { + case Type::UINT8: + return UInt8(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT8: + return Int8(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::UINT16: + return UInt16(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT16: + return Int16(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::UINT32: + return UInt32(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT32: + return Int32(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::UINT64: + return UInt64(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::INT64: + return Int64(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::HALF_FLOAT: + return Float16(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + case Type::FLOAT: + return Float32(size, static_cast(min), static_cast(max), + null_probability, /*nan_probability=*/0, alignment, memory_pool); + case Type::DOUBLE: + return Float64(size, static_cast(min), static_cast(max), + null_probability, /*nan_probability=*/0, alignment, memory_pool); + case Type::DATE64: + return Date64(size, static_cast(min), static_cast(max), + null_probability, alignment, memory_pool); + default: + return nullptr; + } + } + + /// \brief Generate a random Decimal128Array + /// + /// \param[in] type the type of the array to generate + /// (must be an instance of Decimal128Type) + /// \param[in] size the size of the array to generate + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Decimal128(std::shared_ptr type, int64_t size, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Decimal256Array + /// + /// \param[in] type the type of the array to generate + /// (must be an instance of Decimal256Type) + /// \param[in] size the size of the array to generate + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr 
Decimal256(std::shared_ptr type, int64_t size, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate an array of offsets (for use in e.g. ListArray::FromArrays) + /// + /// \param[in] size the size of the array to generate + /// \param[in] first_offset the first offset value (usually 0) + /// \param[in] last_offset the last offset value (usually the size of the child array) + /// \param[in] null_probability the probability of an offset being null + /// \param[in] force_empty_nulls if true, null offsets must have 0 "length" + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Offsets(int64_t size, int32_t first_offset, int32_t last_offset, + double null_probability = 0, + bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + std::shared_ptr LargeOffsets(int64_t size, int64_t first_offset, + int64_t last_offset, double null_probability = 0, + bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random StringArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr String(int64_t size, int32_t min_length, int32_t max_length, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random StringViewArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] max_data_buffer_length the data buffer size at which + /// a new chunk will be generated + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr StringView(int64_t size, int32_t min_length, int32_t max_length, + double null_probability = 0, + std::optional max_data_buffer_length = {}, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random LargeStringArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate 
memory from + /// + /// \return a generated Array + std::shared_ptr LargeString(int64_t size, int32_t min_length, int32_t max_length, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random StringArray with repeated values + /// + /// \param[in] size the size of the array to generate + /// \param[in] unique the number of unique string values used + /// to populate the array + /// \param[in] min_length the lower bound of the string length + /// determined by the uniform distribution + /// \param[in] max_length the upper bound of the string length + /// determined by the uniform distribution + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr StringWithRepeats( + int64_t size, int64_t unique, int32_t min_length, int32_t max_length, + double null_probability = 0, int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Like StringWithRepeats but return BinaryArray + std::shared_ptr BinaryWithRepeats( + int64_t size, int64_t unique, int32_t min_length, int32_t max_length, + double null_probability = 0, int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random FixedSizeBinaryArray + /// + /// \param[in] size the size of the array to generate + /// \param[in] byte_width the byte width of fixed-size binary items + /// \param[in] null_probability the probability of a value being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr FixedSizeBinary(int64_t size, int32_t byte_width, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random ListArray + /// + /// \param[in] values The underlying values array + /// \param[in] size The size of the generated list array + /// \param[in] null_probability the probability of a list value being null + /// \param[in] force_empty_nulls if true, null list entries must have 0 length + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr List(const Array& values, int64_t size, + double null_probability = 0, bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random ListViewArray + /// + /// \param[in] values The underlying values array + /// \param[in] size The size of the generated list array + /// \param[in] null_probability the probability of a list value being null + /// \param[in] force_empty_nulls if true, null list entries must have 0 length + /// must be set to 0 + /// \param[in] coverage proportion of the values array covered by list-views + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr ListView(const Array& values, int64_t size, + double null_probability = 0, + bool force_empty_nulls = false, 
double coverage = 1.0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random LargeListViewArray + /// + /// \param[in] values The underlying values array + /// \param[in] size The size of the generated list array + /// \param[in] null_probability the probability of a list value being null + /// \param[in] force_empty_nulls if true, null list entries must have 0 length + /// must be set to 0 + /// \param[in] coverage proportion of the values array covered by list-views + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr LargeListView(const Array& values, int64_t size, + double null_probability = 0, + bool force_empty_nulls = false, + double coverage = 1.0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random MapArray + /// + /// \param[in] keys The underlying keys array + /// \param[in] items The underlying items array + /// \param[in] size The size of the generated map array + /// \param[in] null_probability the probability of a map value being null + /// \param[in] force_empty_nulls if true, null map entries must have 0 length + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// + /// \return a generated Array + std::shared_ptr Map(const std::shared_ptr& keys, + const std::shared_ptr& items, int64_t size, + double null_probability = 0, bool force_empty_nulls = false, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random RunEndEncodedArray + /// + /// \param[in] value_type The DataType of the encoded values + /// \param[in] logical_size The logical length of the generated array + /// \param[in] null_probability the probability of a value being null + /// + /// \return a generated Array + std::shared_ptr RunEndEncoded(std::shared_ptr value_type, + int64_t logical_size, + double null_probability = 0.0); + + /// \brief Generate a random SparseUnionArray + /// + /// The type ids are chosen randomly, according to a uniform distribution, + /// amongst the given child fields. + /// + /// \param[in] fields Vector of Arrays containing the data for each union field + /// \param[in] size The size of the generated sparse union array + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + std::shared_ptr SparseUnion(const ArrayVector& fields, int64_t size, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random DenseUnionArray + /// + /// The type ids are chosen randomly, according to a uniform distribution, + /// amongst the given child fields. The offsets are incremented along + /// each child field. 
+ /// + /// \param[in] fields Vector of Arrays containing the data for each union field + /// \param[in] size The size of the generated sparse union array + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + std::shared_ptr DenseUnion(const ArrayVector& fields, int64_t size, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a random Array of the specified type, size, and null_probability. + /// + /// Generation parameters other than size and null_probability are determined based on + /// the type of Array to be generated. + /// If boolean the probabilities of true,false values are 0.25,0.75 respectively. + /// If numeric min,max will be the least and greatest representable values. + /// If string min_length,max_length will be 0,sqrt(size) respectively. + /// + /// \param[in] type the type of Array to generate + /// \param[in] size the size of the Array to generate + /// \param[in] null_probability the probability of a slot being null + /// \param[in] alignment alignment for memory allocations (in bytes) + /// \param[in] memory_pool memory pool to allocate memory from + /// \return a generated Array + std::shared_ptr ArrayOf(std::shared_ptr type, int64_t size, + double null_probability = 0, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate an array with random data based on the given field. See BatchOf + /// for usage info. + std::shared_ptr ArrayOf(const Field& field, int64_t size, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + /// \brief Generate a record batch with random data of the specified length. + /// + /// Generation options are read from key-value metadata for each field, and may be + /// specified at any nesting level. For example, generation options for the child + /// values of a list array can be specified by constructing the list type with + /// list(field("item", int8(), options_metadata)) + /// + /// The following options are supported: + /// + /// For all types except NullType: + /// - null_probability (double): range [0.0, 1.0] the probability of a null value. + /// Default/value is 0.0 if the field is marked non-nullable, else it is 0.01 + /// + /// For all numeric types T: + /// - min (T::c_type): the minimum value to generate (inclusive), default + /// std::numeric_limits::min() + /// - max (T::c_type): the maximum value to generate (inclusive), default + /// std::numeric_limits::max() + /// Note this means that, for example, min/max are int16_t values for HalfFloatType. + /// + /// For floating point types T for which is_physical_floating_type: + /// - nan_probability (double): range [0.0, 1.0] the probability of a NaN value. + /// + /// For BooleanType: + /// - true_probability (double): range [0.0, 1.0] the probability of a true. + /// + /// For DictionaryType: + /// - values (int32_t): the size of the dictionary. + /// Other properties are passed to the generator for the dictionary indices. However, + /// min and max cannot be specified. Note it is not possible to otherwise customize + /// the generation of dictionary values. 
+ /// + /// For list, string, and binary types T, including their large variants: + /// - min_length (T::offset_type): the minimum length of the child to generate, + /// default 0 + /// - max_length (T::offset_type): the minimum length of the child to generate, + /// default 1024 + /// + /// For string and binary types T (not including their large or view variants): + /// - unique (int32_t): if positive, this many distinct values will be generated + /// and all array values will be one of these values, default -1 + /// + /// For string and binary view types T: + /// - max_data_buffer_length (int64_t): the data buffer size at which a new chunk + /// will be generated, default 32KB + /// + /// For MapType: + /// - values (int32_t): the number of key-value pairs to generate, which will be + /// partitioned among the array values. + /// + /// For extension types: + /// - extension_allow_random_storage (bool): in general an extension array may have + /// invariants on its storage beyond those already imposed by the arrow format, + /// which may result in an invalid array if we just wrap randomly generated + /// storage. Set this flag to explicitly allow wrapping of randomly generated + /// storage. + std::shared_ptr BatchOf( + const FieldVector& fields, int64_t size, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + + SeedType seed() { return seed_distribution_(seed_rng_); } + + private: + std::uniform_int_distribution seed_distribution_; + std::default_random_engine seed_rng_; +}; + +/// Generate a batch with random data. See RandomArrayGenerator::BatchOf. +ARROW_TESTING_EXPORT +std::shared_ptr GenerateBatch( + const FieldVector& fields, int64_t size, SeedType seed, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + +/// Generate an array with random data. See RandomArrayGenerator::BatchOf. 
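// Illustrative use of the key-value metadata options listed above (an editorial
// sketch added to this excerpt, not part of the upstream header; assumes
// `using namespace arrow;` in the calling test and the documented option keys):
//
//   auto opts = key_value_metadata({{"null_probability", "0.25"}, {"max", "100"}});
//   auto f = field("x", int32(), /*nullable=*/true, opts);
//   random::RandomArrayGenerator rng(/*seed=*/42);
//   auto batch = rng.BatchOf({f}, /*size=*/1024);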
+ARROW_TESTING_EXPORT +std::shared_ptr GenerateArray( + const Field& field, int64_t size, SeedType seed, + int64_t alignment = kDefaultBufferAlignment, + MemoryPool* memory_pool = default_memory_pool()); + +} // namespace random + +// +// Assorted functions +// + +ARROW_TESTING_EXPORT +void rand_day_millis(int64_t N, std::vector* out); +ARROW_TESTING_EXPORT +void rand_month_day_nanos(int64_t N, + std::vector* out); + +template +void randint(int64_t N, T lower, T upper, std::vector* out) { + const int random_seed = 0; + std::default_random_engine gen(random_seed); + std::uniform_int_distribution d(lower, upper); + out->resize(N, static_cast(0)); + std::generate(out->begin(), out->end(), [&d, &gen] { return static_cast(d(gen)); }); +} + +template +void random_real(int64_t n, uint32_t seed, T min_value, T max_value, + std::vector* out) { + std::default_random_engine gen(seed); + ::arrow::random::uniform_real_distribution d(min_value, max_value); + out->resize(n, static_cast(0)); + std::generate(out->begin(), out->end(), [&d, &gen] { return static_cast(d(gen)); }); +} + +template +void rand_uniform_int(int64_t n, uint32_t seed, T min_value, T max_value, U* out) { + assert(out || (n == 0)); + std::default_random_engine gen(seed); + std::uniform_int_distribution d(min_value, max_value); + std::generate(out, out + n, [&d, &gen] { return static_cast(d(gen)); }); +} + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/uniform_real.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/uniform_real.h new file mode 100644 index 0000000000000000000000000000000000000000..8aa04a83288d9f8ce39a2d7c92b528ac9742bf98 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/uniform_real.h @@ -0,0 +1,84 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Random real generation is very slow on Arm if built with clang + libstdc++ +// due to software emulated long double arithmetic. +// This file ports some random real libs from llvm libc++ library, which are +// free from long double calculation. +// It improves performance significantly on both Arm (~100x) and x86 (~8x) in +// generating random reals when built with clang + gnu libstdc++. 
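A minimal usage sketch of the generator API declared above (my own illustration, not part of the vendored header). It assumes the header is arrow/testing/random.h, that the caller links against the arrow_testing library, and that the field names and option values are arbitrary choices:

#include <memory>
#include "arrow/api.h"
#include "arrow/testing/random.h"
#include "arrow/util/key_value_metadata.h"

std::shared_ptr<arrow::RecordBatch> MakeExampleBatch() {
  // A reproducible generator; ArrayOf draws a random int32 array with ~10% nulls.
  arrow::random::RandomArrayGenerator gen(/*seed=*/42);
  std::shared_ptr<arrow::Array> ints =
      gen.ArrayOf(arrow::int32(), /*size=*/1000, /*null_probability=*/0.1);
  (void)ints;

  // BatchOf/GenerateBatch read per-field options from key-value metadata,
  // here clamping values to [0, 100] and requesting ~5% nulls.
  auto options = arrow::key_value_metadata({"min", "max", "null_probability"},
                                           {"0", "100", "0.05"});
  arrow::FieldVector fields = {
      arrow::field("score", arrow::int64(), /*nullable=*/true, options),
      arrow::field("flag", arrow::boolean()),
  };
  return arrow::random::GenerateBatch(fields, /*size=*/1000, /*seed=*/42);
}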
+// Based on: https://github.com/llvm/llvm-project/tree/main/libcxx + +#pragma once + +#include + +#include + +namespace arrow { +namespace random { + +namespace detail { + +// std::generate_canonical, simplified +// https://en.cppreference.com/w/cpp/numeric/random/generate_canonical +template +RealType generate_canonical(Rng& rng) { + const size_t b = std::numeric_limits::digits; + const size_t log2R = 63 - ::arrow::bit_util::CountLeadingZeros( + static_cast(Rng::max() - Rng::min()) + 1); + const size_t k = b / log2R + (b % log2R != 0) + (b == 0); + const RealType r = static_cast(Rng::max() - Rng::min()) + 1; + RealType base = r; + RealType sp = static_cast(rng() - Rng::min()); + for (size_t i = 1; i < k; ++i, base *= r) { + sp += (rng() - Rng::min()) * base; + } + return sp / base; +} + +} // namespace detail + +// std::uniform_real_distribution, simplified +// https://en.cppreference.com/w/cpp/numeric/random/uniform_real_distribution +template +struct uniform_real_distribution { + const RealType a, b; + + explicit uniform_real_distribution(RealType a = 0, RealType b = 1) : a(a), b(b) {} + + template + RealType operator()(Rng& rng) { + return (b - a) * detail::generate_canonical(rng) + a; + } +}; + +// std::bernoulli_distribution, simplified +// https://en.cppreference.com/w/cpp/numeric/random/bernoulli_distribution +struct bernoulli_distribution { + const double p; + + explicit bernoulli_distribution(double p = 0.5) : p(p) {} + + template + bool operator()(Rng& rng) { + return detail::generate_canonical(rng) < p; + } +}; + +} // namespace random +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/util.h new file mode 100644 index 0000000000000000000000000000000000000000..b4b2785a36292df93777c2c4e76eea27f3c4a0c8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/util.h @@ -0,0 +1,140 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
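As a quick illustration of the simplified distributions defined in uniform_real.h above (my own sketch, not from the header), they can be driven by any standard engine that exposes static min()/max(), such as std::default_random_engine:

#include <cstdint>
#include <random>
#include <vector>
#include "arrow/testing/uniform_real.h"

std::vector<double> MakeUniformDoubles(int64_t n, uint32_t seed) {
  std::default_random_engine engine(seed);
  arrow::random::uniform_real_distribution<double> value_dist(0.0, 1.0);
  arrow::random::bernoulli_distribution null_dist(/*p=*/0.25);
  std::vector<double> values(static_cast<size_t>(n));
  for (auto& v : values) {
    // Zero roughly a quarter of the samples, mimicking how the test utilities
    // pair a value distribution with a validity distribution.
    v = null_dist(engine) ? 0.0 : value_dist(engine);
  }
  return values;
}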
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/record_batch.h" +#include "arrow/status.h" +#include "arrow/testing/visibility.h" +#include "arrow/type_fwd.h" +#include "arrow/util/macros.h" + +namespace arrow { + +template +Status CopyBufferFromVector(const std::vector& values, MemoryPool* pool, + std::shared_ptr* result) { + int64_t nbytes = static_cast(values.size()) * sizeof(T); + + ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateBuffer(nbytes, pool)); + auto immutable_data = reinterpret_cast(values.data()); + std::copy(immutable_data, immutable_data + nbytes, buffer->mutable_data()); + memset(buffer->mutable_data() + nbytes, 0, + static_cast(buffer->capacity() - nbytes)); + + *result = std::move(buffer); + return Status::OK(); +} + +// Sets approximately pct_null of the first n bytes in null_bytes to zero +// and the rest to non-zero (true) values. +ARROW_TESTING_EXPORT void random_null_bytes(int64_t n, double pct_null, + uint8_t* null_bytes); +ARROW_TESTING_EXPORT void random_is_valid(int64_t n, double pct_null, + std::vector* is_valid, + int random_seed = 0); +ARROW_TESTING_EXPORT void random_bytes(int64_t n, uint32_t seed, uint8_t* out); +ARROW_TESTING_EXPORT std::string random_string(int64_t n, uint32_t seed); +ARROW_TESTING_EXPORT int32_t DecimalSize(int32_t precision); +ARROW_TESTING_EXPORT void random_ascii(int64_t n, uint32_t seed, uint8_t* out); +ARROW_TESTING_EXPORT int64_t CountNulls(const std::vector& valid_bytes); + +ARROW_TESTING_EXPORT Status MakeRandomByteBuffer(int64_t length, MemoryPool* pool, + std::shared_ptr* out, + uint32_t seed = 0); + +ARROW_TESTING_EXPORT uint64_t random_seed(); + +#define DECL_T() typedef typename TestFixture::T T; + +#define DECL_TYPE() typedef typename TestFixture::Type Type; + +// ---------------------------------------------------------------------- +// A RecordBatchReader for serving a sequence of in-memory record batches + +class BatchIterator : public RecordBatchReader { + public: + BatchIterator(const std::shared_ptr& schema, + const std::vector>& batches) + : schema_(schema), batches_(batches), position_(0) {} + + std::shared_ptr schema() const override { return schema_; } + + Status ReadNext(std::shared_ptr* out) override { + if (position_ >= batches_.size()) { + *out = nullptr; + } else { + *out = batches_[position_++]; + } + return Status::OK(); + } + + private: + std::shared_ptr schema_; + std::vector> batches_; + size_t position_; +}; + +static inline std::vector (*)(FieldVector, std::vector)> +UnionTypeFactories() { + return {sparse_union, dense_union}; +} + +// Return the value of the ARROW_TEST_DATA environment variable or return error +// Status +ARROW_TESTING_EXPORT Status GetTestResourceRoot(std::string*); + +// Return the value of the ARROW_TIMEZONE_DATABASE environment variable +ARROW_TESTING_EXPORT std::optional GetTestTimezoneDatabaseRoot(); + +// Set the Timezone database based on the ARROW_TIMEZONE_DATABASE env variable +// This is only relevant on Windows, since other OSs have compatible databases built-in +ARROW_TESTING_EXPORT Status InitTestTimezoneDatabase(); + +// Get a TCP port number to listen on. This is a different number every time, +// as reusing the same port across tests can produce spurious bind errors on +// Windows. +ARROW_TESTING_EXPORT int GetListenPort(); + +// Get a IPv4 "address:port" to listen on. The address will be a loopback +// address. 
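A brief sketch of the BatchIterator reader defined above being used to drain a sequence of in-memory batches (illustrative only; the function name and wiring are my own):

#include <cstdint>
#include <memory>
#include <vector>
#include "arrow/record_batch.h"
#include "arrow/status.h"
#include "arrow/testing/util.h"

arrow::Status DrainBatches(const std::shared_ptr<arrow::Schema>& schema,
                           const std::vector<std::shared_ptr<arrow::RecordBatch>>& batches,
                           int64_t* total_rows) {
  arrow::BatchIterator reader(schema, batches);
  *total_rows = 0;
  std::shared_ptr<arrow::RecordBatch> batch;
  while (true) {
    // ReadNext() sets the output to nullptr once the sequence is exhausted.
    ARROW_RETURN_NOT_OK(reader.ReadNext(&batch));
    if (batch == nullptr) break;
    *total_rows += batch->num_rows();
  }
  return arrow::Status::OK();
}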
Compared to GetListenPort(), this will minimize the risk of +// port conflicts. +ARROW_TESTING_EXPORT std::string GetListenAddress(); + +ARROW_TESTING_EXPORT +const std::vector>& all_dictionary_index_types(); + +// Get a list of supported hardware flags from the given candidates. +// The result will always contain 0, meaning no optional CPU feature enabled at all. +ARROW_TESTING_EXPORT +std::vector GetSupportedHardwareFlags( + const std::vector& candidate_flags); + +} // namespace arrow diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/visibility.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/visibility.h new file mode 100644 index 0000000000000000000000000000000000000000..1b2aa7cd86fc65f3a1ad1b332f7c295aa3cc9c25 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/testing/visibility.h @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#if defined(_WIN32) || defined(__CYGWIN__) +#if defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable : 4251) +#else +#pragma GCC diagnostic ignored "-Wattributes" +#endif + +#ifdef ARROW_TESTING_STATIC +#define ARROW_TESTING_EXPORT +#elif defined(ARROW_TESTING_EXPORTING) +#define ARROW_TESTING_EXPORT __declspec(dllexport) +#else +#define ARROW_TESTING_EXPORT __declspec(dllimport) +#endif + +#define ARROW_TESTING_NO_EXPORT +#else // Not Windows +#ifndef ARROW_TESTING_EXPORT +#define ARROW_TESTING_EXPORT __attribute__((visibility("default"))) +#endif +#ifndef ARROW_TESTING_NO_EXPORT +#define ARROW_TESTING_NO_EXPORT __attribute__((visibility("hidden"))) +#endif +#endif // Non-Windows + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 b/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 new file mode 100644 index 0000000000000000000000000000000000000000..dcd455728785a273fdd6655b1a6bf3251a7fe7f5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d235cdfb4c840988aa4aae7c4b8b8e21f1524ab6a3e4296afd854c7d33d3af96 +size 2753864 diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_python.so b/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_python.so new file mode 100644 index 0000000000000000000000000000000000000000..901a57ce0b9d76676380da75fcd69eefc7056b71 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_python.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14a8722d7ed23ef60665772b3ab9f095e58981061c49925ed0f58a7dfa529d8c +size 2845200 diff --git 
a/llmeval-env/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 b/llmeval-env/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 new file mode 100644 index 0000000000000000000000000000000000000000..bfee234d91ad9698d2baa37e8ae7ea80adf75434 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0dd9b749bc026ebf236eb60571ba1355266a986210892bb8aaf2a6beb8146bd8 +size 10932648
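To round out the testing utilities added in this diff, a small sketch of the free randint/random_real helpers declared alongside RandomArrayGenerator earlier in this section (illustrative; the include path and the values used are assumptions). They fill plain std::vectors directly rather than building Arrow arrays:

#include <cstdint>
#include <vector>
#include "arrow/testing/random.h"

void FillTestVectors() {
  // Uniform integers in [-100, 100], deterministic (fixed seed inside randint).
  std::vector<int32_t> ints;
  arrow::randint<int32_t, int32_t>(/*N=*/1024, /*lower=*/-100, /*upper=*/100, &ints);

  // Uniform doubles in [0.0, 1.0), using the libc++-style distribution above.
  std::vector<double> reals;
  arrow::random_real(/*n=*/1024, /*seed=*/7u, /*min_value=*/0.0, /*max_value=*/1.0, &reals);
}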