diff --git a/.gitattributes b/.gitattributes index 32089612b3a001426427536dac63755bcd5f3363..2570249746a9def61f9aa68608310dab01fe5df1 100644 --- a/.gitattributes +++ b/.gitattributes @@ -78,3 +78,4 @@ llmeval-env/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 filter=lfs d llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae656e5349414a2894536d6d9b700ef82945618c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/acero.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/acero.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..595ab7360251cafb6ecd722e8b1291593cc3579d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/acero.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/benchmark.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/benchmark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ddb59ba61b0fe77ab89c64e8ef7c21cfa2b2a4a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/benchmark.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/cffi.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/cffi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36d03b8207a46a7dcb47c723e9a3bcc5e873983d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/cffi.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/compute.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/compute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87b39f8440740546726ad4c140a53b71fd05c1d2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/compute.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/conftest.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64833911aacfa7e6e23a5427b520816a06964e22 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/conftest.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/csv.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/csv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62a488a9e98deb85082ac0ce8b3dbff702a0a2b4 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/csv.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/cuda.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/cuda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..557ad7e071e51120bb596f76621f51fb6e6d10f6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/cuda.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/dataset.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0b1c73ccd408f5680b22f33e17998b4af334f67 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/dataset.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/feather.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/feather.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1c91f8d81258bedd849c0f40496c8954192416c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/feather.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/flight.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/flight.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef47e625150913f37d9183411b94396db05bf13e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/flight.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/fs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/fs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08ae7f8217a951580e28cc27802726bd1dc25bf1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/fs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/ipc.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/ipc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85daabf389fedba5840756e92ce7139e2dc67b19 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/ipc.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/json.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/json.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d5a0989e92fa32a9ce0ef1ea5f9106125bb2900 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/json.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/pandas_compat.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/pandas_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8ee53773ad9bd4f8f713cf34b22dd43ae141acf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/pandas_compat.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/substrait.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/substrait.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd96a3f3805b729231c2483677d529392b8a6428 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/substrait.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/types.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50ba818dde61dcd23b28b1376aa8d580b35ea5b4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/types.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0096409cd9f895a8de84f3039cf660f8d2077a29 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h new file mode 100644 index 0000000000000000000000000000000000000000..f56239e8e88956a319aca3ec25fa48e0db4d6547 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h @@ -0,0 +1,86 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_BIGNUM_DTOA_H_ +#define DOUBLE_CONVERSION_BIGNUM_DTOA_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +enum BignumDtoaMode { + // Return the shortest correct representation. + // For example the output of 0.299999999999999988897 is (the less accurate but + // correct) 0.3. 
+ BIGNUM_DTOA_SHORTEST, + // Same as BIGNUM_DTOA_SHORTEST but for single-precision floats. + BIGNUM_DTOA_SHORTEST_SINGLE, + // Return a fixed number of digits after the decimal point. + // For instance fixed(0.1, 4) becomes 0.1000 + // If the input number is big, the output will be big. + BIGNUM_DTOA_FIXED, + // Return a fixed number of digits, no matter what the exponent is. + BIGNUM_DTOA_PRECISION +}; + +// Converts the given double 'v' to ascii. +// The result should be interpreted as buffer * 10^(point-length). +// The buffer will be null-terminated. +// +// The input v must be > 0 and different from NaN, and Infinity. +// +// The output depends on the given mode: +// - SHORTEST: produce the least amount of digits for which the internal +// identity requirement is still satisfied. If the digits are printed +// (together with the correct exponent) then reading this number will give +// 'v' again. The buffer will choose the representation that is closest to +// 'v'. If there are two at the same distance, than the number is round up. +// In this mode the 'requested_digits' parameter is ignored. +// - FIXED: produces digits necessary to print a given number with +// 'requested_digits' digits after the decimal point. The produced digits +// might be too short in which case the caller has to fill the gaps with '0's. +// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2. +// Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns +// buffer="2", point=0. +// Note: the length of the returned buffer has no meaning wrt the significance +// of its digits. That is, just because it contains '0's does not mean that +// any other digit would not satisfy the internal identity requirement. +// - PRECISION: produces 'requested_digits' where the first digit is not '0'. +// Even though the length of produced digits usually equals +// 'requested_digits', the function is allowed to return fewer digits, in +// which case the caller has to fill the missing digits with '0's. +// Halfway cases are again rounded up. +// 'BignumDtoa' expects the given buffer to be big enough to hold all digits +// and a terminating null-character. +void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits, + Vector buffer, int* length, int* point); + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_BIGNUM_DTOA_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum.h new file mode 100644 index 0000000000000000000000000000000000000000..0bedb63b188f16e632df75a4b8d4256eb98ba387 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum.h @@ -0,0 +1,154 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_BIGNUM_H_ +#define DOUBLE_CONVERSION_BIGNUM_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +class Bignum { + public: + // 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately. + // This bignum can encode much bigger numbers, since it contains an + // exponent. + static const int kMaxSignificantBits = 3584; + + Bignum() : used_bigits_(0), exponent_(0) {} + + void AssignUInt16(const uint16_t value); + void AssignUInt64(uint64_t value); + void AssignBignum(const Bignum& other); + + void AssignDecimalString(const Vector value); + void AssignHexString(const Vector value); + + void AssignPowerUInt16(uint16_t base, const int exponent); + + void AddUInt64(const uint64_t operand); + void AddBignum(const Bignum& other); + // Precondition: this >= other. + void SubtractBignum(const Bignum& other); + + void Square(); + void ShiftLeft(const int shift_amount); + void MultiplyByUInt32(const uint32_t factor); + void MultiplyByUInt64(const uint64_t factor); + void MultiplyByPowerOfTen(const int exponent); + void Times10() { return MultiplyByUInt32(10); } + // Pseudocode: + // int result = this / other; + // this = this % other; + // In the worst case this function is in O(this/other). + uint16_t DivideModuloIntBignum(const Bignum& other); + + bool ToHexString(char* buffer, const int buffer_size) const; + + // Returns + // -1 if a < b, + // 0 if a == b, and + // +1 if a > b. 
+ static int Compare(const Bignum& a, const Bignum& b); + static bool Equal(const Bignum& a, const Bignum& b) { + return Compare(a, b) == 0; + } + static bool LessEqual(const Bignum& a, const Bignum& b) { + return Compare(a, b) <= 0; + } + static bool Less(const Bignum& a, const Bignum& b) { + return Compare(a, b) < 0; + } + // Returns Compare(a + b, c); + static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c); + // Returns a + b == c + static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) == 0; + } + // Returns a + b <= c + static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) <= 0; + } + // Returns a + b < c + static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) { + return PlusCompare(a, b, c) < 0; + } + private: + typedef uint32_t Chunk; + typedef uint64_t DoubleChunk; + + static const int kChunkSize = sizeof(Chunk) * 8; + static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8; + // With bigit size of 28 we loose some bits, but a double still fits easily + // into two chunks, and more importantly we can use the Comba multiplication. + static const int kBigitSize = 28; + static const Chunk kBigitMask = (1 << kBigitSize) - 1; + // Every instance allocates kBigitLength chunks on the stack. Bignums cannot + // grow. There are no checks if the stack-allocated space is sufficient. + static const int kBigitCapacity = kMaxSignificantBits / kBigitSize; + + static void EnsureCapacity(const int size) { + if (size > kBigitCapacity) { + DOUBLE_CONVERSION_UNREACHABLE(); + } + } + void Align(const Bignum& other); + void Clamp(); + bool IsClamped() const { + return used_bigits_ == 0 || RawBigit(used_bigits_ - 1) != 0; + } + void Zero() { + used_bigits_ = 0; + exponent_ = 0; + } + // Requires this to have enough capacity (no tests done). + // Updates used_bigits_ if necessary. + // shift_amount must be < kBigitSize. + void BigitsShiftLeft(const int shift_amount); + // BigitLength includes the "hidden" bigits encoded in the exponent. + int BigitLength() const { return used_bigits_ + exponent_; } + Chunk& RawBigit(const int index); + const Chunk& RawBigit(const int index) const; + Chunk BigitOrZero(const int index) const; + void SubtractTimes(const Bignum& other, const int factor); + + // The Bignum's value is value(bigits_buffer_) * 2^(exponent_ * kBigitSize), + // where the value of the buffer consists of the lower kBigitSize bits of + // the first used_bigits_ Chunks in bigits_buffer_, first chunk has lowest + // significant bits. + int16_t used_bigits_; + int16_t exponent_; + Chunk bigits_buffer_[kBigitCapacity]; + + DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Bignum); +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_BIGNUM_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/cached-powers.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/cached-powers.h new file mode 100644 index 0000000000000000000000000000000000000000..68fd82d8059957a5af0099382b10e0ac8a9bac58 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/cached-powers.h @@ -0,0 +1,66 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_CACHED_POWERS_H_ +#define DOUBLE_CONVERSION_CACHED_POWERS_H_ + +#include "diy-fp.h" + +namespace arrow_vendored { +namespace double_conversion { + +namespace PowersOfTenCache { + + // Not all powers of ten are cached. The decimal exponent of two neighboring + // cached numbers will differ by kDecimalExponentDistance. + static const int kDecimalExponentDistance = 8; + + static const int kMinDecimalExponent = -348; + static const int kMaxDecimalExponent = 340; + + // Returns a cached power-of-ten with a binary exponent in the range + // [min_exponent; max_exponent] (boundaries included). + void GetCachedPowerForBinaryExponentRange(int min_exponent, + int max_exponent, + DiyFp* power, + int* decimal_exponent); + + // Returns a cached power of ten x ~= 10^k such that + // k <= decimal_exponent < k + kCachedPowersDecimalDistance. + // The given decimal_exponent must satisfy + // kMinDecimalExponent <= requested_exponent, and + // requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance. + void GetCachedPowerForDecimalExponent(int requested_exponent, + DiyFp* power, + int* found_exponent); + +} // namespace PowersOfTenCache + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_CACHED_POWERS_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/diy-fp.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/diy-fp.h new file mode 100644 index 0000000000000000000000000000000000000000..f3367b9392a32cd41d3204009120c7654be866de --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/diy-fp.h @@ -0,0 +1,139 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_DIY_FP_H_ +#define DOUBLE_CONVERSION_DIY_FP_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +// This "Do It Yourself Floating Point" class implements a floating-point number +// with a uint64 significand and an int exponent. Normalized DiyFp numbers will +// have the most significant bit of the significand set. +// Multiplication and Subtraction do not normalize their results. +// DiyFp store only non-negative numbers and are not designed to contain special +// doubles (NaN and Infinity). +class DiyFp { + public: + static const int kSignificandSize = 64; + + DiyFp() : f_(0), e_(0) {} + DiyFp(const uint64_t significand, const int32_t exponent) : f_(significand), e_(exponent) {} + + // this -= other. + // The exponents of both numbers must be the same and the significand of this + // must be greater or equal than the significand of other. + // The result will not be normalized. + void Subtract(const DiyFp& other) { + DOUBLE_CONVERSION_ASSERT(e_ == other.e_); + DOUBLE_CONVERSION_ASSERT(f_ >= other.f_); + f_ -= other.f_; + } + + // Returns a - b. + // The exponents of both numbers must be the same and a must be greater + // or equal than b. The result will not be normalized. + static DiyFp Minus(const DiyFp& a, const DiyFp& b) { + DiyFp result = a; + result.Subtract(b); + return result; + } + + // this *= other. + void Multiply(const DiyFp& other) { + // Simply "emulates" a 128 bit multiplication. + // However: the resulting number only contains 64 bits. The least + // significant 64 bits are only used for rounding the most significant 64 + // bits. + const uint64_t kM32 = 0xFFFFFFFFU; + const uint64_t a = f_ >> 32; + const uint64_t b = f_ & kM32; + const uint64_t c = other.f_ >> 32; + const uint64_t d = other.f_ & kM32; + const uint64_t ac = a * c; + const uint64_t bc = b * c; + const uint64_t ad = a * d; + const uint64_t bd = b * d; + // By adding 1U << 31 to tmp we round the final result. + // Halfway cases will be rounded up. 
+ const uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32) + (1U << 31); + e_ += other.e_ + 64; + f_ = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32); + } + + // returns a * b; + static DiyFp Times(const DiyFp& a, const DiyFp& b) { + DiyFp result = a; + result.Multiply(b); + return result; + } + + void Normalize() { + DOUBLE_CONVERSION_ASSERT(f_ != 0); + uint64_t significand = f_; + int32_t exponent = e_; + + // This method is mainly called for normalizing boundaries. In general, + // boundaries need to be shifted by 10 bits, and we optimize for this case. + const uint64_t k10MSBits = DOUBLE_CONVERSION_UINT64_2PART_C(0xFFC00000, 00000000); + while ((significand & k10MSBits) == 0) { + significand <<= 10; + exponent -= 10; + } + while ((significand & kUint64MSB) == 0) { + significand <<= 1; + exponent--; + } + f_ = significand; + e_ = exponent; + } + + static DiyFp Normalize(const DiyFp& a) { + DiyFp result = a; + result.Normalize(); + return result; + } + + uint64_t f() const { return f_; } + int32_t e() const { return e_; } + + void set_f(uint64_t new_value) { f_ = new_value; } + void set_e(int32_t new_value) { e_ = new_value; } + + private: + static const uint64_t kUint64MSB = DOUBLE_CONVERSION_UINT64_2PART_C(0x80000000, 00000000); + + uint64_t f_; + int32_t e_; +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_DIY_FP_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-conversion.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-conversion.h new file mode 100644 index 0000000000000000000000000000000000000000..6e8884d84ca56dbfd05964e463dd7999364b3b35 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-conversion.h @@ -0,0 +1,34 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
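[Editorial aside, not part of the committed diff.] The `DiyFp::Multiply` body shown in `diy-fp.h` above emulates a 128-bit product and keeps only the high 64 bits, rounding halfway cases up via the `1U << 31` term. A minimal standalone sketch of that same scheme, using a hypothetical helper name `MultiplyHighRounded` (not part of the library), might look like this:

```cpp
#include <cassert>
#include <cstdint>

// Returns the high 64 bits of the 128-bit product x * y, rounded to nearest
// (ties up), mirroring the scheme used by DiyFp::Multiply above.
uint64_t MultiplyHighRounded(uint64_t x, uint64_t y) {
  const uint64_t kM32 = 0xFFFFFFFFU;
  const uint64_t a = x >> 32, b = x & kM32;
  const uint64_t c = y >> 32, d = y & kM32;
  const uint64_t ac = a * c, bc = b * c, ad = a * d, bd = b * d;
  // Only the high half of bd is kept; adding 1U << 31 rounds the final
  // >> 32 to nearest, with halfway cases rounded up, as in DiyFp::Multiply.
  const uint64_t mid = (bd >> 32) + (ad & kM32) + (bc & kM32) + (1U << 31);
  return ac + (ad >> 32) + (bc >> 32) + (mid >> 32);
}

int main() {
  // 2^63 * 2^63 = 2^126, whose top 64 bits are 2^62.
  assert(MultiplyHighRounded(1ULL << 63, 1ULL << 63) == (1ULL << 62));
  return 0;
}
```

Dropping the `1U << 31` constant reproduces a plain truncating high-word multiply; the constant is what makes the least significant retained bit rounded rather than truncated.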
+ +#ifndef DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_ +#define DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_ + +#include "string-to-double.h" +#include "double-to-string.h" + +#endif // DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-to-string.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-to-string.h new file mode 100644 index 0000000000000000000000000000000000000000..90a88b902d6ea12d3adf917cdbf9e63b818d71ee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-to-string.h @@ -0,0 +1,472 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_ +#define DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +class DoubleToStringConverter { + public: + // When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint + // or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the + // function returns false. + static const int kMaxFixedDigitsBeforePoint = 60; + static const int kMaxFixedDigitsAfterPoint = 100; + + // When calling ToExponential with a requested_digits + // parameter > kMaxExponentialDigits then the function returns false. + static const int kMaxExponentialDigits = 120; + + // When calling ToPrecision with a requested_digits + // parameter < kMinPrecisionDigits or requested_digits > kMaxPrecisionDigits + // then the function returns false. + static const int kMinPrecisionDigits = 1; + static const int kMaxPrecisionDigits = 120; + + // The maximal number of digits that are needed to emit a double in base 10. + // A higher precision can be achieved by using more digits, but the shortest + // accurate representation of any double will never use more digits than + // kBase10MaximalLength. + // Note that DoubleToAscii null-terminates its input. 
So the given buffer + // should be at least kBase10MaximalLength + 1 characters long. + static const int kBase10MaximalLength = 17; + + // The maximal number of digits that are needed to emit a single in base 10. + // A higher precision can be achieved by using more digits, but the shortest + // accurate representation of any single will never use more digits than + // kBase10MaximalLengthSingle. + static const int kBase10MaximalLengthSingle = 9; + + // The length of the longest string that 'ToShortest' can produce when the + // converter is instantiated with EcmaScript defaults (see + // 'EcmaScriptConverter') + // This value does not include the trailing '\0' character. + // This amount of characters is needed for negative values that hit the + // 'decimal_in_shortest_low' limit. For example: "-0.0000033333333333333333" + static const int kMaxCharsEcmaScriptShortest = 25; + + enum Flags { + NO_FLAGS = 0, + EMIT_POSITIVE_EXPONENT_SIGN = 1, + EMIT_TRAILING_DECIMAL_POINT = 2, + EMIT_TRAILING_ZERO_AFTER_POINT = 4, + UNIQUE_ZERO = 8, + NO_TRAILING_ZERO = 16, + EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL = 32, + EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL = 64 + }; + + // Flags should be a bit-or combination of the possible Flags-enum. + // - NO_FLAGS: no special flags. + // - EMIT_POSITIVE_EXPONENT_SIGN: when the number is converted into exponent + // form, emits a '+' for positive exponents. Example: 1.2e+2. + // - EMIT_TRAILING_DECIMAL_POINT: when the input number is an integer and is + // converted into decimal format then a trailing decimal point is appended. + // Example: 2345.0 is converted to "2345.". + // - EMIT_TRAILING_ZERO_AFTER_POINT: in addition to a trailing decimal point + // emits a trailing '0'-character. This flag requires the + // EMIT_TRAILING_DECIMAL_POINT flag. + // Example: 2345.0 is converted to "2345.0". + // - UNIQUE_ZERO: "-0.0" is converted to "0.0". + // - NO_TRAILING_ZERO: Trailing zeros are removed from the fractional portion + // of the result in precision mode. Matches printf's %g. + // When EMIT_TRAILING_ZERO_AFTER_POINT is also given, one trailing zero is + // preserved. + // - EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL: when the input number has + // exactly one significant digit and is converted into exponent form then a + // trailing decimal point is appended to the significand in shortest mode + // or in precision mode with one requested digit. + // - EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL: in addition to a trailing + // decimal point emits a trailing '0'-character. This flag requires the + // EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL flag. + // + // Infinity symbol and nan_symbol provide the string representation for these + // special values. If the string is NULL and the special value is encountered + // then the conversion functions return false. + // + // The exponent_character is used in exponential representations. It is + // usually 'e' or 'E'. + // + // When converting to the shortest representation the converter will + // represent input numbers in decimal format if they are in the interval + // [10^decimal_in_shortest_low; 10^decimal_in_shortest_high[ + // (lower boundary included, greater boundary excluded). 
+ // Example: with decimal_in_shortest_low = -6 and + // decimal_in_shortest_high = 21: + // ToShortest(0.000001) -> "0.000001" + // ToShortest(0.0000001) -> "1e-7" + // ToShortest(111111111111111111111.0) -> "111111111111111110000" + // ToShortest(100000000000000000000.0) -> "100000000000000000000" + // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21" + // + // When converting to precision mode the converter may add + // max_leading_padding_zeroes before returning the number in exponential + // format. + // Example with max_leading_padding_zeroes_in_precision_mode = 6. + // ToPrecision(0.0000012345, 2) -> "0.0000012" + // ToPrecision(0.00000012345, 2) -> "1.2e-7" + // Similarly the converter may add up to + // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid + // returning an exponential representation. A zero added by the + // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 1: + // ToPrecision(230.0, 2) -> "230" + // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT. + // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT. + // + // When converting numbers with exactly one significant digit to exponent + // form in shortest mode or in precision mode with one requested digit, the + // EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT flags have + // no effect. Use the EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL flag to + // append a decimal point in this case and the + // EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL flag to also append a + // '0'-character in this case. + // Example with decimal_in_shortest_low = 0: + // ToShortest(0.0009) -> "9e-4" + // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL deactivated. + // ToShortest(0.0009) -> "9.e-4" + // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL activated. + // ToShortest(0.0009) -> "9.0e-4" + // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL activated and + // EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL activated. + // + // The min_exponent_width is used for exponential representations. + // The converter adds leading '0's to the exponent until the exponent + // is at least min_exponent_width digits long. + // The min_exponent_width is clamped to 5. + // As such, the exponent may never have more than 5 digits in total. + DoubleToStringConverter(int flags, + const char* infinity_symbol, + const char* nan_symbol, + char exponent_character, + int decimal_in_shortest_low, + int decimal_in_shortest_high, + int max_leading_padding_zeroes_in_precision_mode, + int max_trailing_padding_zeroes_in_precision_mode, + int min_exponent_width = 0) + : flags_(flags), + infinity_symbol_(infinity_symbol), + nan_symbol_(nan_symbol), + exponent_character_(exponent_character), + decimal_in_shortest_low_(decimal_in_shortest_low), + decimal_in_shortest_high_(decimal_in_shortest_high), + max_leading_padding_zeroes_in_precision_mode_( + max_leading_padding_zeroes_in_precision_mode), + max_trailing_padding_zeroes_in_precision_mode_( + max_trailing_padding_zeroes_in_precision_mode), + min_exponent_width_(min_exponent_width) { + // When 'trailing zero after the point' is set, then 'trailing point' + // must be set too. + DOUBLE_CONVERSION_ASSERT(((flags & EMIT_TRAILING_DECIMAL_POINT) != 0) || + !((flags & EMIT_TRAILING_ZERO_AFTER_POINT) != 0)); + } + + // Returns a converter following the EcmaScript specification. + // + // Flags: UNIQUE_ZERO and EMIT_POSITIVE_EXPONENT_SIGN. 
+ // Special values: "Infinity" and "NaN". + // Lower case 'e' for exponential values. + // decimal_in_shortest_low: -6 + // decimal_in_shortest_high: 21 + // max_leading_padding_zeroes_in_precision_mode: 6 + // max_trailing_padding_zeroes_in_precision_mode: 0 + static const DoubleToStringConverter& EcmaScriptConverter(); + + // Computes the shortest string of digits that correctly represent the input + // number. Depending on decimal_in_shortest_low and decimal_in_shortest_high + // (see constructor) it then either returns a decimal representation, or an + // exponential representation. + // Example with decimal_in_shortest_low = -6, + // decimal_in_shortest_high = 21, + // EMIT_POSITIVE_EXPONENT_SIGN activated, and + // EMIT_TRAILING_DECIMAL_POINT deactivated: + // ToShortest(0.000001) -> "0.000001" + // ToShortest(0.0000001) -> "1e-7" + // ToShortest(111111111111111111111.0) -> "111111111111111110000" + // ToShortest(100000000000000000000.0) -> "100000000000000000000" + // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21" + // + // Note: the conversion may round the output if the returned string + // is accurate enough to uniquely identify the input-number. + // For example the most precise representation of the double 9e59 equals + // "899999999999999918767229449717619953810131273674690656206848", but + // the converter will return the shorter (but still correct) "9e59". + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except when the input value is special and no infinity_symbol or + // nan_symbol has been given to the constructor. + // + // The length of the longest result is the maximum of the length of the + // following string representations (each with possible examples): + // - NaN and negative infinity: "NaN", "-Infinity", "-inf". + // - -10^(decimal_in_shortest_high - 1): + // "-100000000000000000000", "-1000000000000000.0" + // - the longest string in range [0; -10^decimal_in_shortest_low]. Generally, + // this string is 3 + kBase10MaximalLength - decimal_in_shortest_low. + // (Sign, '0', decimal point, padding zeroes for decimal_in_shortest_low, + // and the significant digits). + // "-0.0000033333333333333333", "-0.0012345678901234567" + // - the longest exponential representation. (A negative number with + // kBase10MaximalLength significant digits). + // "-1.7976931348623157e+308", "-1.7976931348623157E308" + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToShortest(double value, StringBuilder* result_builder) const { + return ToShortestIeeeNumber(value, result_builder, SHORTEST); + } + + // Same as ToShortest, but for single-precision floats. + bool ToShortestSingle(float value, StringBuilder* result_builder) const { + return ToShortestIeeeNumber(value, result_builder, SHORTEST_SINGLE); + } + + + // Computes a decimal representation with a fixed number of digits after the + // decimal point. The last emitted digit is rounded. + // + // Examples: + // ToFixed(3.12, 1) -> "3.1" + // ToFixed(3.1415, 3) -> "3.142" + // ToFixed(1234.56789, 4) -> "1234.5679" + // ToFixed(1.23, 5) -> "1.23000" + // ToFixed(0.1, 4) -> "0.1000" + // ToFixed(1e30, 2) -> "1000000000000000019884624838656.00" + // ToFixed(0.1, 30) -> "0.100000000000000005551115123126" + // ToFixed(0.1, 17) -> "0.10000000000000001" + // + // If requested_digits equals 0, then the tail of the result depends on + // the EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT. 
+ // Examples, for requested_digits == 0, + // let EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT be + // - false and false: then 123.45 -> 123 + // 0.678 -> 1 + // - true and false: then 123.45 -> 123. + // 0.678 -> 1. + // - true and true: then 123.45 -> 123.0 + // 0.678 -> 1.0 + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - 'value' > 10^kMaxFixedDigitsBeforePoint, or + // - 'requested_digits' > kMaxFixedDigitsAfterPoint. + // The last two conditions imply that the result for non-special values never + // contains more than + // 1 + kMaxFixedDigitsBeforePoint + 1 + kMaxFixedDigitsAfterPoint characters + // (one additional character for the sign, and one for the decimal point). + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToFixed(double value, + int requested_digits, + StringBuilder* result_builder) const; + + // Computes a representation in exponential format with requested_digits + // after the decimal point. The last emitted digit is rounded. + // If requested_digits equals -1, then the shortest exponential representation + // is computed. + // + // Examples with EMIT_POSITIVE_EXPONENT_SIGN deactivated, and + // exponent_character set to 'e'. + // ToExponential(3.12, 1) -> "3.1e0" + // ToExponential(5.0, 3) -> "5.000e0" + // ToExponential(0.001, 2) -> "1.00e-3" + // ToExponential(3.1415, -1) -> "3.1415e0" + // ToExponential(3.1415, 4) -> "3.1415e0" + // ToExponential(3.1415, 3) -> "3.142e0" + // ToExponential(123456789000000, 3) -> "1.235e14" + // ToExponential(1000000000000000019884624838656.0, -1) -> "1e30" + // ToExponential(1000000000000000019884624838656.0, 32) -> + // "1.00000000000000001988462483865600e30" + // ToExponential(1234, 0) -> "1e3" + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - 'requested_digits' > kMaxExponentialDigits. + // + // The last condition implies that the result never contains more than + // kMaxExponentialDigits + 8 characters (the sign, the digit before the + // decimal point, the decimal point, the exponent character, the + // exponent's sign, and at most 3 exponent digits). + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToExponential(double value, + int requested_digits, + StringBuilder* result_builder) const; + + + // Computes 'precision' leading digits of the given 'value' and returns them + // either in exponential or decimal format, depending on + // max_{leading|trailing}_padding_zeroes_in_precision_mode (given to the + // constructor). + // The last computed digit is rounded. + // + // Example with max_leading_padding_zeroes_in_precision_mode = 6. + // ToPrecision(0.0000012345, 2) -> "0.0000012" + // ToPrecision(0.00000012345, 2) -> "1.2e-7" + // Similarly the converter may add up to + // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid + // returning an exponential representation. A zero added by the + // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 1: + // ToPrecision(230.0, 2) -> "230" + // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT. 
+ // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT. + // Examples for max_trailing_padding_zeroes_in_precision_mode = 3, and no + // EMIT_TRAILING_ZERO_AFTER_POINT: + // ToPrecision(123450.0, 6) -> "123450" + // ToPrecision(123450.0, 5) -> "123450" + // ToPrecision(123450.0, 4) -> "123500" + // ToPrecision(123450.0, 3) -> "123000" + // ToPrecision(123450.0, 2) -> "1.2e5" + // + // Returns true if the conversion succeeds. The conversion always succeeds + // except for the following cases: + // - the input value is special and no infinity_symbol or nan_symbol has + // been provided to the constructor, + // - precision < kMinPericisionDigits + // - precision > kMaxPrecisionDigits + // + // The last condition implies that the result never contains more than + // kMaxPrecisionDigits + 7 characters (the sign, the decimal point, the + // exponent character, the exponent's sign, and at most 3 exponent digits). + // In addition, the buffer must be able to hold the trailing '\0' character. + bool ToPrecision(double value, + int precision, + StringBuilder* result_builder) const; + + enum DtoaMode { + // Produce the shortest correct representation. + // For example the output of 0.299999999999999988897 is (the less accurate + // but correct) 0.3. + SHORTEST, + // Same as SHORTEST, but for single-precision floats. + SHORTEST_SINGLE, + // Produce a fixed number of digits after the decimal point. + // For instance fixed(0.1, 4) becomes 0.1000 + // If the input number is big, the output will be big. + FIXED, + // Fixed number of digits (independent of the decimal point). + PRECISION + }; + + // Converts the given double 'v' to digit characters. 'v' must not be NaN, + // +Infinity, or -Infinity. In SHORTEST_SINGLE-mode this restriction also + // applies to 'v' after it has been casted to a single-precision float. That + // is, in this mode static_cast(v) must not be NaN, +Infinity or + // -Infinity. + // + // The result should be interpreted as buffer * 10^(point-length). + // + // The digits are written to the buffer in the platform's charset, which is + // often UTF-8 (with ASCII-range digits) but may be another charset, such + // as EBCDIC. + // + // The output depends on the given mode: + // - SHORTEST: produce the least amount of digits for which the internal + // identity requirement is still satisfied. If the digits are printed + // (together with the correct exponent) then reading this number will give + // 'v' again. The buffer will choose the representation that is closest to + // 'v'. If there are two at the same distance, than the one farther away + // from 0 is chosen (halfway cases - ending with 5 - are rounded up). + // In this mode the 'requested_digits' parameter is ignored. + // - SHORTEST_SINGLE: same as SHORTEST but with single-precision. + // - FIXED: produces digits necessary to print a given number with + // 'requested_digits' digits after the decimal point. The produced digits + // might be too short in which case the caller has to fill the remainder + // with '0's. + // Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2. + // Halfway cases are rounded towards +/-Infinity (away from 0). The call + // toFixed(0.15, 2) thus returns buffer="2", point=0. + // The returned buffer may contain digits that would be truncated from the + // shortest representation of the input. + // - PRECISION: produces 'requested_digits' where the first digit is not '0'. 
+ // Even though the length of produced digits usually equals + // 'requested_digits', the function is allowed to return fewer digits, in + // which case the caller has to fill the missing digits with '0's. + // Halfway cases are again rounded away from 0. + // DoubleToAscii expects the given buffer to be big enough to hold all + // digits and a terminating null-character. In SHORTEST-mode it expects a + // buffer of at least kBase10MaximalLength + 1. In all other modes the + // requested_digits parameter and the padding-zeroes limit the size of the + // output. Don't forget the decimal point, the exponent character and the + // terminating null-character when computing the maximal output size. + // The given length is only used in debug mode to ensure the buffer is big + // enough. + static void DoubleToAscii(double v, + DtoaMode mode, + int requested_digits, + char* buffer, + int buffer_length, + bool* sign, + int* length, + int* point); + + private: + // Implementation for ToShortest and ToShortestSingle. + bool ToShortestIeeeNumber(double value, + StringBuilder* result_builder, + DtoaMode mode) const; + + // If the value is a special value (NaN or Infinity) constructs the + // corresponding string using the configured infinity/nan-symbol. + // If either of them is NULL or the value is not special then the + // function returns false. + bool HandleSpecialValues(double value, StringBuilder* result_builder) const; + // Constructs an exponential representation (i.e. 1.234e56). + // The given exponent assumes a decimal point after the first decimal digit. + void CreateExponentialRepresentation(const char* decimal_digits, + int length, + int exponent, + StringBuilder* result_builder) const; + // Creates a decimal representation (i.e 1234.5678). + void CreateDecimalRepresentation(const char* decimal_digits, + int length, + int decimal_point, + int digits_after_point, + StringBuilder* result_builder) const; + + const int flags_; + const char* const infinity_symbol_; + const char* const nan_symbol_; + const char exponent_character_; + const int decimal_in_shortest_low_; + const int decimal_in_shortest_high_; + const int max_leading_padding_zeroes_in_precision_mode_; + const int max_trailing_padding_zeroes_in_precision_mode_; + const int min_exponent_width_; + + DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(DoubleToStringConverter); +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/ieee.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/ieee.h new file mode 100644 index 0000000000000000000000000000000000000000..4cedc0bee04e6470ce02d27b9390068e0ec0fcc1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/ieee.h @@ -0,0 +1,449 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_DOUBLE_H_ +#define DOUBLE_CONVERSION_DOUBLE_H_ + +#include "diy-fp.h" + +namespace arrow_vendored { +namespace double_conversion { + +// We assume that doubles and uint64_t have the same endianness. +static uint64_t double_to_uint64(double d) { return BitCast(d); } +static double uint64_to_double(uint64_t d64) { return BitCast(d64); } +static uint32_t float_to_uint32(float f) { return BitCast(f); } +static float uint32_to_float(uint32_t d32) { return BitCast(d32); } + +// Helper functions for doubles. +class Double { + public: + static const uint64_t kSignMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x80000000, 00000000); + static const uint64_t kExponentMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF00000, 00000000); + static const uint64_t kSignificandMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x000FFFFF, FFFFFFFF); + static const uint64_t kHiddenBit = DOUBLE_CONVERSION_UINT64_2PART_C(0x00100000, 00000000); + static const uint64_t kQuietNanBit = DOUBLE_CONVERSION_UINT64_2PART_C(0x00080000, 00000000); + static const int kPhysicalSignificandSize = 52; // Excludes the hidden bit. + static const int kSignificandSize = 53; + static const int kExponentBias = 0x3FF + kPhysicalSignificandSize; + static const int kMaxExponent = 0x7FF - kExponentBias; + + Double() : d64_(0) {} + explicit Double(double d) : d64_(double_to_uint64(d)) {} + explicit Double(uint64_t d64) : d64_(d64) {} + explicit Double(DiyFp diy_fp) + : d64_(DiyFpToUint64(diy_fp)) {} + + // The value encoded by this Double must be greater or equal to +0.0. + // It must not be special (infinity, or NaN). + DiyFp AsDiyFp() const { + DOUBLE_CONVERSION_ASSERT(Sign() > 0); + DOUBLE_CONVERSION_ASSERT(!IsSpecial()); + return DiyFp(Significand(), Exponent()); + } + + // The value encoded by this Double must be strictly greater than 0. + DiyFp AsNormalizedDiyFp() const { + DOUBLE_CONVERSION_ASSERT(value() > 0.0); + uint64_t f = Significand(); + int e = Exponent(); + + // The current double could be a denormal. + while ((f & kHiddenBit) == 0) { + f <<= 1; + e--; + } + // Do the final shifts in one go. + f <<= DiyFp::kSignificandSize - kSignificandSize; + e -= DiyFp::kSignificandSize - kSignificandSize; + return DiyFp(f, e); + } + + // Returns the double's bit as uint64. + uint64_t AsUint64() const { + return d64_; + } + + // Returns the next greater double. Returns +infinity on input +infinity. 
+ double NextDouble() const { + if (d64_ == kInfinity) return Double(kInfinity).value(); + if (Sign() < 0 && Significand() == 0) { + // -0.0 + return 0.0; + } + if (Sign() < 0) { + return Double(d64_ - 1).value(); + } else { + return Double(d64_ + 1).value(); + } + } + + double PreviousDouble() const { + if (d64_ == (kInfinity | kSignMask)) return -Infinity(); + if (Sign() < 0) { + return Double(d64_ + 1).value(); + } else { + if (Significand() == 0) return -0.0; + return Double(d64_ - 1).value(); + } + } + + int Exponent() const { + if (IsDenormal()) return kDenormalExponent; + + uint64_t d64 = AsUint64(); + int biased_e = + static_cast((d64 & kExponentMask) >> kPhysicalSignificandSize); + return biased_e - kExponentBias; + } + + uint64_t Significand() const { + uint64_t d64 = AsUint64(); + uint64_t significand = d64 & kSignificandMask; + if (!IsDenormal()) { + return significand + kHiddenBit; + } else { + return significand; + } + } + + // Returns true if the double is a denormal. + bool IsDenormal() const { + uint64_t d64 = AsUint64(); + return (d64 & kExponentMask) == 0; + } + + // We consider denormals not to be special. + // Hence only Infinity and NaN are special. + bool IsSpecial() const { + uint64_t d64 = AsUint64(); + return (d64 & kExponentMask) == kExponentMask; + } + + bool IsNan() const { + uint64_t d64 = AsUint64(); + return ((d64 & kExponentMask) == kExponentMask) && + ((d64 & kSignificandMask) != 0); + } + + bool IsQuietNan() const { +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + return IsNan() && ((AsUint64() & kQuietNanBit) == 0); +#else + return IsNan() && ((AsUint64() & kQuietNanBit) != 0); +#endif + } + + bool IsSignalingNan() const { +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + return IsNan() && ((AsUint64() & kQuietNanBit) != 0); +#else + return IsNan() && ((AsUint64() & kQuietNanBit) == 0); +#endif + } + + + bool IsInfinite() const { + uint64_t d64 = AsUint64(); + return ((d64 & kExponentMask) == kExponentMask) && + ((d64 & kSignificandMask) == 0); + } + + int Sign() const { + uint64_t d64 = AsUint64(); + return (d64 & kSignMask) == 0? 1: -1; + } + + // Precondition: the value encoded by this Double must be greater or equal + // than +0.0. + DiyFp UpperBoundary() const { + DOUBLE_CONVERSION_ASSERT(Sign() > 0); + return DiyFp(Significand() * 2 + 1, Exponent() - 1); + } + + // Computes the two boundaries of this. + // The bigger boundary (m_plus) is normalized. The lower boundary has the same + // exponent as m_plus. + // Precondition: the value encoded by this Double must be greater than 0. + void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const { + DOUBLE_CONVERSION_ASSERT(value() > 0.0); + DiyFp v = this->AsDiyFp(); + DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1)); + DiyFp m_minus; + if (LowerBoundaryIsCloser()) { + m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2); + } else { + m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1); + } + m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e())); + m_minus.set_e(m_plus.e()); + *out_m_plus = m_plus; + *out_m_minus = m_minus; + } + + bool LowerBoundaryIsCloser() const { + // The boundary is closer if the significand is of the form f == 2^p-1 then + // the lower boundary is closer. + // Think of v = 1000e10 and v- = 9999e9. + // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but + // at a distance of 1e8. 
+ // The only exception is for the smallest normal: the largest denormal is + // at the same distance as its successor. + // Note: denormals have the same exponent as the smallest normals. + bool physical_significand_is_zero = ((AsUint64() & kSignificandMask) == 0); + return physical_significand_is_zero && (Exponent() != kDenormalExponent); + } + + double value() const { return uint64_to_double(d64_); } + + // Returns the significand size for a given order of magnitude. + // If v = f*2^e with 2^p-1 <= f <= 2^p then p+e is v's order of magnitude. + // This function returns the number of significant binary digits v will have + // once it's encoded into a double. In almost all cases this is equal to + // kSignificandSize. The only exceptions are denormals. They start with + // leading zeroes and their effective significand-size is hence smaller. + static int SignificandSizeForOrderOfMagnitude(int order) { + if (order >= (kDenormalExponent + kSignificandSize)) { + return kSignificandSize; + } + if (order <= kDenormalExponent) return 0; + return order - kDenormalExponent; + } + + static double Infinity() { + return Double(kInfinity).value(); + } + + static double NaN() { + return Double(kNaN).value(); + } + + private: + static const int kDenormalExponent = -kExponentBias + 1; + static const uint64_t kInfinity = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF00000, 00000000); +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + static const uint64_t kNaN = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF7FFFF, FFFFFFFF); +#else + static const uint64_t kNaN = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF80000, 00000000); +#endif + + + const uint64_t d64_; + + static uint64_t DiyFpToUint64(DiyFp diy_fp) { + uint64_t significand = diy_fp.f(); + int exponent = diy_fp.e(); + while (significand > kHiddenBit + kSignificandMask) { + significand >>= 1; + exponent++; + } + if (exponent >= kMaxExponent) { + return kInfinity; + } + if (exponent < kDenormalExponent) { + return 0; + } + while (exponent > kDenormalExponent && (significand & kHiddenBit) == 0) { + significand <<= 1; + exponent--; + } + uint64_t biased_exponent; + if (exponent == kDenormalExponent && (significand & kHiddenBit) == 0) { + biased_exponent = 0; + } else { + biased_exponent = static_cast(exponent + kExponentBias); + } + return (significand & kSignificandMask) | + (biased_exponent << kPhysicalSignificandSize); + } + + DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Double); +}; + +class Single { + public: + static const uint32_t kSignMask = 0x80000000; + static const uint32_t kExponentMask = 0x7F800000; + static const uint32_t kSignificandMask = 0x007FFFFF; + static const uint32_t kHiddenBit = 0x00800000; + static const uint32_t kQuietNanBit = 0x00400000; + static const int kPhysicalSignificandSize = 23; // Excludes the hidden bit. + static const int kSignificandSize = 24; + + Single() : d32_(0) {} + explicit Single(float f) : d32_(float_to_uint32(f)) {} + explicit Single(uint32_t d32) : d32_(d32) {} + + // The value encoded by this Single must be greater or equal to +0.0. + // It must not be special (infinity, or NaN). + DiyFp AsDiyFp() const { + DOUBLE_CONVERSION_ASSERT(Sign() > 0); + DOUBLE_CONVERSION_ASSERT(!IsSpecial()); + return DiyFp(Significand(), Exponent()); + } + + // Returns the single's bit as uint64. 
+ uint32_t AsUint32() const { + return d32_; + } + + int Exponent() const { + if (IsDenormal()) return kDenormalExponent; + + uint32_t d32 = AsUint32(); + int biased_e = + static_cast((d32 & kExponentMask) >> kPhysicalSignificandSize); + return biased_e - kExponentBias; + } + + uint32_t Significand() const { + uint32_t d32 = AsUint32(); + uint32_t significand = d32 & kSignificandMask; + if (!IsDenormal()) { + return significand + kHiddenBit; + } else { + return significand; + } + } + + // Returns true if the single is a denormal. + bool IsDenormal() const { + uint32_t d32 = AsUint32(); + return (d32 & kExponentMask) == 0; + } + + // We consider denormals not to be special. + // Hence only Infinity and NaN are special. + bool IsSpecial() const { + uint32_t d32 = AsUint32(); + return (d32 & kExponentMask) == kExponentMask; + } + + bool IsNan() const { + uint32_t d32 = AsUint32(); + return ((d32 & kExponentMask) == kExponentMask) && + ((d32 & kSignificandMask) != 0); + } + + bool IsQuietNan() const { +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + return IsNan() && ((AsUint32() & kQuietNanBit) == 0); +#else + return IsNan() && ((AsUint32() & kQuietNanBit) != 0); +#endif + } + + bool IsSignalingNan() const { +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + return IsNan() && ((AsUint32() & kQuietNanBit) != 0); +#else + return IsNan() && ((AsUint32() & kQuietNanBit) == 0); +#endif + } + + + bool IsInfinite() const { + uint32_t d32 = AsUint32(); + return ((d32 & kExponentMask) == kExponentMask) && + ((d32 & kSignificandMask) == 0); + } + + int Sign() const { + uint32_t d32 = AsUint32(); + return (d32 & kSignMask) == 0? 1: -1; + } + + // Computes the two boundaries of this. + // The bigger boundary (m_plus) is normalized. The lower boundary has the same + // exponent as m_plus. + // Precondition: the value encoded by this Single must be greater than 0. + void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const { + DOUBLE_CONVERSION_ASSERT(value() > 0.0); + DiyFp v = this->AsDiyFp(); + DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1)); + DiyFp m_minus; + if (LowerBoundaryIsCloser()) { + m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2); + } else { + m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1); + } + m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e())); + m_minus.set_e(m_plus.e()); + *out_m_plus = m_plus; + *out_m_minus = m_minus; + } + + // Precondition: the value encoded by this Single must be greater or equal + // than +0.0. + DiyFp UpperBoundary() const { + DOUBLE_CONVERSION_ASSERT(Sign() > 0); + return DiyFp(Significand() * 2 + 1, Exponent() - 1); + } + + bool LowerBoundaryIsCloser() const { + // The boundary is closer if the significand is of the form f == 2^p-1 then + // the lower boundary is closer. + // Think of v = 1000e10 and v- = 9999e9. + // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but + // at a distance of 1e8. + // The only exception is for the smallest normal: the largest denormal is + // at the same distance as its successor. + // Note: denormals have the same exponent as the smallest normals. 
+ bool physical_significand_is_zero = ((AsUint32() & kSignificandMask) == 0); + return physical_significand_is_zero && (Exponent() != kDenormalExponent); + } + + float value() const { return uint32_to_float(d32_); } + + static float Infinity() { + return Single(kInfinity).value(); + } + + static float NaN() { + return Single(kNaN).value(); + } + + private: + static const int kExponentBias = 0x7F + kPhysicalSignificandSize; + static const int kDenormalExponent = -kExponentBias + 1; + static const int kMaxExponent = 0xFF - kExponentBias; + static const uint32_t kInfinity = 0x7F800000; +#if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__) + static const uint32_t kNaN = 0x7FBFFFFF; +#else + static const uint32_t kNaN = 0x7FC00000; +#endif + + const uint32_t d32_; + + DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Single); +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_DOUBLE_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/string-to-double.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/string-to-double.h new file mode 100644 index 0000000000000000000000000000000000000000..83eb6fec5f44400cf9a81d45862c4bfd71ac52fa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/string-to-double.h @@ -0,0 +1,240 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_ +#define DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +class StringToDoubleConverter { + public: + // Enumeration for allowing octals and ignoring junk when converting + // strings to numbers. 
+ enum Flags { + NO_FLAGS = 0, + ALLOW_HEX = 1, + ALLOW_OCTALS = 2, + ALLOW_TRAILING_JUNK = 4, + ALLOW_LEADING_SPACES = 8, + ALLOW_TRAILING_SPACES = 16, + ALLOW_SPACES_AFTER_SIGN = 32, + ALLOW_CASE_INSENSITIVITY = 64, + ALLOW_CASE_INSENSIBILITY = 64, // Deprecated + ALLOW_HEX_FLOATS = 128, + }; + + static const uc16 kNoSeparator = '\0'; + + // Flags should be a bit-or combination of the possible Flags-enum. + // - NO_FLAGS: no special flags. + // - ALLOW_HEX: recognizes the prefix "0x". Hex numbers may only be integers. + // Ex: StringToDouble("0x1234") -> 4660.0 + // In StringToDouble("0x1234.56") the characters ".56" are trailing + // junk. The result of the call is hence dependent on + // the ALLOW_TRAILING_JUNK flag and/or the junk value. + // With this flag "0x" is a junk-string. Even with ALLOW_TRAILING_JUNK, + // the string will not be parsed as "0" followed by junk. + // + // - ALLOW_OCTALS: recognizes the prefix "0" for octals: + // If a sequence of octal digits starts with '0', then the number is + // read as octal integer. Octal numbers may only be integers. + // Ex: StringToDouble("01234") -> 668.0 + // StringToDouble("012349") -> 12349.0 // Not a sequence of octal + // // digits. + // In StringToDouble("01234.56") the characters ".56" are trailing + // junk. The result of the call is hence dependent on + // the ALLOW_TRAILING_JUNK flag and/or the junk value. + // In StringToDouble("01234e56") the characters "e56" are trailing + // junk, too. + // - ALLOW_TRAILING_JUNK: ignore trailing characters that are not part of + // a double literal. + // - ALLOW_LEADING_SPACES: skip over leading whitespace, including spaces, + // new-lines, and tabs. + // - ALLOW_TRAILING_SPACES: ignore trailing whitespace. + // - ALLOW_SPACES_AFTER_SIGN: ignore whitespace after the sign. + // Ex: StringToDouble("- 123.2") -> -123.2. + // StringToDouble("+ 123.2") -> 123.2 + // - ALLOW_CASE_INSENSITIVITY: ignore case of characters for special values: + // infinity and nan. + // - ALLOW_HEX_FLOATS: allows hexadecimal float literals. + // This *must* start with "0x" and separate the exponent with "p". + // Examples: 0x1.2p3 == 9.0 + // 0x10.1p0 == 16.0625 + // ALLOW_HEX and ALLOW_HEX_FLOATS are indented. + // + // empty_string_value is returned when an empty string is given as input. + // If ALLOW_LEADING_SPACES or ALLOW_TRAILING_SPACES are set, then a string + // containing only spaces is converted to the 'empty_string_value', too. + // + // junk_string_value is returned when + // a) ALLOW_TRAILING_JUNK is not set, and a junk character (a character not + // part of a double-literal) is found. + // b) ALLOW_TRAILING_JUNK is set, but the string does not start with a + // double literal. + // + // infinity_symbol and nan_symbol are strings that are used to detect + // inputs that represent infinity and NaN. They can be null, in which case + // they are ignored. + // The conversion routine first reads any possible signs. Then it compares the + // following character of the input-string with the first character of + // the infinity, and nan-symbol. If either matches, the function assumes, that + // a match has been found, and expects the following input characters to match + // the remaining characters of the special-value symbol. + // This means that the following restrictions apply to special-value symbols: + // - they must not start with signs ('+', or '-'), + // - they must not have the same first character. + // - they must not start with digits. 
+ // + // If the separator character is not kNoSeparator, then that specific + // character is ignored when in between two valid digits of the significant. + // It is not allowed to appear in the exponent. + // It is not allowed to lead or trail the number. + // It is not allowed to appear twice next to each other. + // + // Examples: + // flags = ALLOW_HEX | ALLOW_TRAILING_JUNK, + // empty_string_value = 0.0, + // junk_string_value = NaN, + // infinity_symbol = "infinity", + // nan_symbol = "nan": + // StringToDouble("0x1234") -> 4660.0. + // StringToDouble("0x1234K") -> 4660.0. + // StringToDouble("") -> 0.0 // empty_string_value. + // StringToDouble(" ") -> NaN // junk_string_value. + // StringToDouble(" 1") -> NaN // junk_string_value. + // StringToDouble("0x") -> NaN // junk_string_value. + // StringToDouble("-123.45") -> -123.45. + // StringToDouble("--123.45") -> NaN // junk_string_value. + // StringToDouble("123e45") -> 123e45. + // StringToDouble("123E45") -> 123e45. + // StringToDouble("123e+45") -> 123e45. + // StringToDouble("123E-45") -> 123e-45. + // StringToDouble("123e") -> 123.0 // trailing junk ignored. + // StringToDouble("123e-") -> 123.0 // trailing junk ignored. + // StringToDouble("+NaN") -> NaN // NaN string literal. + // StringToDouble("-infinity") -> -inf. // infinity literal. + // StringToDouble("Infinity") -> NaN // junk_string_value. + // + // flags = ALLOW_OCTAL | ALLOW_LEADING_SPACES, + // empty_string_value = 0.0, + // junk_string_value = NaN, + // infinity_symbol = NULL, + // nan_symbol = NULL: + // StringToDouble("0x1234") -> NaN // junk_string_value. + // StringToDouble("01234") -> 668.0. + // StringToDouble("") -> 0.0 // empty_string_value. + // StringToDouble(" ") -> 0.0 // empty_string_value. + // StringToDouble(" 1") -> 1.0 + // StringToDouble("0x") -> NaN // junk_string_value. + // StringToDouble("0123e45") -> NaN // junk_string_value. + // StringToDouble("01239E45") -> 1239e45. + // StringToDouble("-infinity") -> NaN // junk_string_value. + // StringToDouble("NaN") -> NaN // junk_string_value. + // + // flags = NO_FLAGS, + // separator = ' ': + // StringToDouble("1 2 3 4") -> 1234.0 + // StringToDouble("1 2") -> NaN // junk_string_value + // StringToDouble("1 000 000.0") -> 1000000.0 + // StringToDouble("1.000 000") -> 1.0 + // StringToDouble("1.0e1 000") -> NaN // junk_string_value + StringToDoubleConverter(int flags, + double empty_string_value, + double junk_string_value, + const char* infinity_symbol, + const char* nan_symbol, + uc16 separator = kNoSeparator) + : flags_(flags), + empty_string_value_(empty_string_value), + junk_string_value_(junk_string_value), + infinity_symbol_(infinity_symbol), + nan_symbol_(nan_symbol), + separator_(separator) { + } + + // Performs the conversion. + // The output parameter 'processed_characters_count' is set to the number + // of characters that have been processed to read the number. + // Spaces than are processed with ALLOW_{LEADING|TRAILING}_SPACES are included + // in the 'processed_characters_count'. Trailing junk is never included. + double StringToDouble(const char* buffer, + int length, + int* processed_characters_count) const; + + // Same as StringToDouble above but for 16 bit characters. + double StringToDouble(const uc16* buffer, + int length, + int* processed_characters_count) const; + + // Same as StringToDouble but reads a float. + // Note that this is not equivalent to static_cast(StringToDouble(...)) + // due to potential double-rounding. 
+ float StringToFloat(const char* buffer, + int length, + int* processed_characters_count) const; + + // Same as StringToFloat above but for 16 bit characters. + float StringToFloat(const uc16* buffer, + int length, + int* processed_characters_count) const; + + // Same as StringToDouble for T = double, and StringToFloat for T = float. + template + T StringTo(const char* buffer, + int length, + int* processed_characters_count) const; + + // Same as StringTo above but for 16 bit characters. + template + T StringTo(const uc16* buffer, + int length, + int* processed_characters_count) const; + + private: + const int flags_; + const double empty_string_value_; + const double junk_string_value_; + const char* const infinity_symbol_; + const char* const nan_symbol_; + const uc16 separator_; + + template + double StringToIeee(Iterator start_pointer, + int length, + bool read_as_double, + int* processed_characters_count) const; + + DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter); +}; + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/strtod.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/strtod.h new file mode 100644 index 0000000000000000000000000000000000000000..619db5838d2f75b6ea7b18ac3df458a029b70294 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/strtod.h @@ -0,0 +1,66 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef DOUBLE_CONVERSION_STRTOD_H_ +#define DOUBLE_CONVERSION_STRTOD_H_ + +#include "utils.h" + +namespace arrow_vendored { +namespace double_conversion { + +// The buffer must only contain digits in the range [0-9]. It must not +// contain a dot or a sign. It must not start with '0', and must not be empty. 
+double Strtod(Vector buffer, int exponent); + +// The buffer must only contain digits in the range [0-9]. It must not +// contain a dot or a sign. It must not start with '0', and must not be empty. +float Strtof(Vector buffer, int exponent); + +// Same as Strtod, but assumes that 'trimmed' is already trimmed, as if run +// through TrimAndCut. That is, 'trimmed' must have no leading or trailing +// zeros, must not be a lone zero, and must not have 'too many' digits. +double StrtodTrimmed(Vector trimmed, int exponent); + +// Same as Strtof, but assumes that 'trimmed' is already trimmed, as if run +// through TrimAndCut. That is, 'trimmed' must have no leading or trailing +// zeros, must not be a lone zero, and must not have 'too many' digits. +float StrtofTrimmed(Vector trimmed, int exponent); + +inline Vector TrimTrailingZeros(Vector buffer) { + for (int i = buffer.length() - 1; i >= 0; --i) { + if (buffer[i] != '0') { + return buffer.SubVector(0, i + 1); + } + } + return Vector(buffer.start(), 0); +} + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_STRTOD_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/utils.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..332619a31270d709fda1c4f85248ae71546debde --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/utils.h @@ -0,0 +1,420 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
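To make the flag semantics documented for StringToDoubleConverter above concrete, here is a small illustrative sketch (not part of the vendored headers); it only uses the constructor and the char-based StringToDouble overload declared in string-to-double.h.

#include <cstring>
#include <limits>

#include "arrow/vendored/double-conversion/string-to-double.h"

using arrow_vendored::double_conversion::StringToDoubleConverter;

double ParseLenient(const char* s) {
  // Hex literals allowed, trailing junk ignored; "" -> 0.0, garbage -> NaN.
  StringToDoubleConverter converter(
      StringToDoubleConverter::ALLOW_HEX |
          StringToDoubleConverter::ALLOW_TRAILING_JUNK,
      /*empty_string_value=*/0.0,
      /*junk_string_value=*/std::numeric_limits<double>::quiet_NaN(),
      /*infinity_symbol=*/"infinity",
      /*nan_symbol=*/"nan");
  int processed = 0;
  return converter.StringToDouble(s, static_cast<int>(std::strlen(s)),
                                  &processed);
}
// Following the comment block above: ParseLenient("0x1234K") -> 4660.0,
// while ParseLenient("--123.45") -> NaN (the junk_string_value).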
+ +#ifndef DOUBLE_CONVERSION_UTILS_H_ +#define DOUBLE_CONVERSION_UTILS_H_ + +// Use DOUBLE_CONVERSION_NON_PREFIXED_MACROS to get unprefixed macros as was +// the case in double-conversion releases prior to 3.1.6 + +#include +#include + +// For pre-C++11 compatibility +#if __cplusplus >= 201103L +#define DOUBLE_CONVERSION_NULLPTR nullptr +#else +#define DOUBLE_CONVERSION_NULLPTR NULL +#endif + +#include +#ifndef DOUBLE_CONVERSION_ASSERT +#define DOUBLE_CONVERSION_ASSERT(condition) \ + assert(condition) +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(ASSERT) +#define ASSERT DOUBLE_CONVERSION_ASSERT +#endif + +#ifndef DOUBLE_CONVERSION_UNIMPLEMENTED +#define DOUBLE_CONVERSION_UNIMPLEMENTED() (abort()) +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNIMPLEMENTED) +#define UNIMPLEMENTED DOUBLE_CONVERSION_UNIMPLEMENTED +#endif + +#ifndef DOUBLE_CONVERSION_NO_RETURN +#ifdef _MSC_VER +#define DOUBLE_CONVERSION_NO_RETURN __declspec(noreturn) +#else +#define DOUBLE_CONVERSION_NO_RETURN __attribute__((noreturn)) +#endif +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(NO_RETURN) +#define NO_RETURN DOUBLE_CONVERSION_NO_RETURN +#endif + +#ifndef DOUBLE_CONVERSION_UNREACHABLE +#ifdef _MSC_VER +void DOUBLE_CONVERSION_NO_RETURN abort_noreturn(); +inline void abort_noreturn() { abort(); } +#define DOUBLE_CONVERSION_UNREACHABLE() (abort_noreturn()) +#else +#define DOUBLE_CONVERSION_UNREACHABLE() (abort()) +#endif +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNREACHABLE) +#define UNREACHABLE DOUBLE_CONVERSION_UNREACHABLE +#endif + +// Not all compilers support __has_attribute and combining a check for both +// ifdef and __has_attribute on the same preprocessor line isn't portable. +#ifdef __has_attribute +# define DOUBLE_CONVERSION_HAS_ATTRIBUTE(x) __has_attribute(x) +#else +# define DOUBLE_CONVERSION_HAS_ATTRIBUTE(x) 0 +#endif + +#ifndef DOUBLE_CONVERSION_UNUSED +#if DOUBLE_CONVERSION_HAS_ATTRIBUTE(unused) +#define DOUBLE_CONVERSION_UNUSED __attribute__((unused)) +#else +#define DOUBLE_CONVERSION_UNUSED +#endif +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNUSED) +#define UNUSED DOUBLE_CONVERSION_UNUSED +#endif + +#if DOUBLE_CONVERSION_HAS_ATTRIBUTE(uninitialized) +#define DOUBLE_CONVERSION_STACK_UNINITIALIZED __attribute__((uninitialized)) +#else +#define DOUBLE_CONVERSION_STACK_UNINITIALIZED +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(STACK_UNINITIALIZED) +#define STACK_UNINITIALIZED DOUBLE_CONVERSION_STACK_UNINITIALIZED +#endif + +// Double operations detection based on target architecture. +// Linux uses a 80bit wide floating point stack on x86. This induces double +// rounding, which in turn leads to wrong results. +// An easy way to test if the floating-point operations are correct is to +// evaluate: 89255.0/1e22. If the floating-point stack is 64 bits wide then +// the result is equal to 89255e-22. +// The best way to test this, is to create a division-function and to compare +// the output of the division with the expected result. (Inlining must be +// disabled.) +// On Linux,x86 89255e-22 != Div_double(89255.0/1e22) +// +// For example: +/* +// -- in div.c +double Div_double(double x, double y) { return x / y; } + +// -- in main.c +double Div_double(double x, double y); // Forward declaration. 
+ +int main(int argc, char** argv) { + return Div_double(89255.0, 1e22) == 89255e-22; +} +*/ +// Run as follows ./main || echo "correct" +// +// If it prints "correct" then the architecture should be here, in the "correct" section. +#if defined(_M_X64) || defined(__x86_64__) || \ + defined(__ARMEL__) || defined(__avr32__) || defined(_M_ARM) || defined(_M_ARM64) || \ + defined(__hppa__) || defined(__ia64__) || \ + defined(__mips__) || \ + defined(__loongarch__) || \ + defined(__nios2__) || defined(__ghs) || \ + defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \ + defined(_POWER) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \ + defined(__sparc__) || defined(__sparc) || defined(__s390__) || \ + defined(__SH4__) || defined(__alpha__) || \ + defined(_MIPS_ARCH_MIPS32R2) || defined(__ARMEB__) ||\ + defined(__AARCH64EL__) || defined(__aarch64__) || defined(__AARCH64EB__) || \ + defined(__riscv) || defined(__e2k__) || \ + defined(__or1k__) || defined(__arc__) || defined(__ARC64__) || \ + defined(__microblaze__) || defined(__XTENSA__) || \ + defined(__EMSCRIPTEN__) || defined(__wasm32__) +#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1 +#elif defined(__mc68000__) || \ + defined(__pnacl__) || defined(__native_client__) +#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS +#elif defined(_M_IX86) || defined(__i386__) || defined(__i386) +#if defined(_WIN32) +// Windows uses a 64bit wide floating point stack. +#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1 +#else +#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS +#endif // _WIN32 +#else +#error Target architecture was not detected as supported by Double-Conversion. +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(CORRECT_DOUBLE_OPERATIONS) +#define CORRECT_DOUBLE_OPERATIONS DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS +#endif + +#if defined(_WIN32) && !defined(__MINGW32__) + +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef short int16_t; // NOLINT +typedef unsigned short uint16_t; // NOLINT +typedef int int32_t; +typedef unsigned int uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +// intptr_t and friends are defined in crtdefs.h through stdio.h. + +#else + +#include + +#endif + +typedef uint16_t uc16; + +// The following macro works on both 32 and 64-bit platforms. +// Usage: instead of writing 0x1234567890123456 +// write DOUBLE_CONVERSION_UINT64_2PART_C(0x12345678,90123456); +#define DOUBLE_CONVERSION_UINT64_2PART_C(a, b) (((static_cast(a) << 32) + 0x##b##u)) +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UINT64_2PART_C) +#define UINT64_2PART_C DOUBLE_CONVERSION_UINT64_2PART_C +#endif + +// The expression DOUBLE_CONVERSION_ARRAY_SIZE(a) is a compile-time constant of type +// size_t which represents the number of elements of the given +// array. You should only use DOUBLE_CONVERSION_ARRAY_SIZE on statically allocated +// arrays. 
+#ifndef DOUBLE_CONVERSION_ARRAY_SIZE +#define DOUBLE_CONVERSION_ARRAY_SIZE(a) \ + ((sizeof(a) / sizeof(*(a))) / \ + static_cast(!(sizeof(a) % sizeof(*(a))))) +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(ARRAY_SIZE) +#define ARRAY_SIZE DOUBLE_CONVERSION_ARRAY_SIZE +#endif + +// A macro to disallow the evil copy constructor and operator= functions +// This should be used in the private: declarations for a class +#ifndef DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN +#define DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&); \ + void operator=(const TypeName&) +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(DC_DISALLOW_COPY_AND_ASSIGN) +#define DC_DISALLOW_COPY_AND_ASSIGN DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN +#endif + +// A macro to disallow all the implicit constructors, namely the +// default constructor, copy constructor and operator= functions. +// +// This should be used in the private: declarations for a class +// that wants to prevent anyone from instantiating it. This is +// especially useful for classes containing only static methods. +#ifndef DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS +#define DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \ + TypeName(); \ + DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(TypeName) +#endif +#if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(DC_DISALLOW_IMPLICIT_CONSTRUCTORS) +#define DC_DISALLOW_IMPLICIT_CONSTRUCTORS DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS +#endif + +namespace arrow_vendored { +namespace double_conversion { + +inline int StrLength(const char* string) { + size_t length = strlen(string); + DOUBLE_CONVERSION_ASSERT(length == static_cast(static_cast(length))); + return static_cast(length); +} + +// This is a simplified version of V8's Vector class. +template +class Vector { + public: + Vector() : start_(DOUBLE_CONVERSION_NULLPTR), length_(0) {} + Vector(T* data, int len) : start_(data), length_(len) { + DOUBLE_CONVERSION_ASSERT(len == 0 || (len > 0 && data != DOUBLE_CONVERSION_NULLPTR)); + } + + // Returns a vector using the same backing storage as this one, + // spanning from and including 'from', to but not including 'to'. + Vector SubVector(int from, int to) { + DOUBLE_CONVERSION_ASSERT(to <= length_); + DOUBLE_CONVERSION_ASSERT(from < to); + DOUBLE_CONVERSION_ASSERT(0 <= from); + return Vector(start() + from, to - from); + } + + // Returns the length of the vector. + int length() const { return length_; } + + // Returns whether or not the vector is empty. + bool is_empty() const { return length_ == 0; } + + // Returns the pointer to the start of the data in the vector. + T* start() const { return start_; } + + // Access individual vector elements - checks bounds in debug mode. + T& operator[](int index) const { + DOUBLE_CONVERSION_ASSERT(0 <= index && index < length_); + return start_[index]; + } + + T& first() { return start_[0]; } + + T& last() { return start_[length_ - 1]; } + + void pop_back() { + DOUBLE_CONVERSION_ASSERT(!is_empty()); + --length_; + } + + private: + T* start_; + int length_; +}; + + +// Helper class for building result strings in a character buffer. The +// purpose of the class is to use safe operations that checks the +// buffer bounds on all operations in debug mode. 
+class StringBuilder { + public: + StringBuilder(char* buffer, int buffer_size) + : buffer_(buffer, buffer_size), position_(0) { } + + ~StringBuilder() { if (!is_finalized()) Finalize(); } + + int size() const { return buffer_.length(); } + + // Get the current position in the builder. + int position() const { + DOUBLE_CONVERSION_ASSERT(!is_finalized()); + return position_; + } + + // Reset the position. + void Reset() { position_ = 0; } + + // Add a single character to the builder. It is not allowed to add + // 0-characters; use the Finalize() method to terminate the string + // instead. + void AddCharacter(char c) { + DOUBLE_CONVERSION_ASSERT(c != '\0'); + DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ < buffer_.length()); + buffer_[position_++] = c; + } + + // Add an entire string to the builder. Uses strlen() internally to + // compute the length of the input string. + void AddString(const char* s) { + AddSubstring(s, StrLength(s)); + } + + // Add the first 'n' characters of the given string 's' to the + // builder. The input string must have enough characters. + void AddSubstring(const char* s, int n) { + DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ + n < buffer_.length()); + DOUBLE_CONVERSION_ASSERT(static_cast(n) <= strlen(s)); + memmove(&buffer_[position_], s, static_cast(n)); + position_ += n; + } + + + // Add character padding to the builder. If count is non-positive, + // nothing is added to the builder. + void AddPadding(char c, int count) { + for (int i = 0; i < count; i++) { + AddCharacter(c); + } + } + + // Finalize the string by 0-terminating it and returning the buffer. + char* Finalize() { + DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ < buffer_.length()); + buffer_[position_] = '\0'; + // Make sure nobody managed to add a 0-character to the + // buffer while building the string. + DOUBLE_CONVERSION_ASSERT(strlen(buffer_.start()) == static_cast(position_)); + position_ = -1; + DOUBLE_CONVERSION_ASSERT(is_finalized()); + return buffer_.start(); + } + + private: + Vector buffer_; + int position_; + + bool is_finalized() const { return position_ < 0; } + + DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder); +}; + +// The type-based aliasing rule allows the compiler to assume that pointers of +// different types (for some definition of different) never alias each other. +// Thus the following code does not work: +// +// float f = foo(); +// int fbits = *(int*)(&f); +// +// The compiler 'knows' that the int pointer can't refer to f since the types +// don't match, so the compiler may cache f in a register, leaving random data +// in fbits. Using C++ style casts makes no difference, however a pointer to +// char data is assumed to alias any other pointer. This is the 'memcpy +// exception'. +// +// Bit_cast uses the memcpy exception to move the bits from a variable of one +// type of a variable of another type. Of course the end result is likely to +// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005) +// will completely optimize BitCast away. +// +// There is an additional use for BitCast. +// Recent gccs will warn when they see casts that may result in breakage due to +// the type-based aliasing rule. If you have checked that there is no breakage +// you can use BitCast to cast one pointer type to another. This confuses gcc +// enough that it can no longer see that you have cast one pointer type to +// another thus avoiding the warning. 
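As a concrete illustration of the aliasing pitfall described in the comment above (this snippet is not part of the header): the cast mentioned in the comment is the pattern being warned against, while the BitCast template defined just below moves the bits through memcpy.

inline uint32_t FloatBits(float f) {
  // Unsafe: *(reinterpret_cast<uint32_t*>(&f)) may be miscompiled, since the
  // compiler is allowed to assume a uint32_t* never aliases a float.
  // Safe: copy the bits instead, which is what BitCast (defined below) does.
  return BitCast<uint32_t>(f);  // 1.0f -> 0x3F800000
}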
+template +Dest BitCast(const Source& source) { + // Compile time assertion: sizeof(Dest) == sizeof(Source) + // A compile error here means your Dest and Source have different sizes. +#if __cplusplus >= 201103L + static_assert(sizeof(Dest) == sizeof(Source), + "source and destination size mismatch"); +#else + DOUBLE_CONVERSION_UNUSED + typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1]; +#endif + + Dest dest; + memmove(&dest, &source, sizeof(dest)); + return dest; +} + +template +Dest BitCast(Source* source) { + return BitCast(reinterpret_cast(source)); +} + +} // namespace double_conversion +} // namespace arrow_vendored + +#endif // DOUBLE_CONVERSION_UTILS_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/io.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/io.h new file mode 100644 index 0000000000000000000000000000000000000000..28a00f12a7a616136beb328d20120d6458294eab --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/io.h @@ -0,0 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "parquet/exception.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/reader.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/reader.h new file mode 100644 index 0000000000000000000000000000000000000000..7e746e8c5bbf551e84431552f688a493e2d62bc4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/reader.h @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +// Column reader API +#include "parquet/column_reader.h" +#include "parquet/column_scanner.h" +#include "parquet/exception.h" +#include "parquet/file_reader.h" +#include "parquet/metadata.h" +#include "parquet/platform.h" +#include "parquet/printer.h" +#include "parquet/properties.h" +#include "parquet/statistics.h" + +// Schemas +#include "parquet/api/schema.h" + +// IO +#include "parquet/api/io.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/schema.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/schema.h new file mode 100644 index 0000000000000000000000000000000000000000..7ca714f47b5448974c460e424ab3821d10f7a384 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/schema.h @@ -0,0 +1,21 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +// Schemas +#include "parquet/schema.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/writer.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/writer.h new file mode 100644 index 0000000000000000000000000000000000000000..b072dcf74dea7233723ae55599d95be47c674716 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/writer.h @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
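For orientation, a small sketch (not part of these headers) of the low-level reader API aggregated by parquet/api/reader.h above; it assumes ParquetFileReader::OpenFile and the FileMetaData accessors declared in parquet/file_reader.h and parquet/metadata.h.

#include <iostream>
#include <memory>
#include <string>

#include "parquet/api/reader.h"

void PrintRowGroupInfo(const std::string& path) {
  std::unique_ptr<parquet::ParquetFileReader> reader =
      parquet::ParquetFileReader::OpenFile(path);
  std::shared_ptr<parquet::FileMetaData> metadata = reader->metadata();
  std::cout << metadata->num_rows() << " rows in "
            << metadata->num_row_groups() << " row groups" << std::endl;
}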
+ +#pragma once + +#include "parquet/api/io.h" +#include "parquet/api/schema.h" +#include "parquet/column_writer.h" +#include "parquet/exception.h" +#include "parquet/file_writer.h" +#include "parquet/statistics.h" diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/reader.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/reader.h new file mode 100644 index 0000000000000000000000000000000000000000..6e46ca43f7b18ce0021cdd8064efde70f39f8eaa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/reader.h @@ -0,0 +1,379 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +// N.B. we don't include async_generator.h as it's relatively heavy +#include +#include +#include + +#include "parquet/file_reader.h" +#include "parquet/platform.h" +#include "parquet/properties.h" + +namespace arrow { + +class ChunkedArray; +class KeyValueMetadata; +class RecordBatchReader; +struct Scalar; +class Schema; +class Table; +class RecordBatch; + +} // namespace arrow + +namespace parquet { + +class FileMetaData; +class SchemaDescriptor; + +namespace arrow { + +class ColumnChunkReader; +class ColumnReader; +struct SchemaManifest; +class RowGroupReader; + +/// \brief Arrow read adapter class for deserializing Parquet files as Arrow row batches. +/// +/// This interfaces caters for different use cases and thus provides different +/// interfaces. In its most simplistic form, we cater for a user that wants to +/// read the whole Parquet at once with the `FileReader::ReadTable` method. +/// +/// More advanced users that also want to implement parallelism on top of each +/// single Parquet files should do this on the RowGroup level. For this, they can +/// call `FileReader::RowGroup(i)->ReadTable` to receive only the specified +/// RowGroup as a table. +/// +/// In the most advanced situation, where a consumer wants to independently read +/// RowGroups in parallel and consume each column individually, they can call +/// `FileReader::RowGroup(i)->Column(j)->Read` and receive an `arrow::Column` +/// instance. +/// +/// Finally, one can also get a stream of record batches using +/// `FileReader::GetRecordBatchReader()`. This can internally decode columns +/// in parallel if use_threads was enabled in the ArrowReaderProperties. +/// +/// The parquet format supports an optional integer field_id which can be assigned +/// to a field. Arrow will convert these field IDs to a metadata key named +/// PARQUET:field_id on the appropriate field. +// TODO(wesm): nested data does not always make sense with this user +// interface unless you are only reading a single leaf node from a branch of +// a table. 
For example: +// +// repeated group data { +// optional group record { +// optional int32 val1; +// optional byte_array val2; +// optional bool val3; +// } +// optional int32 val4; +// } +// +// In the Parquet file, there are 4 leaf nodes: +// +// * data.record.val1 +// * data.record.val2 +// * data.record.val3 +// * data.val4 +// +// When materializing this data in an Arrow array, we would have: +// +// data: list), +// val3: bool, +// >, +// val4: int32 +// >> +// +// However, in the Parquet format, each leaf node has its own repetition and +// definition levels describing the structure of the intermediate nodes in +// this array structure. Thus, we will need to scan the leaf data for a group +// of leaf nodes part of the same type tree to create a single result Arrow +// nested array structure. +// +// This is additionally complicated "chunky" repeated fields or very large byte +// arrays +class PARQUET_EXPORT FileReader { + public: + /// Factory function to create a FileReader from a ParquetFileReader and properties + static ::arrow::Status Make(::arrow::MemoryPool* pool, + std::unique_ptr reader, + const ArrowReaderProperties& properties, + std::unique_ptr* out); + + /// Factory function to create a FileReader from a ParquetFileReader + static ::arrow::Status Make(::arrow::MemoryPool* pool, + std::unique_ptr reader, + std::unique_ptr* out); + + // Since the distribution of columns amongst a Parquet file's row groups may + // be uneven (the number of values in each column chunk can be different), we + // provide a column-oriented read interface. The ColumnReader hides the + // details of paging through the file's row groups and yielding + // fully-materialized arrow::Array instances + // + // Returns error status if the column of interest is not flat. + // The indicated column index is relative to the schema + virtual ::arrow::Status GetColumn(int i, std::unique_ptr* out) = 0; + + /// \brief Return arrow schema for all the columns. + virtual ::arrow::Status GetSchema(std::shared_ptr<::arrow::Schema>* out) = 0; + + /// \brief Read column as a whole into a chunked array. + /// + /// The index i refers the index of the top level schema field, which may + /// be nested or flat - e.g. + /// + /// 0 foo.bar + /// foo.bar.baz + /// foo.qux + /// 1 foo2 + /// 2 foo3 + /// + /// i=0 will read the entire foo struct, i=1 the foo2 primitive column etc + virtual ::arrow::Status ReadColumn(int i, + std::shared_ptr<::arrow::ChunkedArray>* out) = 0; + + /// \brief Return a RecordBatchReader of all row groups and columns. + virtual ::arrow::Status GetRecordBatchReader( + std::unique_ptr<::arrow::RecordBatchReader>* out) = 0; + + /// \brief Return a RecordBatchReader of row groups selected from row_group_indices. + /// + /// Note that the ordering in row_group_indices matters. FileReaders must outlive + /// their RecordBatchReaders. + /// + /// \returns error Status if row_group_indices contains an invalid index + virtual ::arrow::Status GetRecordBatchReader( + const std::vector& row_group_indices, + std::unique_ptr<::arrow::RecordBatchReader>* out) = 0; + + /// \brief Return a RecordBatchReader of row groups selected from + /// row_group_indices, whose columns are selected by column_indices. + /// + /// Note that the ordering in row_group_indices and column_indices + /// matter. FileReaders must outlive their RecordBatchReaders. 
+ /// + /// \returns error Status if either row_group_indices or column_indices + /// contains an invalid index + virtual ::arrow::Status GetRecordBatchReader( + const std::vector& row_group_indices, const std::vector& column_indices, + std::unique_ptr<::arrow::RecordBatchReader>* out) = 0; + + /// \brief Return a RecordBatchReader of row groups selected from + /// row_group_indices, whose columns are selected by column_indices. + /// + /// Note that the ordering in row_group_indices and column_indices + /// matter. FileReaders must outlive their RecordBatchReaders. + /// + /// \param row_group_indices which row groups to read (order determines read order). + /// \param column_indices which columns to read (order determines output schema). + /// \param[out] out record batch stream from parquet data. + /// + /// \returns error Status if either row_group_indices or column_indices + /// contains an invalid index + ::arrow::Status GetRecordBatchReader(const std::vector& row_group_indices, + const std::vector& column_indices, + std::shared_ptr<::arrow::RecordBatchReader>* out); + ::arrow::Status GetRecordBatchReader(const std::vector& row_group_indices, + std::shared_ptr<::arrow::RecordBatchReader>* out); + ::arrow::Status GetRecordBatchReader(std::shared_ptr<::arrow::RecordBatchReader>* out); + + /// \brief Return a generator of record batches. + /// + /// The FileReader must outlive the generator, so this requires that you pass in a + /// shared_ptr. + /// + /// \returns error Result if either row_group_indices or column_indices contains an + /// invalid index + virtual ::arrow::Result< + std::function<::arrow::Future>()>> + GetRecordBatchGenerator(std::shared_ptr reader, + const std::vector row_group_indices, + const std::vector column_indices, + ::arrow::internal::Executor* cpu_executor = NULLPTR, + int64_t rows_to_readahead = 0) = 0; + + /// Read all columns into a Table + virtual ::arrow::Status ReadTable(std::shared_ptr<::arrow::Table>* out) = 0; + + /// \brief Read the given columns into a Table + /// + /// The indicated column indices are relative to the internal representation + /// of the parquet table. For instance : + /// 0 foo.bar + /// foo.bar.baz 0 + /// foo.bar.baz2 1 + /// foo.qux 2 + /// 1 foo2 3 + /// 2 foo3 4 + /// + /// i=0 will read foo.bar.baz, i=1 will read only foo.bar.baz2 and so on. + /// Only leaf fields have indices; foo itself doesn't have an index. + /// To get the index for a particular leaf field, one can use + /// manifest().schema_fields to get the top level fields, and then walk the + /// tree to identify the relevant leaf fields and access its column_index. + /// To get the total number of leaf fields, use FileMetadata.num_columns(). 
+ virtual ::arrow::Status ReadTable(const std::vector& column_indices, + std::shared_ptr<::arrow::Table>* out) = 0; + + virtual ::arrow::Status ReadRowGroup(int i, const std::vector& column_indices, + std::shared_ptr<::arrow::Table>* out) = 0; + + virtual ::arrow::Status ReadRowGroup(int i, std::shared_ptr<::arrow::Table>* out) = 0; + + virtual ::arrow::Status ReadRowGroups(const std::vector& row_groups, + const std::vector& column_indices, + std::shared_ptr<::arrow::Table>* out) = 0; + + virtual ::arrow::Status ReadRowGroups(const std::vector& row_groups, + std::shared_ptr<::arrow::Table>* out) = 0; + + /// \brief Scan file contents with one thread, return number of rows + virtual ::arrow::Status ScanContents(std::vector columns, + const int32_t column_batch_size, + int64_t* num_rows) = 0; + + /// \brief Return a reader for the RowGroup, this object must not outlive the + /// FileReader. + virtual std::shared_ptr RowGroup(int row_group_index) = 0; + + /// \brief The number of row groups in the file + virtual int num_row_groups() const = 0; + + virtual ParquetFileReader* parquet_reader() const = 0; + + /// Set whether to use multiple threads during reads of multiple columns. + /// By default only one thread is used. + virtual void set_use_threads(bool use_threads) = 0; + + /// Set number of records to read per batch for the RecordBatchReader. + virtual void set_batch_size(int64_t batch_size) = 0; + + virtual const ArrowReaderProperties& properties() const = 0; + + virtual const SchemaManifest& manifest() const = 0; + + virtual ~FileReader() = default; +}; + +class RowGroupReader { + public: + virtual ~RowGroupReader() = default; + virtual std::shared_ptr Column(int column_index) = 0; + virtual ::arrow::Status ReadTable(const std::vector& column_indices, + std::shared_ptr<::arrow::Table>* out) = 0; + virtual ::arrow::Status ReadTable(std::shared_ptr<::arrow::Table>* out) = 0; + + private: + struct Iterator; +}; + +class ColumnChunkReader { + public: + virtual ~ColumnChunkReader() = default; + virtual ::arrow::Status Read(std::shared_ptr<::arrow::ChunkedArray>* out) = 0; +}; + +// At this point, the column reader is a stream iterator. It only knows how to +// read the next batch of values for a particular column from the file until it +// runs out. +// +// We also do not expose any internal Parquet details, such as row groups. This +// might change in the future. +class PARQUET_EXPORT ColumnReader { + public: + virtual ~ColumnReader() = default; + + // Scan the next array of the indicated size. The actual size of the + // returned array may be less than the passed size depending how much data is + // available in the file. + // + // When all the data in the file has been exhausted, the result is set to + // nullptr. + // + // Returns Status::OK on a successful read, including if you have exhausted + // the data available in the file. 
+ virtual ::arrow::Status NextBatch(int64_t batch_size, + std::shared_ptr<::arrow::ChunkedArray>* out) = 0; +}; + +/// \brief Experimental helper class for bindings (like Python) that struggle +/// either with std::move or C++ exceptions +class PARQUET_EXPORT FileReaderBuilder { + public: + FileReaderBuilder(); + + /// Create FileReaderBuilder from Arrow file and optional properties / metadata + ::arrow::Status Open(std::shared_ptr<::arrow::io::RandomAccessFile> file, + const ReaderProperties& properties = default_reader_properties(), + std::shared_ptr metadata = NULLPTR); + + /// Create FileReaderBuilder from file path and optional properties / metadata + ::arrow::Status OpenFile(const std::string& path, bool memory_map = false, + const ReaderProperties& props = default_reader_properties(), + std::shared_ptr metadata = NULLPTR); + + ParquetFileReader* raw_reader() { return raw_reader_.get(); } + + /// Set Arrow MemoryPool for memory allocation + FileReaderBuilder* memory_pool(::arrow::MemoryPool* pool); + /// Set Arrow reader properties + FileReaderBuilder* properties(const ArrowReaderProperties& arg_properties); + /// Build FileReader instance + ::arrow::Status Build(std::unique_ptr* out); + ::arrow::Result> Build(); + + private: + ::arrow::MemoryPool* pool_; + ArrowReaderProperties properties_; + std::unique_ptr raw_reader_; +}; + +/// \defgroup parquet-arrow-reader-factories Factory functions for Parquet Arrow readers +/// +/// @{ + +/// \brief Build FileReader from Arrow file and MemoryPool +/// +/// Advanced settings are supported through the FileReaderBuilder class. +PARQUET_EXPORT +::arrow::Status OpenFile(std::shared_ptr<::arrow::io::RandomAccessFile>, + ::arrow::MemoryPool* allocator, + std::unique_ptr* reader); + +/// @} + +PARQUET_EXPORT +::arrow::Status StatisticsAsScalars(const Statistics& Statistics, + std::shared_ptr<::arrow::Scalar>* min, + std::shared_ptr<::arrow::Scalar>* max); + +namespace internal { + +PARQUET_EXPORT +::arrow::Status FuzzReader(const uint8_t* data, int64_t size); + +} // namespace internal +} // namespace arrow +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/schema.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/schema.h new file mode 100644 index 0000000000000000000000000000000000000000..dd60fde43422889c53ebd7cf86fbac99c8c6f282 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/schema.h @@ -0,0 +1,184 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
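When non-default reader properties are needed, the FileReaderBuilder declared above can be used instead of the OpenFile() helper; a sketch (the batch size value is an arbitrary illustration):

::arrow::Result<std::unique_ptr<parquet::arrow::FileReader>> BuildReader(
    std::shared_ptr<::arrow::io::RandomAccessFile> file) {
  parquet::arrow::FileReaderBuilder builder;
  ARROW_RETURN_NOT_OK(builder.Open(std::move(file)));

  parquet::ArrowReaderProperties arrow_props;
  arrow_props.set_batch_size(64 * 1024);  // rows per RecordBatch

  return builder.memory_pool(::arrow::default_memory_pool())
      ->properties(arrow_props)
      ->Build();
}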
+ +#pragma once + +#include +#include +#include +#include +#include + +#include "arrow/result.h" +#include "arrow/status.h" +#include "arrow/type.h" +#include "arrow/type_fwd.h" + +#include "parquet/level_conversion.h" +#include "parquet/platform.h" +#include "parquet/schema.h" + +namespace parquet { + +class ArrowReaderProperties; +class ArrowWriterProperties; +class WriterProperties; + +namespace arrow { + +/// \defgroup arrow-to-parquet-schema-conversion Functions to convert an Arrow +/// schema into a Parquet schema. +/// +/// @{ + +PARQUET_EXPORT +::arrow::Status FieldToNode(const std::shared_ptr<::arrow::Field>& field, + const WriterProperties& properties, + const ArrowWriterProperties& arrow_properties, + schema::NodePtr* out); + +PARQUET_EXPORT +::arrow::Status ToParquetSchema(const ::arrow::Schema* arrow_schema, + const WriterProperties& properties, + const ArrowWriterProperties& arrow_properties, + std::shared_ptr* out); + +PARQUET_EXPORT +::arrow::Status ToParquetSchema(const ::arrow::Schema* arrow_schema, + const WriterProperties& properties, + std::shared_ptr* out); + +/// @} + +/// \defgroup parquet-to-arrow-schema-conversion Functions to convert a Parquet +/// schema into an Arrow schema. +/// +/// @{ + +PARQUET_EXPORT +::arrow::Status FromParquetSchema( + const SchemaDescriptor* parquet_schema, const ArrowReaderProperties& properties, + const std::shared_ptr& key_value_metadata, + std::shared_ptr<::arrow::Schema>* out); + +PARQUET_EXPORT +::arrow::Status FromParquetSchema(const SchemaDescriptor* parquet_schema, + const ArrowReaderProperties& properties, + std::shared_ptr<::arrow::Schema>* out); + +PARQUET_EXPORT +::arrow::Status FromParquetSchema(const SchemaDescriptor* parquet_schema, + std::shared_ptr<::arrow::Schema>* out); + +/// @} + +/// \brief Bridge between an arrow::Field and parquet column indices. +struct PARQUET_EXPORT SchemaField { + std::shared_ptr<::arrow::Field> field; + std::vector children; + + // Only set for leaf nodes + int column_index = -1; + + parquet::internal::LevelInfo level_info; + + bool is_leaf() const { return column_index != -1; } +}; + +/// \brief Bridge between a parquet Schema and an arrow Schema. +/// +/// Expose parquet columns as a tree structure. Useful traverse and link +/// between arrow's Schema and parquet's Schema. 
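A sketch of the schema conversion functions above (the example schema is arbitrary and default writer properties are assumed):

::arrow::Status RoundTripSchema() {
  auto arrow_schema = ::arrow::schema({::arrow::field("x", ::arrow::int64())});

  std::shared_ptr<parquet::SchemaDescriptor> parquet_schema;
  ARROW_RETURN_NOT_OK(parquet::arrow::ToParquetSchema(
      arrow_schema.get(), *parquet::default_writer_properties(), &parquet_schema));

  std::shared_ptr<::arrow::Schema> roundtripped;
  ARROW_RETURN_NOT_OK(
      parquet::arrow::FromParquetSchema(parquet_schema.get(), &roundtripped));
  return ::arrow::Status::OK();
}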
+struct PARQUET_EXPORT SchemaManifest { + static ::arrow::Status Make( + const SchemaDescriptor* schema, + const std::shared_ptr& metadata, + const ArrowReaderProperties& properties, SchemaManifest* manifest); + + const SchemaDescriptor* descr; + std::shared_ptr<::arrow::Schema> origin_schema; + std::shared_ptr schema_metadata; + std::vector schema_fields; + + std::unordered_map column_index_to_field; + std::unordered_map child_to_parent; + + ::arrow::Status GetColumnField(int column_index, const SchemaField** out) const { + auto it = column_index_to_field.find(column_index); + if (it == column_index_to_field.end()) { + return ::arrow::Status::KeyError("Column index ", column_index, + " not found in schema manifest, may be malformed"); + } + *out = it->second; + return ::arrow::Status::OK(); + } + + const SchemaField* GetParent(const SchemaField* field) const { + // Returns nullptr also if not found + auto it = child_to_parent.find(field); + if (it == child_to_parent.end()) { + return NULLPTR; + } + return it->second; + } + + /// Coalesce a list of field indices (relative to the equivalent arrow::Schema) which + /// correspond to the column root (first node below the parquet schema's root group) of + /// each leaf referenced in column_indices. + /// + /// For example, for leaves `a.b.c`, `a.b.d.e`, and `i.j.k` (column_indices=[0,1,3]) + /// the roots are `a` and `i` (return=[0,2]). + /// + /// root + /// -- a <------ + /// -- -- b | | + /// -- -- -- c | + /// -- -- -- d | + /// -- -- -- -- e + /// -- f + /// -- -- g + /// -- -- -- h + /// -- i <--- + /// -- -- j | + /// -- -- -- k + ::arrow::Result> GetFieldIndices( + const std::vector& column_indices) const { + const schema::GroupNode* group = descr->group_node(); + std::unordered_set already_added; + + std::vector out; + for (int column_idx : column_indices) { + if (column_idx < 0 || column_idx >= descr->num_columns()) { + return ::arrow::Status::IndexError("Column index ", column_idx, " is not valid"); + } + + auto field_node = descr->GetColumnRoot(column_idx); + auto field_idx = group->FieldIndex(*field_node); + if (field_idx == -1) { + return ::arrow::Status::IndexError("Column index ", column_idx, " is not valid"); + } + + if (already_added.insert(field_idx).second) { + out.push_back(field_idx); + } + } + return out; + } +}; + +} // namespace arrow +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/test_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/test_util.h new file mode 100644 index 0000000000000000000000000000000000000000..b2be1b3c5354d7a28c6fad23dd745a9d32bbb7d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/test_util.h @@ -0,0 +1,524 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
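Usage sketch for SchemaManifest (the descriptor is assumed to come from an already-open file, and the index lists mirror the GetFieldIndices example above):

::arrow::Status InspectManifest(const parquet::SchemaDescriptor* descr) {
  parquet::arrow::SchemaManifest manifest;
  ARROW_RETURN_NOT_OK(parquet::arrow::SchemaManifest::Make(
      descr, /*metadata=*/nullptr, parquet::ArrowReaderProperties(), &manifest));

  // Leaf column 0 -> its SchemaField (column_index is only set on leaves).
  const parquet::arrow::SchemaField* leaf = nullptr;
  ARROW_RETURN_NOT_OK(manifest.GetColumnField(0, &leaf));

  // Leaf columns 0, 1 and 3 -> indices of their top-level roots.
  ARROW_ASSIGN_OR_RAISE(std::vector<int> roots, manifest.GetFieldIndices({0, 1, 3}));
  return ::arrow::Status::OK();
}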
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/array/builder_binary.h" +#include "arrow/array/builder_decimal.h" +#include "arrow/array/builder_primitive.h" +#include "arrow/testing/gtest_util.h" +#include "arrow/testing/random.h" +#include "arrow/type_fwd.h" +#include "arrow/type_traits.h" +#include "arrow/util/decimal.h" +#include "arrow/util/float16.h" +#include "parquet/column_reader.h" +#include "parquet/test_util.h" + +namespace parquet { + +using internal::RecordReader; + +namespace arrow { + +using ::arrow::Array; +using ::arrow::ChunkedArray; +using ::arrow::Status; + +template +struct DecimalWithPrecisionAndScale { + static_assert(PRECISION >= 1 && PRECISION <= 38, "Invalid precision value"); + + using type = ::arrow::Decimal128Type; + static constexpr ::arrow::Type::type type_id = ::arrow::Decimal128Type::type_id; + static constexpr int32_t precision = PRECISION; + static constexpr int32_t scale = PRECISION - 1; +}; + +template +struct Decimal256WithPrecisionAndScale { + static_assert(PRECISION >= 1 && PRECISION <= 76, "Invalid precision value"); + + using type = ::arrow::Decimal256Type; + static constexpr ::arrow::Type::type type_id = ::arrow::Decimal256Type::type_id; + static constexpr int32_t precision = PRECISION; + static constexpr int32_t scale = PRECISION - 1; +}; + +template +::arrow::enable_if_floating_point NonNullArray( + size_t size, std::shared_ptr* out) { + using c_type = typename ArrowType::c_type; + std::vector values; + if constexpr (::arrow::is_half_float_type::value) { + values.resize(size); + test::random_float16_numbers(static_cast(size), 0, ::arrow::util::Float16(0.0f), + ::arrow::util::Float16(1.0f), values.data()); + } else { + ::arrow::random_real(size, 0, static_cast(0), static_cast(1), + &values); + } + ::arrow::NumericBuilder builder; + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size())); + return builder.Finish(out); +} + +template +::arrow::enable_if_integer NonNullArray(size_t size, + std::shared_ptr* out) { + std::vector values; + ::arrow::randint(size, 0, 64, &values); + + // Passing data type so this will work with TimestampType too + ::arrow::NumericBuilder builder(std::make_shared(), + ::arrow::default_memory_pool()); + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size())); + return builder.Finish(out); +} + +template +::arrow::enable_if_date NonNullArray(size_t size, + std::shared_ptr* out) { + std::vector values; + ::arrow::randint(size, 0, 24, &values); + for (size_t i = 0; i < size; i++) { + values[i] *= 86400000; + } + + // Passing data type so this will work with TimestampType too + ::arrow::NumericBuilder builder(std::make_shared(), + ::arrow::default_memory_pool()); + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size())); + return builder.Finish(out); +} + +template +::arrow::enable_if_base_binary NonNullArray( + size_t size, std::shared_ptr* out) { + using BuilderType = typename ::arrow::TypeTraits::BuilderType; + BuilderType builder; + for (size_t i = 0; i < size; i++) { + RETURN_NOT_OK(builder.Append("test-string")); + } + return builder.Finish(out); +} + +template +::arrow::enable_if_fixed_size_binary NonNullArray( + size_t size, std::shared_ptr* out) { + using BuilderType = typename ::arrow::TypeTraits::BuilderType; + // set byte_width to the length of "fixed": 5 + // todo: find a way to generate test data with 
more diversity. + BuilderType builder(::arrow::fixed_size_binary(5)); + for (size_t i = 0; i < size; i++) { + RETURN_NOT_OK(builder.Append("fixed")); + } + return builder.Finish(out); +} + +template +static void random_decimals(int64_t n, uint32_t seed, int32_t precision, uint8_t* out) { + auto gen = ::arrow::random::RandomArrayGenerator(seed); + std::shared_ptr decimals; + if constexpr (byte_width == 16) { + decimals = gen.Decimal128(::arrow::decimal128(precision, 0), n); + } else { + decimals = gen.Decimal256(::arrow::decimal256(precision, 0), n); + } + std::memcpy(out, decimals->data()->GetValues(1, 0), byte_width * n); +} + +template +::arrow::enable_if_t< + std::is_same>::value, Status> +NonNullArray(size_t size, std::shared_ptr* out) { + constexpr int32_t kDecimalPrecision = precision; + constexpr int32_t kDecimalScale = DecimalWithPrecisionAndScale::scale; + + const auto type = ::arrow::decimal(kDecimalPrecision, kDecimalScale); + ::arrow::Decimal128Builder builder(type); + const int32_t byte_width = + static_cast(*type).byte_width(); + + constexpr int32_t seed = 0; + + ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width)); + random_decimals<::arrow::Decimal128Type::kByteWidth>(size, seed, kDecimalPrecision, + out_buf->mutable_data()); + + RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size)); + return builder.Finish(out); +} + +template +::arrow::enable_if_t< + std::is_same>::value, Status> +NonNullArray(size_t size, std::shared_ptr* out) { + constexpr int32_t kDecimalPrecision = precision; + constexpr int32_t kDecimalScale = Decimal256WithPrecisionAndScale::scale; + + const auto type = ::arrow::decimal256(kDecimalPrecision, kDecimalScale); + ::arrow::Decimal256Builder builder(type); + const int32_t byte_width = + static_cast(*type).byte_width(); + + constexpr int32_t seed = 0; + + ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width)); + random_decimals<::arrow::Decimal256Type::kByteWidth>(size, seed, kDecimalPrecision, + out_buf->mutable_data()); + + RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size)); + return builder.Finish(out); +} + +template +::arrow::enable_if_boolean NonNullArray(size_t size, + std::shared_ptr* out) { + std::vector values; + ::arrow::randint(size, 0, 1, &values); + ::arrow::BooleanBuilder builder; + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size())); + return builder.Finish(out); +} + +// This helper function only supports (size/2) nulls. +template +::arrow::enable_if_floating_point NullableArray( + size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr* out) { + using c_type = typename ArrowType::c_type; + std::vector values; + if constexpr (::arrow::is_half_float_type::value) { + values.resize(size); + test::random_float16_numbers(static_cast(size), 0, ::arrow::util::Float16(-1e4f), + ::arrow::util::Float16(1e4f), values.data()); + } else { + ::arrow::random_real(size, seed, static_cast(-1e10), + static_cast(1e10), &values); + } + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + ::arrow::NumericBuilder builder; + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data())); + return builder.Finish(out); +} + +// This helper function only supports (size/2) nulls. 
+template +::arrow::enable_if_integer NullableArray(size_t size, size_t num_nulls, + uint32_t seed, + std::shared_ptr* out) { + std::vector values; + + // Seed is random in Arrow right now + (void)seed; + ::arrow::randint(size, 0, 64, &values); + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + // Passing data type so this will work with TimestampType too + ::arrow::NumericBuilder builder(std::make_shared(), + ::arrow::default_memory_pool()); + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data())); + return builder.Finish(out); +} + +template +::arrow::enable_if_date NullableArray(size_t size, size_t num_nulls, + uint32_t seed, + std::shared_ptr* out) { + std::vector values; + + // Seed is random in Arrow right now + (void)seed; + ::arrow::randint(size, 0, 24, &values); + for (size_t i = 0; i < size; i++) { + values[i] *= 86400000; + } + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + // Passing data type so this will work with TimestampType too + ::arrow::NumericBuilder builder(std::make_shared(), + ::arrow::default_memory_pool()); + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data())); + return builder.Finish(out); +} + +// This helper function only supports (size/2) nulls yet. +template +::arrow::enable_if_base_binary NullableArray( + size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr<::arrow::Array>* out) { + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + using BuilderType = typename ::arrow::TypeTraits::BuilderType; + BuilderType builder; + + const int kBufferSize = 10; + uint8_t buffer[kBufferSize]; + for (size_t i = 0; i < size; i++) { + if (!valid_bytes[i]) { + RETURN_NOT_OK(builder.AppendNull()); + } else { + ::arrow::random_bytes(kBufferSize, seed + static_cast(i), buffer); + if (ArrowType::is_utf8) { + // Trivially force data to be valid UTF8 by making it all ASCII + for (auto& byte : buffer) { + byte &= 0x7f; + } + } + RETURN_NOT_OK(builder.Append(buffer, kBufferSize)); + } + } + return builder.Finish(out); +} + +// This helper function only supports (size/2) nulls yet, +// same as NullableArray(..) 
+template +::arrow::enable_if_fixed_size_binary NullableArray( + size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr<::arrow::Array>* out) { + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + using BuilderType = typename ::arrow::TypeTraits::BuilderType; + const int byte_width = 10; + BuilderType builder(::arrow::fixed_size_binary(byte_width)); + + const int kBufferSize = byte_width; + uint8_t buffer[kBufferSize]; + for (size_t i = 0; i < size; i++) { + if (!valid_bytes[i]) { + RETURN_NOT_OK(builder.AppendNull()); + } else { + ::arrow::random_bytes(kBufferSize, seed + static_cast(i), buffer); + RETURN_NOT_OK(builder.Append(buffer)); + } + } + return builder.Finish(out); +} + +template +::arrow::enable_if_t< + std::is_same>::value, Status> +NullableArray(size_t size, size_t num_nulls, uint32_t seed, + std::shared_ptr<::arrow::Array>* out) { + std::vector valid_bytes(size, '\1'); + + for (size_t i = 0; i < num_nulls; ++i) { + valid_bytes[i * 2] = '\0'; + } + + constexpr int32_t kDecimalPrecision = precision; + constexpr int32_t kDecimalScale = DecimalWithPrecisionAndScale::scale; + const auto type = ::arrow::decimal(kDecimalPrecision, kDecimalScale); + const int32_t byte_width = + static_cast(*type).byte_width(); + + ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width)); + + random_decimals<::arrow::Decimal128Type::kByteWidth>(size, seed, precision, + out_buf->mutable_data()); + + ::arrow::Decimal128Builder builder(type); + RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size, valid_bytes.data())); + return builder.Finish(out); +} + +template +::arrow::enable_if_t< + std::is_same>::value, Status> +NullableArray(size_t size, size_t num_nulls, uint32_t seed, + std::shared_ptr<::arrow::Array>* out) { + std::vector valid_bytes(size, '\1'); + + for (size_t i = 0; i < num_nulls; ++i) { + valid_bytes[i * 2] = '\0'; + } + + constexpr int32_t kDecimalPrecision = precision; + constexpr int32_t kDecimalScale = Decimal256WithPrecisionAndScale::scale; + const auto type = ::arrow::decimal256(kDecimalPrecision, kDecimalScale); + const int32_t byte_width = + static_cast(*type).byte_width(); + + ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width)); + + random_decimals<::arrow::Decimal256Type::kByteWidth>(size, seed, precision, + out_buf->mutable_data()); + + ::arrow::Decimal256Builder builder(type); + RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size, valid_bytes.data())); + return builder.Finish(out); +} + +// This helper function only supports (size/2) nulls yet. +template +::arrow::enable_if_boolean NullableArray(size_t size, size_t num_nulls, + uint32_t seed, + std::shared_ptr* out) { + std::vector values; + + // Seed is random in Arrow right now + (void)seed; + + ::arrow::randint(size, 0, 1, &values); + std::vector valid_bytes(size, 1); + + for (size_t i = 0; i < num_nulls; i++) { + valid_bytes[i * 2] = 0; + } + + ::arrow::BooleanBuilder builder; + RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data())); + return builder.Finish(out); +} + +/// Wrap an Array into a ListArray by splitting it up into size lists. +/// +/// This helper function only supports (size/2) nulls. 
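A brief sketch of how the generators above are typically used in tests (sizes, types and seed are arbitrary):

::arrow::Status MakeTestArrays() {
  std::shared_ptr<::arrow::Array> dense, with_nulls;

  // 100 non-null int32 values.
  ARROW_RETURN_NOT_OK(parquet::arrow::NonNullArray<::arrow::Int32Type>(100, &dense));

  // 100 doubles with 10 nulls placed at even positions, as documented above.
  ARROW_RETURN_NOT_OK(parquet::arrow::NullableArray<::arrow::DoubleType>(
      100, /*num_nulls=*/10, /*seed=*/0, &with_nulls));
  return ::arrow::Status::OK();
}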
+Status MakeListArray(const std::shared_ptr& values, int64_t size, + int64_t null_count, const std::string& item_name, + bool nullable_values, std::shared_ptr<::arrow::ListArray>* out) { + // We always include an empty list + int64_t non_null_entries = size - null_count - 1; + int64_t length_per_entry = values->length() / non_null_entries; + + auto offsets = AllocateBuffer(); + RETURN_NOT_OK(offsets->Resize((size + 1) * sizeof(int32_t))); + int32_t* offsets_ptr = reinterpret_cast(offsets->mutable_data()); + + auto null_bitmap = AllocateBuffer(); + int64_t bitmap_size = ::arrow::bit_util::BytesForBits(size); + RETURN_NOT_OK(null_bitmap->Resize(bitmap_size)); + uint8_t* null_bitmap_ptr = null_bitmap->mutable_data(); + memset(null_bitmap_ptr, 0, bitmap_size); + + int32_t current_offset = 0; + for (int64_t i = 0; i < size; i++) { + offsets_ptr[i] = current_offset; + if (!(((i % 2) == 0) && ((i / 2) < null_count))) { + // Non-null list (list with index 1 is always empty). + ::arrow::bit_util::SetBit(null_bitmap_ptr, i); + if (i != 1) { + current_offset += static_cast(length_per_entry); + } + } + } + offsets_ptr[size] = static_cast(values->length()); + + auto value_field = ::arrow::field(item_name, values->type(), nullable_values); + *out = std::make_shared<::arrow::ListArray>(::arrow::list(value_field), size, offsets, + values, null_bitmap, null_count); + + return Status::OK(); +} + +// Make an array containing only empty lists, with a null values array +Status MakeEmptyListsArray(int64_t size, std::shared_ptr* out_array) { + // Allocate an offsets buffer containing only zeroes + const int64_t offsets_nbytes = (size + 1) * sizeof(int32_t); + ARROW_ASSIGN_OR_RAISE(auto offsets_buffer, ::arrow::AllocateBuffer(offsets_nbytes)); + memset(offsets_buffer->mutable_data(), 0, offsets_nbytes); + + auto value_field = + ::arrow::field("item", ::arrow::float64(), false /* nullable_values */); + auto list_type = ::arrow::list(value_field); + + std::vector> child_buffers = {nullptr /* null bitmap */, + nullptr /* values */}; + auto child_data = + ::arrow::ArrayData::Make(value_field->type(), 0, std::move(child_buffers)); + + std::vector> buffers = {nullptr /* bitmap */, + std::move(offsets_buffer)}; + auto array_data = ::arrow::ArrayData::Make(list_type, size, std::move(buffers)); + array_data->child_data.push_back(child_data); + + *out_array = ::arrow::MakeArray(array_data); + return Status::OK(); +} + +std::shared_ptr<::arrow::Table> MakeSimpleTable( + const std::shared_ptr& values, bool nullable) { + auto schema = ::arrow::schema({::arrow::field("col", values->type(), nullable)}); + return ::arrow::Table::Make(schema, {values}); +} + +std::shared_ptr<::arrow::Table> MakeSimpleTable(const std::shared_ptr& values, + bool nullable) { + auto carr = std::make_shared<::arrow::ChunkedArray>(values); + return MakeSimpleTable(carr, nullable); +} + +template +void ExpectArray(T* expected, Array* result) { + auto p_array = static_cast<::arrow::PrimitiveArray*>(result); + for (int i = 0; i < result->length(); i++) { + EXPECT_EQ(expected[i], reinterpret_cast(p_array->values()->data())[i]); + } +} + +template +void ExpectArrayT(void* expected, Array* result) { + ::arrow::PrimitiveArray* p_array = static_cast<::arrow::PrimitiveArray*>(result); + for (int64_t i = 0; i < result->length(); i++) { + EXPECT_EQ(reinterpret_cast(expected)[i], + reinterpret_cast( + p_array->values()->data())[i]); + } +} + +template <> +void ExpectArrayT<::arrow::BooleanType>(void* expected, Array* result) { + ::arrow::BooleanBuilder builder; + 
ARROW_EXPECT_OK( + builder.AppendValues(reinterpret_cast(expected), result->length())); + + std::shared_ptr expected_array; + ARROW_EXPECT_OK(builder.Finish(&expected_array)); + EXPECT_TRUE(result->Equals(*expected_array)); +} + +} // namespace arrow + +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/writer.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/writer.h new file mode 100644 index 0000000000000000000000000000000000000000..1decafedc97fd1e4da83300140cee19f0bab9de1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/writer.h @@ -0,0 +1,180 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "parquet/platform.h" +#include "parquet/properties.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class RecordBatch; +class Schema; +class Table; + +} // namespace arrow + +namespace parquet { + +class FileMetaData; +class ParquetFileWriter; + +namespace arrow { + +/// \brief Iterative FileWriter class +/// +/// For basic usage, can write a Table at a time, creating one or more row +/// groups per write call. +/// +/// For advanced usage, can write column-by-column: Start a new RowGroup or +/// Chunk with NewRowGroup, then write column-by-column the whole column chunk. +/// +/// If PARQUET:field_id is present as a metadata key on a field, and the corresponding +/// value is a nonnegative integer, then it will be used as the field_id in the parquet +/// file. +class PARQUET_EXPORT FileWriter { + public: + static ::arrow::Status Make(MemoryPool* pool, std::unique_ptr writer, + std::shared_ptr<::arrow::Schema> schema, + std::shared_ptr arrow_properties, + std::unique_ptr* out); + + /// \brief Try to create an Arrow to Parquet file writer. + /// + /// \param schema schema of data that will be passed. + /// \param pool memory pool to use. + /// \param sink output stream to write Parquet data. + /// \param properties general Parquet writer properties. + /// \param arrow_properties Arrow-specific writer properties. + /// + /// \since 11.0.0 + static ::arrow::Result> Open( + const ::arrow::Schema& schema, MemoryPool* pool, + std::shared_ptr<::arrow::io::OutputStream> sink, + std::shared_ptr properties = default_writer_properties(), + std::shared_ptr arrow_properties = + default_arrow_writer_properties()); + + ARROW_DEPRECATED("Deprecated in 11.0.0. Use Result-returning variants instead.") + static ::arrow::Status Open(const ::arrow::Schema& schema, MemoryPool* pool, + std::shared_ptr<::arrow::io::OutputStream> sink, + std::shared_ptr properties, + std::unique_ptr* writer); + ARROW_DEPRECATED("Deprecated in 11.0.0. 
Use Result-returning variants instead.") + static ::arrow::Status Open(const ::arrow::Schema& schema, MemoryPool* pool, + std::shared_ptr<::arrow::io::OutputStream> sink, + std::shared_ptr properties, + std::shared_ptr arrow_properties, + std::unique_ptr* writer); + + /// Return the Arrow schema to be written to. + virtual std::shared_ptr<::arrow::Schema> schema() const = 0; + + /// \brief Write a Table to Parquet. + /// + /// \param table Arrow table to write. + /// \param chunk_size maximum number of rows to write per row group. + virtual ::arrow::Status WriteTable( + const ::arrow::Table& table, int64_t chunk_size = DEFAULT_MAX_ROW_GROUP_LENGTH) = 0; + + /// \brief Start a new row group. + /// + /// Returns an error if not all columns have been written. + /// + /// \param chunk_size the number of rows in the next row group. + virtual ::arrow::Status NewRowGroup(int64_t chunk_size) = 0; + + /// \brief Write ColumnChunk in row group using an array. + virtual ::arrow::Status WriteColumnChunk(const ::arrow::Array& data) = 0; + + /// \brief Write ColumnChunk in row group using slice of a ChunkedArray + virtual ::arrow::Status WriteColumnChunk( + const std::shared_ptr<::arrow::ChunkedArray>& data, int64_t offset, + int64_t size) = 0; + + /// \brief Write ColumnChunk in a row group using a ChunkedArray + virtual ::arrow::Status WriteColumnChunk( + const std::shared_ptr<::arrow::ChunkedArray>& data) = 0; + + /// \brief Start a new buffered row group. + /// + /// Returns an error if not all columns have been written. + virtual ::arrow::Status NewBufferedRowGroup() = 0; + + /// \brief Write a RecordBatch into the buffered row group. + /// + /// Multiple RecordBatches can be written into the same row group + /// through this method. + /// + /// WriterProperties.max_row_group_length() is respected and a new + /// row group will be created if the current row group exceeds the + /// limit. + /// + /// Batches get flushed to the output stream once NewBufferedRowGroup() + /// or Close() is called. + /// + /// WARNING: If you are writing multiple files in parallel in the same + /// executor, deadlock may occur if ArrowWriterProperties::use_threads + /// is set to true to write columns in parallel. Please disable use_threads + /// option in this case. + virtual ::arrow::Status WriteRecordBatch(const ::arrow::RecordBatch& batch) = 0; + + /// \brief Write the footer and close the file. + virtual ::arrow::Status Close() = 0; + virtual ~FileWriter(); + + virtual MemoryPool* memory_pool() const = 0; + /// \brief Return the file metadata, only available after calling Close(). + virtual const std::shared_ptr metadata() const = 0; +}; + +/// \brief Write Parquet file metadata only to indicated Arrow OutputStream +PARQUET_EXPORT +::arrow::Status WriteFileMetaData(const FileMetaData& file_metadata, + ::arrow::io::OutputStream* sink); + +/// \brief Write metadata-only Parquet file to indicated Arrow OutputStream +PARQUET_EXPORT +::arrow::Status WriteMetaDataFile(const FileMetaData& file_metadata, + ::arrow::io::OutputStream* sink); + +/// \brief Write a Table to Parquet. +/// +/// This writes one table in a single shot. To write a Parquet file with +/// multiple tables iteratively, see parquet::arrow::FileWriter. +/// +/// \param table Table to write. +/// \param pool memory pool to use. +/// \param sink output stream to write Parquet data. +/// \param chunk_size maximum number of rows to write per row group. +/// \param properties general Parquet writer properties. 
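To make the iterative write path above concrete, a minimal sketch (the output path is arbitrary and default writer properties are assumed):

#include "arrow/io/file.h"
#include "parquet/arrow/writer.h"

::arrow::Status WriteBatches(
    const std::shared_ptr<::arrow::Schema>& schema,
    const std::vector<std::shared_ptr<::arrow::RecordBatch>>& batches) {
  ARROW_ASSIGN_OR_RAISE(auto sink, ::arrow::io::FileOutputStream::Open("out.parquet"));
  ARROW_ASSIGN_OR_RAISE(
      auto writer,
      parquet::arrow::FileWriter::Open(*schema, ::arrow::default_memory_pool(), sink));

  // Buffer several batches into one row group, then write the footer.
  ARROW_RETURN_NOT_OK(writer->NewBufferedRowGroup());
  for (const auto& batch : batches) {
    ARROW_RETURN_NOT_OK(writer->WriteRecordBatch(*batch));
  }
  return writer->Close();
}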
+/// \param arrow_properties Arrow-specific writer properties. +::arrow::Status PARQUET_EXPORT +WriteTable(const ::arrow::Table& table, MemoryPool* pool, + std::shared_ptr<::arrow::io::OutputStream> sink, + int64_t chunk_size = DEFAULT_MAX_ROW_GROUP_LENGTH, + std::shared_ptr properties = default_writer_properties(), + std::shared_ptr arrow_properties = + default_arrow_writer_properties()); + +} // namespace arrow +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter.h new file mode 100644 index 0000000000000000000000000000000000000000..909563d013fedcc6604ec8decc3d7384e0b2d693 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter.h @@ -0,0 +1,363 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/bit_util.h" +#include "arrow/util/logging.h" +#include "parquet/hasher.h" +#include "parquet/platform.h" +#include "parquet/types.h" + +namespace parquet { + +// A Bloom filter is a compact structure to indicate whether an item is not in a set or +// probably in a set. The Bloom filter usually consists of a bit set that represents a +// set of elements, a hash strategy and a Bloom filter algorithm. +class PARQUET_EXPORT BloomFilter { + public: + // Maximum Bloom filter size, it sets to HDFS default block size 128MB + // This value will be reconsidered when implementing Bloom filter producer. + static constexpr uint32_t kMaximumBloomFilterBytes = 128 * 1024 * 1024; + + /// Determine whether an element exist in set or not. + /// + /// @param hash the element to contain. + /// @return false if value is definitely not in set, and true means PROBABLY + /// in set. + virtual bool FindHash(uint64_t hash) const = 0; + + /// Insert element to set represented by Bloom filter bitset. + /// @param hash the hash of value to insert into Bloom filter. + virtual void InsertHash(uint64_t hash) = 0; + + /// Insert elements to set represented by Bloom filter bitset. + /// @param hashes the hash values to insert into Bloom filter. + /// @param num_values the number of hash values to insert. + virtual void InsertHashes(const uint64_t* hashes, int num_values) = 0; + + /// Write this Bloom filter to an output stream. A Bloom filter structure should + /// include bitset length, hash strategy, algorithm, and bitset. + /// + /// @param sink the output stream to write + virtual void WriteTo(ArrowOutputStream* sink) const = 0; + + /// Get the number of bytes of bitset + virtual uint32_t GetBitsetSize() const = 0; + + /// Compute hash for 32 bits value by using its plain encoding result. 
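For the simple case, the one-shot WriteTable() declared above avoids managing a FileWriter; a sketch (path and chunk size are arbitrary):

::arrow::Status WriteWholeTable(const std::shared_ptr<::arrow::Table>& table) {
  ARROW_ASSIGN_OR_RAISE(auto sink,
                        ::arrow::io::FileOutputStream::Open("table.parquet"));
  return parquet::arrow::WriteTable(*table, ::arrow::default_memory_pool(), sink,
                                    /*chunk_size=*/64 * 1024);
}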
+ /// + /// @param value the value to hash. + /// @return hash result. + virtual uint64_t Hash(int32_t value) const = 0; + + /// Compute hash for 64 bits value by using its plain encoding result. + /// + /// @param value the value to hash. + /// @return hash result. + virtual uint64_t Hash(int64_t value) const = 0; + + /// Compute hash for float value by using its plain encoding result. + /// + /// @param value the value to hash. + /// @return hash result. + virtual uint64_t Hash(float value) const = 0; + + /// Compute hash for double value by using its plain encoding result. + /// + /// @param value the value to hash. + /// @return hash result. + virtual uint64_t Hash(double value) const = 0; + + /// Compute hash for Int96 value by using its plain encoding result. + /// + /// @param value the value to hash. + /// @return hash result. + virtual uint64_t Hash(const Int96* value) const = 0; + + /// Compute hash for ByteArray value by using its plain encoding result. + /// + /// @param value the value to hash. + /// @return hash result. + virtual uint64_t Hash(const ByteArray* value) const = 0; + + /// Compute hash for fixed byte array value by using its plain encoding result. + /// + /// @param value the value address. + /// @param len the value length. + /// @return hash result. + virtual uint64_t Hash(const FLBA* value, uint32_t len) const = 0; + + /// Batch compute hashes for 32 bits values by using its plain encoding result. + /// + /// @param values values a pointer to the values to hash. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const = 0; + + /// Batch compute hashes for 64 bits values by using its plain encoding result. + /// + /// @param values values a pointer to the values to hash. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const = 0; + + /// Batch compute hashes for float values by using its plain encoding result. + /// + /// @param values values a pointer to the values to hash. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const float* values, int num_values, uint64_t* hashes) const = 0; + + /// Batch compute hashes for double values by using its plain encoding result. + /// + /// @param values values a pointer to the values to hash. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const double* values, int num_values, uint64_t* hashes) const = 0; + + /// Batch compute hashes for Int96 values by using its plain encoding result. + /// + /// @param values values a pointer to the values to hash. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const Int96* values, int num_values, uint64_t* hashes) const = 0; + + /// Batch compute hashes for ByteArray values by using its plain encoding result. + /// + /// @param values values a pointer to the values to hash. 
+ /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const ByteArray* values, int num_values, + uint64_t* hashes) const = 0; + + /// Batch compute hashes for fixed byte array values by using its plain encoding result. + /// + /// @param values values a pointer to the values to hash. + /// @param type_len the value length. + /// @param num_values the number of values to hash. + /// @param hashes a pointer to the output hash values, its length should be equal to + /// num_values. + virtual void Hashes(const FLBA* values, uint32_t type_len, int num_values, + uint64_t* hashes) const = 0; + + virtual ~BloomFilter() = default; + + protected: + // Hash strategy available for Bloom filter. + enum class HashStrategy : uint32_t { XXHASH = 0 }; + + // Bloom filter algorithm. + enum class Algorithm : uint32_t { BLOCK = 0 }; + + enum class CompressionStrategy : uint32_t { UNCOMPRESSED = 0 }; +}; + +/// The BlockSplitBloomFilter is implemented using block-based Bloom filters from +/// Putze et al.'s "Cache-,Hash- and Space-Efficient Bloom filters". The basic idea is to +/// hash the item to a tiny Bloom filter which size fit a single cache line or smaller. +/// +/// This implementation sets 8 bits in each tiny Bloom filter. Each tiny Bloom +/// filter is 32 bytes to take advantage of 32-byte SIMD instructions. +class PARQUET_EXPORT BlockSplitBloomFilter : public BloomFilter { + public: + /// The constructor of BlockSplitBloomFilter. It uses XXH64 as hash function. + /// + /// \param pool memory pool to use. + explicit BlockSplitBloomFilter( + ::arrow::MemoryPool* pool = ::arrow::default_memory_pool()); + + /// Initialize the BlockSplitBloomFilter. The range of num_bytes should be within + /// [kMinimumBloomFilterBytes, kMaximumBloomFilterBytes], it will be + /// rounded up/down to lower/upper bound if num_bytes is out of range and also + /// will be rounded up to a power of 2. + /// + /// @param num_bytes The number of bytes to store Bloom filter bitset. + void Init(uint32_t num_bytes); + + /// Initialize the BlockSplitBloomFilter. It copies the bitset as underlying + /// bitset because the given bitset may not satisfy the 32-byte alignment requirement + /// which may lead to segfault when performing SIMD instructions. It is the caller's + /// responsibility to free the bitset passed in. This is used when reconstructing + /// a Bloom filter from a parquet file. + /// + /// @param bitset The given bitset to initialize the Bloom filter. + /// @param num_bytes The number of bytes of given bitset. + void Init(const uint8_t* bitset, uint32_t num_bytes); + + /// Minimum Bloom filter size, it sets to 32 bytes to fit a tiny Bloom filter. + static constexpr uint32_t kMinimumBloomFilterBytes = 32; + + /// Calculate optimal size according to the number of distinct values and false + /// positive probability. + /// + /// @param ndv The number of distinct values. + /// @param fpp The false positive probability. 
+ /// @return it always return a value between kMinimumBloomFilterBytes and + /// kMaximumBloomFilterBytes, and the return value is always a power of 2 + static uint32_t OptimalNumOfBytes(uint32_t ndv, double fpp) { + uint32_t optimal_num_of_bits = OptimalNumOfBits(ndv, fpp); + DCHECK(::arrow::bit_util::IsMultipleOf8(optimal_num_of_bits)); + return optimal_num_of_bits >> 3; + } + + /// Calculate optimal size according to the number of distinct values and false + /// positive probability. + /// + /// @param ndv The number of distinct values. + /// @param fpp The false positive probability. + /// @return it always return a value between kMinimumBloomFilterBytes * 8 and + /// kMaximumBloomFilterBytes * 8, and the return value is always a power of 16 + static uint32_t OptimalNumOfBits(uint32_t ndv, double fpp) { + DCHECK(fpp > 0.0 && fpp < 1.0); + const double m = -8.0 * ndv / log(1 - pow(fpp, 1.0 / 8)); + uint32_t num_bits; + + // Handle overflow. + if (m < 0 || m > kMaximumBloomFilterBytes << 3) { + num_bits = static_cast(kMaximumBloomFilterBytes << 3); + } else { + num_bits = static_cast(m); + } + + // Round up to lower bound + if (num_bits < kMinimumBloomFilterBytes << 3) { + num_bits = kMinimumBloomFilterBytes << 3; + } + + // Get next power of 2 if bits is not power of 2. + if ((num_bits & (num_bits - 1)) != 0) { + num_bits = static_cast(::arrow::bit_util::NextPower2(num_bits)); + } + + // Round down to upper bound + if (num_bits > kMaximumBloomFilterBytes << 3) { + num_bits = kMaximumBloomFilterBytes << 3; + } + + return num_bits; + } + + bool FindHash(uint64_t hash) const override; + void InsertHash(uint64_t hash) override; + void InsertHashes(const uint64_t* hashes, int num_values) override; + void WriteTo(ArrowOutputStream* sink) const override; + uint32_t GetBitsetSize() const override { return num_bytes_; } + + uint64_t Hash(int32_t value) const override { return hasher_->Hash(value); } + uint64_t Hash(int64_t value) const override { return hasher_->Hash(value); } + uint64_t Hash(float value) const override { return hasher_->Hash(value); } + uint64_t Hash(double value) const override { return hasher_->Hash(value); } + uint64_t Hash(const Int96* value) const override { return hasher_->Hash(value); } + uint64_t Hash(const ByteArray* value) const override { return hasher_->Hash(value); } + uint64_t Hash(const FLBA* value, uint32_t len) const override { + return hasher_->Hash(value, len); + } + + void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const override { + hasher_->Hashes(values, num_values, hashes); + } + void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const override { + hasher_->Hashes(values, num_values, hashes); + } + void Hashes(const float* values, int num_values, uint64_t* hashes) const override { + hasher_->Hashes(values, num_values, hashes); + } + void Hashes(const double* values, int num_values, uint64_t* hashes) const override { + hasher_->Hashes(values, num_values, hashes); + } + void Hashes(const Int96* values, int num_values, uint64_t* hashes) const override { + hasher_->Hashes(values, num_values, hashes); + } + void Hashes(const ByteArray* values, int num_values, uint64_t* hashes) const override { + hasher_->Hashes(values, num_values, hashes); + } + void Hashes(const FLBA* values, uint32_t type_len, int num_values, + uint64_t* hashes) const override { + hasher_->Hashes(values, type_len, num_values, hashes); + } + + uint64_t Hash(const int32_t* value) const { return hasher_->Hash(*value); } + uint64_t Hash(const int64_t* 
value) const { return hasher_->Hash(*value); } + uint64_t Hash(const float* value) const { return hasher_->Hash(*value); } + uint64_t Hash(const double* value) const { return hasher_->Hash(*value); } + + /// Deserialize the Bloom filter from an input stream. It is used when reconstructing + /// a Bloom filter from a parquet filter. + /// + /// @param properties The parquet reader properties. + /// @param input_stream The input stream from which to construct the bloom filter. + /// @param bloom_filter_length The length of the serialized bloom filter including + /// header. + /// @return The BlockSplitBloomFilter. + static BlockSplitBloomFilter Deserialize( + const ReaderProperties& properties, ArrowInputStream* input_stream, + std::optional bloom_filter_length = std::nullopt); + + private: + inline void InsertHashImpl(uint64_t hash); + + // Bytes in a tiny Bloom filter block. + static constexpr int kBytesPerFilterBlock = 32; + + // The number of bits to be set in each tiny Bloom filter + static constexpr int kBitsSetPerBlock = 8; + + // A mask structure used to set bits in each tiny Bloom filter. + struct BlockMask { + uint32_t item[kBitsSetPerBlock]; + }; + + // The block-based algorithm needs eight odd SALT values to calculate eight indexes + // of bit to set, one bit in each 32-bit word. + static constexpr uint32_t SALT[kBitsSetPerBlock] = { + 0x47b6137bU, 0x44974d91U, 0x8824ad5bU, 0xa2b7289dU, + 0x705495c7U, 0x2df1424bU, 0x9efc4947U, 0x5c6bfb31U}; + + // Memory pool to allocate aligned buffer for bitset + ::arrow::MemoryPool* pool_; + + // The underlying buffer of bitset. + std::shared_ptr data_; + + // The number of bytes of Bloom filter bitset. + uint32_t num_bytes_; + + // Hash strategy used in this Bloom filter. + HashStrategy hash_strategy_; + + // Algorithm used in this Bloom filter. + Algorithm algorithm_; + + // Compression used in this Bloom filter. + CompressionStrategy compression_strategy_; + + // The hash pointer points to actual hash class used. + std::unique_ptr hasher_; +}; + +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/crypto_factory.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/crypto_factory.h new file mode 100644 index 0000000000000000000000000000000000000000..01172c2b3dd4089bf9fb494892282d8574e6cebf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/crypto_factory.h @@ -0,0 +1,152 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
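A usage sketch for the block-split filter above (the ndv and fpp inputs are illustrative, and the inserted values are assumed to be int64 for brevity):

parquet::BlockSplitBloomFilter MakeFilter(const std::vector<int64_t>& values) {
  parquet::BlockSplitBloomFilter filter;
  filter.Init(parquet::BlockSplitBloomFilter::OptimalNumOfBytes(
      /*ndv=*/static_cast<uint32_t>(values.size()), /*fpp=*/0.01));

  for (int64_t v : values) {
    filter.InsertHash(filter.Hash(v));
  }

  // FindHash() may report false positives but never false negatives.
  bool maybe_present = filter.FindHash(filter.Hash(int64_t{42}));
  (void)maybe_present;
  return filter;
}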
+ +#pragma once + +#include + +#include "parquet/encryption/encryption.h" +#include "parquet/encryption/file_key_wrapper.h" +#include "parquet/encryption/key_toolkit.h" +#include "parquet/encryption/kms_client_factory.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +static constexpr ParquetCipher::type kDefaultEncryptionAlgorithm = + ParquetCipher::AES_GCM_V1; +static constexpr bool kDefaultPlaintextFooter = false; +static constexpr bool kDefaultDoubleWrapping = true; +static constexpr double kDefaultCacheLifetimeSeconds = 600; // 10 minutes +static constexpr bool kDefaultInternalKeyMaterial = true; +static constexpr bool kDefaultUniformEncryption = false; +static constexpr int32_t kDefaultDataKeyLengthBits = 128; + +struct PARQUET_EXPORT EncryptionConfiguration { + explicit EncryptionConfiguration(const std::string& footer_key) + : footer_key(footer_key) {} + + /// ID of the master key for footer encryption/signing + std::string footer_key; + + /// List of columns to encrypt, with master key IDs (see HIVE-21848). + /// Format: "masterKeyID:colName,colName;masterKeyID:colName..." + /// Either + /// (1) column_keys must be set + /// or + /// (2) uniform_encryption must be set to true + /// If none of (1) and (2) are true, or if both are true, an exception will be + /// thrown. + std::string column_keys; + + /// Encrypt footer and all columns with the same encryption key. + bool uniform_encryption = kDefaultUniformEncryption; + + /// Parquet encryption algorithm. Can be "AES_GCM_V1" (default), or "AES_GCM_CTR_V1". + ParquetCipher::type encryption_algorithm = kDefaultEncryptionAlgorithm; + + /// Write files with plaintext footer. + /// The default is false - files are written with encrypted footer. + bool plaintext_footer = kDefaultPlaintextFooter; + + /// Use double wrapping - where data encryption keys (DEKs) are encrypted with key + /// encryption keys (KEKs), which in turn are encrypted with master keys. + /// The default is true. If set to false, use single wrapping - where DEKs are + /// encrypted directly with master keys. + bool double_wrapping = kDefaultDoubleWrapping; + + /// Lifetime of cached entities (key encryption keys, local wrapping keys, KMS client + /// objects). + /// The default is 600 (10 minutes). + double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds; + + /// Store key material inside Parquet file footers; this mode doesn’t produce + /// additional files. By default, true. If set to false, key material is stored in + /// separate files in the same folder, which enables key rotation for immutable + /// Parquet files. + bool internal_key_material = kDefaultInternalKeyMaterial; + + /// Length of data encryption keys (DEKs), randomly generated by parquet key + /// management tools. Can be 128, 192 or 256 bits. + /// The default is 128 bits. + int32_t data_key_length_bits = kDefaultDataKeyLengthBits; +}; + +struct PARQUET_EXPORT DecryptionConfiguration { + /// Lifetime of cached entities (key encryption keys, local wrapping keys, KMS client + /// objects). + /// The default is 600 (10 minutes). + double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds; +}; + +/// This is a core class, that translates the parameters of high level encryption (like +/// the names of encrypted columns, names of master keys, etc), into parameters of low +/// level encryption (like the key metadata, DEK, etc). A factory that produces the low +/// level FileEncryptionProperties and FileDecryptionProperties objects, from the high +/// level parameters. 
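A configuration sketch for the struct above (the key IDs and column names are placeholders):

parquet::encryption::EncryptionConfiguration MakeEncryptionConfig() {
  parquet::encryption::EncryptionConfiguration config(/*footer_key=*/"kf");

  // Encrypt col_a and col_b with master key "k1" and col_c with "k2";
  // uniform_encryption stays false because explicit column keys are given.
  config.column_keys = "k1:col_a,col_b;k2:col_c";
  config.plaintext_footer = false;  // keep the footer encrypted (the default)
  config.double_wrapping = true;    // DEKs wrapped by KEKs, KEKs by master keys
  return config;
}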
+class PARQUET_EXPORT CryptoFactory { + public: + /// a KmsClientFactory object must be registered via this method before calling any of + /// GetFileEncryptionProperties()/GetFileDecryptionProperties() methods. + void RegisterKmsClientFactory(std::shared_ptr kms_client_factory); + + /// Get the encryption properties for a Parquet file. + /// If external key material is used then a file system and path to the + /// parquet file must be provided. + std::shared_ptr GetFileEncryptionProperties( + const KmsConnectionConfig& kms_connection_config, + const EncryptionConfiguration& encryption_config, const std::string& file_path = "", + const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR); + + /// Get decryption properties for a Parquet file. + /// If external key material is used then a file system and path to the + /// parquet file must be provided. + std::shared_ptr GetFileDecryptionProperties( + const KmsConnectionConfig& kms_connection_config, + const DecryptionConfiguration& decryption_config, const std::string& file_path = "", + const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR); + + void RemoveCacheEntriesForToken(const std::string& access_token) { + key_toolkit_->RemoveCacheEntriesForToken(access_token); + } + + void RemoveCacheEntriesForAllTokens() { + key_toolkit_->RemoveCacheEntriesForAllTokens(); + } + + /// Rotates master encryption keys for a Parquet file that uses external key material. + /// In single wrapping mode, data encryption keys are decrypted with the old master keys + /// and then re-encrypted with new master keys. + /// In double wrapping mode, key encryption keys are decrypted with the old master keys + /// and then re-encrypted with new master keys. + /// This relies on the KMS supporting versioning, such that the old master key is + /// used when unwrapping a key, and the latest version is used when wrapping a key. + void RotateMasterKeys(const KmsConnectionConfig& kms_connection_config, + const std::string& parquet_file_path, + const std::shared_ptr<::arrow::fs::FileSystem>& file_system, + bool double_wrapping = kDefaultDoubleWrapping, + double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds); + + private: + ColumnPathToEncryptionPropertiesMap GetColumnEncryptionProperties( + int dek_length, const std::string& column_keys, FileKeyWrapper* key_wrapper); + + /// Key utilities object for kms client initialization and cache control + std::shared_ptr key_toolkit_ = std::make_shared(); +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/encryption.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/encryption.h new file mode 100644 index 0000000000000000000000000000000000000000..8fd7ec8d3d015424cf7b4bd28e73db58da375bd4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/encryption.h @@ -0,0 +1,510 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "parquet/exception.h" +#include "parquet/schema.h" +#include "parquet/types.h" + +namespace parquet { + +static constexpr ParquetCipher::type kDefaultEncryptionAlgorithm = + ParquetCipher::AES_GCM_V1; +static constexpr int32_t kMaximalAadMetadataLength = 256; +static constexpr bool kDefaultEncryptedFooter = true; +static constexpr bool kDefaultCheckSignature = true; +static constexpr bool kDefaultAllowPlaintextFiles = false; +static constexpr int32_t kAadFileUniqueLength = 8; + +class ColumnDecryptionProperties; +using ColumnPathToDecryptionPropertiesMap = + std::map>; + +class ColumnEncryptionProperties; +using ColumnPathToEncryptionPropertiesMap = + std::map>; + +class PARQUET_EXPORT DecryptionKeyRetriever { + public: + virtual std::string GetKey(const std::string& key_metadata) = 0; + virtual ~DecryptionKeyRetriever() {} +}; + +/// Simple integer key retriever +class PARQUET_EXPORT IntegerKeyIdRetriever : public DecryptionKeyRetriever { + public: + void PutKey(uint32_t key_id, const std::string& key); + std::string GetKey(const std::string& key_metadata) override; + + private: + std::map key_map_; +}; + +// Simple string key retriever +class PARQUET_EXPORT StringKeyIdRetriever : public DecryptionKeyRetriever { + public: + void PutKey(const std::string& key_id, const std::string& key); + std::string GetKey(const std::string& key_metadata) override; + + private: + std::map key_map_; +}; + +class PARQUET_EXPORT HiddenColumnException : public ParquetException { + public: + explicit HiddenColumnException(const std::string& columnPath) + : ParquetException(columnPath.c_str()) {} +}; + +class PARQUET_EXPORT KeyAccessDeniedException : public ParquetException { + public: + explicit KeyAccessDeniedException(const std::string& columnPath) + : ParquetException(columnPath.c_str()) {} +}; + +inline const uint8_t* str2bytes(const std::string& str) { + if (str.empty()) return NULLPTR; + + char* cbytes = const_cast(str.c_str()); + return reinterpret_cast(cbytes); +} + +class PARQUET_EXPORT ColumnEncryptionProperties { + public: + class PARQUET_EXPORT Builder { + public: + /// Convenience builder for encrypted columns. + explicit Builder(const std::string& name) : Builder(name, true) {} + + /// Convenience builder for encrypted columns. + explicit Builder(const std::shared_ptr& path) + : Builder(path->ToDotString(), true) {} + + /// Set a column-specific key. + /// If key is not set on an encrypted column, the column will + /// be encrypted with the footer key. + /// keyBytes Key length must be either 16, 24 or 32 bytes. + /// The key is cloned, and will be wiped out (array values set to 0) upon completion + /// of file writing. + /// Caller is responsible for wiping out the input key array. + Builder* key(std::string column_key); + + /// Set a key retrieval metadata. + /// use either key_metadata() or key_id(), not both + Builder* key_metadata(const std::string& key_metadata); + + /// A convenience function to set key metadata using a string id. + /// Set a key retrieval metadata (converted from String). 
+ /// use either key_metadata() or key_id(), not both + /// key_id will be converted to metadata (UTF-8 array). + Builder* key_id(const std::string& key_id); + + std::shared_ptr build() { + return std::shared_ptr( + new ColumnEncryptionProperties(encrypted_, column_path_, key_, key_metadata_)); + } + + private: + const std::string column_path_; + bool encrypted_; + std::string key_; + std::string key_metadata_; + + Builder(const std::string path, bool encrypted) + : column_path_(path), encrypted_(encrypted) {} + }; + + std::string column_path() const { return column_path_; } + bool is_encrypted() const { return encrypted_; } + bool is_encrypted_with_footer_key() const { return encrypted_with_footer_key_; } + std::string key() const { return key_; } + std::string key_metadata() const { return key_metadata_; } + + /// Upon completion of file writing, the encryption key + /// will be wiped out. + void WipeOutEncryptionKey() { key_.clear(); } + + bool is_utilized() { + if (key_.empty()) + return false; // can re-use column properties without encryption keys + return utilized_; + } + + /// ColumnEncryptionProperties object can be used for writing one file only. + /// Mark ColumnEncryptionProperties as utilized once it is used in + /// FileEncryptionProperties as the encryption key will be wiped out upon + /// completion of file writing. + void set_utilized() { utilized_ = true; } + + std::shared_ptr DeepClone() { + std::string key_copy = key_; + return std::shared_ptr(new ColumnEncryptionProperties( + encrypted_, column_path_, key_copy, key_metadata_)); + } + + ColumnEncryptionProperties() = default; + ColumnEncryptionProperties(const ColumnEncryptionProperties& other) = default; + ColumnEncryptionProperties(ColumnEncryptionProperties&& other) = default; + + private: + const std::string column_path_; + bool encrypted_; + bool encrypted_with_footer_key_; + std::string key_; + std::string key_metadata_; + bool utilized_; + explicit ColumnEncryptionProperties(bool encrypted, const std::string& column_path, + const std::string& key, + const std::string& key_metadata); +}; + +class PARQUET_EXPORT ColumnDecryptionProperties { + public: + class PARQUET_EXPORT Builder { + public: + explicit Builder(const std::string& name) : column_path_(name) {} + + explicit Builder(const std::shared_ptr& path) + : Builder(path->ToDotString()) {} + + /// Set an explicit column key. If applied on a file that contains + /// key metadata for this column the metadata will be ignored, + /// the column will be decrypted with this key. + /// key length must be either 16, 24 or 32 bytes. + Builder* key(const std::string& key); + + std::shared_ptr build(); + + private: + const std::string column_path_; + std::string key_; + }; + + ColumnDecryptionProperties() = default; + ColumnDecryptionProperties(const ColumnDecryptionProperties& other) = default; + ColumnDecryptionProperties(ColumnDecryptionProperties&& other) = default; + + std::string column_path() const { return column_path_; } + std::string key() const { return key_; } + bool is_utilized() { return utilized_; } + + /// ColumnDecryptionProperties object can be used for reading one file only. + /// Mark ColumnDecryptionProperties as utilized once it is used in + /// FileDecryptionProperties as the encryption key will be wiped out upon + /// completion of file reading. + void set_utilized() { utilized_ = true; } + + /// Upon completion of file reading, the encryption key + /// will be wiped out. 
+ void WipeOutDecryptionKey(); + + std::shared_ptr DeepClone(); + + private: + const std::string column_path_; + std::string key_; + bool utilized_; + + /// This class is only required for setting explicit column decryption keys - + /// to override key retriever (or to provide keys when key metadata and/or + /// key retriever are not available) + explicit ColumnDecryptionProperties(const std::string& column_path, + const std::string& key); +}; + +class PARQUET_EXPORT AADPrefixVerifier { + public: + /// Verifies identity (AAD Prefix) of individual file, + /// or of file collection in a data set. + /// Throws exception if an AAD prefix is wrong. + /// In a data set, AAD Prefixes should be collected, + /// and then checked for missing files. + virtual void Verify(const std::string& aad_prefix) = 0; + virtual ~AADPrefixVerifier() {} +}; + +class PARQUET_EXPORT FileDecryptionProperties { + public: + class PARQUET_EXPORT Builder { + public: + Builder() { + check_plaintext_footer_integrity_ = kDefaultCheckSignature; + plaintext_files_allowed_ = kDefaultAllowPlaintextFiles; + } + + /// Set an explicit footer key. If applied on a file that contains + /// footer key metadata the metadata will be ignored, the footer + /// will be decrypted/verified with this key. + /// If explicit key is not set, footer key will be fetched from + /// key retriever. + /// With explicit keys or AAD prefix, new encryption properties object must be + /// created for each encrypted file. + /// Explicit encryption keys (footer and column) are cloned. + /// Upon completion of file reading, the cloned encryption keys in the properties + /// will be wiped out (array values set to 0). + /// Caller is responsible for wiping out the input key array. + /// param footerKey Key length must be either 16, 24 or 32 bytes. + Builder* footer_key(const std::string footer_key); + + /// Set explicit column keys (decryption properties). + /// Its also possible to set a key retriever on this property object. + /// Upon file decryption, availability of explicit keys is checked before + /// invocation of the retriever callback. + /// If an explicit key is available for a footer or a column, + /// its key metadata will be ignored. + Builder* column_keys( + const ColumnPathToDecryptionPropertiesMap& column_decryption_properties); + + /// Set a key retriever callback. Its also possible to + /// set explicit footer or column keys on this file property object. + /// Upon file decryption, availability of explicit keys is checked before + /// invocation of the retriever callback. + /// If an explicit key is available for a footer or a column, + /// its key metadata will be ignored. + Builder* key_retriever(const std::shared_ptr& key_retriever); + + /// Skip integrity verification of plaintext footers. + /// If not called, integrity of plaintext footers will be checked in runtime, + /// and an exception will be thrown in the following situations: + /// - footer signing key is not available + /// (not passed, or not found by key retriever) + /// - footer content and signature don't match + Builder* disable_footer_signature_verification() { + check_plaintext_footer_integrity_ = false; + return this; + } + + /// Explicitly supply the file AAD prefix. + /// A must when a prefix is used for file encryption, but not stored in file. + /// If AAD prefix is stored in file, it will be compared to the explicitly + /// supplied value and an exception will be thrown if they differ. 
+ Builder* aad_prefix(const std::string& aad_prefix); + + /// Set callback for verification of AAD Prefixes stored in file. + Builder* aad_prefix_verifier(std::shared_ptr aad_prefix_verifier); + + /// By default, reading plaintext (unencrypted) files is not + /// allowed when using a decryptor + /// - in order to detect files that were not encrypted by mistake. + /// However, the default behavior can be overridden by calling this method. + /// The caller should use then a different method to ensure encryption + /// of files with sensitive data. + Builder* plaintext_files_allowed() { + plaintext_files_allowed_ = true; + return this; + } + + std::shared_ptr build() { + return std::shared_ptr(new FileDecryptionProperties( + footer_key_, key_retriever_, check_plaintext_footer_integrity_, aad_prefix_, + aad_prefix_verifier_, column_decryption_properties_, plaintext_files_allowed_)); + } + + private: + std::string footer_key_; + std::string aad_prefix_; + std::shared_ptr aad_prefix_verifier_; + ColumnPathToDecryptionPropertiesMap column_decryption_properties_; + + std::shared_ptr key_retriever_; + bool check_plaintext_footer_integrity_; + bool plaintext_files_allowed_; + }; + + std::string column_key(const std::string& column_path) const; + + std::string footer_key() const { return footer_key_; } + + std::string aad_prefix() const { return aad_prefix_; } + + const std::shared_ptr& key_retriever() const { + return key_retriever_; + } + + bool check_plaintext_footer_integrity() const { + return check_plaintext_footer_integrity_; + } + + bool plaintext_files_allowed() const { return plaintext_files_allowed_; } + + const std::shared_ptr& aad_prefix_verifier() const { + return aad_prefix_verifier_; + } + + /// Upon completion of file reading, the encryption keys in the properties + /// will be wiped out (array values set to 0). + void WipeOutDecryptionKeys(); + + bool is_utilized(); + + /// FileDecryptionProperties object can be used for reading one file only. + /// Mark FileDecryptionProperties as utilized once it is used to read a file as the + /// encryption keys will be wiped out upon completion of file reading. + void set_utilized() { utilized_ = true; } + + /// FileDecryptionProperties object can be used for reading one file only. + /// (unless this object keeps the keyRetrieval callback only, and no explicit + /// keys or aadPrefix). + /// At the end, keys are wiped out in the memory. 
+ /// This method allows to clone identical properties for another file, + /// with an option to update the aadPrefix (if newAadPrefix is null, + /// aadPrefix will be cloned too) + std::shared_ptr DeepClone(std::string new_aad_prefix = ""); + + private: + std::string footer_key_; + std::string aad_prefix_; + std::shared_ptr aad_prefix_verifier_; + + const std::string empty_string_ = ""; + ColumnPathToDecryptionPropertiesMap column_decryption_properties_; + + std::shared_ptr key_retriever_; + bool check_plaintext_footer_integrity_; + bool plaintext_files_allowed_; + bool utilized_; + + FileDecryptionProperties( + const std::string& footer_key, + std::shared_ptr key_retriever, + bool check_plaintext_footer_integrity, const std::string& aad_prefix, + std::shared_ptr aad_prefix_verifier, + const ColumnPathToDecryptionPropertiesMap& column_decryption_properties, + bool plaintext_files_allowed); +}; + +class PARQUET_EXPORT FileEncryptionProperties { + public: + class PARQUET_EXPORT Builder { + public: + explicit Builder(const std::string& footer_key) + : parquet_cipher_(kDefaultEncryptionAlgorithm), + encrypted_footer_(kDefaultEncryptedFooter) { + footer_key_ = footer_key; + store_aad_prefix_in_file_ = false; + } + + /// Create files with plaintext footer. + /// If not called, the files will be created with encrypted footer (default). + Builder* set_plaintext_footer() { + encrypted_footer_ = false; + return this; + } + + /// Set encryption algorithm. + /// If not called, files will be encrypted with AES_GCM_V1 (default). + Builder* algorithm(ParquetCipher::type parquet_cipher) { + parquet_cipher_ = parquet_cipher; + return this; + } + + /// Set a key retrieval metadata (converted from String). + /// use either footer_key_metadata or footer_key_id, not both. + Builder* footer_key_id(const std::string& key_id); + + /// Set a key retrieval metadata. + /// use either footer_key_metadata or footer_key_id, not both. + Builder* footer_key_metadata(const std::string& footer_key_metadata); + + /// Set the file AAD Prefix. + Builder* aad_prefix(const std::string& aad_prefix); + + /// Skip storing AAD Prefix in file. + /// If not called, and if AAD Prefix is set, it will be stored. + Builder* disable_aad_prefix_storage(); + + /// Set the list of encrypted columns and their properties (keys etc). + /// If not called, all columns will be encrypted with the footer key. + /// If called, the file columns not in the list will be left unencrypted. 
+ Builder* encrypted_columns( + const ColumnPathToEncryptionPropertiesMap& encrypted_columns); + + std::shared_ptr build() { + return std::shared_ptr(new FileEncryptionProperties( + parquet_cipher_, footer_key_, footer_key_metadata_, encrypted_footer_, + aad_prefix_, store_aad_prefix_in_file_, encrypted_columns_)); + } + + private: + ParquetCipher::type parquet_cipher_; + bool encrypted_footer_; + std::string footer_key_; + std::string footer_key_metadata_; + + std::string aad_prefix_; + bool store_aad_prefix_in_file_; + ColumnPathToEncryptionPropertiesMap encrypted_columns_; + }; + bool encrypted_footer() const { return encrypted_footer_; } + + EncryptionAlgorithm algorithm() const { return algorithm_; } + + std::string footer_key() const { return footer_key_; } + + std::string footer_key_metadata() const { return footer_key_metadata_; } + + std::string file_aad() const { return file_aad_; } + + std::shared_ptr column_encryption_properties( + const std::string& column_path); + + bool is_utilized() const { return utilized_; } + + /// FileEncryptionProperties object can be used for writing one file only. + /// Mark FileEncryptionProperties as utilized once it is used to write a file as the + /// encryption keys will be wiped out upon completion of file writing. + void set_utilized() { utilized_ = true; } + + /// Upon completion of file writing, the encryption keys + /// will be wiped out (array values set to 0). + void WipeOutEncryptionKeys(); + + /// FileEncryptionProperties object can be used for writing one file only. + /// (at the end, keys are wiped out in the memory). + /// This method allows to clone identical properties for another file, + /// with an option to update the aadPrefix (if newAadPrefix is null, + /// aadPrefix will be cloned too) + std::shared_ptr DeepClone(std::string new_aad_prefix = ""); + + ColumnPathToEncryptionPropertiesMap encrypted_columns() const { + return encrypted_columns_; + } + + private: + EncryptionAlgorithm algorithm_; + std::string footer_key_; + std::string footer_key_metadata_; + bool encrypted_footer_; + std::string file_aad_; + std::string aad_prefix_; + bool utilized_; + bool store_aad_prefix_in_file_; + ColumnPathToEncryptionPropertiesMap encrypted_columns_; + + FileEncryptionProperties(ParquetCipher::type cipher, const std::string& footer_key, + const std::string& footer_key_metadata, bool encrypted_footer, + const std::string& aad_prefix, bool store_aad_prefix_in_file, + const ColumnPathToEncryptionPropertiesMap& encrypted_columns); +}; + +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_material_store.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_material_store.h new file mode 100644 index 0000000000000000000000000000000000000000..83f028a4bc1e9e0d24e21e7acfb785af0e5b37f7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_material_store.h @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +/// Stores encryption key material outside the Parquet file, for example in a separate +/// small file in the same folder. This is important for “key rotation”, when MEKs have to +/// be changed (if compromised; or periodically, just in case) - without modifying the +/// Parquet files (often immutable). +class PARQUET_EXPORT FileKeyMaterialStore { + public: + /// Add key material for one encryption key. + virtual void AddKeyMaterial(std::string key_id_in_file, std::string key_material) = 0; + + /// Get key material + virtual std::string GetKeyMaterial(std::string key_id_in_file) = 0; + + /// After key material was added for all keys in the given Parquet file, + /// save material in persistent store. + virtual void SaveMaterial() = 0; + + /// Remove key material from persistent store. Used in key rotation. + virtual void RemoveMaterial() = 0; + + /// Move key material to another store. Used in key rotation. + virtual void MoveMaterialTo(std::shared_ptr target_key_store) = 0; + + /// Returns the Set of all key IDs in this store (for the given Parquet file) + virtual std::vector GetKeyIDSet() = 0; + + virtual ~FileKeyMaterialStore() {} +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_unwrapper.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_unwrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..6147abbecd3e6a72a1d5c8fb65d1ccd1e0f6170e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_unwrapper.h @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
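To make the FileKeyMaterialStore contract above concrete, here is a minimal, hypothetical in-memory implementation. It is illustrative only (nothing named InMemoryKeyMaterialStore exists in Arrow or in this diff), and it assumes the element types stripped from the pure-virtual declarations above are std::string, matching FileSystemKeyMaterialStore further down in this patch.

// Hypothetical sketch only, not part of the vendored header above.
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

class InMemoryKeyMaterialStore : public parquet::encryption::FileKeyMaterialStore {
 public:
  // Accumulate key material per key ID until SaveMaterial() is called.
  void AddKeyMaterial(std::string key_id_in_file, std::string key_material) override {
    material_[std::move(key_id_in_file)] = std::move(key_material);
  }

  std::string GetKeyMaterial(std::string key_id_in_file) override {
    return material_.at(key_id_in_file);
  }

  void SaveMaterial() override {}  // nothing to persist for an in-memory store

  void RemoveMaterial() override { material_.clear(); }

  // Key rotation support: hand all material to another store, then drop it here.
  void MoveMaterialTo(std::shared_ptr<FileKeyMaterialStore> target_key_store) override {
    for (auto& [key_id, key_material] : material_) {
      target_key_store->AddKeyMaterial(key_id, key_material);
    }
    material_.clear();
  }

  std::vector<std::string> GetKeyIDSet() override {
    std::vector<std::string> ids;
    ids.reserve(material_.size());
    for (const auto& [key_id, key_material] : material_) ids.push_back(key_id);
    return ids;
  }

 private:
  std::unordered_map<std::string, std::string> material_;
};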
+
+#pragma once
+
+#include "arrow/util/concurrent_map.h"
+
+#include "parquet/encryption/encryption.h"
+#include "parquet/encryption/file_system_key_material_store.h"
+#include "parquet/encryption/key_material.h"
+#include "parquet/encryption/key_toolkit.h"
+#include "parquet/encryption/key_toolkit_internal.h"
+#include "parquet/encryption/kms_client.h"
+#include "parquet/platform.h"
+
+namespace parquet::encryption {
+
+// This class will retrieve the key from "key metadata", following these steps:
+// 1. Parse "key metadata" (see structure in KeyMetadata class).
+// 2. Retrieve "key material" which can be stored inside or outside "key metadata".
+// 3. Unwrap the "data encryption key" from "key material". There are 2 modes:
+//    3.1. single wrapping: decrypt the wrapped "data encryption key" directly with
+//         "master encryption key"
+//    3.2. double wrapping: 2 steps:
+//         3.2.1. "key encryption key" is decrypted with "master encryption key"
+//         3.2.2. "data encryption key" is decrypted with the above "key encryption key"
+class PARQUET_EXPORT FileKeyUnwrapper : public DecryptionKeyRetriever {
+ public:
+  /// key_toolkit and kms_connection_config is to get KmsClient from cache or create
+  /// KmsClient if it's not in the cache yet. cache_entry_lifetime_seconds is life time of
+  /// KmsClient in the cache.
+  /// If the file uses external key material then the Parquet file path and file
+  /// system must be specified.
+  FileKeyUnwrapper(std::shared_ptr<KeyToolkit> key_toolkit,
+                   const KmsConnectionConfig& kms_connection_config,
+                   double cache_lifetime_seconds, const std::string& file_path = "",
+                   const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);
+
+  /// Constructor overload that takes a raw pointer to the KeyToolkit
+  FileKeyUnwrapper(KeyToolkit* key_toolkit,
+                   const KmsConnectionConfig& kms_connection_config,
+                   double cache_lifetime_seconds, const std::string& file_path = "",
+                   const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);
+
+  /// Constructor overload that takes a raw pointer to the KeyToolkit and
+  /// accepts an existing key_material_store rather than using
+  /// the file path and file system to create one when needed.
+ FileKeyUnwrapper(KeyToolkit* key_toolkit, + const KmsConnectionConfig& kms_connection_config, + double cache_lifetime_seconds, + std::shared_ptr key_material_store); + + /// Get the data key from key metadata + std::string GetKey(const std::string& key_metadata) override; + + /// Get the data key along with the master key id from key material + KeyWithMasterId GetDataEncryptionKey(const KeyMaterial& key_material); + + private: + FileKeyUnwrapper(std::shared_ptr key_toolkit_owner, KeyToolkit* key_toolkit, + const KmsConnectionConfig& kms_connection_config, + double cache_lifetime_seconds, + std::shared_ptr key_material_store, + const std::string& file_path, + const std::shared_ptr<::arrow::fs::FileSystem>& file_system); + + std::shared_ptr GetKmsClientFromConfigOrKeyMaterial( + const KeyMaterial& key_material); + + /// A map of Key Encryption Key (KEK) ID -> KEK bytes, for the current token + std::shared_ptr<::arrow::util::ConcurrentMap> kek_per_kek_id_; + std::shared_ptr key_toolkit_owner_; + KeyToolkit* key_toolkit_; + KmsConnectionConfig kms_connection_config_; + const double cache_entry_lifetime_seconds_; + std::shared_ptr key_material_store_; + const std::string file_path_; + std::shared_ptr<::arrow::fs::FileSystem> file_system_; +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_wrapper.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_wrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..26b9719de64dbafe39123c214d74f35301f8713b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_wrapper.h @@ -0,0 +1,84 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/concurrent_map.h" + +#include "parquet/encryption/file_key_material_store.h" +#include "parquet/encryption/key_encryption_key.h" +#include "parquet/encryption/key_toolkit.h" +#include "parquet/encryption/kms_client.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +// This class will generate "key metadata" from "data encryption key" and "master key", +// following these steps: +// 1. Wrap "data encryption key". There are 2 modes: +// 1.1. single wrapping: encrypt "data encryption key" directly with "master encryption +// key" +// 1.2. double wrapping: 2 steps: +// 1.2.1. "key encryption key" is randomized (see KeyEncryptionKey class) +// 1.2.2. "data encryption key" is encrypted with the above "key encryption key" +// 2. Create "key material" (see structure in KeyMaterial class) +// 3. 
Create "key metadata" with "key material" inside or a reference to outside "key +// material" (see structure in KeyMetadata class). +class PARQUET_EXPORT FileKeyWrapper { + public: + static constexpr int kKeyEncryptionKeyLength = 16; + static constexpr int kKeyEncryptionKeyIdLength = 16; + + /// key_toolkit and kms_connection_config is to get KmsClient from the cache or create + /// KmsClient if it's not in the cache yet. cache_entry_lifetime_seconds is life time of + /// KmsClient in the cache. key_material_store is to store "key material" outside + /// parquet file, NULL if "key material" is stored inside parquet file. + FileKeyWrapper(KeyToolkit* key_toolkit, + const KmsConnectionConfig& kms_connection_config, + std::shared_ptr key_material_store, + double cache_entry_lifetime_seconds, bool double_wrapping); + + /// Creates key_metadata field for a given data key, via wrapping the key with the + /// master key. + /// When external key material is used, an identifier is usually generated automatically + /// but may be specified explicitly to support key rotation, + /// which requires keeping the same identifiers. + std::string GetEncryptionKeyMetadata(const std::string& data_key, + const std::string& master_key_id, + bool is_footer_key, + std::string key_id_in_file = ""); + + private: + KeyEncryptionKey CreateKeyEncryptionKey(const std::string& master_key_id); + + /// A map of Master Encryption Key ID -> KeyEncryptionKey, for the current token + std::shared_ptr<::arrow::util::ConcurrentMap> + kek_per_master_key_id_; + + std::shared_ptr kms_client_; + KmsConnectionConfig kms_connection_config_; + std::shared_ptr key_material_store_; + const double cache_entry_lifetime_seconds_; + const bool double_wrapping_; + uint16_t key_counter_; +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_system_key_material_store.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_system_key_material_store.h new file mode 100644 index 0000000000000000000000000000000000000000..896a53202f589158ae684aa5df9c1f69cae86b28 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_system_key_material_store.h @@ -0,0 +1,89 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/filesystem/filesystem.h" + +#include "parquet/encryption/file_key_material_store.h" + +namespace parquet::encryption { + +/// A FileKeyMaterialStore that stores key material in a file system file in the same +/// folder as the Parquet file. 
+class PARQUET_EXPORT FileSystemKeyMaterialStore : public FileKeyMaterialStore { + public: + static constexpr const char kKeyMaterialFilePrefix[] = "_KEY_MATERIAL_FOR_"; + static constexpr const char kTempFilePrefix[] = "_TMP"; + static constexpr const char kKeyMaterialFileSuffix[] = ".json"; + + FileSystemKeyMaterialStore() {} + FileSystemKeyMaterialStore(const std::string& key_material_file_path, + const std::shared_ptr<::arrow::fs::FileSystem>& file_system); + + /// Creates a new file system key material store for a parquet file. + /// When use_tmp_prefix is true, files are saved with an extra _TMP prefix so they don't + /// conflict with existing external material files. This is useful during key rotation + /// so that temporary key material files can be created while using the existing key + /// material, before moving the key material to the non-temporary location. + static std::shared_ptr Make( + const std::string& parquet_file_path, + const std::shared_ptr<::arrow::fs::FileSystem>& file_system, bool use_tmp_prefix); + + /// Add key material for one encryption key. + void AddKeyMaterial(std::string key_id_in_file, std::string key_material) { + key_material_map_.insert({key_id_in_file, key_material}); + } + + /// Get key material + std::string GetKeyMaterial(std::string key_id_in_file) { + if (key_material_map_.empty()) { + LoadKeyMaterialMap(); + } + auto found = key_material_map_.find(key_id_in_file); + return found->second; + } + + /// After key material was added for all keys in the given Parquet file, + /// save material in persistent store. + void SaveMaterial(); + + /// Remove key material from persistent store. Used in key rotation. + void RemoveMaterial(); + + /// Move key material to another store. Used in key rotation. + void MoveMaterialTo(std::shared_ptr target_key_store); + + /// Returns the Set of all key IDs in this store (for the given Parquet file) + std::vector GetKeyIDSet(); + + private: + std::string GetStorageFilePath() { return key_material_file_path_; } + + std::string BuildKeyMaterialMapJson(); + void LoadKeyMaterialMap(); + std::string key_material_file_path_; + std::shared_ptr<::arrow::fs::FileSystem> file_system_; + /// Maps ID of a key in Parquet file and key material + std::unordered_map key_material_map_; +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_encryption_key.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_encryption_key.h new file mode 100644 index 0000000000000000000000000000000000000000..62263ee3cd5062ece20ac0f79b89d3cf0312f360 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_encryption_key.h @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/util/base64.h" + +namespace parquet::encryption { + +// In the double wrapping mode, each "data encryption key" (DEK) is encrypted with a “key +// encryption key” (KEK), that in turn is encrypted with a "master encryption key" (MEK). +// In a writer process, a random KEK is generated for each MEK ID, and cached in a map. This allows to perform an interaction with a KMS server only once for each +// MEK, in order to wrap its KEK. "Data encryption key" (DEK) wrapping is performed +// locally, and does not involve an interaction with a KMS server. +class KeyEncryptionKey { + public: + KeyEncryptionKey(std::string kek_bytes, std::string kek_id, + std::string encoded_wrapped_kek) + : kek_bytes_(std::move(kek_bytes)), + kek_id_(std::move(kek_id)), + encoded_kek_id_(::arrow::util::base64_encode(kek_id_)), + encoded_wrapped_kek_(std::move(encoded_wrapped_kek)) {} + + const std::string& kek_bytes() const { return kek_bytes_; } + + const std::string& kek_id() const { return kek_id_; } + + const std::string& encoded_kek_id() const { return encoded_kek_id_; } + + const std::string& encoded_wrapped_kek() const { return encoded_wrapped_kek_; } + + private: + std::string kek_bytes_; + std::string kek_id_; + std::string encoded_kek_id_; + std::string encoded_wrapped_kek_; +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_material.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_material.h new file mode 100644 index 0000000000000000000000000000000000000000..3e7e862c996d3f0b0c016f3953dc40dcb314a8a0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_material.h @@ -0,0 +1,129 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "parquet/platform.h" + +namespace arrow { +namespace json { +namespace internal { +class ObjectParser; +} // namespace internal +} // namespace json +} // namespace arrow + +namespace parquet::encryption { + +// KeyMaterial class represents the "key material", keeping the information that allows +// readers to recover an encryption key (see description of the KeyMetadata class). The +// keytools package (PARQUET-1373) implements the "envelope encryption" pattern, in a +// "single wrapping" or "double wrapping" mode. In the single wrapping mode, the key +// material is generated by encrypting the "data encryption key" (DEK) by a "master key". 
+// In the double wrapping mode, the key material is generated by encrypting the DEK by a +// "key encryption key" (KEK), that in turn is encrypted by a "master key". +// +// Key material is kept in a flat json object, with the following fields: +// 1. "keyMaterialType" - a String, with the type of key material. In the current +// version, only one value is allowed - "PKMT1" (stands +// for "parquet key management tools, version 1"). For external key material storage, +// this field is written in both "key metadata" and "key material" jsons. For internal +// key material storage, this field is written only once in the common json. +// 2. "isFooterKey" - a boolean. If true, means that the material belongs to a file footer +// key, and keeps additional information (such as +// KMS instance ID and URL). If false, means that the material belongs to a column +// key. +// 3. "kmsInstanceID" - a String, with the KMS Instance ID. Written only in footer key +// material. +// 4. "kmsInstanceURL" - a String, with the KMS Instance URL. Written only in footer key +// material. +// 5. "masterKeyID" - a String, with the ID of the master key used to generate the +// material. +// 6. "wrappedDEK" - a String, with the wrapped DEK (base64 encoding). +// 7. "doubleWrapping" - a boolean. If true, means that the material was generated in +// double wrapping mode. +// If false - in single wrapping mode. +// 8. "keyEncryptionKeyID" - a String, with the ID of the KEK used to generate the +// material. Written only in double wrapping mode. +// 9. "wrappedKEK" - a String, with the wrapped KEK (base64 encoding). Written only in +// double wrapping mode. +class PARQUET_EXPORT KeyMaterial { + public: + // these fields are defined in a specification and should never be changed + static constexpr const char kKeyMaterialTypeField[] = "keyMaterialType"; + static constexpr const char kKeyMaterialType1[] = "PKMT1"; + + static constexpr const char kFooterKeyIdInFile[] = "footerKey"; + static constexpr const char kColumnKeyIdInFilePrefix[] = "columnKey"; + + static constexpr const char kIsFooterKeyField[] = "isFooterKey"; + static constexpr const char kDoubleWrappingField[] = "doubleWrapping"; + static constexpr const char kKmsInstanceIdField[] = "kmsInstanceID"; + static constexpr const char kKmsInstanceUrlField[] = "kmsInstanceURL"; + static constexpr const char kMasterKeyIdField[] = "masterKeyID"; + static constexpr const char kWrappedDataEncryptionKeyField[] = "wrappedDEK"; + static constexpr const char kKeyEncryptionKeyIdField[] = "keyEncryptionKeyID"; + static constexpr const char kWrappedKeyEncryptionKeyField[] = "wrappedKEK"; + + public: + KeyMaterial() = default; + + static KeyMaterial Parse(const std::string& key_material_string); + + static KeyMaterial Parse( + const ::arrow::json::internal::ObjectParser* key_material_json); + + /// This method returns a json string that will be stored either inside a parquet file + /// or in a key material store outside the parquet file. 
+ static std::string SerializeToJson(bool is_footer_key, + const std::string& kms_instance_id, + const std::string& kms_instance_url, + const std::string& master_key_id, + bool is_double_wrapped, const std::string& kek_id, + const std::string& encoded_wrapped_kek, + const std::string& encoded_wrapped_dek, + bool is_internal_storage); + + bool is_footer_key() const { return is_footer_key_; } + bool is_double_wrapped() const { return is_double_wrapped_; } + const std::string& master_key_id() const { return master_key_id_; } + const std::string& wrapped_dek() const { return encoded_wrapped_dek_; } + const std::string& kek_id() const { return kek_id_; } + const std::string& wrapped_kek() const { return encoded_wrapped_kek_; } + const std::string& kms_instance_id() const { return kms_instance_id_; } + const std::string& kms_instance_url() const { return kms_instance_url_; } + + private: + KeyMaterial(bool is_footer_key, const std::string& kms_instance_id, + const std::string& kms_instance_url, const std::string& master_key_id, + bool is_double_wrapped, const std::string& kek_id, + const std::string& encoded_wrapped_kek, + const std::string& encoded_wrapped_dek); + + bool is_footer_key_; + std::string kms_instance_id_; + std::string kms_instance_url_; + std::string master_key_id_; + bool is_double_wrapped_; + std::string kek_id_; + std::string encoded_wrapped_kek_; + std::string encoded_wrapped_dek_; +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_metadata.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_metadata.h new file mode 100644 index 0000000000000000000000000000000000000000..6fe8ac7ccb9db3fb92da42064f9fe2aeabdbfb52 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_metadata.h @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "parquet/encryption/key_material.h" +#include "parquet/exception.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +// Parquet encryption specification defines "key metadata" as an arbitrary byte array, +// generated by file writers for each encryption key, and passed to the low level API for +// storage in the file footer. The "key metadata" field is made available to file readers +// to enable recovery of the key. This interface can be utilized for implementation +// of any key management scheme. +// +// The keytools package (PARQUET-1373) implements one approach, of many possible, to key +// management and to generation of the "key metadata" fields. This approach, based on the +// "envelope encryption" pattern, allows integration with KMS servers. 
It keeps the actual +// material, required to recover a key, in a "key material" object (see the KeyMaterial +// class for details). This class is implemented to support version 1 of the parquet key +// management tools specification. +// +// KeyMetadata writes (and reads) the "key metadata" field as a flat json object, +// with the following fields: +// 1. "keyMaterialType" - a String, with the type of key material. +// 2. "internalStorage" - a boolean. If true, means that "key material" is kept inside the +// "key metadata" field. If false, "key material" is kept externally (outside Parquet +// files) - in this case, "key metadata" keeps a reference to the external "key material". +// 3. "keyReference" - a String, with the reference to the external "key material". +// Written only if internalStorage is false. +// +// If internalStorage is true, "key material" is a part of "key metadata", and the json +// keeps additional fields, described in the KeyMaterial class. +class PARQUET_EXPORT KeyMetadata { + public: + static constexpr const char kKeyMaterialInternalStorageField[] = "internalStorage"; + static constexpr const char kKeyReferenceField[] = "keyReference"; + + /// key_metadata_bytes is the key metadata field stored in the parquet file, + /// in the serialized json object format. + static KeyMetadata Parse(const std::string& key_metadata_bytes); + + static std::string CreateSerializedForExternalMaterial( + const std::string& key_reference); + + bool key_material_stored_internally() const { return is_internal_storage_; } + + const KeyMaterial& key_material() const { + if (!is_internal_storage_) { + throw ParquetException("key material is stored externally."); + } + return ::std::get(key_material_or_reference_); + } + + const std::string& key_reference() const { + if (is_internal_storage_) { + throw ParquetException("key material is stored internally."); + } + return ::std::get(key_material_or_reference_); + } + + private: + explicit KeyMetadata(const KeyMaterial& key_material); + explicit KeyMetadata(const std::string& key_reference); + + bool is_internal_storage_; + /// If is_internal_storage_ is true, KeyMaterial is set, + /// else a string referencing to an outside "key material" is set. + ::std::variant key_material_or_reference_; +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_toolkit.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_toolkit.h new file mode 100644 index 0000000000000000000000000000000000000000..339692a99a33d9dfe3fb266352423aa2e5f4589a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_toolkit.h @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "parquet/encryption/key_encryption_key.h" +#include "parquet/encryption/kms_client.h" +#include "parquet/encryption/kms_client_factory.h" +#include "parquet/encryption/two_level_cache_with_expiration.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +static constexpr uint64_t kCacheCleanPeriodForKeyRotation = 60 * 60; // 1 hour + +// KeyToolkit is a utility that keeps various tools for key management (such as key +// rotation, kms client instantiation, cache control, etc), plus a number of auxiliary +// classes for internal use. +class PARQUET_EXPORT KeyToolkit { + public: + KeyToolkit() { last_cache_clean_for_key_rotation_time_ = {}; } + + /// KMS client two level cache: token -> KMSInstanceId -> KmsClient + TwoLevelCacheWithExpiration>& kms_client_cache_per_token() { + return kms_client_cache_; + } + /// Key encryption key two level cache for wrapping: token -> MasterEncryptionKeyId -> + /// KeyEncryptionKey + TwoLevelCacheWithExpiration& kek_write_cache_per_token() { + return key_encryption_key_write_cache_; + } + + /// Key encryption key two level cache for unwrapping: token -> KeyEncryptionKeyId -> + /// KeyEncryptionKeyBytes + TwoLevelCacheWithExpiration& kek_read_cache_per_token() { + return key_encryption_key_read_cache_; + } + + std::shared_ptr GetKmsClient( + const KmsConnectionConfig& kms_connection_config, double cache_entry_lifetime_ms); + + /// Flush any caches that are tied to the (compromised) access_token + void RemoveCacheEntriesForToken(const std::string& access_token); + + void RemoveCacheEntriesForAllTokens(); + + void RegisterKmsClientFactory(std::shared_ptr kms_client_factory) { + if (kms_client_factory_ != NULLPTR) { + throw ParquetException("KMS client factory has already been registered."); + } + kms_client_factory_ = std::move(kms_client_factory); + } + + /// Key rotation. In the single wrapping mode, decrypts data keys with old master keys, + /// then encrypts them with new master keys. In the double wrapping mode, decrypts KEKs + /// (key encryption keys) with old master keys, generates new KEKs and encrypts them + /// with new master keys. Works only if key material is not stored internally in file + /// footers. Not supported in local key wrapping mode. Method can be run by multiple + /// threads, but each thread must work on different files. 
+ void RotateMasterKeys(const KmsConnectionConfig& kms_connection_config, + const std::string& parquet_file_path, + const std::shared_ptr<::arrow::fs::FileSystem>& file_system, + bool double_wrapping, double cache_lifetime_seconds); + + private: + TwoLevelCacheWithExpiration> kms_client_cache_; + TwoLevelCacheWithExpiration key_encryption_key_write_cache_; + TwoLevelCacheWithExpiration key_encryption_key_read_cache_; + std::shared_ptr kms_client_factory_; + mutable ::arrow::util::Mutex last_cache_clean_for_key_rotation_time_mutex_; + internal::TimePoint last_cache_clean_for_key_rotation_time_; +}; + +// "data encryption key" and "master key identifier" are paired together as output when +// parsing from "key material" +class PARQUET_EXPORT KeyWithMasterId { + public: + KeyWithMasterId(std::string key_bytes, std::string master_id) + : key_bytes_(std::move(key_bytes)), master_id_(std::move(master_id)) {} + + const std::string& data_key() const { return key_bytes_; } + const std::string& master_id() const { return master_id_; } + + private: + const std::string key_bytes_; + const std::string master_id_; +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client.h new file mode 100644 index 0000000000000000000000000000000000000000..ef363d9c2cda1e8aef06b2ceebab61a1e0916870 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client.h @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include + +#include "arrow/util/mutex.h" + +#include "parquet/exception.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +/// This class wraps the key access token of a KMS server. If your token changes over +/// time, you should keep the reference to the KeyAccessToken object and call Refresh() +/// method every time you have a new token. +class PARQUET_EXPORT KeyAccessToken { + public: + KeyAccessToken() = default; + + explicit KeyAccessToken(const std::string value) : value_(value) {} + + void Refresh(const std::string& new_value) { + auto lock = mutex_.Lock(); + value_ = new_value; + } + + const std::string& value() const { + auto lock = mutex_.Lock(); + return value_; + } + + private: + std::string value_; + mutable ::arrow::util::Mutex mutex_; +}; + +struct PARQUET_EXPORT KmsConnectionConfig { + std::string kms_instance_id; + std::string kms_instance_url; + /// If the access token is changed in the future, you should keep a reference to + /// this object and call Refresh() on it whenever there is a new access token. 
+ std::shared_ptr refreshable_key_access_token; + std::unordered_map custom_kms_conf; + + KmsConnectionConfig(); + + const std::string& key_access_token() const { + if (refreshable_key_access_token == NULLPTR || + refreshable_key_access_token->value().empty()) { + throw ParquetException("key access token is not set!"); + } + return refreshable_key_access_token->value(); + } + + void SetDefaultIfEmpty(); +}; + +class PARQUET_EXPORT KmsClient { + public: + static constexpr const char kKmsInstanceIdDefault[] = "DEFAULT"; + static constexpr const char kKmsInstanceUrlDefault[] = "DEFAULT"; + static constexpr const char kKeyAccessTokenDefault[] = "DEFAULT"; + + /// Wraps a key - encrypts it with the master key, encodes the result + /// and potentially adds a KMS-specific metadata. + virtual std::string WrapKey(const std::string& key_bytes, + const std::string& master_key_identifier) = 0; + + /// Decrypts (unwraps) a key with the master key. + virtual std::string UnwrapKey(const std::string& wrapped_key, + const std::string& master_key_identifier) = 0; + virtual ~KmsClient() {} +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client_factory.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client_factory.h new file mode 100644 index 0000000000000000000000000000000000000000..7a7c77c7eebbfbb687575acb12b89c1c2e99461a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client_factory.h @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "parquet/encryption/kms_client.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +class PARQUET_EXPORT KmsClientFactory { + public: + explicit KmsClientFactory(bool wrap_locally = false) : wrap_locally_(wrap_locally) {} + + virtual ~KmsClientFactory() = default; + + virtual std::shared_ptr CreateKmsClient( + const KmsConnectionConfig& kms_connection_config) = 0; + + protected: + bool wrap_locally_; +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/local_wrap_kms_client.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/local_wrap_kms_client.h new file mode 100644 index 0000000000000000000000000000000000000000..3c90d82960525bf10c0dc23ea6a2c96c78104fea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/local_wrap_kms_client.h @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/util/concurrent_map.h" + +#include "parquet/encryption/kms_client.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +/// This class supports local wrapping mode, master keys will be fetched from the KMS +/// server and used to encrypt other keys (data encryption keys or key encryption keys). +class PARQUET_EXPORT LocalWrapKmsClient : public KmsClient { + public: + static constexpr const char kLocalWrapNoKeyVersion[] = "NO_VERSION"; + + explicit LocalWrapKmsClient(const KmsConnectionConfig& kms_connection_config); + + std::string WrapKey(const std::string& key_bytes, + const std::string& master_key_identifier) override; + + std::string UnwrapKey(const std::string& wrapped_key, + const std::string& master_key_identifier) override; + + protected: + /// Get master key from the remote KMS server. + /// Note: this function might be called by multiple threads + virtual std::string GetMasterKeyFromServer( + const std::string& master_key_identifier) = 0; + + private: + /// KMS systems wrap keys by encrypting them by master keys, and attaching additional + /// information (such as the version number of the masker key) to the result of + /// encryption. The master key version is required in key rotation. Currently, the + /// local wrapping mode does not support key rotation (because not all KMS systems allow + /// to fetch a master key by its ID and version number). Still, the local wrapping mode + /// adds a placeholder for the master key version, that will enable support for key + /// rotation in this mode in the future, with appropriate KMS systems. This will also + /// enable backward compatibility, where future readers will be able to extract master + /// key version in the files written by the current code. + /// + /// LocalKeyWrap class writes (and reads) the "key wrap" as a flat json with the + /// following fields: + /// 1. "masterKeyVersion" - a String, with the master key version. In the current + /// version, only one value is allowed - "NO_VERSION". + /// 2. "encryptedKey" - a String, with the key encrypted by the master key + /// (base64-encoded). 
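For orientation, the serialized "key wrap" described by the two fields above is a single flat JSON object along the following lines; the base64 payload shown is a made-up placeholder, not real output.

// Approximate shape of the serialized local "key wrap" (placeholder payload):
//   {"masterKeyVersion": "NO_VERSION", "encryptedKey": "Zm9vYmFyYmF6cXV4..."}
// LocalKeyWrap::CreateSerialized() (declared just below) emits this form, and
// LocalKeyWrap::Parse() reads it back when unwrapping.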
+ class LocalKeyWrap { + public: + static constexpr const char kLocalWrapKeyVersionField[] = "masterKeyVersion"; + static constexpr const char kLocalWrapEncryptedKeyField[] = "encryptedKey"; + + LocalKeyWrap(std::string master_key_version, std::string encrypted_encoded_key); + + static std::string CreateSerialized(const std::string& encrypted_encoded_key); + + static LocalKeyWrap Parse(const std::string& wrapped_key); + + const std::string& master_key_version() const { return master_key_version_; } + + const std::string& encrypted_encoded_key() const { return encrypted_encoded_key_; } + + private: + std::string encrypted_encoded_key_; + std::string master_key_version_; + }; + + std::string GetKeyFromServer(const std::string& key_identifier); + + protected: + KmsConnectionConfig kms_connection_config_; + ::arrow::util::ConcurrentMap master_key_cache_; +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_encryption_util.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_encryption_util.h new file mode 100644 index 0000000000000000000000000000000000000000..9bfc774278dde9ac42699339fb1a056e3fd14a70 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_encryption_util.h @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This module defines an abstract interface for iterating through pages in a +// Parquet column chunk within a row group. It could be extended in the future +// to iterate through all data pages in all chunks in a file. + +#pragma once + +#include +#include +#include + +#include + +#include "arrow/filesystem/filesystem.h" +#include "arrow/filesystem/localfs.h" +#include "arrow/status.h" +#include "arrow/util/io_util.h" + +#include "parquet/encryption/encryption.h" +#include "parquet/test_util.h" + +namespace parquet { +class ParquetFileReader; +namespace encryption::test { + +using ::arrow::internal::TemporaryDir; + +constexpr int kFixedLength = 10; + +const char kFooterEncryptionKey[] = "0123456789012345"; // 128bit/16 +const char kColumnEncryptionKey1[] = "1234567890123450"; +const char kColumnEncryptionKey2[] = "1234567890123451"; +const char kFileName[] = "tester"; + +// Get the path of file inside parquet test data directory +std::string data_file(const char* file); + +// A temporary directory that contains the encrypted files generated in the tests. 
+extern std::unique_ptr temp_dir; + +inline ::arrow::Result> temp_data_dir() { + return TemporaryDir::Make("parquet-encryption-test-"); +} + +const char kDoubleFieldName[] = "double_field"; +const char kFloatFieldName[] = "float_field"; +const char kBooleanFieldName[] = "boolean_field"; +const char kInt32FieldName[] = "int32_field"; +const char kInt64FieldName[] = "int64_field"; +const char kInt96FieldName[] = "int96_field"; +const char kByteArrayFieldName[] = "ba_field"; +const char kFixedLenByteArrayFieldName[] = "flba_field"; + +const char kFooterMasterKey[] = "0123456789012345"; +const char kFooterMasterKeyId[] = "kf"; +const char* const kColumnMasterKeys[] = {"1234567890123450", "1234567890123451", + "1234567890123452", "1234567890123453", + "1234567890123454", "1234567890123455"}; +const char* const kColumnMasterKeyIds[] = {"kc1", "kc2", "kc3", "kc4", "kc5", "kc6"}; + +// New master key values used to simulate key rotation +const char kNewFooterMasterKey[] = "9123456789012345"; +const char* const kNewColumnMasterKeys[] = {"9234567890123450", "9234567890123451", + "9234567890123452", "9234567890123453", + "9234567890123454", "9234567890123455"}; + +// The result of this function will be used to set into TestOnlyInMemoryKmsClientFactory +// as the key mapping to look at. +std::unordered_map BuildKeyMap(const char* const* column_ids, + const char* const* column_keys, + const char* footer_id, + const char* footer_key); + +// The result of this function will be used to set into EncryptionConfiguration +// as column keys. +std::string BuildColumnKeyMapping(); + +// FileEncryptor and FileDecryptor are helper classes to write/read an encrypted parquet +// file corresponding to each pair of FileEncryptionProperties/FileDecryptionProperties. +// FileEncryptor writes the file with fixed data values and FileDecryptor reads the file +// and verify the correctness of data values. +class FileEncryptor { + public: + FileEncryptor(); + + void EncryptFile( + std::string file, + std::shared_ptr encryption_configurations); + + private: + std::shared_ptr SetupEncryptionSchema(); + + int num_rowgroups_ = 5; + int rows_per_rowgroup_ = 50; + std::shared_ptr schema_; +}; + +class FileDecryptor { + public: + void DecryptFile( + const std::string& file_name, + const std::shared_ptr& file_decryption_properties); + void DecryptPageIndex( + const std::string& file_name, + const std::shared_ptr& file_decryption_properties); + + private: + void CheckFile( + parquet::ParquetFileReader* file_reader, + const std::shared_ptr& file_decryption_properties); + void CheckPageIndex( + parquet::ParquetFileReader* file_reader, + const std::shared_ptr& file_decryption_properties); +}; + +} // namespace encryption::test +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_in_memory_kms.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_in_memory_kms.h new file mode 100644 index 0000000000000000000000000000000000000000..c5fdc797b8ca78a7eddbbdd57dc5a56cb8745526 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/test_in_memory_kms.h @@ -0,0 +1,94 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/util/base64.h" + +#include "parquet/encryption/kms_client_factory.h" +#include "parquet/encryption/local_wrap_kms_client.h" +#include "parquet/platform.h" + +namespace parquet::encryption { + +// This is a mock class, built for testing only. Don't use it as an example of +// LocalWrapKmsClient implementation. +class TestOnlyLocalWrapInMemoryKms : public LocalWrapKmsClient { + public: + explicit TestOnlyLocalWrapInMemoryKms(const KmsConnectionConfig& kms_connection_config); + + static void InitializeMasterKeys( + const std::unordered_map& master_keys_map); + + protected: + std::string GetMasterKeyFromServer(const std::string& master_key_identifier) override; + + private: + static std::unordered_map master_key_map_; +}; + +// This is a mock class, built for testing only. Don't use it as an example of KmsClient +// implementation. +class TestOnlyInServerWrapKms : public KmsClient { + public: + static void InitializeMasterKeys( + const std::unordered_map& master_keys_map); + + std::string WrapKey(const std::string& key_bytes, + const std::string& master_key_identifier) override; + + std::string UnwrapKey(const std::string& wrapped_key, + const std::string& master_key_identifier) override; + + static void StartKeyRotation( + const std::unordered_map& new_master_keys_map); + static void FinishKeyRotation(); + + private: + std::string GetMasterKeyFromServer(const std::string& master_key_identifier); + + // Different wrapping and unwrapping key maps to imitate versioning + // and support key rotation. + static std::unordered_map unwrapping_master_key_map_; + static std::unordered_map wrapping_master_key_map_; +}; + +// This is a mock class, built for testing only. Don't use it as an example of +// KmsClientFactory implementation. 
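//
// Illustrative sketch (not part of the header): in a test, the key mapping is
// typically built with BuildKeyMap() from test_encryption_util.h and handed to
// this factory, e.g.
//
//   auto key_map = BuildKeyMap(kColumnMasterKeyIds, kColumnMasterKeys,
//                              kFooterMasterKeyId, kFooterMasterKey);
//   TestOnlyInMemoryKmsClientFactory factory(/*wrap_locally=*/true, key_map);
//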
+class TestOnlyInMemoryKmsClientFactory : public KmsClientFactory { + public: + TestOnlyInMemoryKmsClientFactory( + bool wrap_locally, + const std::unordered_map& master_keys_map) + : KmsClientFactory(wrap_locally) { + TestOnlyLocalWrapInMemoryKms::InitializeMasterKeys(master_keys_map); + TestOnlyInServerWrapKms::InitializeMasterKeys(master_keys_map); + } + + std::shared_ptr CreateKmsClient( + const KmsConnectionConfig& kms_connection_config) { + if (wrap_locally_) { + return std::make_shared(kms_connection_config); + } else { + return std::make_shared(); + } + } +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/two_level_cache_with_expiration.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/two_level_cache_with_expiration.h new file mode 100644 index 0000000000000000000000000000000000000000..76c2b8277000052865787ee148191b73fe37fcb0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/two_level_cache_with_expiration.h @@ -0,0 +1,157 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include + +#include "arrow/util/concurrent_map.h" +#include "arrow/util/mutex.h" + +namespace parquet::encryption { + +using ::arrow::util::ConcurrentMap; + +namespace internal { + +using TimePoint = + std::chrono::time_point>; + +inline TimePoint CurrentTimePoint() { return std::chrono::system_clock::now(); } + +template +class ExpiringCacheEntry { + public: + ExpiringCacheEntry() = default; + + ExpiringCacheEntry(E cached_item, double expiration_interval_seconds) + : expiration_timestamp_(CurrentTimePoint() + + std::chrono::duration(expiration_interval_seconds)), + cached_item_(std::move(cached_item)) {} + + bool IsExpired() const { + const auto now = CurrentTimePoint(); + return (now > expiration_timestamp_); + } + + E cached_item() { return cached_item_; } + + private: + const TimePoint expiration_timestamp_; + E cached_item_; +}; + +// This class is to avoid the below warning when compiling KeyToolkit class with VS2015 +// warning C4503: decorated name length exceeded, name was truncated +template +class ExpiringCacheMapEntry { + public: + ExpiringCacheMapEntry() = default; + + explicit ExpiringCacheMapEntry( + std::shared_ptr> cached_item, + double expiration_interval_seconds) + : map_cache_(cached_item, expiration_interval_seconds) {} + + bool IsExpired() { return map_cache_.IsExpired(); } + + std::shared_ptr> cached_item() { + return map_cache_.cached_item(); + } + + private: + // ConcurrentMap object may be accessed and modified at many places at the same time, + // from multiple threads, or even removed from cache. 
+ ExpiringCacheEntry>> map_cache_; +}; + +} // namespace internal + +// Two-level cache with expiration of internal caches according to token lifetime. +// External cache is per token, internal is per string key. +// Wrapper class around: +// std::unordered_map>> +// This cache is safe to be shared between threads. +template +class TwoLevelCacheWithExpiration { + public: + TwoLevelCacheWithExpiration() { + last_cache_cleanup_timestamp_ = internal::CurrentTimePoint(); + } + + std::shared_ptr> GetOrCreateInternalCache( + const std::string& access_token, double cache_entry_lifetime_seconds) { + auto lock = mutex_.Lock(); + + auto external_cache_entry = cache_.find(access_token); + if (external_cache_entry == cache_.end() || + external_cache_entry->second.IsExpired()) { + cache_.insert({access_token, internal::ExpiringCacheMapEntry( + std::shared_ptr>( + new ConcurrentMap()), + cache_entry_lifetime_seconds)}); + } + + return cache_[access_token].cached_item(); + } + + void CheckCacheForExpiredTokens(double cache_cleanup_period_seconds) { + auto lock = mutex_.Lock(); + + const auto now = internal::CurrentTimePoint(); + if (now > (last_cache_cleanup_timestamp_ + + std::chrono::duration(cache_cleanup_period_seconds))) { + RemoveExpiredEntriesNoMutex(); + last_cache_cleanup_timestamp_ = + now + std::chrono::duration(cache_cleanup_period_seconds); + } + } + + void RemoveExpiredEntriesFromCache() { + auto lock = mutex_.Lock(); + + RemoveExpiredEntriesNoMutex(); + } + + void Remove(const std::string& access_token) { + auto lock = mutex_.Lock(); + cache_.erase(access_token); + } + + void Clear() { + auto lock = mutex_.Lock(); + cache_.clear(); + } + + private: + void RemoveExpiredEntriesNoMutex() { + for (auto it = cache_.begin(); it != cache_.end();) { + if (it->second.IsExpired()) { + it = cache_.erase(it); + } else { + ++it; + } + } + } + std::unordered_map> cache_; + internal::TimePoint last_cache_cleanup_timestamp_; + ::arrow::util::Mutex mutex_; +}; + +} // namespace parquet::encryption diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/type_fwd.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/type_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..623811718482c591e708a297dff9eb35ae0c85a9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/type_fwd.h @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
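// Aside (illustrative only, not part of any header here): a typical use of the
// TwoLevelCacheWithExpiration template defined in two_level_cache_with_expiration.h
// above, assuming a plain string element type:
//
//   TwoLevelCacheWithExpiration<std::string> cache;
//   // One internal ConcurrentMap per KMS access token, expiring after 600 seconds.
//   auto per_token_cache = cache.GetOrCreateInternalCache("token-A", 600.0);
//   // Periodically drop internal caches whose tokens have expired.
//   cache.CheckCacheForExpiredTokens(/*cache_cleanup_period_seconds=*/60.0);
//   // Forget everything for one token, or clear the whole cache.
//   cache.Remove("token-A");
//   cache.Clear();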
+ +#pragma once + +namespace parquet { + +class Decryptor; +class Encryptor; + +class InternalFileDecryptor; +class InternalFileEncryptor; + +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/exception.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/exception.h new file mode 100644 index 0000000000000000000000000000000000000000..826f5bdc8bf73741ac37d457d6013dfc8d0fb5a6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/exception.h @@ -0,0 +1,158 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include +#include +#include +#include + +#include "arrow/type_fwd.h" +#include "arrow/util/string_builder.h" +#include "parquet/platform.h" + +// PARQUET-1085 +#if !defined(ARROW_UNUSED) +#define ARROW_UNUSED(x) UNUSED(x) +#endif + +// Parquet exception to Arrow Status + +#define BEGIN_PARQUET_CATCH_EXCEPTIONS try { +#define END_PARQUET_CATCH_EXCEPTIONS \ + } \ + catch (const ::parquet::ParquetStatusException& e) { \ + return e.status(); \ + } \ + catch (const ::parquet::ParquetException& e) { \ + return ::arrow::Status::IOError(e.what()); \ + } + +// clang-format off + +#define PARQUET_CATCH_NOT_OK(s) \ + BEGIN_PARQUET_CATCH_EXCEPTIONS \ + (s); \ + END_PARQUET_CATCH_EXCEPTIONS + +// clang-format on + +#define PARQUET_CATCH_AND_RETURN(s) \ + BEGIN_PARQUET_CATCH_EXCEPTIONS \ + return (s); \ + END_PARQUET_CATCH_EXCEPTIONS + +// Arrow Status to Parquet exception + +#define PARQUET_IGNORE_NOT_OK(s) \ + do { \ + ::arrow::Status _s = ::arrow::internal::GenericToStatus(s); \ + ARROW_UNUSED(_s); \ + } while (0) + +#define PARQUET_THROW_NOT_OK(s) \ + do { \ + ::arrow::Status _s = ::arrow::internal::GenericToStatus(s); \ + if (!_s.ok()) { \ + throw ::parquet::ParquetStatusException(std::move(_s)); \ + } \ + } while (0) + +#define PARQUET_ASSIGN_OR_THROW_IMPL(status_name, lhs, rexpr) \ + auto status_name = (rexpr); \ + PARQUET_THROW_NOT_OK(status_name.status()); \ + lhs = std::move(status_name).ValueOrDie(); + +#define PARQUET_ASSIGN_OR_THROW(lhs, rexpr) \ + PARQUET_ASSIGN_OR_THROW_IMPL(ARROW_ASSIGN_OR_RAISE_NAME(_error_or_value, __COUNTER__), \ + lhs, rexpr); + +namespace parquet { + +class ParquetException : public std::exception { + public: + PARQUET_NORETURN static void EofException(const std::string& msg = "") { + static std::string prefix = "Unexpected end of stream"; + if (msg.empty()) { + throw ParquetException(prefix); + } + throw ParquetException(prefix, ": ", msg); + } + + PARQUET_NORETURN static void NYI(const std::string& msg = "") { + throw ParquetException("Not yet implemented: ", msg, "."); + } + + template + explicit ParquetException(Args&&... 
args) + : msg_(::arrow::util::StringBuilder(std::forward(args)...)) {} + + explicit ParquetException(std::string msg) : msg_(std::move(msg)) {} + + explicit ParquetException(const char* msg, const std::exception&) : msg_(msg) {} + + ParquetException(const ParquetException&) = default; + ParquetException& operator=(const ParquetException&) = default; + ParquetException(ParquetException&&) = default; + ParquetException& operator=(ParquetException&&) = default; + + const char* what() const noexcept override { return msg_.c_str(); } + + private: + std::string msg_; +}; + +// Support printing a ParquetException. +// This is needed for clang-on-MSVC as there operator<< is not defined for +// std::exception. +PARQUET_EXPORT +std::ostream& operator<<(std::ostream& os, const ParquetException& exception); + +class ParquetStatusException : public ParquetException { + public: + explicit ParquetStatusException(::arrow::Status status) + : ParquetException(status.ToString()), status_(std::move(status)) {} + + const ::arrow::Status& status() const { return status_; } + + private: + ::arrow::Status status_; +}; + +// This class exists for the purpose of detecting an invalid or corrupted file. +class ParquetInvalidOrCorruptedFileException : public ParquetStatusException { + public: + ParquetInvalidOrCorruptedFileException(const ParquetInvalidOrCorruptedFileException&) = + default; + + template ::value, + int>::type = 0, + typename... Args> + explicit ParquetInvalidOrCorruptedFileException(Arg arg, Args&&... args) + : ParquetStatusException(::arrow::Status::Invalid(std::forward(arg), + std::forward(args)...)) {} +}; + +template +void ThrowNotOk(StatusReturnBlock&& b) { + PARQUET_THROW_NOT_OK(b()); +} + +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison.h new file mode 100644 index 0000000000000000000000000000000000000000..3ae442dd46e57b7f86b405d9502442d3195719e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison.h @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +#pragma once + +#include +#include + +#include "parquet/platform.h" + +namespace parquet::internal { + +/// Builds a bitmap where each set bit indicates the corresponding level is greater +/// than rhs. 
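/// Illustrative example (not part of the header): with levels = {0, 1, 2, 3} and
/// rhs = 1, the returned bitmap (least-significant-bit ordering) has bits 2 and 3
/// set, i.e. the value 0b1100.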
+uint64_t PARQUET_EXPORT GreaterThanBitmap(const int16_t* levels, int64_t num_levels, + int16_t rhs); + +struct MinMax { + int16_t min; + int16_t max; +}; + +MinMax FindMinMax(const int16_t* levels, int64_t num_levels); + +} // namespace parquet::internal diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison_inc.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison_inc.h new file mode 100644 index 0000000000000000000000000000000000000000..cfee50665433182571a659fbd805f27532a3f7e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/level_comparison_inc.h @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +#pragma once + +#include "arrow/util/bit_util.h" +#include "arrow/util/endian.h" +#include "parquet/level_comparison.h" + +// Used to make sure ODR rule isn't violated. +#ifndef PARQUET_IMPL_NAMESPACE +#error "PARQUET_IMPL_NAMESPACE must be defined" +#endif +namespace parquet::internal::PARQUET_IMPL_NAMESPACE { +/// Builds a bitmap by applying predicate to the level vector provided. +/// +/// \param[in] levels Rep or def level array. +/// \param[in] num_levels The number of levels to process (must be [0, 64]) +/// \param[in] predicate The predicate to apply (must have the signature `bool +/// predicate(int16_t)`. +/// \returns The bitmap using least significant "bit" ordering. +/// +template +inline uint64_t LevelsToBitmap(const int16_t* levels, int64_t num_levels, + Predicate predicate) { + // Both clang and GCC can vectorize this automatically with SSE4/AVX2. + uint64_t mask = 0; + for (int x = 0; x < num_levels; x++) { + mask |= static_cast(predicate(levels[x]) ? 
1 : 0) << x; + } + return ::arrow::bit_util::ToLittleEndian(mask); +} + +inline MinMax FindMinMaxImpl(const int16_t* levels, int64_t num_levels) { + MinMax out{std::numeric_limits::max(), std::numeric_limits::min()}; + for (int x = 0; x < num_levels; x++) { + out.min = std::min(levels[x], out.min); + out.max = std::max(levels[x], out.max); + } + return out; +} + +inline uint64_t GreaterThanBitmapImpl(const int16_t* levels, int64_t num_levels, + int16_t rhs) { + return LevelsToBitmap(levels, num_levels, [rhs](int16_t value) { return value > rhs; }); +} + +} // namespace parquet::internal::PARQUET_IMPL_NAMESPACE diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/schema.h b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/schema.h new file mode 100644 index 0000000000000000000000000000000000000000..896ec1e47968d5cb8a4f8df8fd097b035075a4f1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/schema.h @@ -0,0 +1,492 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// This module contains the logical parquet-cpp types (independent of Thrift +// structures), schema nodes, and related type tools + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "parquet/platform.h" +#include "parquet/types.h" +#include "parquet/windows_fixup.h" // for OPTIONAL + +namespace parquet { + +class SchemaDescriptor; + +namespace schema { + +class Node; + +// List encodings: using the terminology from Impala to define different styles +// of representing logical lists (a.k.a. ARRAY types) in Parquet schemas. Since +// the converted type named in the Parquet metadata is ConvertedType::LIST we +// use that terminology here. It also helps distinguish from the *_ARRAY +// primitive types. +// +// One-level encoding: Only allows required lists with required cells +// repeated value_type name +// +// Two-level encoding: Enables optional lists with only required cells +// group list +// repeated value_type item +// +// Three-level encoding: Enables optional lists with optional cells +// group bag +// repeated group list +// value_type item +// +// 2- and 1-level encoding are respectively equivalent to 3-level encoding with +// the non-repeated nodes set to required. +// +// The "official" encoding recommended in the Parquet spec is the 3-level, and +// we use that as the default when creating list types. For semantic completeness +// we allow the other two. Since all types of encodings will occur "in the +// wild" we need to be able to interpret the associated definition levels in +// the context of the actual encoding used in the file. 
+// +// NB: Some Parquet writers may not set ConvertedType::LIST on the repeated +// SchemaElement, which could make things challenging if we are trying to infer +// that a sequence of nodes semantically represents an array according to one +// of these encodings (versus a struct containing an array). We should refuse +// the temptation to guess, as they say. +struct ListEncoding { + enum type { ONE_LEVEL, TWO_LEVEL, THREE_LEVEL }; +}; + +class PARQUET_EXPORT ColumnPath { + public: + ColumnPath() : path_() {} + explicit ColumnPath(const std::vector& path) : path_(path) {} + explicit ColumnPath(std::vector&& path) : path_(std::move(path)) {} + + static std::shared_ptr FromDotString(const std::string& dotstring); + static std::shared_ptr FromNode(const Node& node); + + std::shared_ptr extend(const std::string& node_name) const; + std::string ToDotString() const; + const std::vector& ToDotVector() const; + + protected: + std::vector path_; +}; + +// Base class for logical schema types. A type has a name, repetition level, +// and optionally a logical type (ConvertedType in Parquet metadata parlance) +class PARQUET_EXPORT Node { + public: + enum type { PRIMITIVE, GROUP }; + + virtual ~Node() {} + + bool is_primitive() const { return type_ == Node::PRIMITIVE; } + + bool is_group() const { return type_ == Node::GROUP; } + + bool is_optional() const { return repetition_ == Repetition::OPTIONAL; } + + bool is_repeated() const { return repetition_ == Repetition::REPEATED; } + + bool is_required() const { return repetition_ == Repetition::REQUIRED; } + + virtual bool Equals(const Node* other) const = 0; + + const std::string& name() const { return name_; } + + Node::type node_type() const { return type_; } + + Repetition::type repetition() const { return repetition_; } + + ConvertedType::type converted_type() const { return converted_type_; } + + const std::shared_ptr& logical_type() const { return logical_type_; } + + /// \brief The field_id value for the serialized SchemaElement. If the + /// field_id is less than 0 (e.g. -1), it will not be set when serialized to + /// Thrift. + int field_id() const { return field_id_; } + + const Node* parent() const { return parent_; } + + const std::shared_ptr path() const; + + virtual void ToParquet(void* element) const = 0; + + // Node::Visitor abstract class for walking schemas with the visitor pattern + class Visitor { + public: + virtual ~Visitor() {} + + virtual void Visit(Node* node) = 0; + }; + class ConstVisitor { + public: + virtual ~ConstVisitor() {} + + virtual void Visit(const Node* node) = 0; + }; + + virtual void Visit(Visitor* visitor) = 0; + virtual void VisitConst(ConstVisitor* visitor) const = 0; + + protected: + friend class GroupNode; + + Node(Node::type type, const std::string& name, Repetition::type repetition, + ConvertedType::type converted_type = ConvertedType::NONE, int field_id = -1) + : type_(type), + name_(name), + repetition_(repetition), + converted_type_(converted_type), + field_id_(field_id), + parent_(NULLPTR) {} + + Node(Node::type type, const std::string& name, Repetition::type repetition, + std::shared_ptr logical_type, int field_id = -1) + : type_(type), + name_(name), + repetition_(repetition), + logical_type_(std::move(logical_type)), + field_id_(field_id), + parent_(NULLPTR) {} + + Node::type type_; + std::string name_; + Repetition::type repetition_; + ConvertedType::type converted_type_; + std::shared_ptr logical_type_; + int field_id_; + // Nodes should not be shared, they have a single parent. 
+ const Node* parent_; + + bool EqualsInternal(const Node* other) const; + void SetParent(const Node* p_parent); + + private: + PARQUET_DISALLOW_COPY_AND_ASSIGN(Node); +}; + +// Save our breath all over the place with these typedefs +using NodePtr = std::shared_ptr; +using NodeVector = std::vector; + +// A type that is one of the primitive Parquet storage types. In addition to +// the other type metadata (name, repetition level, logical type), also has the +// physical storage type and their type-specific metadata (byte width, decimal +// parameters) +class PARQUET_EXPORT PrimitiveNode : public Node { + public: + static std::unique_ptr FromParquet(const void* opaque_element); + + // A field_id -1 (or any negative value) will be serialized as null in Thrift + static inline NodePtr Make(const std::string& name, Repetition::type repetition, + Type::type type, + ConvertedType::type converted_type = ConvertedType::NONE, + int length = -1, int precision = -1, int scale = -1, + int field_id = -1) { + return NodePtr(new PrimitiveNode(name, repetition, type, converted_type, length, + precision, scale, field_id)); + } + + // If no logical type, pass LogicalType::None() or nullptr + // A field_id -1 (or any negative value) will be serialized as null in Thrift + static inline NodePtr Make(const std::string& name, Repetition::type repetition, + std::shared_ptr logical_type, + Type::type primitive_type, int primitive_length = -1, + int field_id = -1) { + return NodePtr(new PrimitiveNode(name, repetition, std::move(logical_type), + primitive_type, primitive_length, field_id)); + } + + bool Equals(const Node* other) const override; + + Type::type physical_type() const { return physical_type_; } + + ColumnOrder column_order() const { return column_order_; } + + void SetColumnOrder(ColumnOrder column_order) { column_order_ = column_order; } + + int32_t type_length() const { return type_length_; } + + const DecimalMetadata& decimal_metadata() const { return decimal_metadata_; } + + void ToParquet(void* element) const override; + void Visit(Visitor* visitor) override; + void VisitConst(ConstVisitor* visitor) const override; + + private: + PrimitiveNode(const std::string& name, Repetition::type repetition, Type::type type, + ConvertedType::type converted_type = ConvertedType::NONE, int length = -1, + int precision = -1, int scale = -1, int field_id = -1); + + PrimitiveNode(const std::string& name, Repetition::type repetition, + std::shared_ptr logical_type, + Type::type primitive_type, int primitive_length = -1, int field_id = -1); + + Type::type physical_type_; + int32_t type_length_; + DecimalMetadata decimal_metadata_; + ColumnOrder column_order_; + + // For FIXED_LEN_BYTE_ARRAY + void SetTypeLength(int32_t length) { type_length_ = length; } + + bool EqualsInternal(const PrimitiveNode* other) const; + + FRIEND_TEST(TestPrimitiveNode, Attrs); + FRIEND_TEST(TestPrimitiveNode, Equals); + FRIEND_TEST(TestPrimitiveNode, PhysicalLogicalMapping); + FRIEND_TEST(TestPrimitiveNode, FromParquet); +}; + +class PARQUET_EXPORT GroupNode : public Node { + public: + static std::unique_ptr FromParquet(const void* opaque_element, + NodeVector fields = {}); + + // A field_id -1 (or any negative value) will be serialized as null in Thrift + static inline NodePtr Make(const std::string& name, Repetition::type repetition, + const NodeVector& fields, + ConvertedType::type converted_type = ConvertedType::NONE, + int field_id = -1) { + return NodePtr(new GroupNode(name, repetition, fields, converted_type, field_id)); + } + + // If 
no logical type, pass nullptr + // A field_id -1 (or any negative value) will be serialized as null in Thrift + static inline NodePtr Make(const std::string& name, Repetition::type repetition, + const NodeVector& fields, + std::shared_ptr logical_type, + int field_id = -1) { + return NodePtr(new GroupNode(name, repetition, fields, logical_type, field_id)); + } + + bool Equals(const Node* other) const override; + + const NodePtr& field(int i) const { return fields_[i]; } + // Get the index of a field by its name, or negative value if not found. + // If several fields share the same name, it is unspecified which one + // is returned. + int FieldIndex(const std::string& name) const; + // Get the index of a field by its node, or negative value if not found. + int FieldIndex(const Node& node) const; + + int field_count() const { return static_cast(fields_.size()); } + + void ToParquet(void* element) const override; + void Visit(Visitor* visitor) override; + void VisitConst(ConstVisitor* visitor) const override; + + /// \brief Return true if this node or any child node has REPEATED repetition + /// type + bool HasRepeatedFields() const; + + private: + GroupNode(const std::string& name, Repetition::type repetition, + const NodeVector& fields, + ConvertedType::type converted_type = ConvertedType::NONE, int field_id = -1); + + GroupNode(const std::string& name, Repetition::type repetition, + const NodeVector& fields, std::shared_ptr logical_type, + int field_id = -1); + + NodeVector fields_; + bool EqualsInternal(const GroupNode* other) const; + + // Mapping between field name to the field index + std::unordered_multimap field_name_to_idx_; + + FRIEND_TEST(TestGroupNode, Attrs); + FRIEND_TEST(TestGroupNode, Equals); + FRIEND_TEST(TestGroupNode, FieldIndex); + FRIEND_TEST(TestGroupNode, FieldIndexDuplicateName); +}; + +// ---------------------------------------------------------------------- +// Convenience primitive type factory functions + +#define PRIMITIVE_FACTORY(FuncName, TYPE) \ + static inline NodePtr FuncName(const std::string& name, \ + Repetition::type repetition = Repetition::OPTIONAL, \ + int field_id = -1) { \ + return PrimitiveNode::Make(name, repetition, Type::TYPE, ConvertedType::NONE, \ + /*length=*/-1, /*precision=*/-1, /*scale=*/-1, field_id); \ + } + +PRIMITIVE_FACTORY(Boolean, BOOLEAN) +PRIMITIVE_FACTORY(Int32, INT32) +PRIMITIVE_FACTORY(Int64, INT64) +PRIMITIVE_FACTORY(Int96, INT96) +PRIMITIVE_FACTORY(Float, FLOAT) +PRIMITIVE_FACTORY(Double, DOUBLE) +PRIMITIVE_FACTORY(ByteArray, BYTE_ARRAY) + +void PARQUET_EXPORT PrintSchema(const schema::Node* schema, std::ostream& stream, + int indent_width = 2); + +} // namespace schema + +// The ColumnDescriptor encapsulates information necessary to interpret +// primitive column data in the context of a particular schema. We have to +// examine the node structure of a column's path to the root in the schema tree +// to be able to reassemble the nested structure from the repetition and +// definition levels. 
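// Aside (illustrative sketch, not part of the header): the factory helpers declared
// above can be combined to build and analyze a small schema, e.g.
//
//   using parquet::schema::GroupNode;
//   using parquet::schema::Int32;
//   using parquet::schema::ByteArray;
//   parquet::schema::NodeVector fields = {
//       Int32("id", parquet::Repetition::REQUIRED),
//       ByteArray("name", parquet::Repetition::OPTIONAL)};
//   auto root = GroupNode::Make("schema", parquet::Repetition::REQUIRED, fields);
//   parquet::SchemaDescriptor descr;
//   descr.Init(root);  // computes max repetition/definition levels for each leaf
//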
+class PARQUET_EXPORT ColumnDescriptor { + public: + ColumnDescriptor(schema::NodePtr node, int16_t max_definition_level, + int16_t max_repetition_level, + const SchemaDescriptor* schema_descr = NULLPTR); + + bool Equals(const ColumnDescriptor& other) const; + + int16_t max_definition_level() const { return max_definition_level_; } + + int16_t max_repetition_level() const { return max_repetition_level_; } + + Type::type physical_type() const { return primitive_node_->physical_type(); } + + ConvertedType::type converted_type() const { return primitive_node_->converted_type(); } + + const std::shared_ptr& logical_type() const { + return primitive_node_->logical_type(); + } + + ColumnOrder column_order() const { return primitive_node_->column_order(); } + + SortOrder::type sort_order() const { + auto la = logical_type(); + auto pt = physical_type(); + return la ? GetSortOrder(la, pt) : GetSortOrder(converted_type(), pt); + } + + const std::string& name() const { return primitive_node_->name(); } + + const std::shared_ptr path() const; + + const schema::NodePtr& schema_node() const { return node_; } + + std::string ToString() const; + + int type_length() const; + + int type_precision() const; + + int type_scale() const; + + private: + schema::NodePtr node_; + const schema::PrimitiveNode* primitive_node_; + + int16_t max_definition_level_; + int16_t max_repetition_level_; +}; + +// Container for the converted Parquet schema with a computed information from +// the schema analysis needed for file reading +// +// * Column index to Node +// * Max repetition / definition levels for each primitive node +// +// The ColumnDescriptor objects produced by this class can be used to assist in +// the reconstruction of fully materialized data structures from the +// repetition-definition level encoding of nested data +// +// TODO(wesm): this object can be recomputed from a Schema +class PARQUET_EXPORT SchemaDescriptor { + public: + SchemaDescriptor() {} + ~SchemaDescriptor() {} + + // Analyze the schema + void Init(std::unique_ptr schema); + void Init(schema::NodePtr schema); + + const ColumnDescriptor* Column(int i) const; + + // Get the index of a column by its dotstring path, or negative value if not found. + // If several columns share the same dotstring path, it is unspecified which one + // is returned. + int ColumnIndex(const std::string& node_path) const; + // Get the index of a column by its node, or negative value if not found. + int ColumnIndex(const schema::Node& node) const; + + bool Equals(const SchemaDescriptor& other, std::ostream* diff_output = NULLPTR) const; + + // The number of physical columns appearing in the file + int num_columns() const { return static_cast(leaves_.size()); } + + const schema::NodePtr& schema_root() const { return schema_; } + + const schema::GroupNode* group_node() const { return group_node_; } + + // Returns the root (child of the schema root) node of the leaf(column) node + const schema::Node* GetColumnRoot(int i) const; + + const std::string& name() const { return group_node_->name(); } + + std::string ToString() const; + + void updateColumnOrders(const std::vector& column_orders); + + /// \brief Return column index corresponding to a particular + /// PrimitiveNode. 
Returns -1 if not found + int GetColumnIndex(const schema::PrimitiveNode& node) const; + + /// \brief Return true if any field or their children have REPEATED repetition + /// type + bool HasRepeatedFields() const; + + private: + friend class ColumnDescriptor; + + // Root Node + schema::NodePtr schema_; + // Root Node + const schema::GroupNode* group_node_; + + void BuildTree(const schema::NodePtr& node, int16_t max_def_level, + int16_t max_rep_level, const schema::NodePtr& base); + + // Result of leaf node / tree analysis + std::vector leaves_; + + std::unordered_map node_to_leaf_index_; + + // Mapping between leaf nodes and root group of leaf (first node + // below the schema's root group) + // + // For example, the leaf `a.b.c.d` would have a link back to `a` + // + // -- a <------ + // -- -- b | + // -- -- -- c | + // -- -- -- -- d + std::unordered_map leaf_to_base_; + + // Mapping between ColumnPath DotString to the leaf index + std::unordered_multimap leaf_to_idx_; +}; + +} // namespace parquet diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/__init__.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/common.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/common.pxd new file mode 100644 index 0000000000000000000000000000000000000000..044dd0333f323367dcba32a8fe013eccd0986e08 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/common.pxd @@ -0,0 +1,175 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from libc.stdint cimport * +from libcpp cimport bool as c_bool, nullptr +from libcpp.functional cimport function +from libcpp.memory cimport shared_ptr, unique_ptr, make_shared +from libcpp.string cimport string as c_string +from libcpp.utility cimport pair +from libcpp.vector cimport vector +from libcpp.unordered_map cimport unordered_map +from libcpp.unordered_set cimport unordered_set + +from cpython cimport PyObject +from cpython.datetime cimport PyDateTime_DateTime +cimport cpython + + +cdef extern from * namespace "std" nogil: + cdef shared_ptr[T] static_pointer_cast[T, U](shared_ptr[U]) + + +cdef extern from "" namespace "std" nogil: + cdef cppclass optional[T]: + ctypedef T value_type + optional() + optional(nullopt_t) + optional(optional&) except + + optional(T&) except + + c_bool has_value() + T& value() + T& value_or[U](U& default_value) + void swap(optional&) + void reset() + T& emplace(...) 
+ T& operator*() + # T* operator->() # Not Supported + optional& operator=(optional&) + optional& operator=[U](U&) + + +# vendored from the cymove project https://github.com/ozars/cymove +cdef extern from * namespace "cymove" nogil: + """ + #include + #include + namespace cymove { + template + inline typename std::remove_reference::type&& cymove(T& t) { + return std::move(t); + } + template + inline typename std::remove_reference::type&& cymove(T&& t) { + return std::move(t); + } + } // namespace cymove + """ + cdef T move" cymove::cymove"[T](T) + +cdef extern from * namespace "arrow::py" nogil: + """ + #include + #include + + namespace arrow { + namespace py { + template + std::shared_ptr to_shared(std::unique_ptr& t) { + return std::move(t); + } + template + std::shared_ptr to_shared(std::unique_ptr&& t) { + return std::move(t); + } + } // namespace py + } // namespace arrow + """ + cdef shared_ptr[T] to_shared" arrow::py::to_shared"[T](unique_ptr[T]) + +cdef extern from "arrow/python/platform.h": + pass + +cdef extern from "": + void Py_XDECREF(PyObject* o) + Py_ssize_t Py_REFCNT(PyObject* o) + +cdef extern from "numpy/halffloat.h": + ctypedef uint16_t npy_half + +cdef extern from "arrow/api.h" namespace "arrow" nogil: + # We can later add more of the common status factory methods as needed + cdef CStatus CStatus_OK "arrow::Status::OK"() + + cdef CStatus CStatus_Invalid "arrow::Status::Invalid"() + cdef CStatus CStatus_NotImplemented \ + "arrow::Status::NotImplemented"(const c_string& msg) + cdef CStatus CStatus_UnknownError \ + "arrow::Status::UnknownError"(const c_string& msg) + + cdef cppclass CStatus "arrow::Status": + CStatus() + + c_string ToString() + c_string message() + shared_ptr[CStatusDetail] detail() + + c_bool ok() + c_bool IsIOError() + c_bool IsOutOfMemory() + c_bool IsInvalid() + c_bool IsKeyError() + c_bool IsNotImplemented() + c_bool IsTypeError() + c_bool IsCapacityError() + c_bool IsIndexError() + c_bool IsSerializationError() + c_bool IsCancelled() + + void Warn() + + cdef cppclass CStatusDetail "arrow::StatusDetail": + c_string ToString() + + +cdef extern from "arrow/result.h" namespace "arrow" nogil: + cdef cppclass CResult "arrow::Result"[T]: + CResult() + CResult(CStatus) + CResult(T) + c_bool ok() + CStatus status() + CStatus Value(T*) + T operator*() + + +cdef extern from "arrow/util/future.h" namespace "arrow" nogil: + cdef cppclass CFuture "arrow::Future"[T]: + CFuture() + + +cdef extern from "arrow/python/async.h" namespace "arrow::py" nogil: + # BindFuture's third argument is really a C++ callable with + # the signature `object(T*)`, but Cython does not allow declaring that. + # We use an ellipsis as a workaround. + # Another possibility is to type-erase the argument by making it + # `object(void*)`, but it would lose compile-time C++ type safety. + void BindFuture[T](CFuture[T], object cb, ...) + + +cdef extern from "arrow/python/common.h" namespace "arrow::py" nogil: + T GetResultValue[T](CResult[T]) except * + cdef function[F] BindFunction[F](void* unbound, object bound, ...) 
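# Aside (illustrative, not part of this .pxd): GetResultValue is the usual way to
# unwrap a CResult[T] in the Cython wrappers; it returns the contained value or
# raises a Python exception translated from the embedded Status, e.g.
#
#   cdef CResult[int64_t] maybe_value = some_cpp_call()   # hypothetical call
#   cdef int64_t value = GetResultValue(maybe_value)      # raises on error status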
+ + +cdef inline object PyObject_to_object(PyObject* o): + # Cast to "object" increments reference count + cdef object result = o + cpython.Py_DECREF(result) + return result diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd new file mode 100644 index 0000000000000000000000000000000000000000..6dae45ab80b1c168742cb3e67eb55cccb8ba1bf8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd @@ -0,0 +1,3023 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * + + +cdef extern from "arrow/util/key_value_metadata.h" namespace "arrow" nogil: + cdef cppclass CKeyValueMetadata" arrow::KeyValueMetadata": + CKeyValueMetadata() + CKeyValueMetadata(const unordered_map[c_string, c_string]&) + CKeyValueMetadata(const vector[c_string]& keys, + const vector[c_string]& values) + + void reserve(int64_t n) + int64_t size() const + c_string key(int64_t i) const + c_string value(int64_t i) const + int FindKey(const c_string& key) const + + shared_ptr[CKeyValueMetadata] Copy() const + c_bool Equals(const CKeyValueMetadata& other) + void Append(const c_string& key, const c_string& value) + void ToUnorderedMap(unordered_map[c_string, c_string]*) const + c_string ToString() const + + CResult[c_string] Get(const c_string& key) const + CStatus Delete(const c_string& key) + CStatus Set(const c_string& key, const c_string& value) + c_bool Contains(const c_string& key) const + + +cdef extern from "arrow/util/decimal.h" namespace "arrow" nogil: + cdef cppclass CDecimal128" arrow::Decimal128": + c_string ToString(int32_t scale) const + + +cdef extern from "arrow/util/decimal.h" namespace "arrow" nogil: + cdef cppclass CDecimal256" arrow::Decimal256": + c_string ToString(int32_t scale) const + + +cdef extern from "arrow/config.h" namespace "arrow" nogil: + cdef cppclass CBuildInfo" arrow::BuildInfo": + int version + int version_major + int version_minor + int version_patch + c_string version_string + c_string so_version + c_string full_so_version + c_string compiler_id + c_string compiler_version + c_string compiler_flags + c_string git_id + c_string git_description + c_string package_kind + c_string build_type + + const CBuildInfo& GetBuildInfo() + + cdef cppclass CRuntimeInfo" arrow::RuntimeInfo": + c_string simd_level + c_string detected_simd_level + + CRuntimeInfo GetRuntimeInfo() + + cdef cppclass CGlobalOptions" arrow::GlobalOptions": + optional[c_string] timezone_db_path + + CStatus Initialize(const CGlobalOptions& options) + + +cdef extern from "arrow/util/future.h" namespace "arrow" nogil: + cdef cppclass CFuture_Void" arrow::Future<>": + CStatus status() + + +cdef extern 
from "arrow/api.h" namespace "arrow" nogil: + cdef enum Type" arrow::Type::type": + _Type_NA" arrow::Type::NA" + + _Type_BOOL" arrow::Type::BOOL" + + _Type_UINT8" arrow::Type::UINT8" + _Type_INT8" arrow::Type::INT8" + _Type_UINT16" arrow::Type::UINT16" + _Type_INT16" arrow::Type::INT16" + _Type_UINT32" arrow::Type::UINT32" + _Type_INT32" arrow::Type::INT32" + _Type_UINT64" arrow::Type::UINT64" + _Type_INT64" arrow::Type::INT64" + + _Type_HALF_FLOAT" arrow::Type::HALF_FLOAT" + _Type_FLOAT" arrow::Type::FLOAT" + _Type_DOUBLE" arrow::Type::DOUBLE" + + _Type_DECIMAL128" arrow::Type::DECIMAL128" + _Type_DECIMAL256" arrow::Type::DECIMAL256" + + _Type_DATE32" arrow::Type::DATE32" + _Type_DATE64" arrow::Type::DATE64" + _Type_TIMESTAMP" arrow::Type::TIMESTAMP" + _Type_TIME32" arrow::Type::TIME32" + _Type_TIME64" arrow::Type::TIME64" + _Type_DURATION" arrow::Type::DURATION" + _Type_INTERVAL_MONTH_DAY_NANO" arrow::Type::INTERVAL_MONTH_DAY_NANO" + + _Type_BINARY" arrow::Type::BINARY" + _Type_STRING" arrow::Type::STRING" + _Type_LARGE_BINARY" arrow::Type::LARGE_BINARY" + _Type_LARGE_STRING" arrow::Type::LARGE_STRING" + _Type_FIXED_SIZE_BINARY" arrow::Type::FIXED_SIZE_BINARY" + _Type_BINARY_VIEW" arrow::Type::BINARY_VIEW" + _Type_STRING_VIEW" arrow::Type::STRING_VIEW" + + _Type_LIST" arrow::Type::LIST" + _Type_LARGE_LIST" arrow::Type::LARGE_LIST" + _Type_FIXED_SIZE_LIST" arrow::Type::FIXED_SIZE_LIST" + _Type_LIST_VIEW" arrow::Type::LIST_VIEW" + _Type_LARGE_LIST_VIEW" arrow::Type::LARGE_LIST_VIEW" + _Type_STRUCT" arrow::Type::STRUCT" + _Type_SPARSE_UNION" arrow::Type::SPARSE_UNION" + _Type_DENSE_UNION" arrow::Type::DENSE_UNION" + _Type_DICTIONARY" arrow::Type::DICTIONARY" + _Type_RUN_END_ENCODED" arrow::Type::RUN_END_ENCODED" + _Type_MAP" arrow::Type::MAP" + + _Type_EXTENSION" arrow::Type::EXTENSION" + + cdef enum UnionMode" arrow::UnionMode::type": + _UnionMode_SPARSE" arrow::UnionMode::SPARSE" + _UnionMode_DENSE" arrow::UnionMode::DENSE" + + cdef enum TimeUnit" arrow::TimeUnit::type": + TimeUnit_SECOND" arrow::TimeUnit::SECOND" + TimeUnit_MILLI" arrow::TimeUnit::MILLI" + TimeUnit_MICRO" arrow::TimeUnit::MICRO" + TimeUnit_NANO" arrow::TimeUnit::NANO" + + cdef cppclass CBufferSpec" arrow::DataTypeLayout::BufferSpec": + pass + + cdef cppclass CDataTypeLayout" arrow::DataTypeLayout": + vector[CBufferSpec] buffers + c_bool has_dictionary + + cdef cppclass CDataType" arrow::DataType": + Type id() + + c_bool Equals(const CDataType& other, c_bool check_metadata) + c_bool Equals(const shared_ptr[CDataType]& other, c_bool check_metadata) + + shared_ptr[CField] field(int i) + const vector[shared_ptr[CField]] fields() + int num_fields() + CDataTypeLayout layout() + c_string ToString() + + c_bool is_primitive(Type type) + c_bool is_numeric(Type type) + + cdef cppclass CArrayData" arrow::ArrayData": + shared_ptr[CDataType] type + int64_t length + int64_t null_count + int64_t offset + vector[shared_ptr[CBuffer]] buffers + vector[shared_ptr[CArrayData]] child_data + shared_ptr[CArrayData] dictionary + + @staticmethod + shared_ptr[CArrayData] Make(const shared_ptr[CDataType]& type, + int64_t length, + vector[shared_ptr[CBuffer]]& buffers, + int64_t null_count, + int64_t offset) + + @staticmethod + shared_ptr[CArrayData] MakeWithChildren" Make"( + const shared_ptr[CDataType]& type, + int64_t length, + vector[shared_ptr[CBuffer]]& buffers, + vector[shared_ptr[CArrayData]]& child_data, + int64_t null_count, + int64_t offset) + + @staticmethod + shared_ptr[CArrayData] MakeWithChildrenAndDictionary" Make"( + const 
shared_ptr[CDataType]& type, + int64_t length, + vector[shared_ptr[CBuffer]]& buffers, + vector[shared_ptr[CArrayData]]& child_data, + shared_ptr[CArrayData]& dictionary, + int64_t null_count, + int64_t offset) + + cdef cppclass CArray" arrow::Array": + shared_ptr[CDataType] type() + + int64_t length() + int64_t null_count() + int64_t offset() + Type type_id() + + int num_fields() + + CResult[shared_ptr[CScalar]] GetScalar(int64_t i) const + + c_string Diff(const CArray& other) + c_bool Equals(const CArray& arr) + c_bool IsNull(int i) + + shared_ptr[CArrayData] data() + + shared_ptr[CArray] Slice(int64_t offset) + shared_ptr[CArray] Slice(int64_t offset, int64_t length) + + CStatus Validate() const + CStatus ValidateFull() const + CResult[shared_ptr[CArray]] View(const shared_ptr[CDataType]& type) + + shared_ptr[CArray] MakeArray(const shared_ptr[CArrayData]& data) + CResult[shared_ptr[CArray]] MakeArrayOfNull( + const shared_ptr[CDataType]& type, int64_t length, CMemoryPool* pool) + + CResult[shared_ptr[CArray]] MakeArrayFromScalar( + const CScalar& scalar, int64_t length, CMemoryPool* pool) + + CStatus DebugPrint(const CArray& arr, int indent) + + cdef cppclass CFixedWidthType" arrow::FixedWidthType"(CDataType): + int bit_width() + int byte_width() + + cdef cppclass CNullArray" arrow::NullArray"(CArray): + CNullArray(int64_t length) + + cdef cppclass CDictionaryArray" arrow::DictionaryArray"(CArray): + CDictionaryArray(const shared_ptr[CDataType]& type, + const shared_ptr[CArray]& indices, + const shared_ptr[CArray]& dictionary) + CDictionaryArray(const shared_ptr[CArrayData]& data) + + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const shared_ptr[CDataType]& type, + const shared_ptr[CArray]& indices, + const shared_ptr[CArray]& dictionary) + + shared_ptr[CArray] indices() + shared_ptr[CArray] dictionary() + + cdef cppclass CDate32Type" arrow::Date32Type"(CFixedWidthType): + pass + + cdef cppclass CDate64Type" arrow::Date64Type"(CFixedWidthType): + pass + + cdef cppclass CTimestampType" arrow::TimestampType"(CFixedWidthType): + CTimestampType(TimeUnit unit) + TimeUnit unit() + const c_string& timezone() + + cdef cppclass CTime32Type" arrow::Time32Type"(CFixedWidthType): + TimeUnit unit() + + cdef cppclass CTime64Type" arrow::Time64Type"(CFixedWidthType): + TimeUnit unit() + + shared_ptr[CDataType] ctime32" arrow::time32"(TimeUnit unit) + shared_ptr[CDataType] ctime64" arrow::time64"(TimeUnit unit) + + cdef cppclass CDurationType" arrow::DurationType"(CFixedWidthType): + TimeUnit unit() + + shared_ptr[CDataType] cduration" arrow::duration"(TimeUnit unit) + + cdef cppclass CDictionaryType" arrow::DictionaryType"(CFixedWidthType): + CDictionaryType(const shared_ptr[CDataType]& index_type, + const shared_ptr[CDataType]& value_type, + c_bool ordered) + + shared_ptr[CDataType] index_type() + shared_ptr[CDataType] value_type() + c_bool ordered() + + shared_ptr[CDataType] ctimestamp" arrow::timestamp"(TimeUnit unit) + shared_ptr[CDataType] ctimestamp" arrow::timestamp"( + TimeUnit unit, const c_string& timezone) + + cdef cppclass CMemoryPool" arrow::MemoryPool": + int64_t bytes_allocated() + int64_t max_memory() + c_string backend_name() + void ReleaseUnused() + + cdef cppclass CLoggingMemoryPool" arrow::LoggingMemoryPool"(CMemoryPool): + CLoggingMemoryPool(CMemoryPool*) + + cdef cppclass CProxyMemoryPool" arrow::ProxyMemoryPool"(CMemoryPool): + CProxyMemoryPool(CMemoryPool*) + + cdef cppclass CBuffer" arrow::Buffer": + CBuffer(const uint8_t* data, int64_t size) + const uint8_t* 
data() + uint8_t* mutable_data() + uintptr_t address() + uintptr_t mutable_address() + int64_t size() + shared_ptr[CBuffer] parent() + c_bool is_cpu() const + c_bool is_mutable() const + c_string ToHexString() + c_bool Equals(const CBuffer& other) + + CResult[shared_ptr[CBuffer]] SliceBufferSafe( + const shared_ptr[CBuffer]& buffer, int64_t offset) + CResult[shared_ptr[CBuffer]] SliceBufferSafe( + const shared_ptr[CBuffer]& buffer, int64_t offset, int64_t length) + + cdef cppclass CMutableBuffer" arrow::MutableBuffer"(CBuffer): + CMutableBuffer(const uint8_t* data, int64_t size) + + cdef cppclass CResizableBuffer" arrow::ResizableBuffer"(CMutableBuffer): + CStatus Resize(const int64_t new_size, c_bool shrink_to_fit) + CStatus Reserve(const int64_t new_size) + + CResult[unique_ptr[CBuffer]] AllocateBuffer(const int64_t size, + CMemoryPool* pool) + + CResult[unique_ptr[CResizableBuffer]] AllocateResizableBuffer( + const int64_t size, CMemoryPool* pool) + + cdef cppclass CSyncEvent" arrow::Device::SyncEvent": + pass + + cdef cppclass CDevice" arrow::Device": + pass + + cdef CMemoryPool* c_default_memory_pool" arrow::default_memory_pool"() + cdef CMemoryPool* c_system_memory_pool" arrow::system_memory_pool"() + cdef CStatus c_jemalloc_memory_pool" arrow::jemalloc_memory_pool"( + CMemoryPool** out) + cdef CStatus c_mimalloc_memory_pool" arrow::mimalloc_memory_pool"( + CMemoryPool** out) + cdef vector[c_string] c_supported_memory_backends \ + " arrow::SupportedMemoryBackendNames"() + + CStatus c_jemalloc_set_decay_ms" arrow::jemalloc_set_decay_ms"(int ms) + + cdef cppclass CListType" arrow::ListType"(CDataType): + CListType(const shared_ptr[CDataType]& value_type) + CListType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CLargeListType" arrow::LargeListType"(CDataType): + CLargeListType(const shared_ptr[CDataType]& value_type) + CLargeListType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CListViewType" arrow::ListViewType"(CDataType): + CListViewType(const shared_ptr[CDataType]& value_type) + CListViewType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CLargeListViewType" arrow::LargeListViewType"(CDataType): + CLargeListViewType(const shared_ptr[CDataType]& value_type) + CLargeListViewType(const shared_ptr[CField]& field) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + + cdef cppclass CMapType" arrow::MapType"(CDataType): + CMapType(const shared_ptr[CField]& key_field, + const shared_ptr[CField]& item_field, c_bool keys_sorted) + shared_ptr[CDataType] key_type() + shared_ptr[CField] key_field() + shared_ptr[CDataType] item_type() + shared_ptr[CField] item_field() + c_bool keys_sorted() + + cdef cppclass CFixedSizeListType" arrow::FixedSizeListType"(CDataType): + CFixedSizeListType(const shared_ptr[CDataType]& value_type, + int32_t list_size) + CFixedSizeListType(const shared_ptr[CField]& field, int32_t list_size) + shared_ptr[CDataType] value_type() + shared_ptr[CField] value_field() + int32_t list_size() + + cdef cppclass CStringType" arrow::StringType"(CDataType): + pass + + cdef cppclass CFixedSizeBinaryType \ + " arrow::FixedSizeBinaryType"(CFixedWidthType): + CFixedSizeBinaryType(int byte_width) + int byte_width() + int bit_width() + + cdef cppclass CDecimal128Type \ + " arrow::Decimal128Type"(CFixedSizeBinaryType): + CDecimal128Type(int 
precision, int scale) + int precision() + int scale() + + cdef cppclass CDecimal256Type \ + " arrow::Decimal256Type"(CFixedSizeBinaryType): + CDecimal256Type(int precision, int scale) + int precision() + int scale() + + cdef cppclass CRunEndEncodedType " arrow::RunEndEncodedType"(CDataType): + CRunEndEncodedType(const shared_ptr[CDataType]& run_end_type, + const shared_ptr[CDataType]& value_type) + const shared_ptr[CDataType]& run_end_type() + const shared_ptr[CDataType]& value_type() + + cdef cppclass CField" arrow::Field": + cppclass CMergeOptions "MergeOptions": + CMergeOptions() + c_bool promote_nullability + + @staticmethod + CMergeOptions Defaults() + + @staticmethod + CMergeOptions Permissive() + + const c_string& name() + shared_ptr[CDataType] type() + c_bool nullable() + + c_string ToString() + c_bool Equals(const CField& other, c_bool check_metadata) + + shared_ptr[const CKeyValueMetadata] metadata() + + CField(const c_string& name, const shared_ptr[CDataType]& type, + c_bool nullable) + + CField(const c_string& name, const shared_ptr[CDataType]& type, + c_bool nullable, const shared_ptr[CKeyValueMetadata]& metadata) + + # Removed const in Cython so don't have to cast to get code to generate + shared_ptr[CField] AddMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CField] WithMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CField] RemoveMetadata() + shared_ptr[CField] WithType(const shared_ptr[CDataType]& type) + shared_ptr[CField] WithName(const c_string& name) + shared_ptr[CField] WithNullable(c_bool nullable) + vector[shared_ptr[CField]] Flatten() + + cdef cppclass CFieldRef" arrow::FieldRef": + CFieldRef() + CFieldRef(c_string name) + CFieldRef(int index) + CFieldRef(vector[CFieldRef]) + + @staticmethod + CResult[CFieldRef] FromDotPath(c_string& dot_path) + const c_string* name() const + + cdef cppclass CFieldRefHash" arrow::FieldRef::Hash": + pass + + cdef cppclass CStructType" arrow::StructType"(CDataType): + CStructType(const vector[shared_ptr[CField]]& fields) + + shared_ptr[CField] GetFieldByName(const c_string& name) + vector[shared_ptr[CField]] GetAllFieldsByName(const c_string& name) + int GetFieldIndex(const c_string& name) + vector[int] GetAllFieldIndices(const c_string& name) + + cdef cppclass CUnionType" arrow::UnionType"(CDataType): + UnionMode mode() + const vector[int8_t]& type_codes() + const vector[int]& child_ids() + + cdef shared_ptr[CDataType] CMakeSparseUnionType" arrow::sparse_union"( + vector[shared_ptr[CField]] fields, + vector[int8_t] type_codes) + + cdef shared_ptr[CDataType] CMakeDenseUnionType" arrow::dense_union"( + vector[shared_ptr[CField]] fields, + vector[int8_t] type_codes) + + cdef shared_ptr[CDataType] CMakeRunEndEncodedType" arrow::run_end_encoded"( + shared_ptr[CDataType] run_end_type, + shared_ptr[CDataType] value_type) + + cdef shared_ptr[CDataType] CMakeListViewType" arrow::list_view"( + shared_ptr[CField] value_type) + + cdef shared_ptr[CDataType] CMakeLargeListViewType" arrow::large_list_view"( + shared_ptr[CField] value_type) + + cdef cppclass CSchema" arrow::Schema": + CSchema(const vector[shared_ptr[CField]]& fields) + CSchema(const vector[shared_ptr[CField]]& fields, + const shared_ptr[const CKeyValueMetadata]& metadata) + + # Does not actually exist, but gets Cython to not complain + CSchema(const vector[shared_ptr[CField]]& fields, + const shared_ptr[CKeyValueMetadata]& metadata) + + c_bool Equals(const CSchema& other, c_bool check_metadata) + + shared_ptr[CField] field(int i) + 
shared_ptr[const CKeyValueMetadata] metadata() + shared_ptr[CField] GetFieldByName(const c_string& name) + vector[shared_ptr[CField]] GetAllFieldsByName(const c_string& name) + int GetFieldIndex(const c_string& name) + vector[int] GetAllFieldIndices(const c_string& name) + const vector[shared_ptr[CField]] fields() + int num_fields() + c_string ToString() + + CResult[shared_ptr[CSchema]] AddField(int i, + const shared_ptr[CField]& field) + CResult[shared_ptr[CSchema]] RemoveField(int i) + CResult[shared_ptr[CSchema]] SetField(int i, + const shared_ptr[CField]& field) + + # Removed const in Cython so don't have to cast to get code to generate + shared_ptr[CSchema] AddMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CSchema] WithMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + shared_ptr[CSchema] RemoveMetadata() + + CResult[shared_ptr[CSchema]] UnifySchemas( + const vector[shared_ptr[CSchema]]& schemas, + CField.CMergeOptions field_merge_options) + + cdef cppclass PrettyPrintOptions: + PrettyPrintOptions() + PrettyPrintOptions(int indent_arg) + PrettyPrintOptions(int indent_arg, int window_arg) + int indent + int indent_size + int window + int container_window + c_string null_rep + c_bool skip_new_lines + c_bool truncate_metadata + c_bool show_field_metadata + c_bool show_schema_metadata + + @staticmethod + PrettyPrintOptions Defaults() + + CStatus PrettyPrint(const CArray& schema, + const PrettyPrintOptions& options, + c_string* result) + CStatus PrettyPrint(const CChunkedArray& schema, + const PrettyPrintOptions& options, + c_string* result) + CStatus PrettyPrint(const CSchema& schema, + const PrettyPrintOptions& options, + c_string* result) + + cdef cppclass CBooleanArray" arrow::BooleanArray"(CArray): + c_bool Value(int i) + int64_t false_count() + int64_t true_count() + + cdef cppclass CUInt8Array" arrow::UInt8Array"(CArray): + uint8_t Value(int i) + + cdef cppclass CInt8Array" arrow::Int8Array"(CArray): + int8_t Value(int i) + + cdef cppclass CUInt16Array" arrow::UInt16Array"(CArray): + uint16_t Value(int i) + + cdef cppclass CInt16Array" arrow::Int16Array"(CArray): + int16_t Value(int i) + + cdef cppclass CUInt32Array" arrow::UInt32Array"(CArray): + uint32_t Value(int i) + + cdef cppclass CInt32Array" arrow::Int32Array"(CArray): + int32_t Value(int i) + + cdef cppclass CUInt64Array" arrow::UInt64Array"(CArray): + uint64_t Value(int i) + + cdef cppclass CInt64Array" arrow::Int64Array"(CArray): + int64_t Value(int i) + + cdef cppclass CDate32Array" arrow::Date32Array"(CArray): + int32_t Value(int i) + + cdef cppclass CDate64Array" arrow::Date64Array"(CArray): + int64_t Value(int i) + + cdef cppclass CTime32Array" arrow::Time32Array"(CArray): + int32_t Value(int i) + + cdef cppclass CTime64Array" arrow::Time64Array"(CArray): + int64_t Value(int i) + + cdef cppclass CTimestampArray" arrow::TimestampArray"(CArray): + int64_t Value(int i) + + cdef cppclass CDurationArray" arrow::DurationArray"(CArray): + int64_t Value(int i) + + cdef cppclass CMonthDayNanoIntervalArray \ + "arrow::MonthDayNanoIntervalArray"(CArray): + pass + + cdef cppclass CHalfFloatArray" arrow::HalfFloatArray"(CArray): + uint16_t Value(int i) + + cdef cppclass CFloatArray" arrow::FloatArray"(CArray): + float Value(int i) + + cdef cppclass CDoubleArray" arrow::DoubleArray"(CArray): + double Value(int i) + + cdef cppclass CFixedSizeBinaryArray" arrow::FixedSizeBinaryArray"(CArray): + const uint8_t* GetValue(int i) + + cdef cppclass CDecimal128Array" arrow::Decimal128Array"( + 
CFixedSizeBinaryArray + ): + c_string FormatValue(int i) + + cdef cppclass CDecimal256Array" arrow::Decimal256Array"( + CFixedSizeBinaryArray + ): + c_string FormatValue(int i) + + cdef cppclass CListArray" arrow::ListArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + const int32_t* raw_value_offsets() + int32_t value_offset(int i) + int32_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CDataType] value_type() + + cdef cppclass CLargeListArray" arrow::LargeListArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const CArray& offsets, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap + ) + + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CDataType] value_type() + + cdef cppclass CFixedSizeListArray" arrow::FixedSizeListArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const shared_ptr[CArray]& values, + int32_t list_size, + shared_ptr[CBuffer] null_bitmap) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + const shared_ptr[CArray]& values, + shared_ptr[CDataType], + shared_ptr[CBuffer] null_bitmap) + + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CDataType] value_type() + + cdef cppclass CListViewArray" arrow::ListViewArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + CResult[shared_ptr[CArray]] Flatten( + CMemoryPool* pool + ) + + const int32_t* raw_value_offsets() + const int32_t* raw_value_sizes() + int32_t value_offset(int i) + int32_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CArray] sizes() + shared_ptr[CDataType] value_type() + + cdef cppclass CLargeListViewArray" arrow::LargeListViewArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const CArray& offsets, + const CArray& sizes, + const CArray& values, + CMemoryPool* pool, + shared_ptr[CBuffer] null_bitmap, + ) + + CResult[shared_ptr[CArray]] Flatten( + CMemoryPool* pool + ) + + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CArray] offsets() + shared_ptr[CArray] sizes() + shared_ptr[CDataType] value_type() + + cdef cppclass CMapArray" 
arrow::MapArray"(CArray): + @staticmethod + CResult[shared_ptr[CArray]] FromArrays( + const shared_ptr[CArray]& offsets, + const shared_ptr[CArray]& keys, + const shared_ptr[CArray]& items, + CMemoryPool* pool) + + @staticmethod + CResult[shared_ptr[CArray]] FromArraysAndType" FromArrays"( + shared_ptr[CDataType], + const shared_ptr[CArray]& offsets, + const shared_ptr[CArray]& keys, + const shared_ptr[CArray]& items, + CMemoryPool* pool) + + shared_ptr[CArray] keys() + shared_ptr[CArray] items() + CMapType* map_type() + int64_t value_offset(int i) + int64_t value_length(int i) + shared_ptr[CArray] values() + shared_ptr[CDataType] value_type() + + cdef cppclass CUnionArray" arrow::UnionArray"(CArray): + shared_ptr[CBuffer] type_codes() + int8_t* raw_type_codes() + int child_id(int64_t index) + shared_ptr[CArray] field(int pos) + const CArray* UnsafeField(int pos) + UnionMode mode() + + cdef cppclass CSparseUnionArray" arrow::SparseUnionArray"(CUnionArray): + @staticmethod + CResult[shared_ptr[CArray]] Make( + const CArray& type_codes, + const vector[shared_ptr[CArray]]& children, + const vector[c_string]& field_names, + const vector[int8_t]& type_codes) + + cdef cppclass CDenseUnionArray" arrow::DenseUnionArray"(CUnionArray): + @staticmethod + CResult[shared_ptr[CArray]] Make( + const CArray& type_codes, + const CArray& value_offsets, + const vector[shared_ptr[CArray]]& children, + const vector[c_string]& field_names, + const vector[int8_t]& type_codes) + + int32_t value_offset(int i) + shared_ptr[CBuffer] value_offsets() + + cdef cppclass CBinaryArray" arrow::BinaryArray"(CArray): + const uint8_t* GetValue(int i, int32_t* length) + shared_ptr[CBuffer] value_data() + int32_t value_offset(int64_t i) + int32_t value_length(int64_t i) + int32_t total_values_length() + + cdef cppclass CLargeBinaryArray" arrow::LargeBinaryArray"(CArray): + const uint8_t* GetValue(int i, int64_t* length) + shared_ptr[CBuffer] value_data() + int64_t value_offset(int64_t i) + int64_t value_length(int64_t i) + int64_t total_values_length() + + cdef cppclass CStringArray" arrow::StringArray"(CBinaryArray): + CStringArray(int64_t length, shared_ptr[CBuffer] value_offsets, + shared_ptr[CBuffer] data, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + c_string GetString(int i) + + cdef cppclass CLargeStringArray" arrow::LargeStringArray" \ + (CLargeBinaryArray): + CLargeStringArray(int64_t length, shared_ptr[CBuffer] value_offsets, + shared_ptr[CBuffer] data, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + c_string GetString(int i) + + cdef cppclass CStructArray" arrow::StructArray"(CArray): + CStructArray(shared_ptr[CDataType]& type, int64_t length, + vector[shared_ptr[CArray]]& children, + shared_ptr[CBuffer] null_bitmap=nullptr, + int64_t null_count=-1, + int64_t offset=0) + + # XXX Cython crashes if default argument values are declared here + # https://github.com/cython/cython/issues/2167 + @staticmethod + CResult[shared_ptr[CArray]] MakeFromFieldNames "Make"( + vector[shared_ptr[CArray]] children, + vector[c_string] field_names, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + @staticmethod + CResult[shared_ptr[CArray]] MakeFromFields "Make"( + vector[shared_ptr[CArray]] children, + vector[shared_ptr[CField]] fields, + shared_ptr[CBuffer] null_bitmap, + int64_t null_count, + int64_t offset) + + shared_ptr[CArray] field(int pos) + shared_ptr[CArray] GetFieldByName(const c_string& name) const + CResult[shared_ptr[CArray]] 
GetFlattenedField(int index, CMemoryPool* pool) const + + CResult[vector[shared_ptr[CArray]]] Flatten(CMemoryPool* pool) + + cdef cppclass CRunEndEncodedArray" arrow::RunEndEncodedArray"(CArray): + @staticmethod + CResult[shared_ptr[CRunEndEncodedArray]] Make( + const shared_ptr[CDataType]& type, + int64_t logical_length, + const shared_ptr[CArray]& run_ends, + const shared_ptr[CArray]& values, + int64_t logical_offset) + + @staticmethod + CResult[shared_ptr[CRunEndEncodedArray]] MakeFromArrays "Make"( + int64_t logical_length, + const shared_ptr[CArray]& run_ends, + const shared_ptr[CArray]& values, + int64_t logical_offset) + + shared_ptr[CArray]& run_ends() + shared_ptr[CArray]& values() + + int64_t FindPhysicalOffset() + int64_t FindPhysicalLength() + + cdef cppclass CChunkedArray" arrow::ChunkedArray": + CChunkedArray(const vector[shared_ptr[CArray]]& arrays) + CChunkedArray(const vector[shared_ptr[CArray]]& arrays, + const shared_ptr[CDataType]& type) + + @staticmethod + CResult[shared_ptr[CChunkedArray]] Make(vector[shared_ptr[CArray]] chunks, + shared_ptr[CDataType] type) + int64_t length() + int64_t null_count() + int num_chunks() + c_bool Equals(const CChunkedArray& other) + + shared_ptr[CArray] chunk(int i) + shared_ptr[CDataType] type() + CResult[shared_ptr[CScalar]] GetScalar(int64_t index) const + shared_ptr[CChunkedArray] Slice(int64_t offset, int64_t length) const + shared_ptr[CChunkedArray] Slice(int64_t offset) const + + CResult[vector[shared_ptr[CChunkedArray]]] Flatten(CMemoryPool* pool) + + CStatus Validate() const + CStatus ValidateFull() const + + cdef cppclass CRecordBatch" arrow::RecordBatch": + @staticmethod + shared_ptr[CRecordBatch] Make( + const shared_ptr[CSchema]& schema, int64_t num_rows, + const vector[shared_ptr[CArray]]& columns) + + CResult[shared_ptr[CStructArray]] ToStructArray() const + + @staticmethod + CResult[shared_ptr[CRecordBatch]] FromStructArray( + const shared_ptr[CArray]& array) + + c_bool Equals(const CRecordBatch& other, c_bool check_metadata) + + shared_ptr[CSchema] schema() + shared_ptr[CArray] column(int i) + const c_string& column_name(int i) + + CResult[shared_ptr[CRecordBatch]] AddColumn( + int i, shared_ptr[CField] field, shared_ptr[CArray] column) + CResult[shared_ptr[CRecordBatch]] RemoveColumn(int i) + CResult[shared_ptr[CRecordBatch]] SetColumn( + int i, shared_ptr[CField] field, shared_ptr[CArray] column) + + const vector[shared_ptr[CArray]]& columns() + + CResult[shared_ptr[CRecordBatch]] RenameColumns(const vector[c_string]&) + CResult[shared_ptr[CRecordBatch]] SelectColumns(const vector[int]&) + + int num_columns() + int64_t num_rows() + + CStatus Validate() const + CStatus ValidateFull() const + + shared_ptr[CRecordBatch] ReplaceSchemaMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + + shared_ptr[CRecordBatch] Slice(int64_t offset) + shared_ptr[CRecordBatch] Slice(int64_t offset, int64_t length) + + CResult[shared_ptr[CTensor]] ToTensor(c_bool null_to_nan, c_bool row_major, + CMemoryPool* pool) const + + cdef cppclass CRecordBatchWithMetadata" arrow::RecordBatchWithMetadata": + shared_ptr[CRecordBatch] batch + # The struct in C++ does not actually have these two `const` qualifiers, but + # adding `const` gets Cython to not complain + const shared_ptr[const CKeyValueMetadata] custom_metadata + + cdef cppclass CTable" arrow::Table": + CTable(const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CChunkedArray]]& columns) + + @staticmethod + shared_ptr[CTable] Make( + const shared_ptr[CSchema]& schema, + const 
vector[shared_ptr[CChunkedArray]]& columns) + + @staticmethod + shared_ptr[CTable] MakeWithRows "Make"( + const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CChunkedArray]]& columns, + int64_t num_rows) + + @staticmethod + shared_ptr[CTable] MakeFromArrays" Make"( + const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CArray]]& arrays) + + @staticmethod + CResult[shared_ptr[CTable]] FromRecordBatchReader( + CRecordBatchReader *reader) + + @staticmethod + CResult[shared_ptr[CTable]] FromRecordBatches( + const shared_ptr[CSchema]& schema, + const vector[shared_ptr[CRecordBatch]]& batches) + + int num_columns() + int64_t num_rows() + + c_bool Equals(const CTable& other, c_bool check_metadata) + + shared_ptr[CSchema] schema() + shared_ptr[CChunkedArray] column(int i) + shared_ptr[CField] field(int i) + + CResult[shared_ptr[CTable]] AddColumn( + int i, shared_ptr[CField] field, shared_ptr[CChunkedArray] column) + CResult[shared_ptr[CTable]] RemoveColumn(int i) + CResult[shared_ptr[CTable]] SetColumn( + int i, shared_ptr[CField] field, shared_ptr[CChunkedArray] column) + + vector[c_string] ColumnNames() + CResult[shared_ptr[CTable]] RenameColumns(const vector[c_string]&) + CResult[shared_ptr[CTable]] SelectColumns(const vector[int]&) + + CResult[shared_ptr[CTable]] Flatten(CMemoryPool* pool) + + CResult[shared_ptr[CTable]] CombineChunks(CMemoryPool* pool) + + CStatus Validate() const + CStatus ValidateFull() const + + shared_ptr[CTable] ReplaceSchemaMetadata( + const shared_ptr[CKeyValueMetadata]& metadata) + + shared_ptr[CTable] Slice(int64_t offset) + shared_ptr[CTable] Slice(int64_t offset, int64_t length) + + cdef cppclass CRecordBatchReader" arrow::RecordBatchReader": + shared_ptr[CSchema] schema() + CStatus Close() + CResult[CRecordBatchWithMetadata] ReadNext() + CStatus ReadNext(shared_ptr[CRecordBatch]* batch) + CResult[shared_ptr[CTable]] ToTable() + + cdef cppclass TableBatchReader(CRecordBatchReader): + TableBatchReader(const CTable& table) + TableBatchReader(shared_ptr[CTable] table) + void set_chunksize(int64_t chunksize) + + cdef cppclass CTensor" arrow::Tensor": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + + const vector[int64_t]& shape() + const vector[int64_t]& strides() + int64_t size() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + c_bool is_contiguous() + Type type_id() + c_bool Equals(const CTensor& other) + + cdef cppclass CSparseIndex" arrow::SparseIndex": + pass + + cdef cppclass CSparseCOOIndex" arrow::SparseCOOIndex": + c_bool is_canonical() + + cdef cppclass CSparseCOOTensor" arrow::SparseCOOTensor": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + shared_ptr[CSparseIndex] sparse_index() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + Type type_id() + c_bool Equals(const CSparseCOOTensor& other) + + cdef cppclass CSparseCSRMatrix" arrow::SparseCSRMatrix": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + Type type_id() + c_bool Equals(const CSparseCSRMatrix& other) + + cdef cppclass CSparseCSCMatrix" arrow::SparseCSCMatrix": + 
shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + Type type_id() + c_bool Equals(const CSparseCSCMatrix& other) + + cdef cppclass CSparseCSFTensor" arrow::SparseCSFTensor": + shared_ptr[CDataType] type() + shared_ptr[CBuffer] data() + CResult[shared_ptr[CTensor]] ToTensor() + + const vector[int64_t]& shape() + int64_t size() + int64_t non_zero_length() + + int ndim() + const vector[c_string]& dim_names() + const c_string& dim_name(int i) + + c_bool is_mutable() + Type type_id() + c_bool Equals(const CSparseCSFTensor& other) + + cdef cppclass CScalar" arrow::Scalar": + CScalar(shared_ptr[CDataType]) + + shared_ptr[CDataType] type + c_bool is_valid + + c_string ToString() const + c_bool Equals(const CScalar& other) const + CStatus Validate() const + CStatus ValidateFull() const + + cdef cppclass CScalarHash" arrow::Scalar::Hash": + size_t operator()(const shared_ptr[CScalar]& scalar) const + + cdef cppclass CNullScalar" arrow::NullScalar"(CScalar): + CNullScalar() + + cdef cppclass CBooleanScalar" arrow::BooleanScalar"(CScalar): + CBooleanScalar(c_bool value) + c_bool value + + cdef cppclass CInt8Scalar" arrow::Int8Scalar"(CScalar): + int8_t value + + cdef cppclass CUInt8Scalar" arrow::UInt8Scalar"(CScalar): + uint8_t value + + cdef cppclass CInt16Scalar" arrow::Int16Scalar"(CScalar): + int16_t value + + cdef cppclass CUInt16Scalar" arrow::UInt16Scalar"(CScalar): + uint16_t value + + cdef cppclass CInt32Scalar" arrow::Int32Scalar"(CScalar): + int32_t value + + cdef cppclass CUInt32Scalar" arrow::UInt32Scalar"(CScalar): + uint32_t value + + cdef cppclass CInt64Scalar" arrow::Int64Scalar"(CScalar): + int64_t value + + cdef cppclass CUInt64Scalar" arrow::UInt64Scalar"(CScalar): + uint64_t value + + cdef cppclass CHalfFloatScalar" arrow::HalfFloatScalar"(CScalar): + npy_half value + + cdef cppclass CFloatScalar" arrow::FloatScalar"(CScalar): + float value + + cdef cppclass CDoubleScalar" arrow::DoubleScalar"(CScalar): + double value + + cdef cppclass CDecimal128Scalar" arrow::Decimal128Scalar"(CScalar): + CDecimal128 value + + cdef cppclass CDecimal256Scalar" arrow::Decimal256Scalar"(CScalar): + CDecimal256 value + + cdef cppclass CDate32Scalar" arrow::Date32Scalar"(CScalar): + int32_t value + + cdef cppclass CDate64Scalar" arrow::Date64Scalar"(CScalar): + int64_t value + + cdef cppclass CTime32Scalar" arrow::Time32Scalar"(CScalar): + int32_t value + + cdef cppclass CTime64Scalar" arrow::Time64Scalar"(CScalar): + int64_t value + + cdef cppclass CTimestampScalar" arrow::TimestampScalar"(CScalar): + int64_t value + + cdef cppclass CDurationScalar" arrow::DurationScalar"(CScalar): + int64_t value + + cdef cppclass CMonthDayNanoIntervalScalar \ + "arrow::MonthDayNanoIntervalScalar"(CScalar): + pass + + cdef cppclass CBaseBinaryScalar" arrow::BaseBinaryScalar"(CScalar): + shared_ptr[CBuffer] value + + cdef cppclass CBaseListScalar" arrow::BaseListScalar"(CScalar): + shared_ptr[CArray] value + + cdef cppclass CListScalar" arrow::ListScalar"(CBaseListScalar): + pass + + cdef cppclass CListViewScalar" arrow::ListViewScalar"(CBaseListScalar): + pass + + cdef cppclass CLargeListViewScalar" arrow::LargeListViewScalar"(CBaseListScalar): + pass + + cdef cppclass CMapScalar" arrow::MapScalar"(CListScalar): + pass + + cdef cppclass CStructScalar" 
arrow::StructScalar"(CScalar): + vector[shared_ptr[CScalar]] value + CResult[shared_ptr[CScalar]] field(CFieldRef ref) const + + cdef cppclass CDictionaryScalarIndexAndDictionary \ + "arrow::DictionaryScalar::ValueType": + shared_ptr[CScalar] index + shared_ptr[CArray] dictionary + + cdef cppclass CDictionaryScalar" arrow::DictionaryScalar"(CScalar): + CDictionaryScalar(CDictionaryScalarIndexAndDictionary value, + shared_ptr[CDataType], c_bool is_valid) + CDictionaryScalarIndexAndDictionary value + + CResult[shared_ptr[CScalar]] GetEncodedValue() + + cdef cppclass CUnionScalar" arrow::UnionScalar"(CScalar): + int8_t type_code + + cdef cppclass CDenseUnionScalar" arrow::DenseUnionScalar"(CUnionScalar): + shared_ptr[CScalar] value + + cdef cppclass CSparseUnionScalar" arrow::SparseUnionScalar"(CUnionScalar): + vector[shared_ptr[CScalar]] value + int child_id + + cdef cppclass CRunEndEncodedScalar" arrow::RunEndEncodedScalar"(CScalar): + shared_ptr[CScalar] value + + cdef cppclass CExtensionScalar" arrow::ExtensionScalar"(CScalar): + CExtensionScalar(shared_ptr[CScalar] storage, + shared_ptr[CDataType], c_bool is_valid) + shared_ptr[CScalar] value + + shared_ptr[CScalar] MakeScalar[Value](Value value) + + cdef cppclass CConcatenateTablesOptions" arrow::ConcatenateTablesOptions": + c_bool unify_schemas + CField.CMergeOptions field_merge_options + + @staticmethod + CConcatenateTablesOptions Defaults() + + CResult[shared_ptr[CTable]] ConcatenateTables( + const vector[shared_ptr[CTable]]& tables, + CConcatenateTablesOptions options, + CMemoryPool* memory_pool) + + cdef cppclass CDictionaryUnifier" arrow::DictionaryUnifier": + @staticmethod + CResult[shared_ptr[CChunkedArray]] UnifyChunkedArray( + shared_ptr[CChunkedArray] array, CMemoryPool* pool) + + @staticmethod + CResult[shared_ptr[CTable]] UnifyTable( + const CTable& table, CMemoryPool* pool) + + shared_ptr[CScalar] MakeNullScalar(shared_ptr[CDataType] type) + + +cdef extern from "arrow/c/dlpack_abi.h" nogil: + ctypedef enum DLDeviceType: + kDLCPU = 1 + + ctypedef struct DLDevice: + DLDeviceType device_type + int32_t device_id + + ctypedef struct DLManagedTensor: + void (*deleter)(DLManagedTensor*) + + +cdef extern from "arrow/c/dlpack.h" namespace "arrow::dlpack" nogil: + CResult[DLManagedTensor*] ExportToDLPack" arrow::dlpack::ExportArray"( + const shared_ptr[CArray]& arr) + + CResult[DLDevice] ExportDevice(const shared_ptr[CArray]& arr) + + +cdef extern from "arrow/builder.h" namespace "arrow" nogil: + + cdef cppclass CArrayBuilder" arrow::ArrayBuilder": + CArrayBuilder(shared_ptr[CDataType], CMemoryPool* pool) + + int64_t length() + int64_t null_count() + CStatus AppendNull() + CStatus Finish(shared_ptr[CArray]* out) + CStatus Reserve(int64_t additional_capacity) + + cdef cppclass CBooleanBuilder" arrow::BooleanBuilder"(CArrayBuilder): + CBooleanBuilder(CMemoryPool* pool) + CStatus Append(const c_bool val) + CStatus Append(const uint8_t val) + + cdef cppclass CInt8Builder" arrow::Int8Builder"(CArrayBuilder): + CInt8Builder(CMemoryPool* pool) + CStatus Append(const int8_t value) + + cdef cppclass CInt16Builder" arrow::Int16Builder"(CArrayBuilder): + CInt16Builder(CMemoryPool* pool) + CStatus Append(const int16_t value) + + cdef cppclass CInt32Builder" arrow::Int32Builder"(CArrayBuilder): + CInt32Builder(CMemoryPool* pool) + CStatus Append(const int32_t value) + + cdef cppclass CInt64Builder" arrow::Int64Builder"(CArrayBuilder): + CInt64Builder(CMemoryPool* pool) + CStatus Append(const int64_t value) + + cdef cppclass CUInt8Builder" 
arrow::UInt8Builder"(CArrayBuilder): + CUInt8Builder(CMemoryPool* pool) + CStatus Append(const uint8_t value) + + cdef cppclass CUInt16Builder" arrow::UInt16Builder"(CArrayBuilder): + CUInt16Builder(CMemoryPool* pool) + CStatus Append(const uint16_t value) + + cdef cppclass CUInt32Builder" arrow::UInt32Builder"(CArrayBuilder): + CUInt32Builder(CMemoryPool* pool) + CStatus Append(const uint32_t value) + + cdef cppclass CUInt64Builder" arrow::UInt64Builder"(CArrayBuilder): + CUInt64Builder(CMemoryPool* pool) + CStatus Append(const uint64_t value) + + cdef cppclass CHalfFloatBuilder" arrow::HalfFloatBuilder"(CArrayBuilder): + CHalfFloatBuilder(CMemoryPool* pool) + + cdef cppclass CFloatBuilder" arrow::FloatBuilder"(CArrayBuilder): + CFloatBuilder(CMemoryPool* pool) + CStatus Append(const float value) + + cdef cppclass CDoubleBuilder" arrow::DoubleBuilder"(CArrayBuilder): + CDoubleBuilder(CMemoryPool* pool) + CStatus Append(const double value) + + cdef cppclass CBinaryBuilder" arrow::BinaryBuilder"(CArrayBuilder): + CArrayBuilder(shared_ptr[CDataType], CMemoryPool* pool) + CStatus Append(const char* value, int32_t length) + + cdef cppclass CStringBuilder" arrow::StringBuilder"(CBinaryBuilder): + CStringBuilder(CMemoryPool* pool) + CStatus Append(const c_string& value) + + cdef cppclass CBinaryViewBuilder" arrow::BinaryViewBuilder"(CArrayBuilder): + CBinaryViewBuilder(shared_ptr[CDataType], CMemoryPool* pool) + CStatus Append(const char* value, int32_t length) + + cdef cppclass CStringViewBuilder" arrow::StringViewBuilder"(CBinaryViewBuilder): + CStringViewBuilder(CMemoryPool* pool) + CStatus Append(const c_string& value) + + cdef cppclass CTimestampBuilder "arrow::TimestampBuilder"(CArrayBuilder): + CTimestampBuilder(const shared_ptr[CDataType] typ, CMemoryPool* pool) + CStatus Append(const int64_t value) + + cdef cppclass CDate32Builder "arrow::Date32Builder"(CArrayBuilder): + CDate32Builder(CMemoryPool* pool) + CStatus Append(const int32_t value) + + cdef cppclass CDate64Builder "arrow::Date64Builder"(CArrayBuilder): + CDate64Builder(CMemoryPool* pool) + CStatus Append(const int64_t value) + + +# Use typedef to emulate syntax for std::function +ctypedef void CallbackTransform(object, const shared_ptr[CBuffer]& src, + shared_ptr[CBuffer]* dest) + +ctypedef CResult[shared_ptr[CInputStream]] StreamWrapFunc( + shared_ptr[CInputStream]) + + +cdef extern from "arrow/util/cancel.h" namespace "arrow" nogil: + cdef cppclass CStopToken "arrow::StopToken": + CStatus Poll() + c_bool IsStopRequested() + + cdef cppclass CStopSource "arrow::StopSource": + CStopToken token() + + CResult[CStopSource*] SetSignalStopSource() + void ResetSignalStopSource() + + CStatus RegisterCancellingSignalHandler(vector[int] signals) + void UnregisterCancellingSignalHandler() + + +cdef extern from "arrow/io/api.h" namespace "arrow::io" nogil: + cdef enum FileMode" arrow::io::FileMode::type": + FileMode_READ" arrow::io::FileMode::READ" + FileMode_WRITE" arrow::io::FileMode::WRITE" + FileMode_READWRITE" arrow::io::FileMode::READWRITE" + + cdef enum ObjectType" arrow::io::ObjectType::type": + ObjectType_FILE" arrow::io::ObjectType::FILE" + ObjectType_DIRECTORY" arrow::io::ObjectType::DIRECTORY" + + cdef cppclass CIOContext" arrow::io::IOContext": + CIOContext() + CIOContext(CStopToken) + CIOContext(CMemoryPool*) + CIOContext(CMemoryPool*, CStopToken) + + CIOContext c_default_io_context "arrow::io::default_io_context"() + int GetIOThreadPoolCapacity() + CStatus SetIOThreadPoolCapacity(int threads) + + cdef cppclass 
FileStatistics: + int64_t size + ObjectType kind + + cdef cppclass FileInterface: + CStatus Close() + CResult[int64_t] Tell() + FileMode mode() + c_bool closed() + + cdef cppclass Readable: + # put overload under a different name to avoid cython bug with multiple + # layers of inheritance + CResult[shared_ptr[CBuffer]] ReadBuffer" Read"(int64_t nbytes) + CResult[int64_t] Read(int64_t nbytes, uint8_t* out) + + cdef cppclass Seekable: + CStatus Seek(int64_t position) + + cdef cppclass Writable: + CStatus WriteBuffer" Write"(shared_ptr[CBuffer] data) + CStatus Write(const uint8_t* data, int64_t nbytes) + CStatus Flush() + + cdef cppclass CCacheOptions "arrow::io::CacheOptions": + int64_t hole_size_limit + int64_t range_size_limit + c_bool lazy + int64_t prefetch_limit + c_bool Equals "operator==" (CCacheOptions other) + + @staticmethod + CCacheOptions MakeFromNetworkMetrics(int64_t time_to_first_byte_millis, + int64_t transfer_bandwidth_mib_per_sec, + double ideal_bandwidth_utilization_frac, + int64_t max_ideal_request_size_mib) + + @staticmethod + CCacheOptions LazyDefaults() + + cdef cppclass COutputStream" arrow::io::OutputStream"(FileInterface, + Writable): + pass + + cdef cppclass CInputStream" arrow::io::InputStream"(FileInterface, + Readable): + CResult[shared_ptr[const CKeyValueMetadata]] ReadMetadata() + + cdef cppclass CRandomAccessFile" arrow::io::RandomAccessFile"(CInputStream, + Seekable): + CResult[int64_t] GetSize() + + @staticmethod + CResult[shared_ptr[CInputStream]] GetStream( + shared_ptr[CRandomAccessFile] file, + int64_t file_offset, + int64_t nbytes) + + CResult[int64_t] ReadAt(int64_t position, int64_t nbytes, + uint8_t* buffer) + CResult[shared_ptr[CBuffer]] ReadAt(int64_t position, int64_t nbytes) + c_bool supports_zero_copy() + + cdef cppclass WritableFile(COutputStream, Seekable): + CStatus WriteAt(int64_t position, const uint8_t* data, + int64_t nbytes) + + cdef cppclass ReadWriteFileInterface(CRandomAccessFile, + WritableFile): + pass + + cdef cppclass CIOFileSystem" arrow::io::FileSystem": + CStatus Stat(const c_string& path, FileStatistics* stat) + + cdef cppclass FileOutputStream(COutputStream): + @staticmethod + CResult[shared_ptr[COutputStream]] Open(const c_string& path) + + @staticmethod + CResult[shared_ptr[COutputStream]] OpenWithAppend" Open"( + const c_string& path, c_bool append) + + int file_descriptor() + + cdef cppclass ReadableFile(CRandomAccessFile): + @staticmethod + CResult[shared_ptr[ReadableFile]] Open(const c_string& path) + + @staticmethod + CResult[shared_ptr[ReadableFile]] Open(const c_string& path, + CMemoryPool* memory_pool) + + int file_descriptor() + + cdef cppclass CMemoryMappedFile \ + " arrow::io::MemoryMappedFile"(ReadWriteFileInterface): + + @staticmethod + CResult[shared_ptr[CMemoryMappedFile]] Create(const c_string& path, + int64_t size) + + @staticmethod + CResult[shared_ptr[CMemoryMappedFile]] Open(const c_string& path, + FileMode mode) + + CStatus Resize(int64_t size) + + int file_descriptor() + + cdef cppclass CCompressedInputStream \ + " arrow::io::CompressedInputStream"(CInputStream): + @staticmethod + CResult[shared_ptr[CCompressedInputStream]] Make( + CCodec* codec, shared_ptr[CInputStream] raw) + + cdef cppclass CCompressedOutputStream \ + " arrow::io::CompressedOutputStream"(COutputStream): + @staticmethod + CResult[shared_ptr[CCompressedOutputStream]] Make( + CCodec* codec, shared_ptr[COutputStream] raw) + + cdef cppclass CBufferedInputStream \ + " arrow::io::BufferedInputStream"(CInputStream): + + @staticmethod + 
CResult[shared_ptr[CBufferedInputStream]] Create( + int64_t buffer_size, CMemoryPool* pool, + shared_ptr[CInputStream] raw) + + CResult[shared_ptr[CInputStream]] Detach() + + cdef cppclass CBufferedOutputStream \ + " arrow::io::BufferedOutputStream"(COutputStream): + + @staticmethod + CResult[shared_ptr[CBufferedOutputStream]] Create( + int64_t buffer_size, CMemoryPool* pool, + shared_ptr[COutputStream] raw) + + CResult[shared_ptr[COutputStream]] Detach() + + cdef cppclass CTransformInputStreamVTable \ + "arrow::py::TransformInputStreamVTable": + CTransformInputStreamVTable() + function[CallbackTransform] transform + + shared_ptr[CInputStream] MakeTransformInputStream \ + "arrow::py::MakeTransformInputStream"( + shared_ptr[CInputStream] wrapped, CTransformInputStreamVTable vtable, + object method_arg) + + shared_ptr[function[StreamWrapFunc]] MakeStreamTransformFunc \ + "arrow::py::MakeStreamTransformFunc"( + CTransformInputStreamVTable vtable, + object method_arg) + + # ---------------------------------------------------------------------- + # HDFS + + CStatus HaveLibHdfs() + CStatus HaveLibHdfs3() + + cdef enum HdfsDriver" arrow::io::HdfsDriver": + HdfsDriver_LIBHDFS" arrow::io::HdfsDriver::LIBHDFS" + HdfsDriver_LIBHDFS3" arrow::io::HdfsDriver::LIBHDFS3" + + cdef cppclass HdfsConnectionConfig: + c_string host + int port + c_string user + c_string kerb_ticket + unordered_map[c_string, c_string] extra_conf + HdfsDriver driver + + cdef cppclass HdfsPathInfo: + ObjectType kind + c_string name + c_string owner + c_string group + int32_t last_modified_time + int32_t last_access_time + int64_t size + int16_t replication + int64_t block_size + int16_t permissions + + cdef cppclass HdfsReadableFile(CRandomAccessFile): + pass + + cdef cppclass HdfsOutputStream(COutputStream): + pass + + cdef cppclass CIOHadoopFileSystem \ + "arrow::io::HadoopFileSystem"(CIOFileSystem): + @staticmethod + CStatus Connect(const HdfsConnectionConfig* config, + shared_ptr[CIOHadoopFileSystem]* client) + + CStatus MakeDirectory(const c_string& path) + + CStatus Delete(const c_string& path, c_bool recursive) + + CStatus Disconnect() + + c_bool Exists(const c_string& path) + + CStatus Chmod(const c_string& path, int mode) + CStatus Chown(const c_string& path, const char* owner, + const char* group) + + CStatus GetCapacity(int64_t* nbytes) + CStatus GetUsed(int64_t* nbytes) + + CStatus ListDirectory(const c_string& path, + vector[HdfsPathInfo]* listing) + + CStatus GetPathInfo(const c_string& path, HdfsPathInfo* info) + + CStatus Rename(const c_string& src, const c_string& dst) + + CStatus OpenReadable(const c_string& path, + shared_ptr[HdfsReadableFile]* handle) + + CStatus OpenWritable(const c_string& path, c_bool append, + int32_t buffer_size, int16_t replication, + int64_t default_block_size, + shared_ptr[HdfsOutputStream]* handle) + + cdef cppclass CBufferReader \ + " arrow::io::BufferReader"(CRandomAccessFile): + CBufferReader(const shared_ptr[CBuffer]& buffer) + CBufferReader(const uint8_t* data, int64_t nbytes) + + cdef cppclass CBufferOutputStream \ + " arrow::io::BufferOutputStream"(COutputStream): + CBufferOutputStream(const shared_ptr[CResizableBuffer]& buffer) + + cdef cppclass CMockOutputStream \ + " arrow::io::MockOutputStream"(COutputStream): + CMockOutputStream() + int64_t GetExtentBytesWritten() + + cdef cppclass CFixedSizeBufferWriter \ + " arrow::io::FixedSizeBufferWriter"(WritableFile): + CFixedSizeBufferWriter(const shared_ptr[CBuffer]& buffer) + + void set_memcopy_threads(int num_threads) + void 
set_memcopy_blocksize(int64_t blocksize) + void set_memcopy_threshold(int64_t threshold) + + +cdef extern from "arrow/ipc/api.h" namespace "arrow::ipc" nogil: + cdef enum MessageType" arrow::ipc::MessageType": + MessageType_SCHEMA" arrow::ipc::MessageType::SCHEMA" + MessageType_RECORD_BATCH" arrow::ipc::MessageType::RECORD_BATCH" + MessageType_DICTIONARY_BATCH \ + " arrow::ipc::MessageType::DICTIONARY_BATCH" + + # TODO: use "cpdef enum class" to automatically get a Python wrapper? + # See + # https://github.com/cython/cython/commit/2c7c22f51405299a4e247f78edf52957d30cf71d#diff-61c1365c0f761a8137754bb3a73bfbf7 + ctypedef enum CMetadataVersion" arrow::ipc::MetadataVersion": + CMetadataVersion_V1" arrow::ipc::MetadataVersion::V1" + CMetadataVersion_V2" arrow::ipc::MetadataVersion::V2" + CMetadataVersion_V3" arrow::ipc::MetadataVersion::V3" + CMetadataVersion_V4" arrow::ipc::MetadataVersion::V4" + CMetadataVersion_V5" arrow::ipc::MetadataVersion::V5" + + cdef cppclass CIpcWriteOptions" arrow::ipc::IpcWriteOptions": + c_bool allow_64bit + int max_recursion_depth + int32_t alignment + c_bool write_legacy_ipc_format + CMemoryPool* memory_pool + CMetadataVersion metadata_version + shared_ptr[CCodec] codec + c_bool use_threads + c_bool emit_dictionary_deltas + c_bool unify_dictionaries + + CIpcWriteOptions() + CIpcWriteOptions(CIpcWriteOptions) + + @staticmethod + CIpcWriteOptions Defaults() + + cdef cppclass CIpcReadOptions" arrow::ipc::IpcReadOptions": + int max_recursion_depth + CMemoryPool* memory_pool + vector[int] included_fields + c_bool use_threads + c_bool ensure_native_endian + + @staticmethod + CIpcReadOptions Defaults() + + cdef cppclass CIpcWriteStats" arrow::ipc::WriteStats": + int64_t num_messages + int64_t num_record_batches + int64_t num_dictionary_batches + int64_t num_dictionary_deltas + int64_t num_replaced_dictionaries + + cdef cppclass CIpcReadStats" arrow::ipc::ReadStats": + int64_t num_messages + int64_t num_record_batches + int64_t num_dictionary_batches + int64_t num_dictionary_deltas + int64_t num_replaced_dictionaries + + cdef cppclass CDictionaryMemo" arrow::ipc::DictionaryMemo": + pass + + cdef cppclass CIpcPayload" arrow::ipc::IpcPayload": + MessageType type + shared_ptr[CBuffer] metadata + vector[shared_ptr[CBuffer]] body_buffers + int64_t body_length + + cdef cppclass CMessage" arrow::ipc::Message": + CResult[unique_ptr[CMessage]] Open(shared_ptr[CBuffer] metadata, + shared_ptr[CBuffer] body) + + shared_ptr[CBuffer] body() + + c_bool Equals(const CMessage& other) + + shared_ptr[CBuffer] metadata() + CMetadataVersion metadata_version() + MessageType type() + + CStatus SerializeTo(COutputStream* stream, + const CIpcWriteOptions& options, + int64_t* output_length) + + c_string FormatMessageType(MessageType type) + + cdef cppclass CMessageReader" arrow::ipc::MessageReader": + @staticmethod + unique_ptr[CMessageReader] Open(const shared_ptr[CInputStream]& stream) + + CResult[unique_ptr[CMessage]] ReadNextMessage() + + cdef cppclass CRecordBatchWriter" arrow::ipc::RecordBatchWriter": + CStatus Close() + CStatus WriteRecordBatch(const CRecordBatch& batch) + CStatus WriteRecordBatch( + const CRecordBatch& batch, + const shared_ptr[const CKeyValueMetadata]& metadata) + CStatus WriteTable(const CTable& table, int64_t max_chunksize) + + CIpcWriteStats stats() + + cdef cppclass CRecordBatchStreamReader \ + " arrow::ipc::RecordBatchStreamReader"(CRecordBatchReader): + @staticmethod + CResult[shared_ptr[CRecordBatchReader]] Open( + const shared_ptr[CInputStream], const 
CIpcReadOptions&) + + @staticmethod + CResult[shared_ptr[CRecordBatchReader]] Open2" Open"( + unique_ptr[CMessageReader] message_reader, + const CIpcReadOptions& options) + + CIpcReadStats stats() + + cdef cppclass CRecordBatchFileReader \ + " arrow::ipc::RecordBatchFileReader": + @staticmethod + CResult[shared_ptr[CRecordBatchFileReader]] Open( + CRandomAccessFile* file, + const CIpcReadOptions& options) + + @staticmethod + CResult[shared_ptr[CRecordBatchFileReader]] Open2" Open"( + CRandomAccessFile* file, int64_t footer_offset, + const CIpcReadOptions& options) + + shared_ptr[CSchema] schema() + + int num_record_batches() + + CResult[shared_ptr[CRecordBatch]] ReadRecordBatch(int i) + + CResult[CRecordBatchWithMetadata] ReadRecordBatchWithCustomMetadata(int i) + + CIpcReadStats stats() + + CResult[shared_ptr[CRecordBatchWriter]] MakeStreamWriter( + shared_ptr[COutputStream] sink, const shared_ptr[CSchema]& schema, + CIpcWriteOptions& options) + + CResult[shared_ptr[CRecordBatchWriter]] MakeFileWriter( + shared_ptr[COutputStream] sink, const shared_ptr[CSchema]& schema, + CIpcWriteOptions& options) + + CResult[unique_ptr[CMessage]] ReadMessage(CInputStream* stream, + CMemoryPool* pool) + + CStatus GetRecordBatchSize(const CRecordBatch& batch, int64_t* size) + CStatus GetTensorSize(const CTensor& tensor, int64_t* size) + + CStatus WriteTensor(const CTensor& tensor, COutputStream* dst, + int32_t* metadata_length, + int64_t* body_length) + + CResult[shared_ptr[CTensor]] ReadTensor(CInputStream* stream) + + CResult[shared_ptr[CRecordBatch]] ReadRecordBatch( + const CMessage& message, const shared_ptr[CSchema]& schema, + CDictionaryMemo* dictionary_memo, + const CIpcReadOptions& options) + + CResult[shared_ptr[CBuffer]] SerializeSchema( + const CSchema& schema, CMemoryPool* pool) + + CResult[shared_ptr[CBuffer]] SerializeRecordBatch( + const CRecordBatch& schema, const CIpcWriteOptions& options) + + CResult[shared_ptr[CSchema]] ReadSchema(const CMessage& message, + CDictionaryMemo* dictionary_memo) + + CResult[shared_ptr[CSchema]] ReadSchema(CInputStream* stream, + CDictionaryMemo* dictionary_memo) + + CResult[shared_ptr[CRecordBatch]] ReadRecordBatch( + const shared_ptr[CSchema]& schema, + CDictionaryMemo* dictionary_memo, + const CIpcReadOptions& options, + CInputStream* stream) + + CStatus AlignStream(CInputStream* stream, int64_t alignment) + CStatus AlignStream(COutputStream* stream, int64_t alignment) + + cdef CStatus GetRecordBatchPayload \ + " arrow::ipc::GetRecordBatchPayload"( + const CRecordBatch& batch, + const CIpcWriteOptions& options, + CIpcPayload* out) + + +cdef extern from "arrow/util/value_parsing.h" namespace "arrow" nogil: + cdef cppclass CTimestampParser" arrow::TimestampParser": + const char* kind() const + const char* format() const + + @staticmethod + shared_ptr[CTimestampParser] MakeStrptime(c_string format) + + @staticmethod + shared_ptr[CTimestampParser] MakeISO8601() + + +cdef extern from "arrow/csv/api.h" namespace "arrow::csv" nogil: + + cdef cppclass CCSVInvalidRow" arrow::csv::InvalidRow": + int32_t expected_columns + int32_t actual_columns + int64_t number + c_string text + + ctypedef enum CInvalidRowResult" arrow::csv::InvalidRowResult": + CInvalidRowResult_Error" arrow::csv::InvalidRowResult::Error" + CInvalidRowResult_Skip" arrow::csv::InvalidRowResult::Skip" + + ctypedef CInvalidRowResult CInvalidRowHandler(const CCSVInvalidRow&) + + +cdef extern from "arrow/csv/api.h" namespace "arrow::csv" nogil: + + ctypedef enum CQuotingStyle 
"arrow::csv::QuotingStyle": + CQuotingStyle_Needed "arrow::csv::QuotingStyle::Needed" + CQuotingStyle_AllValid "arrow::csv::QuotingStyle::AllValid" + CQuotingStyle_None "arrow::csv::QuotingStyle::None" + + cdef cppclass CCSVParseOptions" arrow::csv::ParseOptions": + unsigned char delimiter + c_bool quoting + unsigned char quote_char + c_bool double_quote + c_bool escaping + unsigned char escape_char + c_bool newlines_in_values + c_bool ignore_empty_lines + function[CInvalidRowHandler] invalid_row_handler + + CCSVParseOptions() + CCSVParseOptions(CCSVParseOptions) + + @staticmethod + CCSVParseOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVConvertOptions" arrow::csv::ConvertOptions": + c_bool check_utf8 + unordered_map[c_string, shared_ptr[CDataType]] column_types + vector[c_string] null_values + vector[c_string] true_values + vector[c_string] false_values + c_bool strings_can_be_null + c_bool quoted_strings_can_be_null + vector[shared_ptr[CTimestampParser]] timestamp_parsers + + c_bool auto_dict_encode + int32_t auto_dict_max_cardinality + unsigned char decimal_point + + vector[c_string] include_columns + c_bool include_missing_columns + + CCSVConvertOptions() + CCSVConvertOptions(CCSVConvertOptions) + + @staticmethod + CCSVConvertOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVReadOptions" arrow::csv::ReadOptions": + c_bool use_threads + int32_t block_size + int32_t skip_rows + int32_t skip_rows_after_names + vector[c_string] column_names + c_bool autogenerate_column_names + + CCSVReadOptions() + CCSVReadOptions(CCSVReadOptions) + + @staticmethod + CCSVReadOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVWriteOptions" arrow::csv::WriteOptions": + c_bool include_header + int32_t batch_size + unsigned char delimiter + CQuotingStyle quoting_style + CIOContext io_context + + CCSVWriteOptions() + CCSVWriteOptions(CCSVWriteOptions) + + @staticmethod + CCSVWriteOptions Defaults() + + CStatus Validate() + + cdef cppclass CCSVReader" arrow::csv::TableReader": + @staticmethod + CResult[shared_ptr[CCSVReader]] Make( + CIOContext, shared_ptr[CInputStream], + CCSVReadOptions, CCSVParseOptions, CCSVConvertOptions) + + CResult[shared_ptr[CTable]] Read() + + cdef cppclass CCSVStreamingReader" arrow::csv::StreamingReader"( + CRecordBatchReader): + @staticmethod + CResult[shared_ptr[CCSVStreamingReader]] Make( + CIOContext, shared_ptr[CInputStream], + CCSVReadOptions, CCSVParseOptions, CCSVConvertOptions) + + cdef CStatus WriteCSV(CTable&, CCSVWriteOptions& options, COutputStream*) + cdef CStatus WriteCSV( + CRecordBatch&, CCSVWriteOptions& options, COutputStream*) + cdef CResult[shared_ptr[CRecordBatchWriter]] MakeCSVWriter( + shared_ptr[COutputStream], shared_ptr[CSchema], + CCSVWriteOptions& options) + + +cdef extern from "arrow/json/options.h" nogil: + + ctypedef enum CUnexpectedFieldBehavior \ + "arrow::json::UnexpectedFieldBehavior": + CUnexpectedFieldBehavior_Ignore \ + "arrow::json::UnexpectedFieldBehavior::Ignore" + CUnexpectedFieldBehavior_Error \ + "arrow::json::UnexpectedFieldBehavior::Error" + CUnexpectedFieldBehavior_InferType \ + "arrow::json::UnexpectedFieldBehavior::InferType" + + cdef cppclass CJSONReadOptions" arrow::json::ReadOptions": + c_bool use_threads + int32_t block_size + + @staticmethod + CJSONReadOptions Defaults() + + cdef cppclass CJSONParseOptions" arrow::json::ParseOptions": + shared_ptr[CSchema] explicit_schema + c_bool newlines_in_values + CUnexpectedFieldBehavior unexpected_field_behavior + + @staticmethod + 
CJSONParseOptions Defaults() + + +cdef extern from "arrow/json/reader.h" namespace "arrow::json" nogil: + + cdef cppclass CJSONReader" arrow::json::TableReader": + @staticmethod + CResult[shared_ptr[CJSONReader]] Make( + CMemoryPool*, shared_ptr[CInputStream], + CJSONReadOptions, CJSONParseOptions) + + CResult[shared_ptr[CTable]] Read() + + +cdef extern from "arrow/util/thread_pool.h" namespace "arrow::internal" nogil: + + cdef cppclass CExecutor "arrow::internal::Executor": + pass + + cdef cppclass CThreadPool "arrow::internal::ThreadPool"(CExecutor): + @staticmethod + CResult[shared_ptr[CThreadPool]] Make(int threads) + + CThreadPool* GetCpuThreadPool() + + +cdef extern from "arrow/compute/api.h" namespace "arrow::compute" nogil: + + cdef cppclass CExecBatch "arrow::compute::ExecBatch": + vector[CDatum] values + int64_t length + + cdef cppclass CExecContext" arrow::compute::ExecContext": + CExecContext() + CExecContext(CMemoryPool* pool) + CExecContext(CMemoryPool* pool, CExecutor* exc) + + CMemoryPool* memory_pool() const + CExecutor* executor() + + cdef cppclass CKernelSignature" arrow::compute::KernelSignature": + c_string ToString() const + + cdef cppclass CKernel" arrow::compute::Kernel": + shared_ptr[CKernelSignature] signature + + cdef cppclass CArrayKernel" arrow::compute::ArrayKernel"(CKernel): + pass + + cdef cppclass CScalarKernel" arrow::compute::ScalarKernel"(CArrayKernel): + pass + + cdef cppclass CVectorKernel" arrow::compute::VectorKernel"(CArrayKernel): + pass + + cdef cppclass CScalarAggregateKernel \ + " arrow::compute::ScalarAggregateKernel"(CKernel): + pass + + cdef cppclass CHashAggregateKernel \ + " arrow::compute::HashAggregateKernel"(CKernel): + pass + + cdef cppclass CArity" arrow::compute::Arity": + int num_args + c_bool is_varargs + + CArity() + + CArity(int num_args, c_bool is_varargs) + + cdef enum FunctionKind" arrow::compute::Function::Kind": + FunctionKind_SCALAR" arrow::compute::Function::SCALAR" + FunctionKind_VECTOR" arrow::compute::Function::VECTOR" + FunctionKind_SCALAR_AGGREGATE \ + " arrow::compute::Function::SCALAR_AGGREGATE" + FunctionKind_HASH_AGGREGATE \ + " arrow::compute::Function::HASH_AGGREGATE" + FunctionKind_META \ + " arrow::compute::Function::META" + + cdef cppclass CFunctionDoc" arrow::compute::FunctionDoc": + c_string summary + c_string description + vector[c_string] arg_names + c_string options_class + c_bool options_required + + cdef cppclass CFunctionOptionsType" arrow::compute::FunctionOptionsType": + const char* type_name() const + + cdef cppclass CFunctionOptions" arrow::compute::FunctionOptions": + const CFunctionOptionsType* options_type() const + const char* type_name() const + c_bool Equals(const CFunctionOptions& other) const + c_string ToString() const + unique_ptr[CFunctionOptions] Copy() const + CResult[shared_ptr[CBuffer]] Serialize() const + + @staticmethod + CResult[unique_ptr[CFunctionOptions]] Deserialize( + const c_string& type_name, const CBuffer& buffer) + + cdef cppclass CFunction" arrow::compute::Function": + const c_string& name() const + FunctionKind kind() const + const CArity& arity() const + const CFunctionDoc& doc() const + int num_kernels() const + CResult[CDatum] Execute(const vector[CDatum]& args, + const CFunctionOptions* options, + CExecContext* ctx) const + CResult[CDatum] Execute(const CExecBatch& args, + const CFunctionOptions* options, + CExecContext* ctx) const + + cdef cppclass CScalarFunction" arrow::compute::ScalarFunction"(CFunction): + vector[const CScalarKernel*] kernels() const + + cdef 
cppclass CVectorFunction" arrow::compute::VectorFunction"(CFunction): + vector[const CVectorKernel*] kernels() const + + cdef cppclass CScalarAggregateFunction \ + " arrow::compute::ScalarAggregateFunction"(CFunction): + vector[const CScalarAggregateKernel*] kernels() const + + cdef cppclass CHashAggregateFunction \ + " arrow::compute::HashAggregateFunction"(CFunction): + vector[const CHashAggregateKernel*] kernels() const + + cdef cppclass CMetaFunction" arrow::compute::MetaFunction"(CFunction): + pass + + cdef cppclass CFunctionRegistry" arrow::compute::FunctionRegistry": + CResult[shared_ptr[CFunction]] GetFunction( + const c_string& name) const + vector[c_string] GetFunctionNames() const + int num_functions() const + + CFunctionRegistry* GetFunctionRegistry() + + cdef cppclass CElementWiseAggregateOptions \ + "arrow::compute::ElementWiseAggregateOptions"(CFunctionOptions): + CElementWiseAggregateOptions(c_bool skip_nulls) + c_bool skip_nulls + + ctypedef enum CRoundMode \ + "arrow::compute::RoundMode": + CRoundMode_DOWN \ + "arrow::compute::RoundMode::DOWN" + CRoundMode_UP \ + "arrow::compute::RoundMode::UP" + CRoundMode_TOWARDS_ZERO \ + "arrow::compute::RoundMode::TOWARDS_ZERO" + CRoundMode_TOWARDS_INFINITY \ + "arrow::compute::RoundMode::TOWARDS_INFINITY" + CRoundMode_HALF_DOWN \ + "arrow::compute::RoundMode::HALF_DOWN" + CRoundMode_HALF_UP \ + "arrow::compute::RoundMode::HALF_UP" + CRoundMode_HALF_TOWARDS_ZERO \ + "arrow::compute::RoundMode::HALF_TOWARDS_ZERO" + CRoundMode_HALF_TOWARDS_INFINITY \ + "arrow::compute::RoundMode::HALF_TOWARDS_INFINITY" + CRoundMode_HALF_TO_EVEN \ + "arrow::compute::RoundMode::HALF_TO_EVEN" + CRoundMode_HALF_TO_ODD \ + "arrow::compute::RoundMode::HALF_TO_ODD" + + cdef cppclass CRoundOptions \ + "arrow::compute::RoundOptions"(CFunctionOptions): + CRoundOptions(int64_t ndigits, CRoundMode round_mode) + int64_t ndigits + CRoundMode round_mode + + cdef cppclass CRoundBinaryOptions \ + "arrow::compute::RoundBinaryOptions"(CFunctionOptions): + CRoundBinaryOptions(CRoundMode round_mode) + CRoundMode round_mode + + ctypedef enum CCalendarUnit \ + "arrow::compute::CalendarUnit": + CCalendarUnit_NANOSECOND \ + "arrow::compute::CalendarUnit::NANOSECOND" + CCalendarUnit_MICROSECOND \ + "arrow::compute::CalendarUnit::MICROSECOND" + CCalendarUnit_MILLISECOND \ + "arrow::compute::CalendarUnit::MILLISECOND" + CCalendarUnit_SECOND \ + "arrow::compute::CalendarUnit::SECOND" + CCalendarUnit_MINUTE \ + "arrow::compute::CalendarUnit::MINUTE" + CCalendarUnit_HOUR \ + "arrow::compute::CalendarUnit::HOUR" + CCalendarUnit_DAY \ + "arrow::compute::CalendarUnit::DAY" + CCalendarUnit_WEEK \ + "arrow::compute::CalendarUnit::WEEK" + CCalendarUnit_MONTH \ + "arrow::compute::CalendarUnit::MONTH" + CCalendarUnit_QUARTER \ + "arrow::compute::CalendarUnit::QUARTER" + CCalendarUnit_YEAR \ + "arrow::compute::CalendarUnit::YEAR" + + cdef cppclass CRoundTemporalOptions \ + "arrow::compute::RoundTemporalOptions"(CFunctionOptions): + CRoundTemporalOptions(int multiple, CCalendarUnit unit, + c_bool week_starts_monday, + c_bool ceil_is_strictly_greater, + c_bool calendar_based_origin) + int multiple + CCalendarUnit unit + c_bool week_starts_monday + c_bool ceil_is_strictly_greater + c_bool calendar_based_origin + + cdef cppclass CRoundToMultipleOptions \ + "arrow::compute::RoundToMultipleOptions"(CFunctionOptions): + CRoundToMultipleOptions(shared_ptr[CScalar] multiple, CRoundMode round_mode) + shared_ptr[CScalar] multiple + CRoundMode round_mode + + cdef enum CJoinNullHandlingBehavior \ + 
"arrow::compute::JoinOptions::NullHandlingBehavior": + CJoinNullHandlingBehavior_EMIT_NULL \ + "arrow::compute::JoinOptions::EMIT_NULL" + CJoinNullHandlingBehavior_SKIP \ + "arrow::compute::JoinOptions::SKIP" + CJoinNullHandlingBehavior_REPLACE \ + "arrow::compute::JoinOptions::REPLACE" + + cdef cppclass CJoinOptions \ + "arrow::compute::JoinOptions"(CFunctionOptions): + CJoinOptions(CJoinNullHandlingBehavior null_handling, + c_string null_replacement) + CJoinNullHandlingBehavior null_handling + c_string null_replacement + + cdef cppclass CMatchSubstringOptions \ + "arrow::compute::MatchSubstringOptions"(CFunctionOptions): + CMatchSubstringOptions(c_string pattern, c_bool ignore_case) + c_string pattern + c_bool ignore_case + + cdef cppclass CTrimOptions \ + "arrow::compute::TrimOptions"(CFunctionOptions): + CTrimOptions(c_string characters) + c_string characters + + cdef cppclass CPadOptions \ + "arrow::compute::PadOptions"(CFunctionOptions): + CPadOptions(int64_t width, c_string padding) + int64_t width + c_string padding + + cdef cppclass CSliceOptions \ + "arrow::compute::SliceOptions"(CFunctionOptions): + CSliceOptions(int64_t start, int64_t stop, int64_t step) + int64_t start + int64_t stop + int64_t step + + cdef cppclass CListSliceOptions \ + "arrow::compute::ListSliceOptions"(CFunctionOptions): + CListSliceOptions(int64_t start, optional[int64_t] stop, + int64_t step, + optional[c_bool] return_fixed_size_list) + int64_t start + optional[int64_t] stop + int64_t step + optional[c_bool] return_fixed_size_list + + cdef cppclass CSplitOptions \ + "arrow::compute::SplitOptions"(CFunctionOptions): + CSplitOptions(int64_t max_splits, c_bool reverse) + int64_t max_splits + c_bool reverse + + cdef cppclass CSplitPatternOptions \ + "arrow::compute::SplitPatternOptions"(CFunctionOptions): + CSplitPatternOptions(c_string pattern, int64_t max_splits, + c_bool reverse) + int64_t max_splits + c_bool reverse + c_string pattern + + cdef cppclass CReplaceSliceOptions \ + "arrow::compute::ReplaceSliceOptions"(CFunctionOptions): + CReplaceSliceOptions(int64_t start, int64_t stop, c_string replacement) + int64_t start + int64_t stop + c_string replacement + + cdef cppclass CReplaceSubstringOptions \ + "arrow::compute::ReplaceSubstringOptions"(CFunctionOptions): + CReplaceSubstringOptions(c_string pattern, c_string replacement, + int64_t max_replacements) + c_string pattern + c_string replacement + int64_t max_replacements + + cdef cppclass CExtractRegexOptions \ + "arrow::compute::ExtractRegexOptions"(CFunctionOptions): + CExtractRegexOptions(c_string pattern) + c_string pattern + + cdef cppclass CCastOptions" arrow::compute::CastOptions"(CFunctionOptions): + CCastOptions() + CCastOptions(c_bool safe) + CCastOptions(CCastOptions options) + + @staticmethod + CCastOptions Safe() + + @staticmethod + CCastOptions Unsafe() + shared_ptr[CDataType] to_type + c_bool allow_int_overflow + c_bool allow_time_truncate + c_bool allow_time_overflow + c_bool allow_decimal_truncate + c_bool allow_float_truncate + c_bool allow_invalid_utf8 + + cdef enum CFilterNullSelectionBehavior \ + "arrow::compute::FilterOptions::NullSelectionBehavior": + CFilterNullSelectionBehavior_DROP \ + "arrow::compute::FilterOptions::DROP" + CFilterNullSelectionBehavior_EMIT_NULL \ + "arrow::compute::FilterOptions::EMIT_NULL" + + cdef cppclass CFilterOptions \ + " arrow::compute::FilterOptions"(CFunctionOptions): + CFilterOptions() + CFilterOptions(CFilterNullSelectionBehavior null_selection_behavior) + CFilterNullSelectionBehavior 
null_selection_behavior + + cdef enum CDictionaryEncodeNullEncodingBehavior \ + "arrow::compute::DictionaryEncodeOptions::NullEncodingBehavior": + CDictionaryEncodeNullEncodingBehavior_ENCODE \ + "arrow::compute::DictionaryEncodeOptions::ENCODE" + CDictionaryEncodeNullEncodingBehavior_MASK \ + "arrow::compute::DictionaryEncodeOptions::MASK" + + cdef cppclass CDictionaryEncodeOptions \ + "arrow::compute::DictionaryEncodeOptions"(CFunctionOptions): + CDictionaryEncodeOptions( + CDictionaryEncodeNullEncodingBehavior null_encoding) + CDictionaryEncodeNullEncodingBehavior null_encoding + + cdef cppclass CRunEndEncodeOptions \ + "arrow::compute::RunEndEncodeOptions"(CFunctionOptions): + CRunEndEncodeOptions() + CRunEndEncodeOptions(shared_ptr[CDataType] run_end_type) + shared_ptr[CDataType] run_end_type + + cdef cppclass CTakeOptions \ + " arrow::compute::TakeOptions"(CFunctionOptions): + CTakeOptions(c_bool boundscheck) + c_bool boundscheck + + cdef cppclass CStrptimeOptions \ + "arrow::compute::StrptimeOptions"(CFunctionOptions): + CStrptimeOptions(c_string format, TimeUnit unit, c_bool raise_error) + c_string format + TimeUnit unit + c_bool raise_error + + cdef cppclass CStrftimeOptions \ + "arrow::compute::StrftimeOptions"(CFunctionOptions): + CStrftimeOptions(c_string format, c_string locale) + c_string format + c_string locale + + cdef cppclass CDayOfWeekOptions \ + "arrow::compute::DayOfWeekOptions"(CFunctionOptions): + CDayOfWeekOptions(c_bool count_from_zero, uint32_t week_start) + c_bool count_from_zero + uint32_t week_start + + cdef enum CAssumeTimezoneAmbiguous \ + "arrow::compute::AssumeTimezoneOptions::Ambiguous": + CAssumeTimezoneAmbiguous_AMBIGUOUS_RAISE \ + "arrow::compute::AssumeTimezoneOptions::AMBIGUOUS_RAISE" + CAssumeTimezoneAmbiguous_AMBIGUOUS_EARLIEST \ + "arrow::compute::AssumeTimezoneOptions::AMBIGUOUS_EARLIEST" + CAssumeTimezoneAmbiguous_AMBIGUOUS_LATEST \ + "arrow::compute::AssumeTimezoneOptions::AMBIGUOUS_LATEST" + + cdef enum CAssumeTimezoneNonexistent \ + "arrow::compute::AssumeTimezoneOptions::Nonexistent": + CAssumeTimezoneNonexistent_NONEXISTENT_RAISE \ + "arrow::compute::AssumeTimezoneOptions::NONEXISTENT_RAISE" + CAssumeTimezoneNonexistent_NONEXISTENT_EARLIEST \ + "arrow::compute::AssumeTimezoneOptions::NONEXISTENT_EARLIEST" + CAssumeTimezoneNonexistent_NONEXISTENT_LATEST \ + "arrow::compute::AssumeTimezoneOptions::NONEXISTENT_LATEST" + + cdef cppclass CAssumeTimezoneOptions \ + "arrow::compute::AssumeTimezoneOptions"(CFunctionOptions): + CAssumeTimezoneOptions(c_string timezone, + CAssumeTimezoneAmbiguous ambiguous, + CAssumeTimezoneNonexistent nonexistent) + c_string timezone + CAssumeTimezoneAmbiguous ambiguous + CAssumeTimezoneNonexistent nonexistent + + cdef cppclass CWeekOptions \ + "arrow::compute::WeekOptions"(CFunctionOptions): + CWeekOptions(c_bool week_starts_monday, c_bool count_from_zero, + c_bool first_week_is_fully_in_year) + c_bool week_starts_monday + c_bool count_from_zero + c_bool first_week_is_fully_in_year + + cdef cppclass CNullOptions \ + "arrow::compute::NullOptions"(CFunctionOptions): + CNullOptions(c_bool nan_is_null) + c_bool nan_is_null + + cdef cppclass CVarianceOptions \ + "arrow::compute::VarianceOptions"(CFunctionOptions): + CVarianceOptions(int ddof, c_bool skip_nulls, uint32_t min_count) + int ddof + c_bool skip_nulls + uint32_t min_count + + cdef cppclass CScalarAggregateOptions \ + "arrow::compute::ScalarAggregateOptions"(CFunctionOptions): + CScalarAggregateOptions(c_bool skip_nulls, uint32_t min_count) + c_bool skip_nulls 
+ uint32_t min_count + + cdef enum CCountMode "arrow::compute::CountOptions::CountMode": + CCountMode_ONLY_VALID "arrow::compute::CountOptions::ONLY_VALID" + CCountMode_ONLY_NULL "arrow::compute::CountOptions::ONLY_NULL" + CCountMode_ALL "arrow::compute::CountOptions::ALL" + + cdef cppclass CCountOptions \ + "arrow::compute::CountOptions"(CFunctionOptions): + CCountOptions(CCountMode mode) + CCountMode mode + + cdef cppclass CModeOptions \ + "arrow::compute::ModeOptions"(CFunctionOptions): + CModeOptions(int64_t n, c_bool skip_nulls, uint32_t min_count) + int64_t n + c_bool skip_nulls + uint32_t min_count + + cdef cppclass CIndexOptions \ + "arrow::compute::IndexOptions"(CFunctionOptions): + CIndexOptions(shared_ptr[CScalar] value) + shared_ptr[CScalar] value + + cdef cppclass CAggregate "arrow::compute::Aggregate": + c_string function + shared_ptr[CFunctionOptions] options + vector[CFieldRef] target + c_string name + + cdef enum CMapLookupOccurrence \ + "arrow::compute::MapLookupOptions::Occurrence": + CMapLookupOccurrence_ALL "arrow::compute::MapLookupOptions::ALL" + CMapLookupOccurrence_FIRST "arrow::compute::MapLookupOptions::FIRST" + CMapLookupOccurrence_LAST "arrow::compute::MapLookupOptions::LAST" + + cdef cppclass CMapLookupOptions \ + "arrow::compute::MapLookupOptions"(CFunctionOptions): + CMapLookupOptions(shared_ptr[CScalar] query_key, + CMapLookupOccurrence occurrence) + CMapLookupOccurrence occurrence + shared_ptr[CScalar] query_key + + cdef cppclass CMakeStructOptions \ + "arrow::compute::MakeStructOptions"(CFunctionOptions): + CMakeStructOptions(vector[c_string] n, + vector[c_bool] r, + vector[shared_ptr[const CKeyValueMetadata]] m) + CMakeStructOptions(vector[c_string] n) + vector[c_string] field_names + vector[c_bool] field_nullability + vector[shared_ptr[const CKeyValueMetadata]] field_metadata + + cdef cppclass CStructFieldOptions \ + "arrow::compute::StructFieldOptions"(CFunctionOptions): + CStructFieldOptions(vector[int] indices) + CStructFieldOptions(CFieldRef field_ref) + vector[int] indices + CFieldRef field_ref + + ctypedef enum CSortOrder" arrow::compute::SortOrder": + CSortOrder_Ascending \ + "arrow::compute::SortOrder::Ascending" + CSortOrder_Descending \ + "arrow::compute::SortOrder::Descending" + + ctypedef enum CNullPlacement" arrow::compute::NullPlacement": + CNullPlacement_AtStart \ + "arrow::compute::NullPlacement::AtStart" + CNullPlacement_AtEnd \ + "arrow::compute::NullPlacement::AtEnd" + + cdef cppclass CPartitionNthOptions \ + "arrow::compute::PartitionNthOptions"(CFunctionOptions): + CPartitionNthOptions(int64_t pivot, CNullPlacement) + int64_t pivot + CNullPlacement null_placement + + cdef cppclass CCumulativeOptions \ + "arrow::compute::CumulativeOptions"(CFunctionOptions): + CCumulativeOptions(c_bool skip_nulls) + CCumulativeOptions(shared_ptr[CScalar] start, c_bool skip_nulls) + optional[shared_ptr[CScalar]] start + c_bool skip_nulls + + cdef cppclass CPairwiseOptions \ + "arrow::compute::PairwiseOptions"(CFunctionOptions): + CPairwiseOptions(int64_t period) + int64_t period + + cdef cppclass CArraySortOptions \ + "arrow::compute::ArraySortOptions"(CFunctionOptions): + CArraySortOptions(CSortOrder, CNullPlacement) + CSortOrder order + CNullPlacement null_placement + + cdef cppclass CSortKey" arrow::compute::SortKey": + CSortKey(CFieldRef target, CSortOrder order) + CFieldRef target + CSortOrder order + + cdef cppclass COrdering" arrow::compute::Ordering": + COrdering(vector[CSortKey] sort_keys, CNullPlacement null_placement) + + cdef cppclass 
CSortOptions \ + "arrow::compute::SortOptions"(CFunctionOptions): + CSortOptions(vector[CSortKey] sort_keys, CNullPlacement) + vector[CSortKey] sort_keys + CNullPlacement null_placement + + cdef cppclass CSelectKOptions \ + "arrow::compute::SelectKOptions"(CFunctionOptions): + CSelectKOptions(int64_t k, vector[CSortKey] sort_keys) + int64_t k + vector[CSortKey] sort_keys + + cdef enum CQuantileInterp \ + "arrow::compute::QuantileOptions::Interpolation": + CQuantileInterp_LINEAR "arrow::compute::QuantileOptions::LINEAR" + CQuantileInterp_LOWER "arrow::compute::QuantileOptions::LOWER" + CQuantileInterp_HIGHER "arrow::compute::QuantileOptions::HIGHER" + CQuantileInterp_NEAREST "arrow::compute::QuantileOptions::NEAREST" + CQuantileInterp_MIDPOINT "arrow::compute::QuantileOptions::MIDPOINT" + + cdef cppclass CQuantileOptions \ + "arrow::compute::QuantileOptions"(CFunctionOptions): + CQuantileOptions(vector[double] q, CQuantileInterp interpolation, + c_bool skip_nulls, uint32_t min_count) + vector[double] q + CQuantileInterp interpolation + c_bool skip_nulls + uint32_t min_count + + cdef cppclass CTDigestOptions \ + "arrow::compute::TDigestOptions"(CFunctionOptions): + CTDigestOptions(vector[double] q, + uint32_t delta, uint32_t buffer_size, + c_bool skip_nulls, uint32_t min_count) + vector[double] q + uint32_t delta + uint32_t buffer_size + c_bool skip_nulls + uint32_t min_count + + cdef enum CUtf8NormalizeForm \ + "arrow::compute::Utf8NormalizeOptions::Form": + CUtf8NormalizeForm_NFC "arrow::compute::Utf8NormalizeOptions::NFC" + CUtf8NormalizeForm_NFKC "arrow::compute::Utf8NormalizeOptions::NFKC" + CUtf8NormalizeForm_NFD "arrow::compute::Utf8NormalizeOptions::NFD" + CUtf8NormalizeForm_NFKD "arrow::compute::Utf8NormalizeOptions::NFKD" + + cdef cppclass CUtf8NormalizeOptions \ + "arrow::compute::Utf8NormalizeOptions"(CFunctionOptions): + CUtf8NormalizeOptions(CUtf8NormalizeForm form) + CUtf8NormalizeForm form + + cdef cppclass CSetLookupOptions \ + "arrow::compute::SetLookupOptions"(CFunctionOptions): + CSetLookupOptions(CDatum value_set, c_bool skip_nulls) + CDatum value_set + c_bool skip_nulls + + cdef cppclass CRandomOptions \ + "arrow::compute::RandomOptions"(CFunctionOptions): + CRandomOptions(CRandomOptions) + + @staticmethod + CRandomOptions FromSystemRandom() + + @staticmethod + CRandomOptions FromSeed(uint64_t seed) + + cdef enum CRankOptionsTiebreaker \ + "arrow::compute::RankOptions::Tiebreaker": + CRankOptionsTiebreaker_Min "arrow::compute::RankOptions::Min" + CRankOptionsTiebreaker_Max "arrow::compute::RankOptions::Max" + CRankOptionsTiebreaker_First "arrow::compute::RankOptions::First" + CRankOptionsTiebreaker_Dense "arrow::compute::RankOptions::Dense" + + cdef cppclass CRankOptions \ + "arrow::compute::RankOptions"(CFunctionOptions): + CRankOptions(vector[CSortKey] sort_keys, CNullPlacement, + CRankOptionsTiebreaker tiebreaker) + vector[CSortKey] sort_keys + CNullPlacement null_placement + CRankOptionsTiebreaker tiebreaker + + cdef enum DatumType" arrow::Datum::type": + DatumType_NONE" arrow::Datum::NONE" + DatumType_SCALAR" arrow::Datum::SCALAR" + DatumType_ARRAY" arrow::Datum::ARRAY" + DatumType_CHUNKED_ARRAY" arrow::Datum::CHUNKED_ARRAY" + DatumType_RECORD_BATCH" arrow::Datum::RECORD_BATCH" + DatumType_TABLE" arrow::Datum::TABLE" + DatumType_COLLECTION" arrow::Datum::COLLECTION" + + cdef cppclass CDatum" arrow::Datum": + CDatum() + CDatum(const shared_ptr[CArray]& value) + CDatum(const shared_ptr[CChunkedArray]& value) + CDatum(const shared_ptr[CScalar]& value) + CDatum(const 
shared_ptr[CRecordBatch]& value) + CDatum(const shared_ptr[CTable]& value) + + DatumType kind() const + c_string ToString() const + + const shared_ptr[CArrayData]& array() const + const shared_ptr[CChunkedArray]& chunked_array() const + const shared_ptr[CRecordBatch]& record_batch() const + const shared_ptr[CTable]& table() const + const shared_ptr[CScalar]& scalar() const + + cdef c_string ToString(DatumType kind) + + +cdef extern from * namespace "arrow::compute": + # inlined from compute/function_internal.h to avoid exposing + # implementation details + """ + #include "arrow/compute/function.h" + namespace arrow { + namespace compute { + namespace internal { + Result<std::unique_ptr<FunctionOptions>> DeserializeFunctionOptions( + const Buffer& buffer); + } // namespace internal + } // namespace compute + } // namespace arrow + """ + CResult[unique_ptr[CFunctionOptions]] DeserializeFunctionOptions \ + " arrow::compute::internal::DeserializeFunctionOptions"( + const CBuffer& buffer) + + +cdef extern from * namespace "arrow::compute": + # inlined from expression_internal.h to avoid + # proliferation of #include <unordered_map> + """ + #include <unordered_map> + + #include "arrow/type.h" + #include "arrow/datum.h" + + namespace arrow { + namespace compute { + struct KnownFieldValues { + std::unordered_map<FieldRef, Datum, FieldRef::Hash> map; + }; + } // namespace compute + } // namespace arrow + """ + cdef struct CKnownFieldValues "arrow::compute::KnownFieldValues": + unordered_map[CFieldRef, CDatum, CFieldRefHash] map + +cdef extern from "arrow/compute/expression.h" \ + namespace "arrow::compute" nogil: + + cdef cppclass CExpression "arrow::compute::Expression": + c_bool Equals(const CExpression& other) const + c_string ToString() const + CResult[CExpression] Bind(const CSchema&) + const CFieldRef* field_ref() const + + cdef CExpression CMakeScalarExpression \ + "arrow::compute::literal"(shared_ptr[CScalar] value) + + cdef CExpression CMakeFieldExpression \ + "arrow::compute::field_ref"(CFieldRef) + + cdef CExpression CMakeFieldExpressionByIndex \ + "arrow::compute::field_ref"(int idx) + + cdef CExpression CMakeCallExpression \ + "arrow::compute::call"(c_string function, + vector[CExpression] arguments, + shared_ptr[CFunctionOptions] options) + + cdef CResult[shared_ptr[CBuffer]] CSerializeExpression \ + "arrow::compute::Serialize"(const CExpression&) + + cdef CResult[CExpression] CDeserializeExpression \ + "arrow::compute::Deserialize"(shared_ptr[CBuffer]) + + cdef CResult[CKnownFieldValues] \ + CExtractKnownFieldValues "arrow::compute::ExtractKnownFieldValues"( + const CExpression& partition_expression) + + +cdef extern from "arrow/extension_type.h" namespace "arrow": + cdef cppclass CExtensionTypeRegistry" arrow::ExtensionTypeRegistry": + @staticmethod + shared_ptr[CExtensionTypeRegistry] GetGlobalRegistry() + + cdef cppclass CExtensionType" arrow::ExtensionType"(CDataType): + c_string extension_name() + shared_ptr[CDataType] storage_type() + + @staticmethod + shared_ptr[CArray] WrapArray(shared_ptr[CDataType] ext_type, + shared_ptr[CArray] storage) + + @staticmethod + shared_ptr[CChunkedArray] WrapArray(shared_ptr[CDataType] ext_type, + shared_ptr[CChunkedArray] storage) + + cdef cppclass CExtensionArray" arrow::ExtensionArray"(CArray): + CExtensionArray(shared_ptr[CDataType], shared_ptr[CArray] storage) + + shared_ptr[CArray] storage() + + +cdef extern from "arrow/extension/fixed_shape_tensor.h" namespace "arrow::extension" nogil: + cdef cppclass CFixedShapeTensorType \ + " arrow::extension::FixedShapeTensorType"(CExtensionType): + + CResult[shared_ptr[CTensor]] MakeTensor(const
shared_ptr[CExtensionScalar]& scalar) const + + @staticmethod + CResult[shared_ptr[CDataType]] Make(const shared_ptr[CDataType]& value_type, + const vector[int64_t]& shape, + const vector[int64_t]& permutation, + const vector[c_string]& dim_names) + + const shared_ptr[CDataType] value_type() + const vector[int64_t] shape() + const vector[int64_t] permutation() + const vector[c_string] dim_names() + + cdef cppclass CFixedShapeTensorArray \ + " arrow::extension::FixedShapeTensorArray"(CExtensionArray): + const CResult[shared_ptr[CTensor]] ToTensor() const + +cdef extern from "arrow/util/compression.h" namespace "arrow" nogil: + cdef enum CCompressionType" arrow::Compression::type": + CCompressionType_UNCOMPRESSED" arrow::Compression::UNCOMPRESSED" + CCompressionType_SNAPPY" arrow::Compression::SNAPPY" + CCompressionType_GZIP" arrow::Compression::GZIP" + CCompressionType_BROTLI" arrow::Compression::BROTLI" + CCompressionType_ZSTD" arrow::Compression::ZSTD" + CCompressionType_LZ4" arrow::Compression::LZ4" + CCompressionType_LZ4_FRAME" arrow::Compression::LZ4_FRAME" + CCompressionType_BZ2" arrow::Compression::BZ2" + + cdef cppclass CCodec" arrow::util::Codec": + @staticmethod + CResult[unique_ptr[CCodec]] Create(CCompressionType codec) + + @staticmethod + CResult[unique_ptr[CCodec]] CreateWithLevel" Create"( + CCompressionType codec, + int compression_level) + + @staticmethod + c_bool SupportsCompressionLevel(CCompressionType codec) + + @staticmethod + CResult[int] MinimumCompressionLevel(CCompressionType codec) + + @staticmethod + CResult[int] MaximumCompressionLevel(CCompressionType codec) + + @staticmethod + CResult[int] DefaultCompressionLevel(CCompressionType codec) + + @staticmethod + c_bool IsAvailable(CCompressionType codec) + + CResult[int64_t] Decompress(int64_t input_len, const uint8_t* input, + int64_t output_len, + uint8_t* output_buffer) + CResult[int64_t] Compress(int64_t input_len, const uint8_t* input, + int64_t output_buffer_len, + uint8_t* output_buffer) + c_string name() const + int compression_level() const + int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) + + +cdef extern from "arrow/util/io_util.h" namespace "arrow::internal" nogil: + int ErrnoFromStatus(CStatus status) + int WinErrorFromStatus(CStatus status) + int SignalFromStatus(CStatus status) + + CStatus SendSignal(int signum) + CStatus SendSignalToThread(int signum, uint64_t thread_id) + + +cdef extern from "arrow/util/iterator.h" namespace "arrow" nogil: + cdef cppclass CIterator" arrow::Iterator"[T]: + CResult[T] Next() + CStatus Visit[Visitor](Visitor&& visitor) + cppclass RangeIterator: + CResult[T] operator*() + RangeIterator& operator++() + c_bool operator!=(RangeIterator) const + RangeIterator begin() + RangeIterator end() + CIterator[T] MakeVectorIterator[T](vector[T] v) + +cdef extern from "arrow/util/thread_pool.h" namespace "arrow" nogil: + int GetCpuThreadPoolCapacity() + CStatus SetCpuThreadPoolCapacity(int threads) + +cdef extern from "arrow/array/concatenate.h" namespace "arrow" nogil: + CResult[shared_ptr[CArray]] Concatenate( + const vector[shared_ptr[CArray]]& arrays, + CMemoryPool* pool) + +cdef extern from "arrow/c/abi.h": + cdef struct ArrowSchema: + void (*release)(ArrowSchema*) noexcept nogil + + cdef struct ArrowArray: + void (*release)(ArrowArray*) noexcept nogil + + cdef struct ArrowArrayStream: + void (*release)(ArrowArrayStream*) noexcept nogil + + cdef struct ArrowDeviceArray: + pass + +cdef extern from "arrow/c/bridge.h" namespace "arrow" nogil: + CStatus 
ExportType(CDataType&, ArrowSchema* out) + CResult[shared_ptr[CDataType]] ImportType(ArrowSchema*) + + CStatus ExportField(CField&, ArrowSchema* out) + CResult[shared_ptr[CField]] ImportField(ArrowSchema*) + + CStatus ExportSchema(CSchema&, ArrowSchema* out) + CResult[shared_ptr[CSchema]] ImportSchema(ArrowSchema*) + + CStatus ExportArray(CArray&, ArrowArray* out) + CStatus ExportArray(CArray&, ArrowArray* out, ArrowSchema* out_schema) + CResult[shared_ptr[CArray]] ImportArray(ArrowArray*, + shared_ptr[CDataType]) + CResult[shared_ptr[CArray]] ImportArray(ArrowArray*, ArrowSchema*) + + CStatus ExportRecordBatch(CRecordBatch&, ArrowArray* out) + CStatus ExportRecordBatch(CRecordBatch&, ArrowArray* out, + ArrowSchema* out_schema) + CResult[shared_ptr[CRecordBatch]] ImportRecordBatch(ArrowArray*, + shared_ptr[CSchema]) + CResult[shared_ptr[CRecordBatch]] ImportRecordBatch(ArrowArray*, + ArrowSchema*) + + CStatus ExportRecordBatchReader(shared_ptr[CRecordBatchReader], + ArrowArrayStream*) + CResult[shared_ptr[CRecordBatchReader]] ImportRecordBatchReader( + ArrowArrayStream*) + + CStatus ExportChunkedArray(shared_ptr[CChunkedArray], ArrowArrayStream*) + CResult[shared_ptr[CChunkedArray]] ImportChunkedArray(ArrowArrayStream*) + + CStatus ExportDeviceArray(const CArray&, shared_ptr[CSyncEvent], + ArrowDeviceArray* out, ArrowSchema*) + CResult[shared_ptr[CArray]] ImportDeviceArray( + ArrowDeviceArray*, shared_ptr[CDataType]) + CResult[shared_ptr[CArray]] ImportDeviceArray( + ArrowDeviceArray*, ArrowSchema*) + + CStatus ExportDeviceRecordBatch(const CRecordBatch&, shared_ptr[CSyncEvent], + ArrowDeviceArray* out, ArrowSchema*) + CResult[shared_ptr[CRecordBatch]] ImportDeviceRecordBatch( + ArrowDeviceArray*, shared_ptr[CSchema]) + CResult[shared_ptr[CRecordBatch]] ImportDeviceRecordBatch( + ArrowDeviceArray*, ArrowSchema*) + + +cdef extern from "arrow/util/byte_size.h" namespace "arrow::util" nogil: + CResult[int64_t] ReferencedBufferSize(const CArray& array_data) + CResult[int64_t] ReferencedBufferSize(const CRecordBatch& record_batch) + CResult[int64_t] ReferencedBufferSize(const CChunkedArray& chunked_array) + CResult[int64_t] ReferencedBufferSize(const CTable& table) + int64_t TotalBufferSize(const CArray& array) + int64_t TotalBufferSize(const CChunkedArray& array) + int64_t TotalBufferSize(const CRecordBatch& record_batch) + int64_t TotalBufferSize(const CTable& table) + +ctypedef PyObject* CallbackUdf(object user_function, const CUdfContext& context, object inputs) + + +cdef extern from "arrow/api.h" namespace "arrow" nogil: + + cdef cppclass CRecordBatchIterator "arrow::RecordBatchIterator"( + CIterator[shared_ptr[CRecordBatch]]): + pass + + +cdef extern from "arrow/python/udf.h" namespace "arrow::py" nogil: + cdef cppclass CUdfContext" arrow::py::UdfContext": + CMemoryPool *pool + int64_t batch_length + + cdef cppclass CUdfOptions" arrow::py::UdfOptions": + c_string func_name + CArity arity + CFunctionDoc func_doc + vector[shared_ptr[CDataType]] input_types + shared_ptr[CDataType] output_type + + CStatus RegisterScalarFunction(PyObject* function, + function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CStatus RegisterTabularFunction(PyObject* function, + function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CStatus RegisterAggregateFunction(PyObject* function, + function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CStatus RegisterVectorFunction(PyObject* function, + 
function[CallbackUdf] wrapper, const CUdfOptions& options, + CFunctionRegistry* registry) + + CResult[shared_ptr[CRecordBatchReader]] CallTabularFunction( + const c_string& func_name, const vector[CDatum]& args, CFunctionRegistry* registry) + +cdef extern from "arrow/compute/cast.h" namespace "arrow::compute": + CResult[CDatum] Cast(const CDatum& value, const CCastOptions& options) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd new file mode 100644 index 0000000000000000000000000000000000000000..dc9babee190e110583d9d2cc24b71567203290af --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd @@ -0,0 +1,118 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + + +cdef extern from "arrow/acero/options.h" namespace "arrow::acero" nogil: + cdef enum CJoinType "arrow::acero::JoinType": + CJoinType_LEFT_SEMI "arrow::acero::JoinType::LEFT_SEMI" + CJoinType_RIGHT_SEMI "arrow::acero::JoinType::RIGHT_SEMI" + CJoinType_LEFT_ANTI "arrow::acero::JoinType::LEFT_ANTI" + CJoinType_RIGHT_ANTI "arrow::acero::JoinType::RIGHT_ANTI" + CJoinType_INNER "arrow::acero::JoinType::INNER" + CJoinType_LEFT_OUTER "arrow::acero::JoinType::LEFT_OUTER" + CJoinType_RIGHT_OUTER "arrow::acero::JoinType::RIGHT_OUTER" + CJoinType_FULL_OUTER "arrow::acero::JoinType::FULL_OUTER" + + cdef cppclass CExecNodeOptions "arrow::acero::ExecNodeOptions": + pass + + cdef cppclass CSourceNodeOptions "arrow::acero::SourceNodeOptions"(CExecNodeOptions): + pass + + cdef cppclass CTableSourceNodeOptions "arrow::acero::TableSourceNodeOptions"(CExecNodeOptions): + CTableSourceNodeOptions(shared_ptr[CTable] table) + CTableSourceNodeOptions(shared_ptr[CTable] table, int64_t max_batch_size) + + cdef cppclass CSinkNodeOptions "arrow::acero::SinkNodeOptions"(CExecNodeOptions): + pass + + cdef cppclass CFilterNodeOptions "arrow::acero::FilterNodeOptions"(CExecNodeOptions): + CFilterNodeOptions(CExpression) + + cdef cppclass CProjectNodeOptions "arrow::acero::ProjectNodeOptions"(CExecNodeOptions): + CProjectNodeOptions(vector[CExpression] expressions) + CProjectNodeOptions(vector[CExpression] expressions, + vector[c_string] names) + + cdef cppclass CAggregateNodeOptions "arrow::acero::AggregateNodeOptions"(CExecNodeOptions): + CAggregateNodeOptions(vector[CAggregate] aggregates, vector[CFieldRef] names) + + cdef cppclass COrderByNodeOptions "arrow::acero::OrderByNodeOptions"(CExecNodeOptions): + COrderByNodeOptions(COrdering ordering) + + cdef cppclass CHashJoinNodeOptions "arrow::acero::HashJoinNodeOptions"(CExecNodeOptions): + 
CHashJoinNodeOptions(CJoinType, vector[CFieldRef] in_left_keys, + vector[CFieldRef] in_right_keys) + CHashJoinNodeOptions(CJoinType, vector[CFieldRef] in_left_keys, + vector[CFieldRef] in_right_keys, + CExpression filter, + c_string output_suffix_for_left, + c_string output_suffix_for_right) + CHashJoinNodeOptions(CJoinType join_type, + vector[CFieldRef] left_keys, + vector[CFieldRef] right_keys, + vector[CFieldRef] left_output, + vector[CFieldRef] right_output, + CExpression filter, + c_string output_suffix_for_left, + c_string output_suffix_for_right) + + cdef struct CAsofJoinKeys "arrow::acero::AsofJoinNodeOptions::Keys": + CFieldRef on_key + vector[CFieldRef] by_key + + cdef cppclass CAsofJoinNodeOptions "arrow::acero::AsofJoinNodeOptions"(CExecNodeOptions): + CAsofJoinNodeOptions(vector[CAsofJoinKeys] keys, int64_t tolerance) + + +cdef extern from "arrow/acero/exec_plan.h" namespace "arrow::acero" nogil: + cdef cppclass CDeclaration "arrow::acero::Declaration": + cppclass Input: + Input(CExecNode*) + Input(CDeclaration) + + c_string label + vector[Input] inputs + + CDeclaration() + CDeclaration(c_string factory_name, CExecNodeOptions options) + CDeclaration(c_string factory_name, vector[Input] inputs, shared_ptr[CExecNodeOptions] options) + + @staticmethod + CDeclaration Sequence(vector[CDeclaration] decls) + + cdef cppclass CExecNode "arrow::acero::ExecNode": + const vector[CExecNode*]& inputs() const + const shared_ptr[CSchema]& output_schema() const + + CResult[shared_ptr[CTable]] DeclarationToTable( + CDeclaration declaration, c_bool use_threads + ) + CResult[shared_ptr[CTable]] DeclarationToTable( + CDeclaration declaration, c_bool use_threads, + CMemoryPool* memory_pool, CFunctionRegistry* function_registry + ) + CResult[unique_ptr[CRecordBatchReader]] DeclarationToReader( + CDeclaration declaration, c_bool use_threads + ) + + CResult[c_string] DeclarationToString(const CDeclaration& declaration) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd new file mode 100644 index 0000000000000000000000000000000000000000..3ac943cf941d8591483d4be2e2bceaac3e051292 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd @@ -0,0 +1,107 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# distutils: language = c++ + +from pyarrow.includes.libarrow cimport * + +cdef extern from "arrow/gpu/cuda_api.h" namespace "arrow::cuda" nogil: + + cdef cppclass CCudaDeviceManager" arrow::cuda::CudaDeviceManager": + @staticmethod + CResult[CCudaDeviceManager*] Instance() + CResult[shared_ptr[CCudaContext]] GetContext(int gpu_number) + CResult[shared_ptr[CCudaContext]] GetSharedContext(int gpu_number, + void* handle) + CStatus AllocateHost(int device_number, int64_t nbytes, + shared_ptr[CCudaHostBuffer]* buffer) + int num_devices() const + + cdef cppclass CCudaContext" arrow::cuda::CudaContext": + CResult[shared_ptr[CCudaBuffer]] Allocate(int64_t nbytes) + CResult[shared_ptr[CCudaBuffer]] View(uint8_t* data, int64_t nbytes) + CResult[shared_ptr[CCudaBuffer]] OpenIpcBuffer( + const CCudaIpcMemHandle& ipc_handle) + CStatus Synchronize() + int64_t bytes_allocated() const + const void* handle() const + int device_number() const + CResult[uintptr_t] GetDeviceAddress(uintptr_t addr) + + cdef cppclass CCudaIpcMemHandle" arrow::cuda::CudaIpcMemHandle": + @staticmethod + CResult[shared_ptr[CCudaIpcMemHandle]] FromBuffer( + const void* opaque_handle) + CResult[shared_ptr[CBuffer]] Serialize(CMemoryPool* pool) const + + cdef cppclass CCudaBuffer" arrow::cuda::CudaBuffer"(CBuffer): + CCudaBuffer(uint8_t* data, int64_t size, + const shared_ptr[CCudaContext]& context, + c_bool own_data=false, c_bool is_ipc=false) + CCudaBuffer(const shared_ptr[CCudaBuffer]& parent, + const int64_t offset, const int64_t size) + + @staticmethod + CResult[shared_ptr[CCudaBuffer]] FromBuffer(shared_ptr[CBuffer] buf) + + CStatus CopyToHost(const int64_t position, const int64_t nbytes, + void* out) const + CStatus CopyFromHost(const int64_t position, const void* data, + int64_t nbytes) + CStatus CopyFromDevice(const int64_t position, const void* data, + int64_t nbytes) + CStatus CopyFromAnotherDevice(const shared_ptr[CCudaContext]& src_ctx, + const int64_t position, const void* data, + int64_t nbytes) + CResult[shared_ptr[CCudaIpcMemHandle]] ExportForIpc() + shared_ptr[CCudaContext] context() const + + cdef cppclass \ + CCudaHostBuffer" arrow::cuda::CudaHostBuffer"(CMutableBuffer): + pass + + cdef cppclass \ + CCudaBufferReader" arrow::cuda::CudaBufferReader"(CBufferReader): + CCudaBufferReader(const shared_ptr[CBuffer]& buffer) + CResult[int64_t] Read(int64_t nbytes, void* buffer) + CResult[shared_ptr[CBuffer]] Read(int64_t nbytes) + + cdef cppclass \ + CCudaBufferWriter" arrow::cuda::CudaBufferWriter"(WritableFile): + CCudaBufferWriter(const shared_ptr[CCudaBuffer]& buffer) + CStatus Close() + CStatus Write(const void* data, int64_t nbytes) + CStatus WriteAt(int64_t position, const void* data, int64_t nbytes) + CStatus SetBufferSize(const int64_t buffer_size) + int64_t buffer_size() + int64_t num_bytes_buffered() const + + CResult[shared_ptr[CCudaHostBuffer]] AllocateCudaHostBuffer( + int device_number, const int64_t size) + + # Cuda prefix is added to avoid picking up arrow::cuda functions + # from arrow namespace. 
+ CResult[shared_ptr[CCudaBuffer]] \ + CudaSerializeRecordBatch" arrow::cuda::SerializeRecordBatch"\ + (const CRecordBatch& batch, + CCudaContext* ctx) + CResult[shared_ptr[CRecordBatch]] \ + CudaReadRecordBatch" arrow::cuda::ReadRecordBatch"\ + (const shared_ptr[CSchema]& schema, + CDictionaryMemo* dictionary_memo, + const shared_ptr[CCudaBuffer]& buffer, + CMemoryPool* pool) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd new file mode 100644 index 0000000000000000000000000000000000000000..fe96705a54b2ff3eb9e2ec4da998566a58767a81 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd @@ -0,0 +1,413 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from libcpp.unordered_map cimport unordered_map +from libcpp cimport bool as c_bool + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * +from pyarrow.includes.libarrow_fs cimport * + + +cdef extern from "arrow/dataset/plan.h" namespace "arrow::dataset::internal" nogil: + + cdef void Initialize() + + +ctypedef CStatus cb_writer_finish_internal(CFileWriter*) +ctypedef void cb_writer_finish(dict, CFileWriter*) + +cdef extern from "arrow/dataset/api.h" namespace "arrow::dataset" nogil: + + cdef enum ExistingDataBehavior" arrow::dataset::ExistingDataBehavior": + ExistingDataBehavior_DELETE_MATCHING" \ + arrow::dataset::ExistingDataBehavior::kDeleteMatchingPartitions" + ExistingDataBehavior_OVERWRITE_OR_IGNORE" \ + arrow::dataset::ExistingDataBehavior::kOverwriteOrIgnore" + ExistingDataBehavior_ERROR" \ + arrow::dataset::ExistingDataBehavior::kError" + + cdef cppclass CScanOptions "arrow::dataset::ScanOptions": + shared_ptr[CSchema] dataset_schema + shared_ptr[CSchema] projected_schema + c_bool use_threads + CExpression filter + + cdef cppclass CScanNodeOptions "arrow::dataset::ScanNodeOptions"(CExecNodeOptions): + CScanNodeOptions(shared_ptr[CDataset] dataset, shared_ptr[CScanOptions] scan_options) + + shared_ptr[CScanOptions] scan_options + + cdef cppclass CFragmentScanOptions "arrow::dataset::FragmentScanOptions": + c_string type_name() const + + ctypedef CIterator[shared_ptr[CScanTask]] CScanTaskIterator \ + "arrow::dataset::ScanTaskIterator" + + cdef cppclass CScanTask" arrow::dataset::ScanTask": + CResult[CRecordBatchIterator] Execute() + + cdef cppclass CFragment "arrow::dataset::Fragment": + CResult[shared_ptr[CSchema]] ReadPhysicalSchema() + CResult[CScanTaskIterator] Scan(shared_ptr[CScanOptions] options) + c_bool splittable() const + c_string type_name() const + const CExpression& partition_expression() const + + ctypedef 
vector[shared_ptr[CFragment]] CFragmentVector \ + "arrow::dataset::FragmentVector" + + ctypedef CIterator[shared_ptr[CFragment]] CFragmentIterator \ + "arrow::dataset::FragmentIterator" + + cdef cppclass CInMemoryFragment "arrow::dataset::InMemoryFragment"( + CFragment): + CInMemoryFragment(vector[shared_ptr[CRecordBatch]] record_batches, + CExpression partition_expression) + + cdef cppclass CTaggedRecordBatch "arrow::dataset::TaggedRecordBatch": + shared_ptr[CRecordBatch] record_batch + shared_ptr[CFragment] fragment + + ctypedef CIterator[CTaggedRecordBatch] CTaggedRecordBatchIterator \ + "arrow::dataset::TaggedRecordBatchIterator" + + cdef cppclass CScanner "arrow::dataset::Scanner": + CScanner(shared_ptr[CDataset], shared_ptr[CScanOptions]) + CScanner(shared_ptr[CFragment], shared_ptr[CScanOptions]) + CResult[CScanTaskIterator] Scan() + CResult[CTaggedRecordBatchIterator] ScanBatches() + CResult[shared_ptr[CTable]] ToTable() + CResult[shared_ptr[CTable]] TakeRows(const CArray& indices) + CResult[shared_ptr[CTable]] Head(int64_t num_rows) + CResult[int64_t] CountRows() + CResult[CFragmentIterator] GetFragments() + CResult[shared_ptr[CRecordBatchReader]] ToRecordBatchReader() + const shared_ptr[CScanOptions]& options() + + cdef cppclass CScannerBuilder "arrow::dataset::ScannerBuilder": + CScannerBuilder(shared_ptr[CDataset], + shared_ptr[CScanOptions] scan_options) + CScannerBuilder(shared_ptr[CSchema], shared_ptr[CFragment], + shared_ptr[CScanOptions] scan_options) + + @staticmethod + shared_ptr[CScannerBuilder] FromRecordBatchReader( + shared_ptr[CRecordBatchReader] reader) + CStatus ProjectColumns "Project"(const vector[c_string]& columns) + CStatus Project(vector[CExpression]& exprs, vector[c_string]& columns) + CStatus Filter(CExpression filter) + CStatus UseThreads(c_bool use_threads) + CStatus Pool(CMemoryPool* pool) + CStatus BatchSize(int64_t batch_size) + CStatus BatchReadahead(int32_t batch_readahead) + CStatus FragmentReadahead(int32_t fragment_readahead) + CStatus FragmentScanOptions( + shared_ptr[CFragmentScanOptions] fragment_scan_options) + CResult[shared_ptr[CScanOptions]] GetScanOptions() + CResult[shared_ptr[CScanner]] Finish() + shared_ptr[CSchema] schema() const + + ctypedef vector[shared_ptr[CDataset]] CDatasetVector \ + "arrow::dataset::DatasetVector" + + cdef cppclass CDataset "arrow::dataset::Dataset": + const shared_ptr[CSchema] & schema() + CResult[CFragmentIterator] GetFragments() + CResult[CFragmentIterator] GetFragments(CExpression predicate) + const CExpression & partition_expression() + c_string type_name() + + CResult[shared_ptr[CDataset]] ReplaceSchema(shared_ptr[CSchema]) + + CResult[shared_ptr[CScannerBuilder]] NewScan() + + cdef cppclass CInMemoryDataset "arrow::dataset::InMemoryDataset"( + CDataset): + CInMemoryDataset(shared_ptr[CRecordBatchReader]) + CInMemoryDataset(shared_ptr[CTable]) + + cdef cppclass CUnionDataset "arrow::dataset::UnionDataset"( + CDataset): + @staticmethod + CResult[shared_ptr[CUnionDataset]] Make(shared_ptr[CSchema] schema, + CDatasetVector children) + + const CDatasetVector& children() const + + cdef cppclass CInspectOptions "arrow::dataset::InspectOptions": + int fragments + + cdef cppclass CFinishOptions "arrow::dataset::FinishOptions": + shared_ptr[CSchema] schema + CInspectOptions inspect_options + c_bool validate_fragments + + cdef cppclass CDatasetFactory "arrow::dataset::DatasetFactory": + CResult[vector[shared_ptr[CSchema]]] InspectSchemas(CInspectOptions) + CResult[shared_ptr[CSchema]] Inspect(CInspectOptions) + 
CResult[shared_ptr[CDataset]] FinishWithSchema "Finish"( + const shared_ptr[CSchema]& schema) + CResult[shared_ptr[CDataset]] Finish() + const CExpression& root_partition() + CStatus SetRootPartition(CExpression partition) + + cdef cppclass CUnionDatasetFactory "arrow::dataset::UnionDatasetFactory": + @staticmethod + CResult[shared_ptr[CDatasetFactory]] Make( + vector[shared_ptr[CDatasetFactory]] factories) + + cdef cppclass CFileSource "arrow::dataset::FileSource": + const c_string& path() const + const shared_ptr[CFileSystem]& filesystem() const + const shared_ptr[CBuffer]& buffer() const + const int64_t size() const + # HACK: Cython can't handle all the overloads so don't declare them. + # This means invalid construction of CFileSource won't be caught in + # the C++ generation phase (though it will still be caught when + # the generated C++ is compiled). + CFileSource(...) + + cdef cppclass CFileWriteOptions \ + "arrow::dataset::FileWriteOptions": + const shared_ptr[CFileFormat]& format() const + c_string type_name() const + + cdef cppclass CFileWriter \ + "arrow::dataset::FileWriter": + const shared_ptr[CFileFormat]& format() const + const shared_ptr[CSchema]& schema() const + const shared_ptr[CFileWriteOptions]& options() const + const CFileLocator& destination() const + CResult[int64_t] GetBytesWritten() + + cdef cppclass CFileFormat "arrow::dataset::FileFormat": + shared_ptr[CFragmentScanOptions] default_fragment_scan_options + c_string type_name() const + CResult[shared_ptr[CSchema]] Inspect(const CFileSource&) const + CResult[shared_ptr[CFileFragment]] MakeFragment( + CFileSource source, + CExpression partition_expression, + shared_ptr[CSchema] physical_schema) + shared_ptr[CFileWriteOptions] DefaultWriteOptions() + + cdef cppclass CFileFragment "arrow::dataset::FileFragment"( + CFragment): + const CFileSource& source() const + const shared_ptr[CFileFormat]& format() const + + cdef cppclass CFileSystemDatasetWriteOptions \ + "arrow::dataset::FileSystemDatasetWriteOptions": + shared_ptr[CFileWriteOptions] file_write_options + shared_ptr[CFileSystem] filesystem + c_string base_dir + shared_ptr[CPartitioning] partitioning + int max_partitions + c_string basename_template + function[cb_writer_finish_internal] writer_pre_finish + function[cb_writer_finish_internal] writer_post_finish + ExistingDataBehavior existing_data_behavior + c_bool create_dir + uint32_t max_open_files + uint64_t max_rows_per_file + uint64_t min_rows_per_group + uint64_t max_rows_per_group + + cdef cppclass CFileSystemDataset \ + "arrow::dataset::FileSystemDataset"(CDataset): + @staticmethod + CResult[shared_ptr[CDataset]] Make( + shared_ptr[CSchema] schema, + CExpression source_partition, + shared_ptr[CFileFormat] format, + shared_ptr[CFileSystem] filesystem, + vector[shared_ptr[CFileFragment]] fragments) + + @staticmethod + CStatus Write( + const CFileSystemDatasetWriteOptions& write_options, + shared_ptr[CScanner] scanner) + + c_string type() + vector[c_string] files() + const shared_ptr[CFileFormat]& format() const + const shared_ptr[CFileSystem]& filesystem() const + const shared_ptr[CPartitioning]& partitioning() const + + cdef cppclass CIpcFileWriteOptions \ + "arrow::dataset::IpcFileWriteOptions"(CFileWriteOptions): + shared_ptr[CIpcWriteOptions] options + + cdef cppclass CIpcFileFormat "arrow::dataset::IpcFileFormat"( + CFileFormat): + pass + + cdef cppclass COrcFileFormat "arrow::dataset::OrcFileFormat"( + CFileFormat): + pass + + cdef cppclass CCsvFileWriteOptions \ + 
"arrow::dataset::CsvFileWriteOptions"(CFileWriteOptions): + shared_ptr[CCSVWriteOptions] write_options + CMemoryPool* pool + + cdef cppclass CCsvFileFormat "arrow::dataset::CsvFileFormat"( + CFileFormat): + CCSVParseOptions parse_options + + cdef cppclass CCsvFragmentScanOptions \ + "arrow::dataset::CsvFragmentScanOptions"(CFragmentScanOptions): + CCSVConvertOptions convert_options + CCSVReadOptions read_options + function[StreamWrapFunc] stream_transform_func + + cdef cppclass CJsonFileFormat "arrow::dataset::JsonFileFormat"(CFileFormat): + pass + + cdef cppclass CJsonFragmentScanOptions "arrow::dataset::JsonFragmentScanOptions"(CFragmentScanOptions): + CJSONParseOptions parse_options + CJSONReadOptions read_options + + cdef cppclass CPartitioning "arrow::dataset::Partitioning": + c_string type_name() const + CResult[CExpression] Parse(const c_string & path) const + const shared_ptr[CSchema] & schema() + c_bool Equals(const CPartitioning& other) const + + cdef cppclass CSegmentEncoding" arrow::dataset::SegmentEncoding": + bint operator==(CSegmentEncoding) + + CSegmentEncoding CSegmentEncoding_None\ + " arrow::dataset::SegmentEncoding::None" + CSegmentEncoding CSegmentEncoding_Uri\ + " arrow::dataset::SegmentEncoding::Uri" + + cdef cppclass CKeyValuePartitioningOptions \ + "arrow::dataset::KeyValuePartitioningOptions": + CSegmentEncoding segment_encoding + + cdef cppclass CHivePartitioningOptions \ + "arrow::dataset::HivePartitioningOptions": + CSegmentEncoding segment_encoding + c_string null_fallback + + cdef cppclass CPartitioningFactoryOptions \ + "arrow::dataset::PartitioningFactoryOptions": + c_bool infer_dictionary + shared_ptr[CSchema] schema + CSegmentEncoding segment_encoding + + cdef cppclass CHivePartitioningFactoryOptions \ + "arrow::dataset::HivePartitioningFactoryOptions": + c_bool infer_dictionary + c_string null_fallback + shared_ptr[CSchema] schema + CSegmentEncoding segment_encoding + + cdef cppclass CPartitioningFactory "arrow::dataset::PartitioningFactory": + c_string type_name() const + + cdef cppclass CKeyValuePartitioning \ + "arrow::dataset::KeyValuePartitioning"(CPartitioning): + CKeyValuePartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries, + CKeyValuePartitioningOptions options) + + vector[shared_ptr[CArray]] dictionaries() const + CSegmentEncoding segment_encoding() + + cdef cppclass CDirectoryPartitioning \ + "arrow::dataset::DirectoryPartitioning"(CPartitioning): + CDirectoryPartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries) + + @staticmethod + shared_ptr[CPartitioningFactory] MakeFactory( + vector[c_string] field_names, CPartitioningFactoryOptions) + + vector[shared_ptr[CArray]] dictionaries() const + + cdef cppclass CHivePartitioning \ + "arrow::dataset::HivePartitioning"(CPartitioning): + CHivePartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries, + CHivePartitioningOptions options) + + @staticmethod + shared_ptr[CPartitioningFactory] MakeFactory( + CHivePartitioningFactoryOptions) + + vector[shared_ptr[CArray]] dictionaries() const + c_string null_fallback() const + + cdef cppclass CFilenamePartitioning \ + "arrow::dataset::FilenamePartitioning"(CPartitioning): + CFilenamePartitioning(shared_ptr[CSchema] schema, + vector[shared_ptr[CArray]] dictionaries) + + @staticmethod + shared_ptr[CPartitioningFactory] MakeFactory( + vector[c_string] field_names, CPartitioningFactoryOptions) + + vector[shared_ptr[CArray]] dictionaries() const + + cdef cppclass 
CPartitioningOrFactory \ + "arrow::dataset::PartitioningOrFactory": + CPartitioningOrFactory(shared_ptr[CPartitioning]) + CPartitioningOrFactory(shared_ptr[CPartitioningFactory]) + CPartitioningOrFactory & operator = (shared_ptr[CPartitioning]) + CPartitioningOrFactory & operator = ( + shared_ptr[CPartitioningFactory]) + shared_ptr[CPartitioning] partitioning() const + shared_ptr[CPartitioningFactory] factory() const + + cdef cppclass CFileSystemFactoryOptions \ + "arrow::dataset::FileSystemFactoryOptions": + CPartitioningOrFactory partitioning + c_string partition_base_dir + c_bool exclude_invalid_files + vector[c_string] selector_ignore_prefixes + + cdef cppclass CFileSystemDatasetFactory \ + "arrow::dataset::FileSystemDatasetFactory"( + CDatasetFactory): + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromPaths "Make"( + shared_ptr[CFileSystem] filesystem, + vector[c_string] paths, + shared_ptr[CFileFormat] format, + CFileSystemFactoryOptions options + ) + + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromSelector "Make"( + shared_ptr[CFileSystem] filesystem, + CFileSelector, + shared_ptr[CFileFormat] format, + CFileSystemFactoryOptions options + ) + + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromFileInfos "Make"( + shared_ptr[CFileSystem] filesystem, + vector[CFileInfo] files, + shared_ptr[CFileFormat] format, + CFileSystemFactoryOptions options + ) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e5389b3135faf472e9b1f7d85fee9dea5fd9f2b0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd @@ -0,0 +1,105 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# distutils: language = c++ + +from pyarrow.includes.libarrow_dataset cimport * +from pyarrow.includes.libparquet_encryption cimport * + +from pyarrow._parquet cimport * + + +cdef extern from "arrow/dataset/parquet_encryption_config.h" namespace "arrow::dataset" nogil: + cdef cppclass CParquetEncryptionConfig "arrow::dataset::ParquetEncryptionConfig": + shared_ptr[CCryptoFactory] crypto_factory + shared_ptr[CKmsConnectionConfig] kms_connection_config + shared_ptr[CEncryptionConfiguration] encryption_config + + cdef cppclass CParquetDecryptionConfig "arrow::dataset::ParquetDecryptionConfig": + shared_ptr[CCryptoFactory] crypto_factory + shared_ptr[CKmsConnectionConfig] kms_connection_config + shared_ptr[CDecryptionConfiguration] decryption_config + + +cdef extern from "arrow/dataset/api.h" namespace "arrow::dataset" nogil: + + cdef cppclass CParquetFileWriter \ + "arrow::dataset::ParquetFileWriter"(CFileWriter): + const shared_ptr[FileWriter]& parquet_writer() const + + cdef cppclass CParquetFileWriteOptions \ + "arrow::dataset::ParquetFileWriteOptions"(CFileWriteOptions): + shared_ptr[WriterProperties] writer_properties + shared_ptr[ArrowWriterProperties] arrow_writer_properties + shared_ptr[CParquetEncryptionConfig] parquet_encryption_config + + cdef cppclass CParquetFileFragment "arrow::dataset::ParquetFileFragment"( + CFileFragment): + const vector[int]& row_groups() const + shared_ptr[CFileMetaData] metadata() const + CResult[vector[shared_ptr[CFragment]]] SplitByRowGroup( + CExpression predicate) + CResult[shared_ptr[CFragment]] SubsetWithFilter "Subset"( + CExpression predicate) + CResult[shared_ptr[CFragment]] SubsetWithIds "Subset"( + vector[int] row_group_ids) + CStatus EnsureCompleteMetadata() + + cdef cppclass CParquetFileFormatReaderOptions \ + "arrow::dataset::ParquetFileFormat::ReaderOptions": + unordered_set[c_string] dict_columns + TimeUnit coerce_int96_timestamp_unit + + cdef cppclass CParquetFileFormat "arrow::dataset::ParquetFileFormat"( + CFileFormat): + CParquetFileFormatReaderOptions reader_options + CResult[shared_ptr[CFileFragment]] MakeFragment( + CFileSource source, + CExpression partition_expression, + shared_ptr[CSchema] physical_schema, + vector[int] row_groups) + + cdef cppclass CParquetFragmentScanOptions \ + "arrow::dataset::ParquetFragmentScanOptions"(CFragmentScanOptions): + shared_ptr[CReaderProperties] reader_properties + shared_ptr[ArrowReaderProperties] arrow_reader_properties + shared_ptr[CParquetDecryptionConfig] parquet_decryption_config + + cdef cppclass CParquetFactoryOptions \ + "arrow::dataset::ParquetFactoryOptions": + CPartitioningOrFactory partitioning + c_string partition_base_dir + c_bool validate_column_chunk_paths + + cdef cppclass CParquetDatasetFactory \ + "arrow::dataset::ParquetDatasetFactory"(CDatasetFactory): + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromMetaDataPath "Make"( + const c_string& metadata_path, + shared_ptr[CFileSystem] filesystem, + shared_ptr[CParquetFileFormat] format, + CParquetFactoryOptions options + ) + + @staticmethod + CResult[shared_ptr[CDatasetFactory]] MakeFromMetaDataSource "Make"( + const CFileSource& metadata_path, + const c_string& base_path, + shared_ptr[CFileSystem] filesystem, + shared_ptr[CParquetFileFormat] format, + CParquetFactoryOptions options + ) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd new file mode 100644 index 
0000000000000000000000000000000000000000..722e947bfeca238af0bd6ee002e7f3f9f1063ed6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.libarrow cimport (CCompressionType, CStatus, CTable, + COutputStream, CResult, shared_ptr, + vector, CRandomAccessFile, CSchema, + c_string, CIpcReadOptions) + + +cdef extern from "arrow/ipc/api.h" namespace "arrow::ipc" nogil: + int kFeatherV1Version" arrow::ipc::feather::kFeatherV1Version" + int kFeatherV2Version" arrow::ipc::feather::kFeatherV2Version" + + cdef cppclass CFeatherProperties" arrow::ipc::feather::WriteProperties": + int version + int chunksize + CCompressionType compression + int compression_level + + CStatus WriteFeather" arrow::ipc::feather::WriteTable" \ + (const CTable& table, COutputStream* out, + CFeatherProperties properties) + + cdef cppclass CFeatherReader" arrow::ipc::feather::Reader": + @staticmethod + CResult[shared_ptr[CFeatherReader]] Open( + const shared_ptr[CRandomAccessFile]& file, + const CIpcReadOptions& options) + int version() + shared_ptr[CSchema] schema() + + CStatus Read(shared_ptr[CTable]* out) + CStatus Read(const vector[int] indices, shared_ptr[CTable]* out) + CStatus Read(const vector[c_string] names, shared_ptr[CTable]* out) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c4cf5830c41286fde3af95c894b909e0aa34a72d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd @@ -0,0 +1,622 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + + +cdef extern from "arrow/flight/api.h" namespace "arrow" nogil: + cdef char* CTracingServerMiddlewareName\ + " arrow::flight::TracingServerMiddleware::kMiddlewareName" + + cdef cppclass CActionType" arrow::flight::ActionType": + c_string type + c_string description + bint operator==(CActionType) + CResult[c_string] SerializeToString() + + @staticmethod + CResult[CActionType] Deserialize(const c_string& serialized) + + cdef cppclass CAction" arrow::flight::Action": + c_string type + shared_ptr[CBuffer] body + bint operator==(CAction) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CAction] Deserialize(const c_string& serialized) + + cdef cppclass CFlightResult" arrow::flight::Result": + CFlightResult() + CFlightResult(CFlightResult) + shared_ptr[CBuffer] body + bint operator==(CFlightResult) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CFlightResult] Deserialize(const c_string& serialized) + + cdef cppclass CBasicAuth" arrow::flight::BasicAuth": + CBasicAuth() + CBasicAuth(CBuffer) + CBasicAuth(CBasicAuth) + c_string username + c_string password + bint operator==(CBasicAuth) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CBasicAuth] Deserialize(const c_string& serialized) + + cdef cppclass CResultStream" arrow::flight::ResultStream": + CResult[unique_ptr[CFlightResult]] Next() + + cdef cppclass CDescriptorType \ + " arrow::flight::FlightDescriptor::DescriptorType": + bint operator==(CDescriptorType) + + CDescriptorType CDescriptorTypeUnknown\ + " arrow::flight::FlightDescriptor::UNKNOWN" + CDescriptorType CDescriptorTypePath\ + " arrow::flight::FlightDescriptor::PATH" + CDescriptorType CDescriptorTypeCmd\ + " arrow::flight::FlightDescriptor::CMD" + + cdef cppclass CFlightDescriptor" arrow::flight::FlightDescriptor": + CDescriptorType type + c_string cmd + vector[c_string] path + bint operator==(CFlightDescriptor) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CFlightDescriptor] Deserialize(const c_string& serialized) + + cdef cppclass CTicket" arrow::flight::Ticket": + CTicket() + c_string ticket + bint operator==(CTicket) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CTicket] Deserialize(const c_string& serialized) + + cdef cppclass CCriteria" arrow::flight::Criteria": + CCriteria() + c_string expression + bint operator==(CCriteria) + CResult[c_string] SerializeToString() + + @staticmethod + CResult[CCriteria] Deserialize(const c_string& serialized) + + cdef cppclass CLocation" arrow::flight::Location": + CLocation() + c_string ToString() + c_bool Equals(const CLocation& other) + + @staticmethod + CResult[CLocation] Parse(const c_string& uri_string) + + @staticmethod + CResult[CLocation] ForGrpcTcp(const c_string& host, int port) + + @staticmethod + CResult[CLocation] ForGrpcTls(const c_string& host, int port) + + @staticmethod + CResult[CLocation] ForGrpcUnix(const c_string& path) + + cdef cppclass CFlightEndpoint" arrow::flight::FlightEndpoint": + CFlightEndpoint() + + CTicket ticket + vector[CLocation] locations + + bint operator==(CFlightEndpoint) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CFlightEndpoint] Deserialize(const c_string& serialized) + + cdef cppclass CFlightInfo" arrow::flight::FlightInfo": + 
CFlightInfo(CFlightInfo info) + int64_t total_records() + int64_t total_bytes() + CResult[shared_ptr[CSchema]] GetSchema(CDictionaryMemo* memo) + CFlightDescriptor& descriptor() + const vector[CFlightEndpoint]& endpoints() + CResult[c_string] SerializeToString() + c_string ToString() + bint operator==(CFlightInfo) + + @staticmethod + CResult[unique_ptr[CFlightInfo]] Deserialize( + const c_string& serialized) + + cdef cppclass CSchemaResult" arrow::flight::SchemaResult": + CSchemaResult() + CSchemaResult(CSchemaResult result) + CResult[shared_ptr[CSchema]] GetSchema(CDictionaryMemo* memo) + bint operator==(CSchemaResult) + CResult[c_string] SerializeToString() + c_string ToString() + + @staticmethod + CResult[CSchemaResult] Deserialize(const c_string& serialized) + + cdef cppclass CFlightListing" arrow::flight::FlightListing": + CResult[unique_ptr[CFlightInfo]] Next() + + cdef cppclass CSimpleFlightListing" arrow::flight::SimpleFlightListing": + # This doesn't work with Cython >= 3 + # CSimpleFlightListing(vector[CFlightInfo]&& info) + CSimpleFlightListing(const vector[CFlightInfo]& info) + + cdef cppclass CFlightPayload" arrow::flight::FlightPayload": + shared_ptr[CBuffer] descriptor + shared_ptr[CBuffer] app_metadata + CIpcPayload ipc_message + + cdef cppclass CFlightDataStream" arrow::flight::FlightDataStream": + shared_ptr[CSchema] schema() + CResult[CFlightPayload] Next() + + cdef cppclass CFlightStreamChunk" arrow::flight::FlightStreamChunk": + CFlightStreamChunk() + shared_ptr[CRecordBatch] data + shared_ptr[CBuffer] app_metadata + + cdef cppclass CMetadataRecordBatchReader \ + " arrow::flight::MetadataRecordBatchReader": + CResult[shared_ptr[CSchema]] GetSchema() + CResult[CFlightStreamChunk] Next() + CResult[shared_ptr[CTable]] ToTable() + + CResult[shared_ptr[CRecordBatchReader]] MakeRecordBatchReader\ + " arrow::flight::MakeRecordBatchReader"( + shared_ptr[CMetadataRecordBatchReader]) + + cdef cppclass CMetadataRecordBatchWriter \ + " arrow::flight::MetadataRecordBatchWriter"(CRecordBatchWriter): + CStatus Begin(shared_ptr[CSchema] schema, + const CIpcWriteOptions& options) + CStatus WriteMetadata(shared_ptr[CBuffer] app_metadata) + CStatus WriteWithMetadata(const CRecordBatch& batch, + shared_ptr[CBuffer] app_metadata) + + cdef cppclass CFlightStreamReader \ + " arrow::flight::FlightStreamReader"(CMetadataRecordBatchReader): + void Cancel() + CResult[shared_ptr[CTable]] ToTableWithStopToken" ToTable"\ + (const CStopToken& stop_token) + + cdef cppclass CFlightMessageReader \ + " arrow::flight::FlightMessageReader"(CMetadataRecordBatchReader): + CFlightDescriptor& descriptor() + + cdef cppclass CFlightMessageWriter \ + " arrow::flight::FlightMessageWriter"(CMetadataRecordBatchWriter): + pass + + cdef cppclass CFlightStreamWriter \ + " arrow::flight::FlightStreamWriter"(CMetadataRecordBatchWriter): + CStatus DoneWriting() + + cdef cppclass CRecordBatchStream \ + " arrow::flight::RecordBatchStream"(CFlightDataStream): + CRecordBatchStream(shared_ptr[CRecordBatchReader]& reader, + const CIpcWriteOptions& options) + + cdef cppclass CFlightMetadataReader" arrow::flight::FlightMetadataReader": + CStatus ReadMetadata(shared_ptr[CBuffer]* out) + + cdef cppclass CFlightMetadataWriter" arrow::flight::FlightMetadataWriter": + CStatus WriteMetadata(const CBuffer& message) + + cdef cppclass CServerAuthReader" arrow::flight::ServerAuthReader": + CStatus Read(c_string* token) + + cdef cppclass CServerAuthSender" arrow::flight::ServerAuthSender": + CStatus Write(c_string& token) + + cdef cppclass 
CClientAuthReader" arrow::flight::ClientAuthReader": + CStatus Read(c_string* token) + + cdef cppclass CClientAuthSender" arrow::flight::ClientAuthSender": + CStatus Write(c_string& token) + + cdef cppclass CServerAuthHandler" arrow::flight::ServerAuthHandler": + pass + + cdef cppclass CClientAuthHandler" arrow::flight::ClientAuthHandler": + pass + + cdef cppclass CServerCallContext" arrow::flight::ServerCallContext": + c_string& peer_identity() + c_string& peer() + c_bool is_cancelled() + void AddHeader(const c_string& key, const c_string& value) + void AddTrailer(const c_string& key, const c_string& value) + CServerMiddleware* GetMiddleware(const c_string& key) + + cdef cppclass CTimeoutDuration" arrow::flight::TimeoutDuration": + CTimeoutDuration(double) + + cdef cppclass CFlightCallOptions" arrow::flight::FlightCallOptions": + CFlightCallOptions() + CTimeoutDuration timeout + CIpcWriteOptions write_options + CIpcReadOptions read_options + vector[pair[c_string, c_string]] headers + CStopToken stop_token + + cdef cppclass CCertKeyPair" arrow::flight::CertKeyPair": + CCertKeyPair() + c_string pem_cert + c_string pem_key + + cdef cppclass CFlightMethod" arrow::flight::FlightMethod": + bint operator==(CFlightMethod) + + CFlightMethod CFlightMethodInvalid\ + " arrow::flight::FlightMethod::Invalid" + CFlightMethod CFlightMethodHandshake\ + " arrow::flight::FlightMethod::Handshake" + CFlightMethod CFlightMethodListFlights\ + " arrow::flight::FlightMethod::ListFlights" + CFlightMethod CFlightMethodGetFlightInfo\ + " arrow::flight::FlightMethod::GetFlightInfo" + CFlightMethod CFlightMethodGetSchema\ + " arrow::flight::FlightMethod::GetSchema" + CFlightMethod CFlightMethodDoGet\ + " arrow::flight::FlightMethod::DoGet" + CFlightMethod CFlightMethodDoPut\ + " arrow::flight::FlightMethod::DoPut" + CFlightMethod CFlightMethodDoAction\ + " arrow::flight::FlightMethod::DoAction" + CFlightMethod CFlightMethodListActions\ + " arrow::flight::FlightMethod::ListActions" + CFlightMethod CFlightMethodDoExchange\ + " arrow::flight::FlightMethod::DoExchange" + + cdef cppclass CCallInfo" arrow::flight::CallInfo": + CFlightMethod method + + # This is really std::unordered_multimap, but Cython has no + # bindings for it, so treat it as an opaque class and bind the + # methods we need + cdef cppclass CCallHeaders" arrow::flight::CallHeaders": + cppclass const_iterator: + pair[c_string, c_string] operator*() + # For Cython < 3 + const_iterator operator++() + # For Cython >= 3 + const_iterator operator++(int) + bint operator==(const_iterator) + bint operator!=(const_iterator) + const_iterator cbegin() + const_iterator cend() + + cdef cppclass CAddCallHeaders" arrow::flight::AddCallHeaders": + void AddHeader(const c_string& key, const c_string& value) + + cdef cppclass CServerMiddleware" arrow::flight::ServerMiddleware": + c_string name() + + cdef cppclass CServerMiddlewareFactory\ + " arrow::flight::ServerMiddlewareFactory": + pass + + cdef cppclass CClientMiddleware" arrow::flight::ClientMiddleware": + pass + + cdef cppclass CClientMiddlewareFactory\ + " arrow::flight::ClientMiddlewareFactory": + pass + + cpdef cppclass CTracingServerMiddlewareTraceKey\ + " arrow::flight::TracingServerMiddleware::TraceKey": + CTracingServerMiddlewareTraceKey() + c_string key + c_string value + + cdef cppclass CTracingServerMiddleware\ + " arrow::flight::TracingServerMiddleware"(CServerMiddleware): + vector[CTracingServerMiddlewareTraceKey] GetTraceContext() + + cdef shared_ptr[CServerMiddlewareFactory] \ + 
MakeTracingServerMiddlewareFactory\ + " arrow::flight::MakeTracingServerMiddlewareFactory"() + + cdef cppclass CFlightServerOptions" arrow::flight::FlightServerOptions": + CFlightServerOptions(const CLocation& location) + CLocation location + unique_ptr[CServerAuthHandler] auth_handler + vector[CCertKeyPair] tls_certificates + c_bool verify_client + c_string root_certificates + vector[pair[c_string, shared_ptr[CServerMiddlewareFactory]]] middleware + + cdef cppclass CFlightClientOptions" arrow::flight::FlightClientOptions": + c_string tls_root_certs + c_string cert_chain + c_string private_key + c_string override_hostname + vector[shared_ptr[CClientMiddlewareFactory]] middleware + int64_t write_size_limit_bytes + vector[pair[c_string, CIntStringVariant]] generic_options + c_bool disable_server_verification + + @staticmethod + CFlightClientOptions Defaults() + + cdef cppclass CDoPutResult" arrow::flight::FlightClient::DoPutResult": + unique_ptr[CFlightStreamWriter] writer + unique_ptr[CFlightMetadataReader] reader + + cdef cppclass CDoExchangeResult" arrow::flight::FlightClient::DoExchangeResult": + unique_ptr[CFlightStreamWriter] writer + unique_ptr[CFlightStreamReader] reader + + cdef cppclass CFlightClient" arrow::flight::FlightClient": + @staticmethod + CResult[unique_ptr[CFlightClient]] Connect(const CLocation& location, + const CFlightClientOptions& options) + + c_bool supports_async() + CStatus CheckAsyncSupport() + + CStatus Authenticate(CFlightCallOptions& options, + unique_ptr[CClientAuthHandler] auth_handler) + + CResult[pair[c_string, c_string]] AuthenticateBasicToken( + CFlightCallOptions& options, + const c_string& username, + const c_string& password) + + CResult[unique_ptr[CResultStream]] DoAction(CFlightCallOptions& options, CAction& action) + CResult[vector[CActionType]] ListActions(CFlightCallOptions& options) + + CResult[unique_ptr[CFlightListing]] ListFlights(CFlightCallOptions& options, CCriteria criteria) + CResult[unique_ptr[CFlightInfo]] GetFlightInfo(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CFuture[CFlightInfo] GetFlightInfoAsync(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CResult[unique_ptr[CSchemaResult]] GetSchema(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CResult[unique_ptr[CFlightStreamReader]] DoGet(CFlightCallOptions& options, CTicket& ticket) + CResult[CDoPutResult] DoPut(CFlightCallOptions& options, + CFlightDescriptor& descriptor, + shared_ptr[CSchema]& schema) + CResult[CDoExchangeResult] DoExchange(CFlightCallOptions& options, + CFlightDescriptor& descriptor) + CStatus Close() + + cdef cppclass CFlightStatusCode" arrow::flight::FlightStatusCode": + bint operator==(CFlightStatusCode) + + CFlightStatusCode CFlightStatusInternal \ + " arrow::flight::FlightStatusCode::Internal" + CFlightStatusCode CFlightStatusTimedOut \ + " arrow::flight::FlightStatusCode::TimedOut" + CFlightStatusCode CFlightStatusCancelled \ + " arrow::flight::FlightStatusCode::Cancelled" + CFlightStatusCode CFlightStatusUnauthenticated \ + " arrow::flight::FlightStatusCode::Unauthenticated" + CFlightStatusCode CFlightStatusUnauthorized \ + " arrow::flight::FlightStatusCode::Unauthorized" + CFlightStatusCode CFlightStatusUnavailable \ + " arrow::flight::FlightStatusCode::Unavailable" + CFlightStatusCode CFlightStatusFailed \ + " arrow::flight::FlightStatusCode::Failed" + + cdef cppclass FlightStatusDetail" arrow::flight::FlightStatusDetail": + CFlightStatusCode code() + c_string extra_info() + + @staticmethod + 
shared_ptr[FlightStatusDetail] UnwrapStatus(const CStatus& status) + + cdef cppclass FlightWriteSizeStatusDetail\ + " arrow::flight::FlightWriteSizeStatusDetail": + int64_t limit() + int64_t actual() + + @staticmethod + shared_ptr[FlightWriteSizeStatusDetail] UnwrapStatus( + const CStatus& status) + + cdef CStatus MakeFlightError" arrow::flight::MakeFlightError" \ + (CFlightStatusCode code, const c_string& message) + + cdef CStatus MakeFlightError" arrow::flight::MakeFlightError" \ + (CFlightStatusCode code, + const c_string& message, + const c_string& extra_info) + +# Callbacks for implementing Flight servers +# Use typedef to emulate syntax for std::function +ctypedef CStatus cb_list_flights(object, const CServerCallContext&, + const CCriteria*, + unique_ptr[CFlightListing]*) +ctypedef CStatus cb_get_flight_info(object, const CServerCallContext&, + const CFlightDescriptor&, + unique_ptr[CFlightInfo]*) +ctypedef CStatus cb_get_schema(object, const CServerCallContext&, + const CFlightDescriptor&, + unique_ptr[CSchemaResult]*) +ctypedef CStatus cb_do_put(object, const CServerCallContext&, + unique_ptr[CFlightMessageReader], + unique_ptr[CFlightMetadataWriter]) +ctypedef CStatus cb_do_get(object, const CServerCallContext&, + const CTicket&, + unique_ptr[CFlightDataStream]*) +ctypedef CStatus cb_do_exchange(object, const CServerCallContext&, + unique_ptr[CFlightMessageReader], + unique_ptr[CFlightMessageWriter]) +ctypedef CStatus cb_do_action(object, const CServerCallContext&, + const CAction&, + unique_ptr[CResultStream]*) +ctypedef CStatus cb_list_actions(object, const CServerCallContext&, + vector[CActionType]*) +ctypedef CStatus cb_result_next(object, unique_ptr[CFlightResult]*) +ctypedef CStatus cb_data_stream_next(object, CFlightPayload*) +ctypedef CStatus cb_server_authenticate(object, CServerAuthSender*, + CServerAuthReader*) +ctypedef CStatus cb_is_valid(object, const c_string&, c_string*) +ctypedef CStatus cb_client_authenticate(object, CClientAuthSender*, + CClientAuthReader*) +ctypedef CStatus cb_get_token(object, c_string*) + +ctypedef CStatus cb_middleware_sending_headers(object, CAddCallHeaders*) +ctypedef CStatus cb_middleware_call_completed(object, const CStatus&) +ctypedef CStatus cb_client_middleware_received_headers( + object, const CCallHeaders&) +ctypedef CStatus cb_server_middleware_start_call( + object, + const CCallInfo&, + const CCallHeaders&, + shared_ptr[CServerMiddleware]*) +ctypedef CStatus cb_client_middleware_start_call( + object, + const CCallInfo&, + unique_ptr[CClientMiddleware]*) + +cdef extern from "arrow/python/flight.h" namespace "arrow::py::flight" nogil: + cdef char* CPyServerMiddlewareName\ + " arrow::py::flight::kPyServerMiddlewareName" + + cdef cppclass PyFlightServerVtable: + PyFlightServerVtable() + function[cb_list_flights] list_flights + function[cb_get_flight_info] get_flight_info + function[cb_get_schema] get_schema + function[cb_do_put] do_put + function[cb_do_get] do_get + function[cb_do_exchange] do_exchange + function[cb_do_action] do_action + function[cb_list_actions] list_actions + + cdef cppclass PyServerAuthHandlerVtable: + PyServerAuthHandlerVtable() + function[cb_server_authenticate] authenticate + function[cb_is_valid] is_valid + + cdef cppclass PyClientAuthHandlerVtable: + PyClientAuthHandlerVtable() + function[cb_client_authenticate] authenticate + function[cb_get_token] get_token + + cdef cppclass PyFlightServer: + PyFlightServer(object server, PyFlightServerVtable vtable) + + CStatus Init(CFlightServerOptions& options) + int 
port() + CStatus ServeWithSignals() except * + CStatus Shutdown() + CStatus Wait() + + cdef cppclass PyServerAuthHandler\ + " arrow::py::flight::PyServerAuthHandler"(CServerAuthHandler): + PyServerAuthHandler(object handler, PyServerAuthHandlerVtable vtable) + + cdef cppclass PyClientAuthHandler\ + " arrow::py::flight::PyClientAuthHandler"(CClientAuthHandler): + PyClientAuthHandler(object handler, PyClientAuthHandlerVtable vtable) + + cdef cppclass CPyFlightResultStream\ + " arrow::py::flight::PyFlightResultStream"(CResultStream): + CPyFlightResultStream(object generator, + function[cb_result_next] callback) + + cdef cppclass CPyFlightDataStream\ + " arrow::py::flight::PyFlightDataStream"(CFlightDataStream): + CPyFlightDataStream(object data_source, + unique_ptr[CFlightDataStream] stream) + + cdef cppclass CPyGeneratorFlightDataStream\ + " arrow::py::flight::PyGeneratorFlightDataStream"\ + (CFlightDataStream): + CPyGeneratorFlightDataStream(object generator, + shared_ptr[CSchema] schema, + function[cb_data_stream_next] callback, + const CIpcWriteOptions& options) + + cdef cppclass PyServerMiddlewareVtable\ + " arrow::py::flight::PyServerMiddleware::Vtable": + PyServerMiddlewareVtable() + function[cb_middleware_sending_headers] sending_headers + function[cb_middleware_call_completed] call_completed + + cdef cppclass PyClientMiddlewareVtable\ + " arrow::py::flight::PyClientMiddleware::Vtable": + PyClientMiddlewareVtable() + function[cb_middleware_sending_headers] sending_headers + function[cb_client_middleware_received_headers] received_headers + function[cb_middleware_call_completed] call_completed + + cdef cppclass CPyServerMiddleware\ + " arrow::py::flight::PyServerMiddleware"(CServerMiddleware): + CPyServerMiddleware(object middleware, PyServerMiddlewareVtable vtable) + void* py_object() + + cdef cppclass CPyServerMiddlewareFactory\ + " arrow::py::flight::PyServerMiddlewareFactory"\ + (CServerMiddlewareFactory): + CPyServerMiddlewareFactory( + object factory, + function[cb_server_middleware_start_call] start_call) + + cdef cppclass CPyClientMiddleware\ + " arrow::py::flight::PyClientMiddleware"(CClientMiddleware): + CPyClientMiddleware(object middleware, PyClientMiddlewareVtable vtable) + + cdef cppclass CPyClientMiddlewareFactory\ + " arrow::py::flight::PyClientMiddlewareFactory"\ + (CClientMiddlewareFactory): + CPyClientMiddlewareFactory( + object factory, + function[cb_client_middleware_start_call] start_call) + + cdef CStatus CreateFlightInfo" arrow::py::flight::CreateFlightInfo"( + shared_ptr[CSchema] schema, + CFlightDescriptor& descriptor, + vector[CFlightEndpoint] endpoints, + int64_t total_records, + int64_t total_bytes, + unique_ptr[CFlightInfo]* out) + + cdef CStatus CreateSchemaResult" arrow::py::flight::CreateSchemaResult"( + shared_ptr[CSchema] schema, + unique_ptr[CSchemaResult]* out) + + +cdef extern from "<variant>" namespace "std" nogil: + cdef cppclass CIntStringVariant" std::variant<int, std::string>": + CIntStringVariant() + CIntStringVariant(int) + CIntStringVariant(c_string) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd new file mode 100644 index 0000000000000000000000000000000000000000..328b426a498db70bb5b1fca5765f8d0220559ccc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd @@ -0,0 +1,366 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements.
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport CTimePoint + +cdef extern from "arrow/filesystem/api.h" namespace "arrow::fs" nogil: + + ctypedef enum CFileType "arrow::fs::FileType": + CFileType_NotFound "arrow::fs::FileType::NotFound" + CFileType_Unknown "arrow::fs::FileType::Unknown" + CFileType_File "arrow::fs::FileType::File" + CFileType_Directory "arrow::fs::FileType::Directory" + + cdef cppclass CFileInfo "arrow::fs::FileInfo": + CFileInfo() + CFileInfo(CFileInfo) + CFileInfo& operator=(CFileInfo) + CFileInfo(const CFileInfo&) + CFileInfo& operator=(const CFileInfo&) + + CFileType type() + void set_type(CFileType type) + c_string path() + void set_path(const c_string& path) + c_string base_name() + int64_t size() + void set_size(int64_t size) + c_string extension() + CTimePoint mtime() + void set_mtime(CTimePoint mtime) + + cdef cppclass CFileSelector "arrow::fs::FileSelector": + CFileSelector() + c_string base_dir + c_bool allow_not_found + c_bool recursive + + cdef cppclass CFileLocator "arrow::fs::FileLocator": + shared_ptr[CFileSystem] filesystem + c_string path + + cdef cppclass CFileSystem "arrow::fs::FileSystem": + shared_ptr[CFileSystem] shared_from_this() + c_string type_name() const + CResult[c_string] NormalizePath(c_string path) + CResult[CFileInfo] GetFileInfo(const c_string& path) + CResult[vector[CFileInfo]] GetFileInfo( + const vector[c_string]& paths) + CResult[vector[CFileInfo]] GetFileInfo(const CFileSelector& select) + CStatus CreateDir(const c_string& path, c_bool recursive) + CStatus DeleteDir(const c_string& path) + CStatus DeleteDirContents(const c_string& path, c_bool missing_dir_ok) + CStatus DeleteRootDirContents() + CStatus DeleteFile(const c_string& path) + CStatus DeleteFiles(const vector[c_string]& paths) + CStatus Move(const c_string& src, const c_string& dest) + CStatus CopyFile(const c_string& src, const c_string& dest) + CResult[shared_ptr[CInputStream]] OpenInputStream( + const c_string& path) + CResult[shared_ptr[CRandomAccessFile]] OpenInputFile( + const c_string& path) + CResult[shared_ptr[COutputStream]] OpenOutputStream( + const c_string& path, const shared_ptr[const CKeyValueMetadata]&) + CResult[shared_ptr[COutputStream]] OpenAppendStream( + const c_string& path, const shared_ptr[const CKeyValueMetadata]&) + c_bool Equals(const CFileSystem& other) + c_bool Equals(shared_ptr[CFileSystem] other) + + CResult[shared_ptr[CFileSystem]] CFileSystemFromUri \ + "arrow::fs::FileSystemFromUri"(const c_string& uri, c_string* out_path) + CResult[shared_ptr[CFileSystem]] CFileSystemFromUriOrPath \ + "arrow::fs::FileSystemFromUriOrPath"(const c_string& uri, + c_string* out_path) + + cdef cppclass CFileSystemGlobalOptions \ + "arrow::fs::FileSystemGlobalOptions": + 
c_string tls_ca_file_path + c_string tls_ca_dir_path + + CStatus CFileSystemsInitialize "arrow::fs::Initialize" \ + (const CFileSystemGlobalOptions& options) + + cdef cppclass CLocalFileSystemOptions "arrow::fs::LocalFileSystemOptions": + c_bool use_mmap + + @staticmethod + CLocalFileSystemOptions Defaults() + + c_bool Equals(const CLocalFileSystemOptions& other) + + cdef cppclass CLocalFileSystem "arrow::fs::LocalFileSystem"(CFileSystem): + CLocalFileSystem() + CLocalFileSystem(CLocalFileSystemOptions) + CLocalFileSystemOptions options() + + cdef cppclass CSubTreeFileSystem \ + "arrow::fs::SubTreeFileSystem"(CFileSystem): + CSubTreeFileSystem(const c_string& base_path, + shared_ptr[CFileSystem] base_fs) + c_string base_path() + shared_ptr[CFileSystem] base_fs() + + ctypedef enum CS3LogLevel "arrow::fs::S3LogLevel": + CS3LogLevel_Off "arrow::fs::S3LogLevel::Off" + CS3LogLevel_Fatal "arrow::fs::S3LogLevel::Fatal" + CS3LogLevel_Error "arrow::fs::S3LogLevel::Error" + CS3LogLevel_Warn "arrow::fs::S3LogLevel::Warn" + CS3LogLevel_Info "arrow::fs::S3LogLevel::Info" + CS3LogLevel_Debug "arrow::fs::S3LogLevel::Debug" + CS3LogLevel_Trace "arrow::fs::S3LogLevel::Trace" + + cdef struct CS3GlobalOptions "arrow::fs::S3GlobalOptions": + CS3LogLevel log_level + int num_event_loop_threads + + cdef cppclass CS3ProxyOptions "arrow::fs::S3ProxyOptions": + c_string scheme + c_string host + int port + c_string username + c_string password + c_bool Equals(const CS3ProxyOptions& other) + + @staticmethod + CResult[CS3ProxyOptions] FromUriString "FromUri"( + const c_string& uri_string) + + ctypedef enum CS3CredentialsKind "arrow::fs::S3CredentialsKind": + CS3CredentialsKind_Anonymous "arrow::fs::S3CredentialsKind::Anonymous" + CS3CredentialsKind_Default "arrow::fs::S3CredentialsKind::Default" + CS3CredentialsKind_Explicit "arrow::fs::S3CredentialsKind::Explicit" + CS3CredentialsKind_Role "arrow::fs::S3CredentialsKind::Role" + CS3CredentialsKind_WebIdentity \ + "arrow::fs::S3CredentialsKind::WebIdentity" + + cdef cppclass CS3RetryStrategy "arrow::fs::S3RetryStrategy": + @staticmethod + shared_ptr[CS3RetryStrategy] GetAwsDefaultRetryStrategy(int64_t max_attempts) + + @staticmethod + shared_ptr[CS3RetryStrategy] GetAwsStandardRetryStrategy(int64_t max_attempts) + + cdef cppclass CS3Options "arrow::fs::S3Options": + c_string region + double connect_timeout + double request_timeout + c_string endpoint_override + c_string scheme + c_bool background_writes + c_bool allow_bucket_creation + c_bool allow_bucket_deletion + c_bool force_virtual_addressing + shared_ptr[const CKeyValueMetadata] default_metadata + c_string role_arn + c_string session_name + c_string external_id + int load_frequency + CS3ProxyOptions proxy_options + CS3CredentialsKind credentials_kind + shared_ptr[CS3RetryStrategy] retry_strategy + void ConfigureDefaultCredentials() + void ConfigureAccessKey(const c_string& access_key, + const c_string& secret_key, + const c_string& session_token) + c_string GetAccessKey() + c_string GetSecretKey() + c_string GetSessionToken() + c_bool Equals(const CS3Options& other) + + @staticmethod + CS3Options Defaults() + + @staticmethod + CS3Options Anonymous() + + @staticmethod + CS3Options FromAccessKey(const c_string& access_key, + const c_string& secret_key, + const c_string& session_token) + + @staticmethod + CS3Options FromAssumeRole(const c_string& role_arn, + const c_string& session_name, + const c_string& external_id, + const int load_frequency) + + cdef cppclass CS3FileSystem "arrow::fs::S3FileSystem"(CFileSystem): 
+ @staticmethod + CResult[shared_ptr[CS3FileSystem]] Make(const CS3Options& options) + CS3Options options() + c_string region() + + cdef CStatus CInitializeS3 "arrow::fs::InitializeS3"( + const CS3GlobalOptions& options) + cdef CStatus CEnsureS3Initialized "arrow::fs::EnsureS3Initialized"() + cdef CStatus CFinalizeS3 "arrow::fs::FinalizeS3"() + cdef CStatus CEnsureS3Finalized "arrow::fs::EnsureS3Finalized"() + + cdef CResult[c_string] ResolveS3BucketRegion(const c_string& bucket) + + cdef cppclass CGcsCredentials "arrow::fs::GcsCredentials": + c_bool anonymous() + CTimePoint expiration() + c_string access_token() + c_string target_service_account() + + cdef cppclass CGcsOptions "arrow::fs::GcsOptions": + CGcsCredentials credentials + c_string endpoint_override + c_string scheme + c_string default_bucket_location + optional[c_string] project_id + optional[double] retry_limit_seconds + shared_ptr[const CKeyValueMetadata] default_metadata + c_bool Equals(const CS3Options& other) + + @staticmethod + CGcsOptions Defaults() + + @staticmethod + CGcsOptions Anonymous() + + @staticmethod + CGcsOptions FromAccessToken(const c_string& access_token, + CTimePoint expiration) + + @staticmethod + CGcsOptions FromImpersonatedServiceAccount(const CGcsCredentials& base_credentials, + c_string& target_service_account) + + cdef cppclass CGcsFileSystem "arrow::fs::GcsFileSystem": + @staticmethod + CResult[shared_ptr[CGcsFileSystem]] Make(const CGcsOptions& options) + CGcsOptions options() + + cdef cppclass CAzureOptions "arrow::fs::AzureOptions": + c_string account_name + c_string blob_storage_authority + c_string dfs_storage_authority + c_string blob_storage_scheme + c_string dfs_storage_scheme + + c_bool Equals(const CAzureOptions& other) + CStatus ConfigureDefaultCredential() + CStatus ConfigureAccountKeyCredential(c_string account_key) + + cdef cppclass CAzureFileSystem "arrow::fs::AzureFileSystem": + @staticmethod + CResult[shared_ptr[CAzureFileSystem]] Make(const CAzureOptions& options) + CAzureOptions options() + + cdef cppclass CHdfsOptions "arrow::fs::HdfsOptions": + HdfsConnectionConfig connection_config + int32_t buffer_size + int16_t replication + int64_t default_block_size + + @staticmethod + CResult[CHdfsOptions] FromUriString "FromUri"( + const c_string& uri_string) + void ConfigureEndPoint(c_string host, int port) + void ConfigureDriver(c_bool use_hdfs3) + void ConfigureReplication(int16_t replication) + void ConfigureUser(c_string user_name) + void ConfigureBufferSize(int32_t buffer_size) + void ConfigureBlockSize(int64_t default_block_size) + void ConfigureKerberosTicketCachePath(c_string path) + void ConfigureExtraConf(c_string key, c_string value) + + cdef cppclass CHadoopFileSystem "arrow::fs::HadoopFileSystem"(CFileSystem): + @staticmethod + CResult[shared_ptr[CHadoopFileSystem]] Make( + const CHdfsOptions& options) + CHdfsOptions options() + + cdef cppclass CMockFileSystem "arrow::fs::internal::MockFileSystem"( + CFileSystem): + CMockFileSystem(CTimePoint current_time) + + CStatus CCopyFiles "arrow::fs::CopyFiles"( + const vector[CFileLocator]& sources, + const vector[CFileLocator]& destinations, + const CIOContext& io_context, + int64_t chunk_size, c_bool use_threads) + CStatus CCopyFilesWithSelector "arrow::fs::CopyFiles"( + const shared_ptr[CFileSystem]& source_fs, + const CFileSelector& source_sel, + const shared_ptr[CFileSystem]& destination_fs, + const c_string& destination_base_dir, + const CIOContext& io_context, + int64_t chunk_size, c_bool use_threads) + + +# Callbacks for 
implementing Python filesystems +# Use typedef to emulate syntax for std::function +ctypedef void CallbackGetTypeName(object, c_string*) +ctypedef c_bool CallbackEquals(object, const CFileSystem&) + +ctypedef void CallbackGetFileInfo(object, const c_string&, CFileInfo*) +ctypedef void CallbackGetFileInfoVector(object, const vector[c_string]&, + vector[CFileInfo]*) +ctypedef void CallbackGetFileInfoSelector(object, const CFileSelector&, + vector[CFileInfo]*) +ctypedef void CallbackCreateDir(object, const c_string&, c_bool) +ctypedef void CallbackDeleteDir(object, const c_string&) +ctypedef void CallbackDeleteDirContents(object, const c_string&, c_bool) +ctypedef void CallbackDeleteRootDirContents(object) +ctypedef void CallbackDeleteFile(object, const c_string&) +ctypedef void CallbackMove(object, const c_string&, const c_string&) +ctypedef void CallbackCopyFile(object, const c_string&, const c_string&) + +ctypedef void CallbackOpenInputStream(object, const c_string&, + shared_ptr[CInputStream]*) +ctypedef void CallbackOpenInputFile(object, const c_string&, + shared_ptr[CRandomAccessFile]*) +ctypedef void CallbackOpenOutputStream( + object, const c_string&, const shared_ptr[const CKeyValueMetadata]&, + shared_ptr[COutputStream]*) +ctypedef void CallbackNormalizePath(object, const c_string&, c_string*) + +cdef extern from "arrow/python/filesystem.h" namespace "arrow::py::fs" nogil: + + cdef cppclass CPyFileSystemVtable "arrow::py::fs::PyFileSystemVtable": + PyFileSystemVtable() + function[CallbackGetTypeName] get_type_name + function[CallbackEquals] equals + function[CallbackGetFileInfo] get_file_info + function[CallbackGetFileInfoVector] get_file_info_vector + function[CallbackGetFileInfoSelector] get_file_info_selector + function[CallbackCreateDir] create_dir + function[CallbackDeleteDir] delete_dir + function[CallbackDeleteDirContents] delete_dir_contents + function[CallbackDeleteRootDirContents] delete_root_dir_contents + function[CallbackDeleteFile] delete_file + function[CallbackMove] move + function[CallbackCopyFile] copy_file + function[CallbackOpenInputStream] open_input_stream + function[CallbackOpenInputFile] open_input_file + function[CallbackOpenOutputStream] open_output_stream + function[CallbackOpenOutputStream] open_append_stream + function[CallbackNormalizePath] normalize_path + + cdef cppclass CPyFileSystem "arrow::py::fs::PyFileSystem": + @staticmethod + shared_ptr[CPyFileSystem] Make(object handler, + CPyFileSystemVtable vtable) + + PyObject* handler() diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd new file mode 100644 index 0000000000000000000000000000000000000000..136d6bc8b14cd7826cd51f46ea97bf325180e738 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd @@ -0,0 +1,319 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + + +ctypedef CInvalidRowResult PyInvalidRowCallback(object, + const CCSVInvalidRow&) + + +cdef extern from "arrow/python/csv.h" namespace "arrow::py::csv": + + function[CInvalidRowHandler] MakeInvalidRowHandler( + function[PyInvalidRowCallback], object handler) + + +cdef extern from "arrow/python/api.h" namespace "arrow::py": + # Requires GIL + CResult[shared_ptr[CDataType]] InferArrowType( + object obj, object mask, c_bool pandas_null_sentinels) + + +cdef extern from "arrow/python/api.h" namespace "arrow::py::internal": + object NewMonthDayNanoTupleType() + CResult[PyObject*] MonthDayNanoIntervalArrayToPyList( + const CMonthDayNanoIntervalArray& array) + CResult[PyObject*] MonthDayNanoIntervalScalarToPyObject( + const CMonthDayNanoIntervalScalar& scalar) + + +cdef extern from "arrow/python/arrow_to_pandas.h" namespace "arrow::py::MapConversionType": + cdef enum MapConversionType "arrow::py::MapConversionType": + DEFAULT, + LOSSY, + STRICT_ + + +cdef extern from "arrow/python/api.h" namespace "arrow::py" nogil: + shared_ptr[CDataType] GetPrimitiveType(Type type) + + object PyHalf_FromHalf(npy_half value) + + cdef cppclass PyConversionOptions: + PyConversionOptions() + + shared_ptr[CDataType] type + int64_t size + CMemoryPool* pool + c_bool from_pandas + c_bool ignore_timezone + c_bool strict + + # TODO Some functions below are not actually "nogil" + + CResult[shared_ptr[CChunkedArray]] ConvertPySequence( + object obj, object mask, const PyConversionOptions& options, + CMemoryPool* pool) + + CResult[shared_ptr[CDataType]] NumPyDtypeToArrow(object dtype) + + CStatus NdarrayToArrow(CMemoryPool* pool, object ao, object mo, + c_bool from_pandas, + const shared_ptr[CDataType]& type, + shared_ptr[CChunkedArray]* out) + + CStatus NdarrayToArrow(CMemoryPool* pool, object ao, object mo, + c_bool from_pandas, + const shared_ptr[CDataType]& type, + const CCastOptions& cast_options, + shared_ptr[CChunkedArray]* out) + + CStatus NdarrayToTensor(CMemoryPool* pool, object ao, + const vector[c_string]& dim_names, + shared_ptr[CTensor]* out) + + CStatus TensorToNdarray(const shared_ptr[CTensor]& tensor, object base, + PyObject** out) + + CStatus SparseCOOTensorToNdarray( + const shared_ptr[CSparseCOOTensor]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_coords) + + CStatus SparseCSRMatrixToNdarray( + const shared_ptr[CSparseCSRMatrix]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_indptr, PyObject** out_indices) + + CStatus SparseCSCMatrixToNdarray( + const shared_ptr[CSparseCSCMatrix]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_indptr, PyObject** out_indices) + + CStatus SparseCSFTensorToNdarray( + const shared_ptr[CSparseCSFTensor]& sparse_tensor, object base, + PyObject** out_data, PyObject** out_indptr, PyObject** out_indices) + + CStatus NdarraysToSparseCOOTensor(CMemoryPool* pool, object data_ao, + object coords_ao, + const vector[int64_t]& shape, + const vector[c_string]& dim_names, + shared_ptr[CSparseCOOTensor]* 
out) + + CStatus NdarraysToSparseCSRMatrix(CMemoryPool* pool, object data_ao, + object indptr_ao, object indices_ao, + const vector[int64_t]& shape, + const vector[c_string]& dim_names, + shared_ptr[CSparseCSRMatrix]* out) + + CStatus NdarraysToSparseCSCMatrix(CMemoryPool* pool, object data_ao, + object indptr_ao, object indices_ao, + const vector[int64_t]& shape, + const vector[c_string]& dim_names, + shared_ptr[CSparseCSCMatrix]* out) + + CStatus NdarraysToSparseCSFTensor(CMemoryPool* pool, object data_ao, + object indptr_ao, object indices_ao, + const vector[int64_t]& shape, + const vector[int64_t]& axis_order, + const vector[c_string]& dim_names, + shared_ptr[CSparseCSFTensor]* out) + + CStatus TensorToSparseCOOTensor(shared_ptr[CTensor], + shared_ptr[CSparseCOOTensor]* out) + + CStatus TensorToSparseCSRMatrix(shared_ptr[CTensor], + shared_ptr[CSparseCSRMatrix]* out) + + CStatus TensorToSparseCSCMatrix(shared_ptr[CTensor], + shared_ptr[CSparseCSCMatrix]* out) + + CStatus TensorToSparseCSFTensor(shared_ptr[CTensor], + shared_ptr[CSparseCSFTensor]* out) + + CStatus ConvertArrayToPandas(const PandasOptions& options, + shared_ptr[CArray] arr, + object py_ref, PyObject** out) + + CStatus ConvertChunkedArrayToPandas(const PandasOptions& options, + shared_ptr[CChunkedArray] arr, + object py_ref, PyObject** out) + + CStatus ConvertTableToPandas(const PandasOptions& options, + shared_ptr[CTable] table, + PyObject** out) + + void c_set_default_memory_pool \ + " arrow::py::set_default_memory_pool"(CMemoryPool* pool)\ + + CMemoryPool* c_get_memory_pool \ + " arrow::py::get_memory_pool"() + + cdef cppclass PyBuffer(CBuffer): + @staticmethod + CResult[shared_ptr[CBuffer]] FromPyObject(object obj) + + cdef cppclass PyForeignBuffer(CBuffer): + @staticmethod + CStatus Make(const uint8_t* data, int64_t size, object base, + shared_ptr[CBuffer]* out) + + cdef cppclass PyReadableFile(CRandomAccessFile): + PyReadableFile(object fo) + + cdef cppclass PyOutputStream(COutputStream): + PyOutputStream(object fo) + + cdef cppclass PandasOptions: + CMemoryPool* pool + c_bool strings_to_categorical + c_bool zero_copy_only + c_bool integer_object_nulls + c_bool date_as_object + c_bool timestamp_as_object + c_bool use_threads + c_bool coerce_temporal_nanoseconds + c_bool ignore_timezone + c_bool deduplicate_objects + c_bool safe_cast + c_bool split_blocks + c_bool self_destruct + MapConversionType maps_as_pydicts + c_bool decode_dictionaries + unordered_set[c_string] categorical_columns + unordered_set[c_string] extension_columns + c_bool to_numpy + + cdef cppclass CSerializedPyObject" arrow::py::SerializedPyObject": + shared_ptr[CRecordBatch] batch + vector[shared_ptr[CTensor]] tensors + + CStatus WriteTo(COutputStream* dst) + CStatus GetComponents(CMemoryPool* pool, PyObject** dst) + + CStatus SerializeObject(object context, object sequence, + CSerializedPyObject* out) + + CStatus DeserializeObject(object context, + const CSerializedPyObject& obj, + PyObject* base, PyObject** out) + + CStatus ReadSerializedObject(CRandomAccessFile* src, + CSerializedPyObject* out) + + cdef cppclass SparseTensorCounts: + SparseTensorCounts() + int coo + int csr + int csc + int csf + int ndim_csf + int num_total_tensors() const + int num_total_buffers() const + + CStatus GetSerializedFromComponents( + int num_tensors, + const SparseTensorCounts& num_sparse_tensors, + int num_ndarrays, + int num_buffers, + object buffers, + CSerializedPyObject* out) + + +cdef extern from "arrow/python/api.h" namespace "arrow::py::internal" nogil: + 
cdef cppclass CTimePoint "arrow::py::internal::TimePoint": + pass + + CTimePoint PyDateTime_to_TimePoint(PyDateTime_DateTime* pydatetime) + int64_t TimePoint_to_ns(CTimePoint val) + CTimePoint TimePoint_from_s(double val) + CTimePoint TimePoint_from_ns(int64_t val) + + CResult[c_string] TzinfoToString(PyObject* pytzinfo) + CResult[PyObject*] StringToTzinfo(c_string) + + +cdef extern from "arrow/python/init.h": + int arrow_init_numpy() except -1 + + +cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py": + int import_pyarrow() except -1 + + +cdef extern from "arrow/python/common.h" namespace "arrow::py": + c_bool IsPyError(const CStatus& status) + void RestorePyError(const CStatus& status) except * + + +cdef extern from "arrow/python/common.h" namespace "arrow::py" nogil: + cdef cppclass SharedPtrNoGIL[T](shared_ptr[T]): + # This looks like the only way to satisfy both Cython 2 and Cython 3 + SharedPtrNoGIL& operator=(...) + cdef cppclass UniquePtrNoGIL[T, DELETER=*](unique_ptr[T, DELETER]): + UniquePtrNoGIL& operator=(...) + + +cdef extern from "arrow/python/inference.h" namespace "arrow::py": + c_bool IsPyBool(object o) + c_bool IsPyInt(object o) + c_bool IsPyFloat(object o) + + +cdef extern from "arrow/python/ipc.h" namespace "arrow::py": + cdef cppclass CPyRecordBatchReader" arrow::py::PyRecordBatchReader" \ + (CRecordBatchReader): + @staticmethod + CResult[shared_ptr[CRecordBatchReader]] Make(shared_ptr[CSchema], + object) + + +cdef extern from "arrow/python/ipc.h" namespace "arrow::py" nogil: + cdef cppclass CCastingRecordBatchReader" arrow::py::CastingRecordBatchReader" \ + (CRecordBatchReader): + @staticmethod + CResult[shared_ptr[CRecordBatchReader]] Make(shared_ptr[CRecordBatchReader], + shared_ptr[CSchema]) + + +cdef extern from "arrow/python/extension_type.h" namespace "arrow::py": + cdef cppclass CPyExtensionType \ + " arrow::py::PyExtensionType"(CExtensionType): + @staticmethod + CStatus FromClass(const shared_ptr[CDataType] storage_type, + const c_string extension_name, object typ, + shared_ptr[CExtensionType]* out) + + @staticmethod + CStatus FromInstance(shared_ptr[CDataType] storage_type, + object inst, shared_ptr[CExtensionType]* out) + + object GetInstance() + CStatus SetInstance(object) + + c_string PyExtensionName() + CStatus RegisterPyExtensionType(shared_ptr[CDataType]) + CStatus UnregisterPyExtensionType(c_string type_name) + + +cdef extern from "arrow/python/benchmark.h" namespace "arrow::py::benchmark": + void Benchmark_PandasObjectIsNull(object lst) except * + + +cdef extern from "arrow/python/gdb.h" namespace "arrow::gdb" nogil: + void GdbTestSession "arrow::gdb::TestSession"() diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c41f4c05d3a77ca4a3ba163b27d9df9f9f234767 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd @@ -0,0 +1,77 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from libcpp.vector cimport vector as std_vector + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_acero cimport * + +ctypedef CResult[CDeclaration] CNamedTableProvider(const std_vector[c_string]&, const CSchema&) + +cdef extern from "arrow/engine/substrait/options.h" namespace "arrow::engine" nogil: + cdef enum ConversionStrictness \ + "arrow::engine::ConversionStrictness": + EXACT_ROUNDTRIP \ + "arrow::engine::ConversionStrictness::EXACT_ROUNDTRIP" + PRESERVE_STRUCTURE \ + "arrow::engine::ConversionStrictness::PRESERVE_STRUCTURE" + BEST_EFFORT \ + "arrow::engine::ConversionStrictness::BEST_EFFORT" + + cdef cppclass CConversionOptions \ + "arrow::engine::ConversionOptions": + CConversionOptions() + ConversionStrictness strictness + function[CNamedTableProvider] named_table_provider + c_bool allow_arrow_extensions + +cdef extern from "arrow/engine/substrait/extension_set.h" \ + namespace "arrow::engine" nogil: + + cdef cppclass ExtensionIdRegistry: + std_vector[c_string] GetSupportedSubstraitFunctions() + + ExtensionIdRegistry* default_extension_id_registry() + +cdef extern from "arrow/engine/substrait/relation.h" namespace "arrow::engine" nogil: + + cdef cppclass CNamedExpression "arrow::engine::NamedExpression": + CExpression expression + c_string name + + cdef cppclass CBoundExpressions "arrow::engine::BoundExpressions": + std_vector[CNamedExpression] named_expressions + shared_ptr[CSchema] schema + +cdef extern from "arrow/engine/substrait/serde.h" namespace "arrow::engine" nogil: + + CResult[shared_ptr[CBuffer]] SerializeExpressions( + const CBoundExpressions& bound_expressions, const CConversionOptions& conversion_options) + + CResult[CBoundExpressions] DeserializeExpressions( + const CBuffer& serialized_expressions) + +cdef extern from "arrow/engine/substrait/util.h" namespace "arrow::engine" nogil: + CResult[shared_ptr[CRecordBatchReader]] ExecuteSerializedPlan( + const CBuffer& substrait_buffer, const ExtensionIdRegistry* registry, + CFunctionRegistry* func_registry, const CConversionOptions& conversion_options, + c_bool use_threads) + + CResult[shared_ptr[CBuffer]] SerializeJsonPlan(const c_string& substrait_json) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd new file mode 100644 index 0000000000000000000000000000000000000000..7d76576bef2b9513f53c20c55ec92bb01c8b2766 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd @@ -0,0 +1,298 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from libcpp.string cimport string as c_string +from libcpp.unordered_set cimport unordered_set as c_unordered_set +from libc.stdint cimport int64_t, int32_t, uint8_t, uintptr_t + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * + +cdef extern from "gandiva/node.h" namespace "gandiva" nogil: + + cdef cppclass CNode" gandiva::Node": + c_string ToString() + shared_ptr[CDataType] return_type() + + cdef cppclass CGandivaExpression" gandiva::Expression": + c_string ToString() + shared_ptr[CNode] root() + shared_ptr[CField] result() + + ctypedef vector[shared_ptr[CNode]] CNodeVector" gandiva::NodeVector" + + ctypedef vector[shared_ptr[CGandivaExpression]] \ + CExpressionVector" gandiva::ExpressionVector" + +cdef extern from "gandiva/selection_vector.h" namespace "gandiva" nogil: + + cdef cppclass CSelectionVector" gandiva::SelectionVector": + + shared_ptr[CArray] ToArray() + + enum CSelectionVector_Mode" gandiva::SelectionVector::Mode": + CSelectionVector_Mode_NONE" gandiva::SelectionVector::Mode::MODE_NONE" + CSelectionVector_Mode_UINT16" \ + gandiva::SelectionVector::Mode::MODE_UINT16" + CSelectionVector_Mode_UINT32" \ + gandiva::SelectionVector::Mode::MODE_UINT32" + CSelectionVector_Mode_UINT64" \ + gandiva::SelectionVector::Mode::MODE_UINT64" + + cdef CStatus SelectionVector_MakeInt16\ + "gandiva::SelectionVector::MakeInt16"( + int64_t max_slots, CMemoryPool* pool, + shared_ptr[CSelectionVector]* selection_vector) + + cdef CStatus SelectionVector_MakeInt32\ + "gandiva::SelectionVector::MakeInt32"( + int64_t max_slots, CMemoryPool* pool, + shared_ptr[CSelectionVector]* selection_vector) + + cdef CStatus SelectionVector_MakeInt64\ + "gandiva::SelectionVector::MakeInt64"( + int64_t max_slots, CMemoryPool* pool, + shared_ptr[CSelectionVector]* selection_vector) + +cdef inline CSelectionVector_Mode _ensure_selection_mode(str name) except *: + uppercase = name.upper() + if uppercase == 'NONE': + return CSelectionVector_Mode_NONE + elif uppercase == 'UINT16': + return CSelectionVector_Mode_UINT16 + elif uppercase == 'UINT32': + return CSelectionVector_Mode_UINT32 + elif uppercase == 'UINT64': + return CSelectionVector_Mode_UINT64 + else: + raise ValueError('Invalid value for Selection Mode: {!r}'.format(name)) + +cdef inline str _selection_mode_name(CSelectionVector_Mode ctype): + if ctype == CSelectionVector_Mode_NONE: + return 'NONE' + elif ctype == CSelectionVector_Mode_UINT16: + return 'UINT16' + elif ctype == CSelectionVector_Mode_UINT32: + return 'UINT32' + elif ctype == CSelectionVector_Mode_UINT64: + return 'UINT64' + else: + raise RuntimeError('Unexpected CSelectionVector_Mode value') + +cdef extern from "gandiva/condition.h" namespace "gandiva" nogil: + + cdef cppclass CCondition" gandiva::Condition": + c_string ToString() + shared_ptr[CNode] root() + shared_ptr[CField] result() + +cdef extern from "gandiva/arrow.h" namespace "gandiva" nogil: + + ctypedef vector[shared_ptr[CArray]] CArrayVector" gandiva::ArrayVector" + + +cdef extern from "gandiva/tree_expr_builder.h" namespace "gandiva" nogil: + + cdef 
shared_ptr[CNode] TreeExprBuilder_MakeBoolLiteral \ + "gandiva::TreeExprBuilder::MakeLiteral"(c_bool value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt8Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(uint8_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt16Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(uint16_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt32Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(uint32_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt64Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(uint64_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt8Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int8_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt16Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int16_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt32Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int32_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInt64Literal \ + "gandiva::TreeExprBuilder::MakeLiteral"(int64_t value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeFloatLiteral \ + "gandiva::TreeExprBuilder::MakeLiteral"(float value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeDoubleLiteral \ + "gandiva::TreeExprBuilder::MakeLiteral"(double value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeStringLiteral \ + "gandiva::TreeExprBuilder::MakeStringLiteral"(const c_string& value) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeBinaryLiteral \ + "gandiva::TreeExprBuilder::MakeBinaryLiteral"(const c_string& value) + + cdef shared_ptr[CGandivaExpression] TreeExprBuilder_MakeExpression\ + "gandiva::TreeExprBuilder::MakeExpression"( + shared_ptr[CNode] root_node, shared_ptr[CField] result_field) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeFunction \ + "gandiva::TreeExprBuilder::MakeFunction"( + const c_string& name, const CNodeVector& children, + shared_ptr[CDataType] return_type) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeField \ + "gandiva::TreeExprBuilder::MakeField"(shared_ptr[CField] field) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeIf \ + "gandiva::TreeExprBuilder::MakeIf"( + shared_ptr[CNode] condition, shared_ptr[CNode] this_node, + shared_ptr[CNode] else_node, shared_ptr[CDataType] return_type) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeAnd \ + "gandiva::TreeExprBuilder::MakeAnd"(const CNodeVector& children) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeOr \ + "gandiva::TreeExprBuilder::MakeOr"(const CNodeVector& children) + + cdef shared_ptr[CCondition] TreeExprBuilder_MakeCondition \ + "gandiva::TreeExprBuilder::MakeCondition"( + shared_ptr[CNode] condition) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionInt32 \ + "gandiva::TreeExprBuilder::MakeInExpressionInt32"( + shared_ptr[CNode] node, const c_unordered_set[int32_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionInt64 \ + "gandiva::TreeExprBuilder::MakeInExpressionInt64"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTime32 \ + "gandiva::TreeExprBuilder::MakeInExpressionTime32"( + shared_ptr[CNode] node, const c_unordered_set[int32_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTime64 \ + "gandiva::TreeExprBuilder::MakeInExpressionTime64"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionDate32 \ + "gandiva::TreeExprBuilder::MakeInExpressionDate32"( + shared_ptr[CNode] node, 
const c_unordered_set[int32_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionDate64 \ + "gandiva::TreeExprBuilder::MakeInExpressionDate64"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTimeStamp \ + "gandiva::TreeExprBuilder::MakeInExpressionTimeStamp"( + shared_ptr[CNode] node, const c_unordered_set[int64_t]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionString \ + "gandiva::TreeExprBuilder::MakeInExpressionString"( + shared_ptr[CNode] node, const c_unordered_set[c_string]& values) + + cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionBinary \ + "gandiva::TreeExprBuilder::MakeInExpressionBinary"( + shared_ptr[CNode] node, const c_unordered_set[c_string]& values) + +cdef extern from "gandiva/projector.h" namespace "gandiva" nogil: + + cdef cppclass CProjector" gandiva::Projector": + + CStatus Evaluate( + const CRecordBatch& batch, CMemoryPool* pool, + const CArrayVector* output) + + CStatus Evaluate( + const CRecordBatch& batch, + const CSelectionVector* selection, + CMemoryPool* pool, + const CArrayVector* output) + + c_string DumpIR() + + cdef CStatus Projector_Make \ + "gandiva::Projector::Make"( + shared_ptr[CSchema] schema, const CExpressionVector& children, + shared_ptr[CProjector]* projector) + + cdef CStatus Projector_Make \ + "gandiva::Projector::Make"( + shared_ptr[CSchema] schema, const CExpressionVector& children, + CSelectionVector_Mode mode, + shared_ptr[CConfiguration] configuration, + shared_ptr[CProjector]* projector) + +cdef extern from "gandiva/filter.h" namespace "gandiva" nogil: + + cdef cppclass CFilter" gandiva::Filter": + + CStatus Evaluate( + const CRecordBatch& batch, + shared_ptr[CSelectionVector] out_selection) + + c_string DumpIR() + + cdef CStatus Filter_Make \ + "gandiva::Filter::Make"( + shared_ptr[CSchema] schema, shared_ptr[CCondition] condition, + shared_ptr[CConfiguration] configuration, + shared_ptr[CFilter]* filter) + +cdef extern from "gandiva/function_signature.h" namespace "gandiva" nogil: + + cdef cppclass CFunctionSignature" gandiva::FunctionSignature": + + CFunctionSignature(const c_string& base_name, + vector[shared_ptr[CDataType]] param_types, + shared_ptr[CDataType] ret_type) + + shared_ptr[CDataType] ret_type() const + + const c_string& base_name() const + + vector[shared_ptr[CDataType]] param_types() const + + c_string ToString() const + +cdef extern from "gandiva/expression_registry.h" namespace "gandiva" nogil: + + cdef vector[shared_ptr[CFunctionSignature]] \ + GetRegisteredFunctionSignatures() + +cdef extern from "gandiva/configuration.h" namespace "gandiva" nogil: + + cdef cppclass CConfiguration" gandiva::Configuration": + + CConfiguration() + + CConfiguration(bint optimize, bint dump_ir) + + void set_optimize(bint optimize) + + void set_dump_ir(bint dump_ir) + + cdef cppclass CConfigurationBuilder \ + " gandiva::ConfigurationBuilder": + @staticmethod + shared_ptr[CConfiguration] DefaultConfiguration() + + CConfigurationBuilder() + + shared_ptr[CConfiguration] build() diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd new file mode 100644 index 0000000000000000000000000000000000000000..2b40414ce538319dc66d5a2e7a58fc28cb93770e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd @@ -0,0 +1,130 @@ +# Licensed to the Apache Software 
Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language = c++ + +from pyarrow.includes.common cimport * +from pyarrow._parquet cimport (ParquetCipher, + CFileEncryptionProperties, + CFileDecryptionProperties, + ParquetCipher_AES_GCM_V1, + ParquetCipher_AES_GCM_CTR_V1) + + +cdef extern from "parquet/encryption/kms_client.h" \ + namespace "parquet::encryption" nogil: + cdef cppclass CKmsClient" parquet::encryption::KmsClient": + c_string WrapKey(const c_string& key_bytes, + const c_string& master_key_identifier) except + + c_string UnwrapKey(const c_string& wrapped_key, + const c_string& master_key_identifier) except + + + cdef cppclass CKeyAccessToken" parquet::encryption::KeyAccessToken": + CKeyAccessToken(const c_string value) + void Refresh(const c_string& new_value) + const c_string& value() const + + cdef cppclass CKmsConnectionConfig \ + " parquet::encryption::KmsConnectionConfig": + CKmsConnectionConfig() + c_string kms_instance_id + c_string kms_instance_url + shared_ptr[CKeyAccessToken] refreshable_key_access_token + unordered_map[c_string, c_string] custom_kms_conf + +# Callbacks for implementing Python kms clients +# Use typedef to emulate syntax for std::function +ctypedef void CallbackWrapKey( + object, const c_string&, const c_string&, c_string*) +ctypedef void CallbackUnwrapKey( + object, const c_string&, const c_string&, c_string*) + +cdef extern from "parquet/encryption/kms_client_factory.h" \ + namespace "parquet::encryption" nogil: + cdef cppclass CKmsClientFactory" parquet::encryption::KmsClientFactory": + shared_ptr[CKmsClient] CreateKmsClient( + const CKmsConnectionConfig& kms_connection_config) except + + +# Callbacks for implementing Python kms client factories +# Use typedef to emulate syntax for std::function +ctypedef void CallbackCreateKmsClient( + object, + const CKmsConnectionConfig&, shared_ptr[CKmsClient]*) + +cdef extern from "parquet/encryption/crypto_factory.h" \ + namespace "parquet::encryption" nogil: + cdef cppclass CEncryptionConfiguration\ + " parquet::encryption::EncryptionConfiguration": + CEncryptionConfiguration(const c_string& footer_key) except + + c_string footer_key + c_string column_keys + ParquetCipher encryption_algorithm + c_bool plaintext_footer + c_bool double_wrapping + double cache_lifetime_seconds + c_bool internal_key_material + int32_t data_key_length_bits + + cdef cppclass CDecryptionConfiguration\ + " parquet::encryption::DecryptionConfiguration": + CDecryptionConfiguration() except + + double cache_lifetime_seconds + + cdef cppclass CCryptoFactory" parquet::encryption::CryptoFactory": + void RegisterKmsClientFactory( + shared_ptr[CKmsClientFactory] kms_client_factory) except + + shared_ptr[CFileEncryptionProperties] GetFileEncryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const 
CEncryptionConfiguration& encryption_config) except +* + shared_ptr[CFileDecryptionProperties] GetFileDecryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const CDecryptionConfiguration& decryption_config) except +* + void RemoveCacheEntriesForToken(const c_string& access_token) except + + void RemoveCacheEntriesForAllTokens() except + + +cdef extern from "arrow/python/parquet_encryption.h" \ + namespace "arrow::py::parquet::encryption" nogil: + cdef cppclass CPyKmsClientVtable \ + " arrow::py::parquet::encryption::PyKmsClientVtable": + CPyKmsClientVtable() + function[CallbackWrapKey] wrap_key + function[CallbackUnwrapKey] unwrap_key + + cdef cppclass CPyKmsClient\ + " arrow::py::parquet::encryption::PyKmsClient"(CKmsClient): + CPyKmsClient(object handler, CPyKmsClientVtable vtable) + + cdef cppclass CPyKmsClientFactoryVtable\ + " arrow::py::parquet::encryption::PyKmsClientFactoryVtable": + CPyKmsClientFactoryVtable() + function[CallbackCreateKmsClient] create_kms_client + + cdef cppclass CPyKmsClientFactory\ + " arrow::py::parquet::encryption::PyKmsClientFactory"( + CKmsClientFactory): + CPyKmsClientFactory(object handler, CPyKmsClientFactoryVtable vtable) + + cdef cppclass CPyCryptoFactory\ + " arrow::py::parquet::encryption::PyCryptoFactory"(CCryptoFactory): + CResult[shared_ptr[CFileEncryptionProperties]] \ + SafeGetFileEncryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const CEncryptionConfiguration& encryption_config) + CResult[shared_ptr[CFileDecryptionProperties]] \ + SafeGetFileDecryptionProperties( + const CKmsConnectionConfig& kms_connection_config, + const CDecryptionConfiguration& decryption_config) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 b/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 new file mode 100644 index 0000000000000000000000000000000000000000..5db69fe303824544f2919fb711fe74ffd57f4441 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7380496d2fddf51b9d1fd0671117c8ab07f2cae6b2b0dc0186a46cabb8e7859 +size 19654024 diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_array.py b/llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_array.py new file mode 100644 index 0000000000000000000000000000000000000000..156d58326b961718d7ca6ed85345eac8c2b5dfa8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_array.py @@ -0,0 +1,3881 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
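For context on the libparquet_encryption.pxd hunk above: those declarations only expose the C++ KMS/CryptoFactory surface to Cython, and the CPyKmsClientVtable / CPyKmsClientFactoryVtable shims are what forward Python-defined wrap_key / unwrap_key callbacks into the C++ KmsClient interface. The sketch below shows roughly how that surface is driven from the user-facing pyarrow.parquet.encryption module; it is written from the documented pyarrow API rather than from this diff, and the InMemoryKmsClient class, the "footer-key" identifier, the "col" column and the output path are made-up illustration values.

import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.parquet.encryption as pe

# Toy KMS client for illustration only: "wraps" a data key by prefixing it
# with the master key identifier. A real client would call a KMS service.
class InMemoryKmsClient(pe.KmsClient):
    def __init__(self, kms_connection_config):
        super().__init__()

    def wrap_key(self, key_bytes, master_key_identifier):
        return master_key_identifier.encode() + b":" + key_bytes

    def unwrap_key(self, wrapped_key, master_key_identifier):
        return wrapped_key.split(b":", 1)[1]

kms_config = pe.KmsConnectionConfig()
crypto_factory = pe.CryptoFactory(lambda config: InMemoryKmsClient(config))

table = pa.table({"col": [1, 2, 3]})
encryption_config = pe.EncryptionConfiguration(
    footer_key="footer-key",
    column_keys={"footer-key": ["col"]})
file_encryption_props = crypto_factory.file_encryption_properties(
    kms_config, encryption_config)
with pq.ParquetWriter("/tmp/encrypted.parquet", table.schema,
                      encryption_properties=file_encryption_props) as writer:
    writer.write_table(table)

file_decryption_props = crypto_factory.file_decryption_properties(
    kms_config, pe.DecryptionConfiguration())
restored = pq.ParquetFile("/tmp/encrypted.parquet",
                          decryption_properties=file_decryption_props).read()
assert restored.equals(table)

In this sketch, file_encryption_properties / file_decryption_properties correspond to the GetFileEncryptionProperties / GetFileDecryptionProperties methods declared above, while the Safe* variants on CPyCryptoFactory return CResult so that errors raised in the Python KMS client can propagate back as exceptions.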
+ +from collections.abc import Iterable +import datetime +import decimal +import hypothesis as h +import hypothesis.strategies as st +import itertools +import pytest +import struct +import subprocess +import sys +import weakref + +import numpy as np + +import pyarrow as pa +import pyarrow.tests.strategies as past +from pyarrow.vendored.version import Version + + +def test_total_bytes_allocated(): + code = """if 1: + import pyarrow as pa + + assert pa.total_allocated_bytes() == 0 + """ + res = subprocess.run([sys.executable, "-c", code], + universal_newlines=True, stderr=subprocess.PIPE) + if res.returncode != 0: + print(res.stderr, file=sys.stderr) + res.check_returncode() # fail + assert len(res.stderr.splitlines()) == 0 + + +def test_weakref(): + arr = pa.array([1, 2, 3]) + wr = weakref.ref(arr) + assert wr() is not None + del arr + assert wr() is None + + +def test_getitem_NULL(): + arr = pa.array([1, None, 2]) + assert arr[1].as_py() is None + assert arr[1].is_valid is False + assert isinstance(arr[1], pa.Int64Scalar) + + +def test_constructor_raises(): + # This could happen by wrong capitalization. + # ARROW-2638: prevent calling extension class constructors directly + with pytest.raises(TypeError): + pa.Array([1, 2]) + + +def test_list_format(): + arr = pa.array([[1], None, [2, 3, None]]) + result = arr.to_string() + expected = """\ +[ + [ + 1 + ], + null, + [ + 2, + 3, + null + ] +]""" + assert result == expected + + +def test_string_format(): + arr = pa.array(['', None, 'foo']) + result = arr.to_string() + expected = """\ +[ + "", + null, + "foo" +]""" + assert result == expected + + +def test_long_array_format(): + arr = pa.array(range(100)) + result = arr.to_string(window=2) + expected = """\ +[ + 0, + 1, + ... + 98, + 99 +]""" + assert result == expected + + +def test_indented_string_format(): + arr = pa.array(['', None, 'foo']) + result = arr.to_string(indent=1) + expected = '[\n "",\n null,\n "foo"\n]' + + assert result == expected + + +def test_top_level_indented_string_format(): + arr = pa.array(['', None, 'foo']) + result = arr.to_string(top_level_indent=1) + expected = ' [\n "",\n null,\n "foo"\n ]' + + assert result == expected + + +def test_binary_format(): + arr = pa.array([b'\x00', b'', None, b'\x01foo', b'\x80\xff']) + result = arr.to_string() + expected = """\ +[ + 00, + , + null, + 01666F6F, + 80FF +]""" + assert result == expected + + +def test_binary_total_values_length(): + arr = pa.array([b'0000', None, b'11111', b'222222', b'3333333'], + type='binary') + large_arr = pa.array([b'0000', None, b'11111', b'222222', b'3333333'], + type='large_binary') + + assert arr.total_values_length == 22 + assert arr.slice(1, 3).total_values_length == 11 + assert large_arr.total_values_length == 22 + assert large_arr.slice(1, 3).total_values_length == 11 + + +def test_to_numpy_zero_copy(): + arr = pa.array(range(10)) + + np_arr = arr.to_numpy() + + # check for zero copy (both arrays using same memory) + arrow_buf = arr.buffers()[1] + assert arrow_buf.address == np_arr.ctypes.data + + arr = None + import gc + gc.collect() + + # Ensure base is still valid + assert np_arr.base is not None + expected = np.arange(10) + np.testing.assert_array_equal(np_arr, expected) + + +def test_chunked_array_to_numpy_zero_copy(): + elements = [[2, 2, 4], [4, 5, 100]] + + chunked_arr = pa.chunked_array(elements) + + msg = "zero_copy_only must be False for pyarrow.ChunkedArray.to_numpy" + + with pytest.raises(ValueError, match=msg): + chunked_arr.to_numpy(zero_copy_only=True) + + np_arr = 
chunked_arr.to_numpy() + expected = [2, 2, 4, 4, 5, 100] + np.testing.assert_array_equal(np_arr, expected) + + +def test_to_numpy_unsupported_types(): + # ARROW-2871: Some primitive types are not yet supported in to_numpy + bool_arr = pa.array([True, False, True]) + + with pytest.raises(ValueError): + bool_arr.to_numpy() + + result = bool_arr.to_numpy(zero_copy_only=False) + expected = np.array([True, False, True]) + np.testing.assert_array_equal(result, expected) + + null_arr = pa.array([None, None, None]) + + with pytest.raises(ValueError): + null_arr.to_numpy() + + result = null_arr.to_numpy(zero_copy_only=False) + expected = np.array([None, None, None], dtype=object) + np.testing.assert_array_equal(result, expected) + + arr = pa.array([1, 2, None]) + + with pytest.raises(ValueError, match="with 1 nulls"): + arr.to_numpy() + + +def test_to_numpy_writable(): + arr = pa.array(range(10)) + np_arr = arr.to_numpy() + + # by default not writable for zero-copy conversion + with pytest.raises(ValueError): + np_arr[0] = 10 + + np_arr2 = arr.to_numpy(zero_copy_only=False, writable=True) + np_arr2[0] = 10 + assert arr[0].as_py() == 0 + + # when asking for writable, cannot do zero-copy + with pytest.raises(ValueError): + arr.to_numpy(zero_copy_only=True, writable=True) + + +@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns']) +@pytest.mark.parametrize('tz', [None, "UTC"]) +def test_to_numpy_datetime64(unit, tz): + arr = pa.array([1, 2, 3], pa.timestamp(unit, tz=tz)) + expected = np.array([1, 2, 3], dtype="datetime64[{}]".format(unit)) + np_arr = arr.to_numpy() + np.testing.assert_array_equal(np_arr, expected) + + +@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns']) +def test_to_numpy_timedelta64(unit): + arr = pa.array([1, 2, 3], pa.duration(unit)) + expected = np.array([1, 2, 3], dtype="timedelta64[{}]".format(unit)) + np_arr = arr.to_numpy() + np.testing.assert_array_equal(np_arr, expected) + + +def test_to_numpy_dictionary(): + # ARROW-7591 + arr = pa.array(["a", "b", "a"]).dictionary_encode() + expected = np.array(["a", "b", "a"], dtype=object) + np_arr = arr.to_numpy(zero_copy_only=False) + np.testing.assert_array_equal(np_arr, expected) + + +@pytest.mark.pandas +def test_to_pandas_zero_copy(): + import gc + + arr = pa.array(range(10)) + + for i in range(10): + series = arr.to_pandas() + assert sys.getrefcount(series) == 2 + series = None # noqa + + assert sys.getrefcount(arr) == 2 + + for i in range(10): + arr = pa.array(range(10)) + series = arr.to_pandas() + arr = None + gc.collect() + + # Ensure base is still valid + + # Because of py.test's assert inspection magic, if you put getrefcount + # on the line being examined, it will be 1 higher than you expect + base_refcount = sys.getrefcount(series.values.base) + assert base_refcount == 2 + series.sum() + + +@pytest.mark.nopandas +@pytest.mark.pandas +def test_asarray(): + # ensure this is tested both when pandas is present or not (ARROW-6564) + + arr = pa.array(range(4)) + + # The iterator interface gives back an array of Int64Value's + np_arr = np.asarray([_ for _ in arr]) + assert np_arr.tolist() == [0, 1, 2, 3] + assert np_arr.dtype == np.dtype('O') + assert isinstance(np_arr[0], pa.lib.Int64Value) + + # Calling with the arrow array gives back an array with 'int64' dtype + np_arr = np.asarray(arr) + assert np_arr.tolist() == [0, 1, 2, 3] + assert np_arr.dtype == np.dtype('int64') + + # An optional type can be specified when calling np.asarray + np_arr = np.asarray(arr, dtype='str') + assert np_arr.tolist() == ['0', '1', '2', 
'3'] + + # If PyArrow array has null values, numpy type will be changed as needed + # to support nulls. + arr = pa.array([0, 1, 2, None]) + assert arr.type == pa.int64() + np_arr = np.asarray(arr) + elements = np_arr.tolist() + assert elements[:3] == [0., 1., 2.] + assert np.isnan(elements[3]) + assert np_arr.dtype == np.dtype('float64') + + # DictionaryType data will be converted to dense numpy array + arr = pa.DictionaryArray.from_arrays( + pa.array([0, 1, 2, 0, 1]), pa.array(['a', 'b', 'c'])) + np_arr = np.asarray(arr) + assert np_arr.dtype == np.dtype('object') + assert np_arr.tolist() == ['a', 'b', 'c', 'a', 'b'] + + +@pytest.mark.parametrize('ty', [ + None, + pa.null(), + pa.int8(), + pa.string() +]) +def test_nulls(ty): + arr = pa.nulls(3, type=ty) + expected = pa.array([None, None, None], type=ty) + + assert len(arr) == 3 + assert arr.equals(expected) + + if ty is None: + assert arr.type == pa.null() + else: + assert arr.type == ty + + +def test_array_from_scalar(): + pytz = pytest.importorskip("pytz") + + today = datetime.date.today() + now = datetime.datetime.now() + now_utc = now.replace(tzinfo=pytz.utc) + now_with_tz = now_utc.astimezone(pytz.timezone('US/Eastern')) + oneday = datetime.timedelta(days=1) + + cases = [ + (None, 1, pa.array([None])), + (None, 10, pa.nulls(10)), + (-1, 3, pa.array([-1, -1, -1], type=pa.int64())), + (2.71, 2, pa.array([2.71, 2.71], type=pa.float64())), + ("string", 4, pa.array(["string"] * 4)), + ( + pa.scalar(8, type=pa.uint8()), + 17, + pa.array([8] * 17, type=pa.uint8()) + ), + (pa.scalar(None), 3, pa.array([None, None, None])), + (pa.scalar(True), 11, pa.array([True] * 11)), + (today, 2, pa.array([today] * 2)), + (now, 10, pa.array([now] * 10)), + ( + now_with_tz, + 2, + pa.array( + [now_utc] * 2, + type=pa.timestamp('us', tz=pytz.timezone('US/Eastern')) + ) + ), + (now.time(), 9, pa.array([now.time()] * 9)), + (oneday, 4, pa.array([oneday] * 4)), + (False, 9, pa.array([False] * 9)), + ([1, 2], 2, pa.array([[1, 2], [1, 2]])), + ( + pa.scalar([-1, 3], type=pa.large_list(pa.int8())), + 5, + pa.array([[-1, 3]] * 5, type=pa.large_list(pa.int8())) + ), + ({'a': 1, 'b': 2}, 3, pa.array([{'a': 1, 'b': 2}] * 3)) + ] + + for value, size, expected in cases: + arr = pa.repeat(value, size) + assert len(arr) == size + assert arr.type.equals(expected.type) + assert arr.equals(expected) + if expected.type == pa.null(): + assert arr.null_count == size + else: + assert arr.null_count == 0 + + +def test_array_from_dictionary_scalar(): + dictionary = ['foo', 'bar', 'baz'] + arr = pa.DictionaryArray.from_arrays([2, 1, 2, 0], dictionary=dictionary) + + result = pa.repeat(arr[0], 5) + expected = pa.DictionaryArray.from_arrays([2] * 5, dictionary=dictionary) + assert result.equals(expected) + + result = pa.repeat(arr[3], 5) + expected = pa.DictionaryArray.from_arrays([0] * 5, dictionary=dictionary) + assert result.equals(expected) + + +def test_array_getitem(): + arr = pa.array(range(10, 15)) + lst = arr.to_pylist() + + for idx in range(-len(arr), len(arr)): + assert arr[idx].as_py() == lst[idx] + for idx in range(-2 * len(arr), -len(arr)): + with pytest.raises(IndexError): + arr[idx] + for idx in range(len(arr), 2 * len(arr)): + with pytest.raises(IndexError): + arr[idx] + + # check that numpy scalars are supported + for idx in range(-len(arr), len(arr)): + assert arr[np.int32(idx)].as_py() == lst[idx] + + +def test_array_slice(): + arr = pa.array(range(10)) + + sliced = arr.slice(2) + expected = pa.array(range(2, 10)) + assert sliced.equals(expected) + + sliced2 = 
arr.slice(2, 4) + expected2 = pa.array(range(2, 6)) + assert sliced2.equals(expected2) + + # 0 offset + assert arr.slice(0).equals(arr) + + # Slice past end of array + assert len(arr.slice(len(arr))) == 0 + assert len(arr.slice(len(arr) + 2)) == 0 + assert len(arr.slice(len(arr) + 2, 100)) == 0 + + with pytest.raises(IndexError): + arr.slice(-1) + + with pytest.raises(ValueError): + arr.slice(2, -1) + + # Test slice notation + assert arr[2:].equals(arr.slice(2)) + assert arr[2:5].equals(arr.slice(2, 3)) + assert arr[-5:].equals(arr.slice(len(arr) - 5)) + + n = len(arr) + for start in range(-n * 2, n * 2): + for stop in range(-n * 2, n * 2): + res = arr[start:stop] + res.validate() + expected = arr.to_pylist()[start:stop] + assert res.to_pylist() == expected + assert res.to_numpy().tolist() == expected + + +def test_array_slice_negative_step(): + # ARROW-2714 + np_arr = np.arange(20) + arr = pa.array(np_arr) + chunked_arr = pa.chunked_array([arr]) + + cases = [ + slice(None, None, -1), + slice(None, 6, -2), + slice(10, 6, -2), + slice(8, None, -2), + slice(2, 10, -2), + slice(10, 2, -2), + slice(None, None, 2), + slice(0, 10, 2), + slice(15, -25, -1), # GH-38768 + slice(-22, -22, -1), # GH-40642 + ] + + for case in cases: + result = arr[case] + expected = pa.array(np_arr[case]) + assert result.equals(expected) + + result = pa.record_batch([arr], names=['f0'])[case] + expected = pa.record_batch([expected], names=['f0']) + assert result.equals(expected) + + result = chunked_arr[case] + expected = pa.chunked_array([np_arr[case]]) + assert result.equals(expected) + + +def test_array_diff(): + # ARROW-6252 + arr1 = pa.array(['foo'], type=pa.utf8()) + arr2 = pa.array(['foo', 'bar', None], type=pa.utf8()) + arr3 = pa.array([1, 2, 3]) + arr4 = pa.array([[], [1], None], type=pa.list_(pa.int64())) + + assert arr1.diff(arr1) == '' + assert arr1.diff(arr2) == ''' +@@ -1, +1 @@ ++"bar" ++null +''' + assert arr1.diff(arr3).strip() == '# Array types differed: string vs int64' + assert arr1.diff(arr3).strip() == '# Array types differed: string vs int64' + assert arr1.diff(arr4).strip() == ('# Array types differed: string vs ' + 'list') + + +def test_array_iter(): + arr = pa.array(range(10)) + + for i, j in zip(range(10), arr): + assert i == j.as_py() + + assert isinstance(arr, Iterable) + + +def test_struct_array_slice(): + # ARROW-2311: slicing nested arrays needs special care + ty = pa.struct([pa.field('a', pa.int8()), + pa.field('b', pa.float32())]) + arr = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty) + assert arr[1:].to_pylist() == [{'a': 3, 'b': 4.5}, + {'a': 5, 'b': 6.5}] + + +def test_array_factory_invalid_type(): + + class MyObject: + pass + + arr = np.array([MyObject()]) + with pytest.raises(ValueError): + pa.array(arr) + + +def test_array_ref_to_ndarray_base(): + arr = np.array([1, 2, 3]) + + refcount = sys.getrefcount(arr) + arr2 = pa.array(arr) # noqa + assert sys.getrefcount(arr) == (refcount + 1) + + +def test_array_eq(): + # ARROW-2150 / ARROW-9445: we define the __eq__ behavior to be + # data equality (not element-wise equality) + arr1 = pa.array([1, 2, 3], type=pa.int32()) + arr2 = pa.array([1, 2, 3], type=pa.int32()) + arr3 = pa.array([1, 2, 3], type=pa.int64()) + + assert (arr1 == arr2) is True + assert (arr1 != arr2) is False + assert (arr1 == arr3) is False + assert (arr1 != arr3) is True + + assert (arr1 == 1) is False + assert (arr1 == None) is False # noqa: E711 + + +def test_array_from_buffers(): + values_buf = pa.py_buffer(np.int16([4, 5, 6, 7])) + nulls_buf = 
pa.py_buffer(np.uint8([0b00001101])) + arr = pa.Array.from_buffers(pa.int16(), 4, [nulls_buf, values_buf]) + assert arr.type == pa.int16() + assert arr.to_pylist() == [4, None, 6, 7] + + arr = pa.Array.from_buffers(pa.int16(), 4, [None, values_buf]) + assert arr.type == pa.int16() + assert arr.to_pylist() == [4, 5, 6, 7] + + arr = pa.Array.from_buffers(pa.int16(), 3, [nulls_buf, values_buf], + offset=1) + assert arr.type == pa.int16() + assert arr.to_pylist() == [None, 6, 7] + + with pytest.raises(TypeError): + pa.Array.from_buffers(pa.int16(), 3, ['', ''], offset=1) + + +def test_string_binary_from_buffers(): + array = pa.array(["a", None, "b", "c"]) + + buffers = array.buffers() + copied = pa.StringArray.from_buffers( + len(array), buffers[1], buffers[2], buffers[0], array.null_count, + array.offset) + assert copied.to_pylist() == ["a", None, "b", "c"] + + binary_copy = pa.Array.from_buffers(pa.binary(), len(array), + array.buffers(), array.null_count, + array.offset) + assert binary_copy.to_pylist() == [b"a", None, b"b", b"c"] + + copied = pa.StringArray.from_buffers( + len(array), buffers[1], buffers[2], buffers[0]) + assert copied.to_pylist() == ["a", None, "b", "c"] + + sliced = array[1:] + buffers = sliced.buffers() + copied = pa.StringArray.from_buffers( + len(sliced), buffers[1], buffers[2], buffers[0], -1, sliced.offset) + assert copied.to_pylist() == [None, "b", "c"] + assert copied.null_count == 1 + + # Slice but exclude all null entries so that we don't need to pass + # the null bitmap. + sliced = array[2:] + buffers = sliced.buffers() + copied = pa.StringArray.from_buffers( + len(sliced), buffers[1], buffers[2], None, -1, sliced.offset) + assert copied.to_pylist() == ["b", "c"] + assert copied.null_count == 0 + + +@pytest.mark.parametrize('list_type_factory', [ + pa.list_, pa.large_list, pa.list_view, pa.large_list_view]) +def test_list_from_buffers(list_type_factory): + ty = list_type_factory(pa.int16()) + array = pa.array([[0, 1, 2], None, [], [3, 4, 5]], type=ty) + assert array.type == ty + + buffers = array.buffers() + + with pytest.raises(ValueError): + # No children + pa.Array.from_buffers(ty, 4, buffers[:ty.num_buffers]) + + child = pa.Array.from_buffers(pa.int16(), 6, buffers[ty.num_buffers:]) + copied = pa.Array.from_buffers(ty, 4, buffers[:ty.num_buffers], children=[child]) + assert copied.equals(array) + + with pytest.raises(ValueError): + # too many children + pa.Array.from_buffers(ty, 4, buffers[:ty.num_buffers], + children=[child, child]) + + +def test_struct_from_buffers(): + ty = pa.struct([pa.field('a', pa.int16()), pa.field('b', pa.utf8())]) + array = pa.array([{'a': 0, 'b': 'foo'}, None, {'a': 5, 'b': ''}], + type=ty) + buffers = array.buffers() + + with pytest.raises(ValueError): + # No children + pa.Array.from_buffers(ty, 3, [None, buffers[1]]) + + children = [pa.Array.from_buffers(pa.int16(), 3, buffers[1:3]), + pa.Array.from_buffers(pa.utf8(), 3, buffers[3:])] + copied = pa.Array.from_buffers(ty, 3, buffers[:1], children=children) + assert copied.equals(array) + + with pytest.raises(ValueError): + # not enough many children + pa.Array.from_buffers(ty, 3, [buffers[0]], + children=children[:1]) + + +def test_struct_from_arrays(): + a = pa.array([4, 5, 6], type=pa.int64()) + b = pa.array(["bar", None, ""]) + c = pa.array([[1, 2], None, [3, None]]) + expected_list = [ + {'a': 4, 'b': 'bar', 'c': [1, 2]}, + {'a': 5, 'b': None, 'c': None}, + {'a': 6, 'b': '', 'c': [3, None]}, + ] + + # From field names + arr = pa.StructArray.from_arrays([a, b, c], ["a", "b", 
"c"]) + assert arr.type == pa.struct( + [("a", a.type), ("b", b.type), ("c", c.type)]) + assert arr.to_pylist() == expected_list + + with pytest.raises(ValueError): + pa.StructArray.from_arrays([a, b, c], ["a", "b"]) + + arr = pa.StructArray.from_arrays([], []) + assert arr.type == pa.struct([]) + assert arr.to_pylist() == [] + + # From fields + fa = pa.field("a", a.type, nullable=False) + fb = pa.field("b", b.type) + fc = pa.field("c", c.type) + arr = pa.StructArray.from_arrays([a, b, c], fields=[fa, fb, fc]) + assert arr.type == pa.struct([fa, fb, fc]) + assert not arr.type[0].nullable + assert arr.to_pylist() == expected_list + + with pytest.raises(ValueError): + pa.StructArray.from_arrays([a, b, c], fields=[fa, fb]) + + arr = pa.StructArray.from_arrays([], fields=[]) + assert arr.type == pa.struct([]) + assert arr.to_pylist() == [] + + # Inconsistent fields + fa2 = pa.field("a", pa.int32()) + with pytest.raises(ValueError, match="int64 vs int32"): + pa.StructArray.from_arrays([a, b, c], fields=[fa2, fb, fc]) + + arrays = [a, b, c] + fields = [fa, fb, fc] + # With mask + mask = pa.array([True, False, False]) + arr = pa.StructArray.from_arrays(arrays, fields=fields, mask=mask) + assert arr.to_pylist() == [None] + expected_list[1:] + + arr = pa.StructArray.from_arrays(arrays, names=['a', 'b', 'c'], mask=mask) + assert arr.to_pylist() == [None] + expected_list[1:] + + # Bad masks + with pytest.raises(TypeError, match='Mask must be'): + pa.StructArray.from_arrays(arrays, fields, mask=[True, False, False]) + + with pytest.raises(ValueError, match='not contain nulls'): + pa.StructArray.from_arrays( + arrays, fields, mask=pa.array([True, False, None])) + + with pytest.raises(TypeError, match='Mask must be'): + pa.StructArray.from_arrays( + arrays, fields, mask=pa.chunked_array([mask])) + + # Non-empty array with no fields https://github.com/apache/arrow/issues/15109 + arr = pa.StructArray.from_arrays([], [], mask=mask) + assert arr.is_null() == mask + assert arr.to_pylist() == [None, {}, {}] + + +def test_struct_array_from_chunked(): + # ARROW-11780 + # Check that we don't segfault when trying to build + # a StructArray from a chunked array. 
+ chunked_arr = pa.chunked_array([[1, 2, 3], [4, 5, 6]]) + + with pytest.raises(TypeError, match="Expected Array"): + pa.StructArray.from_arrays([chunked_arr], ["foo"]) + + +@pytest.mark.parametrize("offset", (0, 1)) +def test_dictionary_from_buffers(offset): + a = pa.array(["one", "two", "three", "two", "one"]).dictionary_encode() + b = pa.DictionaryArray.from_buffers(a.type, len(a)-offset, + a.indices.buffers(), a.dictionary, + offset=offset) + assert a[offset:] == b + + +def test_dictionary_from_numpy(): + indices = np.repeat([0, 1, 2], 2) + dictionary = np.array(['foo', 'bar', 'baz'], dtype=object) + mask = np.array([False, False, True, False, False, False]) + + d1 = pa.DictionaryArray.from_arrays(indices, dictionary) + d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask) + + assert d1.indices.to_pylist() == indices.tolist() + assert d1.indices.to_pylist() == indices.tolist() + assert d1.dictionary.to_pylist() == dictionary.tolist() + assert d2.dictionary.to_pylist() == dictionary.tolist() + + for i in range(len(indices)): + assert d1[i].as_py() == dictionary[indices[i]] + + if mask[i]: + assert d2[i].as_py() is None + else: + assert d2[i].as_py() == dictionary[indices[i]] + + +def test_dictionary_to_numpy(): + expected = pa.array( + ["foo", "bar", None, "foo"] + ).to_numpy(zero_copy_only=False) + a = pa.DictionaryArray.from_arrays( + pa.array([0, 1, None, 0]), + pa.array(['foo', 'bar']) + ) + np.testing.assert_array_equal(a.to_numpy(zero_copy_only=False), + expected) + + with pytest.raises(pa.ArrowInvalid): + # If this would be changed to no longer raise in the future, + # ensure to test the actual result because, currently, to_numpy takes + # for granted that when zero_copy_only=True there will be no nulls + # (it's the decoding of the DictionaryArray that handles the nulls and + # this is only activated with zero_copy_only=False) + a.to_numpy(zero_copy_only=True) + + anonulls = pa.DictionaryArray.from_arrays( + pa.array([0, 1, 1, 0]), + pa.array(['foo', 'bar']) + ) + expected = pa.array( + ["foo", "bar", "bar", "foo"] + ).to_numpy(zero_copy_only=False) + np.testing.assert_array_equal(anonulls.to_numpy(zero_copy_only=False), + expected) + + with pytest.raises(pa.ArrowInvalid): + anonulls.to_numpy(zero_copy_only=True) + + afloat = pa.DictionaryArray.from_arrays( + pa.array([0, 1, 1, 0]), + pa.array([13.7, 11.0]) + ) + expected = pa.array([13.7, 11.0, 11.0, 13.7]).to_numpy() + np.testing.assert_array_equal(afloat.to_numpy(zero_copy_only=True), + expected) + np.testing.assert_array_equal(afloat.to_numpy(zero_copy_only=False), + expected) + + afloat2 = pa.DictionaryArray.from_arrays( + pa.array([0, 1, None, 0]), + pa.array([13.7, 11.0]) + ) + expected = pa.array( + [13.7, 11.0, None, 13.7] + ).to_numpy(zero_copy_only=False) + np.testing.assert_allclose( + afloat2.to_numpy(zero_copy_only=False), + expected, + equal_nan=True + ) + + # Testing for integers can reveal problems related to dealing + # with None values, as a numpy array of int dtype + # can't contain NaN nor None. 
+ aints = pa.DictionaryArray.from_arrays( + pa.array([0, 1, None, 0]), + pa.array([7, 11]) + ) + expected = pa.array([7, 11, None, 7]).to_numpy(zero_copy_only=False) + np.testing.assert_allclose( + aints.to_numpy(zero_copy_only=False), + expected, + equal_nan=True + ) + + +def test_dictionary_from_boxed_arrays(): + indices = np.repeat([0, 1, 2], 2) + dictionary = np.array(['foo', 'bar', 'baz'], dtype=object) + + iarr = pa.array(indices) + darr = pa.array(dictionary) + + d1 = pa.DictionaryArray.from_arrays(iarr, darr) + + assert d1.indices.to_pylist() == indices.tolist() + assert d1.dictionary.to_pylist() == dictionary.tolist() + + for i in range(len(indices)): + assert d1[i].as_py() == dictionary[indices[i]] + + +def test_dictionary_from_arrays_boundscheck(): + indices1 = pa.array([0, 1, 2, 0, 1, 2]) + indices2 = pa.array([0, -1, 2]) + indices3 = pa.array([0, 1, 2, 3]) + + dictionary = pa.array(['foo', 'bar', 'baz']) + + # Works fine + pa.DictionaryArray.from_arrays(indices1, dictionary) + + with pytest.raises(pa.ArrowException): + pa.DictionaryArray.from_arrays(indices2, dictionary) + + with pytest.raises(pa.ArrowException): + pa.DictionaryArray.from_arrays(indices3, dictionary) + + # If we are confident that the indices are "safe" we can pass safe=False to + # disable the boundschecking + pa.DictionaryArray.from_arrays(indices2, dictionary, safe=False) + + +def test_dictionary_indices(): + # https://issues.apache.org/jira/browse/ARROW-6882 + indices = pa.array([0, 1, 2, 0, 1, 2]) + dictionary = pa.array(['foo', 'bar', 'baz']) + arr = pa.DictionaryArray.from_arrays(indices, dictionary) + arr.indices.validate(full=True) + + +@pytest.mark.parametrize(('list_array_type', 'list_type_factory'), + [(pa.ListArray, pa.list_), + (pa.LargeListArray, pa.large_list)]) +def test_list_from_arrays(list_array_type, list_type_factory): + offsets_arr = np.array([0, 2, 5, 8], dtype='i4') + offsets = pa.array(offsets_arr, type='int32') + pyvalues = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h'] + values = pa.array(pyvalues, type='binary') + + result = list_array_type.from_arrays(offsets, values) + expected = pa.array([pyvalues[:2], pyvalues[2:5], pyvalues[5:8]], + type=list_type_factory(pa.binary())) + + assert result.equals(expected) + + # With specified type + typ = list_type_factory(pa.field("name", pa.binary())) + result = list_array_type.from_arrays(offsets, values, typ) + assert result.type == typ + assert result.type.value_field.name == "name" + + # With nulls + offsets = [0, None, 2, 6] + values = [b'a', b'b', b'c', b'd', b'e', b'f'] + + result = list_array_type.from_arrays(offsets, values) + expected = pa.array([values[:2], None, values[2:]], + type=list_type_factory(pa.binary())) + + assert result.equals(expected) + + # Another edge case + offsets2 = [0, 2, None, 6] + result = list_array_type.from_arrays(offsets2, values) + expected = pa.array([values[:2], values[2:], None], + type=list_type_factory(pa.binary())) + assert result.equals(expected) + + # raise on invalid array + offsets = [1, 3, 10] + values = np.arange(5) + with pytest.raises(ValueError): + list_array_type.from_arrays(offsets, values) + + # Non-monotonic offsets + offsets = [0, 3, 2, 6] + values = list(range(6)) + result = list_array_type.from_arrays(offsets, values) + with pytest.raises(ValueError): + result.validate(full=True) + + # mismatching type + typ = list_type_factory(pa.binary()) + with pytest.raises(TypeError): + list_array_type.from_arrays(offsets, values, type=typ) + + +@pytest.mark.parametrize(('list_array_type', 
'list_type_factory'), ( + (pa.ListArray, pa.list_), + (pa.LargeListArray, pa.large_list) +)) +@pytest.mark.parametrize("arr", ( + [None, [0]], + [None, [0, None], [0]], + [[0], [1]], +)) +def test_list_array_types_from_arrays( + list_array_type, list_type_factory, arr +): + arr = pa.array(arr, list_type_factory(pa.int8())) + reconstructed_arr = list_array_type.from_arrays( + arr.offsets, arr.values, mask=arr.is_null()) + assert arr == reconstructed_arr + + +@pytest.mark.parametrize(('list_array_type', 'list_type_factory'), ( + (pa.ListArray, pa.list_), + (pa.LargeListArray, pa.large_list) +)) +def test_list_array_types_from_arrays_fail(list_array_type, list_type_factory): + # Fail when manual offsets include nulls and mask passed + # ListArray.offsets doesn't report nulls. + + # This test case arr.offsets == [0, 1, 1, 3, 4] + arr = pa.array([[0], None, [0, None], [0]], list_type_factory(pa.int8())) + offsets = pa.array([0, None, 1, 3, 4]) + + # Using array's offset has no nulls; gives empty lists on top level + reconstructed_arr = list_array_type.from_arrays(arr.offsets, arr.values) + assert reconstructed_arr.to_pylist() == [[0], [], [0, None], [0]] + + # Manually specifying offsets (with nulls) is same as mask at top level + reconstructed_arr = list_array_type.from_arrays(offsets, arr.values) + assert arr == reconstructed_arr + reconstructed_arr = list_array_type.from_arrays(arr.offsets, + arr.values, + mask=arr.is_null()) + assert arr == reconstructed_arr + + # But using both is ambiguous, in this case `offsets` has nulls + with pytest.raises(ValueError, match="Ambiguous to specify both "): + list_array_type.from_arrays(offsets, arr.values, mask=arr.is_null()) + + # Not supported to reconstruct from a slice. + arr_slice = arr[1:] + msg = "Null bitmap with offsets slice not supported." 
+ with pytest.raises(NotImplementedError, match=msg): + list_array_type.from_arrays( + arr_slice.offsets, arr_slice.values, mask=arr_slice.is_null()) + + +def test_map_labelled(): + # ARROW-13735 + t = pa.map_(pa.field("name", "string", nullable=False), "int64") + arr = pa.array([[('a', 1), ('b', 2)], [('c', 3)]], type=t) + assert arr.type.key_field == pa.field("name", pa.utf8(), nullable=False) + assert arr.type.item_field == pa.field("value", pa.int64()) + assert len(arr) == 2 + + +def test_map_from_dict(): + # ARROW-17832 + tup_arr = pa.array([[('a', 1), ('b', 2)], [('c', 3)]], + pa.map_(pa.string(), pa.int64())) + dict_arr = pa.array([{'a': 1, 'b': 2}, {'c': 3}], + pa.map_(pa.string(), pa.int64())) + + assert tup_arr.equals(dict_arr) + + +def test_map_from_arrays(): + offsets_arr = np.array([0, 2, 5, 8], dtype='i4') + offsets = pa.array(offsets_arr, type='int32') + pykeys = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h'] + pyitems = list(range(len(pykeys))) + pypairs = list(zip(pykeys, pyitems)) + pyentries = [pypairs[:2], pypairs[2:5], pypairs[5:8]] + keys = pa.array(pykeys, type='binary') + items = pa.array(pyitems, type='i4') + + result = pa.MapArray.from_arrays(offsets, keys, items) + expected = pa.array(pyentries, type=pa.map_(pa.binary(), pa.int32())) + + assert result.equals(expected) + + # With nulls + offsets = [0, None, 2, 6] + pykeys = [b'a', b'b', b'c', b'd', b'e', b'f'] + pyitems = [1, 2, 3, None, 4, 5] + pypairs = list(zip(pykeys, pyitems)) + pyentries = [pypairs[:2], None, pypairs[2:]] + keys = pa.array(pykeys, type='binary') + items = pa.array(pyitems, type='i4') + + result = pa.MapArray.from_arrays(offsets, keys, items) + expected = pa.array(pyentries, type=pa.map_(pa.binary(), pa.int32())) + + assert result.equals(expected) + + # pass in the type explicitly + result = pa.MapArray.from_arrays(offsets, keys, items, pa.map_( + keys.type, + items.type + )) + assert result.equals(expected) + + # pass in invalid types + with pytest.raises(pa.ArrowTypeError, match='Expected map type, got string'): + pa.MapArray.from_arrays(offsets, keys, items, pa.string()) + + with pytest.raises(pa.ArrowTypeError, match='Mismatching map items type'): + pa.MapArray.from_arrays(offsets, keys, items, pa.map_( + keys.type, + # Larger than the original i4 + pa.int64() + )) + + # check invalid usage + offsets = [0, 1, 3, 5] + keys = np.arange(5) + items = np.arange(5) + _ = pa.MapArray.from_arrays(offsets, keys, items) + + # raise on invalid offsets + with pytest.raises(ValueError): + pa.MapArray.from_arrays(offsets + [6], keys, items) + + # raise on length of keys != items + with pytest.raises(ValueError): + pa.MapArray.from_arrays(offsets, keys, np.concatenate([items, items])) + + # raise on keys with null + keys_with_null = list(keys)[:-1] + [None] + assert len(keys_with_null) == len(items) + with pytest.raises(ValueError): + pa.MapArray.from_arrays(offsets, keys_with_null, items) + + +def test_fixed_size_list_from_arrays(): + values = pa.array(range(12), pa.int64()) + result = pa.FixedSizeListArray.from_arrays(values, 4) + assert result.to_pylist() == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + assert result.type.equals(pa.list_(pa.int64(), 4)) + + typ = pa.list_(pa.field("name", pa.int64()), 4) + result = pa.FixedSizeListArray.from_arrays(values, type=typ) + assert result.to_pylist() == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + assert result.type.equals(typ) + assert result.type.value_field.name == "name" + + result = pa.FixedSizeListArray.from_arrays(values, + type=typ, + 
mask=pa.array([False, True, False])) + assert result.to_pylist() == [[0, 1, 2, 3], None, [8, 9, 10, 11]] + + result = pa.FixedSizeListArray.from_arrays(values, + list_size=4, + mask=pa.array([False, True, False])) + assert result.to_pylist() == [[0, 1, 2, 3], None, [8, 9, 10, 11]] + + # raise on invalid values / list_size + with pytest.raises(ValueError): + pa.FixedSizeListArray.from_arrays(values, -4) + + with pytest.raises(ValueError): + # array with list size 0 cannot be constructed with from_arrays + pa.FixedSizeListArray.from_arrays(pa.array([], pa.int64()), 0) + + with pytest.raises(ValueError): + # length of values not multiple of 5 + pa.FixedSizeListArray.from_arrays(values, 5) + + typ = pa.list_(pa.int64(), 5) + with pytest.raises(ValueError): + pa.FixedSizeListArray.from_arrays(values, type=typ) + + # raise on mismatching values type + typ = pa.list_(pa.float64(), 4) + with pytest.raises(TypeError): + pa.FixedSizeListArray.from_arrays(values, type=typ) + + # raise on specifying none or both of list_size / type + with pytest.raises(ValueError): + pa.FixedSizeListArray.from_arrays(values) + + typ = pa.list_(pa.int64(), 4) + with pytest.raises(ValueError): + pa.FixedSizeListArray.from_arrays(values, list_size=4, type=typ) + + +def test_variable_list_from_arrays(): + values = pa.array([1, 2, 3, 4], pa.int64()) + offsets = pa.array([0, 2, 4]) + result = pa.ListArray.from_arrays(offsets, values) + assert result.to_pylist() == [[1, 2], [3, 4]] + assert result.type.equals(pa.list_(pa.int64())) + + offsets = pa.array([0, None, 2, 4]) + result = pa.ListArray.from_arrays(offsets, values) + assert result.to_pylist() == [[1, 2], None, [3, 4]] + + # raise if offset out of bounds + with pytest.raises(ValueError): + pa.ListArray.from_arrays(pa.array([-1, 2, 4]), values) + + with pytest.raises(ValueError): + pa.ListArray.from_arrays(pa.array([0, 2, 5]), values) + + +def test_union_from_dense(): + binary = pa.array([b'a', b'b', b'c', b'd'], type='binary') + int64 = pa.array([1, 2, 3], type='int64') + types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8') + logical_types = pa.array([11, 13, 11, 11, 13, 13, 11], type='int8') + value_offsets = pa.array([0, 0, 1, 2, 1, 2, 3], type='int32') + py_value = [b'a', 1, b'b', b'c', 2, 3, b'd'] + + def check_result(result, expected_field_names, expected_type_codes, + expected_type_code_values): + result.validate(full=True) + actual_field_names = [result.type[i].name + for i in range(result.type.num_fields)] + assert actual_field_names == expected_field_names + assert result.type.mode == "dense" + assert result.type.type_codes == expected_type_codes + assert result.to_pylist() == py_value + assert expected_type_code_values.equals(result.type_codes) + assert value_offsets.equals(result.offsets) + assert result.field(0).equals(binary) + assert result.field(1).equals(int64) + with pytest.raises(KeyError): + result.field(-1) + with pytest.raises(KeyError): + result.field(2) + + # without field names and type codes + check_result(pa.UnionArray.from_dense(types, value_offsets, + [binary, int64]), + expected_field_names=['0', '1'], + expected_type_codes=[0, 1], + expected_type_code_values=types) + + # with field names + check_result(pa.UnionArray.from_dense(types, value_offsets, + [binary, int64], + ['bin', 'int']), + expected_field_names=['bin', 'int'], + expected_type_codes=[0, 1], + expected_type_code_values=types) + + # with type codes + check_result(pa.UnionArray.from_dense(logical_types, value_offsets, + [binary, int64], + type_codes=[11, 13]), + 
expected_field_names=['0', '1'], + expected_type_codes=[11, 13], + expected_type_code_values=logical_types) + + # with field names and type codes + check_result(pa.UnionArray.from_dense(logical_types, value_offsets, + [binary, int64], + ['bin', 'int'], [11, 13]), + expected_field_names=['bin', 'int'], + expected_type_codes=[11, 13], + expected_type_code_values=logical_types) + + # Bad type ids + arr = pa.UnionArray.from_dense(logical_types, value_offsets, + [binary, int64]) + with pytest.raises(pa.ArrowInvalid): + arr.validate(full=True) + arr = pa.UnionArray.from_dense(types, value_offsets, [binary, int64], + type_codes=[11, 13]) + with pytest.raises(pa.ArrowInvalid): + arr.validate(full=True) + + # Offset larger than child size + bad_offsets = pa.array([0, 0, 1, 2, 1, 2, 4], type='int32') + arr = pa.UnionArray.from_dense(types, bad_offsets, [binary, int64]) + with pytest.raises(pa.ArrowInvalid): + arr.validate(full=True) + + +def test_union_from_sparse(): + binary = pa.array([b'a', b' ', b'b', b'c', b' ', b' ', b'd'], + type='binary') + int64 = pa.array([0, 1, 0, 0, 2, 3, 0], type='int64') + types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8') + logical_types = pa.array([11, 13, 11, 11, 13, 13, 11], type='int8') + py_value = [b'a', 1, b'b', b'c', 2, 3, b'd'] + + def check_result(result, expected_field_names, expected_type_codes, + expected_type_code_values): + result.validate(full=True) + assert result.to_pylist() == py_value + actual_field_names = [result.type[i].name + for i in range(result.type.num_fields)] + assert actual_field_names == expected_field_names + assert result.type.mode == "sparse" + assert result.type.type_codes == expected_type_codes + assert expected_type_code_values.equals(result.type_codes) + assert result.field(0).equals(binary) + assert result.field(1).equals(int64) + with pytest.raises(pa.ArrowTypeError): + result.offsets + with pytest.raises(KeyError): + result.field(-1) + with pytest.raises(KeyError): + result.field(2) + + # without field names and type codes + check_result(pa.UnionArray.from_sparse(types, [binary, int64]), + expected_field_names=['0', '1'], + expected_type_codes=[0, 1], + expected_type_code_values=types) + + # with field names + check_result(pa.UnionArray.from_sparse(types, [binary, int64], + ['bin', 'int']), + expected_field_names=['bin', 'int'], + expected_type_codes=[0, 1], + expected_type_code_values=types) + + # with type codes + check_result(pa.UnionArray.from_sparse(logical_types, [binary, int64], + type_codes=[11, 13]), + expected_field_names=['0', '1'], + expected_type_codes=[11, 13], + expected_type_code_values=logical_types) + + # with field names and type codes + check_result(pa.UnionArray.from_sparse(logical_types, [binary, int64], + ['bin', 'int'], + [11, 13]), + expected_field_names=['bin', 'int'], + expected_type_codes=[11, 13], + expected_type_code_values=logical_types) + + # Bad type ids + arr = pa.UnionArray.from_sparse(logical_types, [binary, int64]) + with pytest.raises(pa.ArrowInvalid): + arr.validate(full=True) + arr = pa.UnionArray.from_sparse(types, [binary, int64], + type_codes=[11, 13]) + with pytest.raises(pa.ArrowInvalid): + arr.validate(full=True) + + # Invalid child length + with pytest.raises(pa.ArrowInvalid): + arr = pa.UnionArray.from_sparse(logical_types, [binary, int64[1:]]) + + +def test_union_array_to_pylist_with_nulls(): + # ARROW-9556 + arr = pa.UnionArray.from_sparse( + pa.array([0, 1, 0, 0, 1], type=pa.int8()), + [ + pa.array([0.0, 1.1, None, 3.3, 4.4]), + pa.array([True, None, False, True, False]), + 
] + ) + assert arr.to_pylist() == [0.0, None, None, 3.3, False] + + arr = pa.UnionArray.from_dense( + pa.array([0, 1, 0, 0, 0, 1, 1], type=pa.int8()), + pa.array([0, 0, 1, 2, 3, 1, 2], type=pa.int32()), + [ + pa.array([0.0, 1.1, None, 3.3]), + pa.array([True, None, False]) + ] + ) + assert arr.to_pylist() == [0.0, True, 1.1, None, 3.3, None, False] + + +def test_union_array_slice(): + # ARROW-2314 + arr = pa.UnionArray.from_sparse(pa.array([0, 0, 1, 1], type=pa.int8()), + [pa.array(["a", "b", "c", "d"]), + pa.array([1, 2, 3, 4])]) + assert arr[1:].to_pylist() == ["b", 3, 4] + + binary = pa.array([b'a', b'b', b'c', b'd'], type='binary') + int64 = pa.array([1, 2, 3], type='int64') + types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8') + value_offsets = pa.array([0, 0, 2, 1, 1, 2, 3], type='int32') + + arr = pa.UnionArray.from_dense(types, value_offsets, [binary, int64]) + lst = arr.to_pylist() + for i in range(len(arr)): + for j in range(i, len(arr)): + assert arr[i:j].to_pylist() == lst[i:j] + + +def _check_cast_case(case, *, safe=True, check_array_construction=True): + in_data, in_type, out_data, out_type = case + if isinstance(out_data, pa.Array): + assert out_data.type == out_type + expected = out_data + else: + expected = pa.array(out_data, type=out_type) + + # check casting an already created array + if isinstance(in_data, pa.Array): + assert in_data.type == in_type + in_arr = in_data + else: + in_arr = pa.array(in_data, type=in_type) + casted = in_arr.cast(out_type, safe=safe) + casted.validate(full=True) + assert casted.equals(expected) + + # constructing an array with out type which optionally involves casting + # for more see ARROW-1949 + if check_array_construction: + in_arr = pa.array(in_data, type=out_type, safe=safe) + assert in_arr.equals(expected) + + +def test_cast_integers_safe(): + safe_cases = [ + (np.array([0, 1, 2, 3], dtype='i1'), 'int8', + np.array([0, 1, 2, 3], dtype='i4'), pa.int32()), + (np.array([0, 1, 2, 3], dtype='i1'), 'int8', + np.array([0, 1, 2, 3], dtype='u4'), pa.uint16()), + (np.array([0, 1, 2, 3], dtype='i1'), 'int8', + np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()), + (np.array([0, 1, 2, 3], dtype='i1'), 'int8', + np.array([0, 1, 2, 3], dtype='f8'), pa.float64()) + ] + + for case in safe_cases: + _check_cast_case(case) + + unsafe_cases = [ + (np.array([50000], dtype='i4'), 'int32', 'int16'), + (np.array([70000], dtype='i4'), 'int32', 'uint16'), + (np.array([-1], dtype='i4'), 'int32', 'uint16'), + (np.array([50000], dtype='u2'), 'uint16', 'int16') + ] + for in_data, in_type, out_type in unsafe_cases: + in_arr = pa.array(in_data, type=in_type) + + with pytest.raises(pa.ArrowInvalid): + in_arr.cast(out_type) + + +def test_cast_none(): + # ARROW-3735: Ensure that calling cast(None) doesn't segfault. 
+ arr = pa.array([1, 2, 3]) + + with pytest.raises(TypeError): + arr.cast(None) + + +def test_cast_list_to_primitive(): + # ARROW-8070: cast segfaults on unsupported cast from list to utf8 + arr = pa.array([[1, 2], [3, 4]]) + with pytest.raises(NotImplementedError): + arr.cast(pa.int8()) + + arr = pa.array([[b"a", b"b"], [b"c"]], pa.list_(pa.binary())) + with pytest.raises(NotImplementedError): + arr.cast(pa.binary()) + + +def test_slice_chunked_array_zero_chunks(): + # ARROW-8911 + arr = pa.chunked_array([], type='int8') + assert arr.num_chunks == 0 + + result = arr[:] + assert result.equals(arr) + + # Do not crash + arr[:5] + + +def test_cast_chunked_array(): + arrays = [pa.array([1, 2, 3]), pa.array([4, 5, 6])] + carr = pa.chunked_array(arrays) + + target = pa.float64() + casted = carr.cast(target) + expected = pa.chunked_array([x.cast(target) for x in arrays]) + assert casted.equals(expected) + + +def test_cast_chunked_array_empty(): + # ARROW-8142 + for typ1, typ2 in [(pa.dictionary(pa.int8(), pa.string()), pa.string()), + (pa.int64(), pa.int32())]: + + arr = pa.chunked_array([], type=typ1) + result = arr.cast(typ2) + expected = pa.chunked_array([], type=typ2) + assert result.equals(expected) + + +def test_chunked_array_data_warns(): + with pytest.warns(FutureWarning): + res = pa.chunked_array([[]]).data + assert isinstance(res, pa.ChunkedArray) + + +def test_cast_integers_unsafe(): + # We let NumPy do the unsafe casting. + # Note that NEP50 in the NumPy spec no longer allows + # the np.array() constructor to pass the dtype directly + # if it results in an unsafe cast. + unsafe_cases = [ + (np.array([50000], dtype='i4'), 'int32', + np.array([50000]).astype(dtype='i2'), pa.int16()), + (np.array([70000], dtype='i4'), 'int32', + np.array([70000]).astype(dtype='u2'), pa.uint16()), + (np.array([-1], dtype='i4'), 'int32', + np.array([-1]).astype(dtype='u2'), pa.uint16()), + (np.array([50000], dtype='u2'), pa.uint16(), + np.array([50000]).astype(dtype='i2'), pa.int16()) + ] + + for case in unsafe_cases: + _check_cast_case(case, safe=False) + + +def test_floating_point_truncate_safe(): + safe_cases = [ + (np.array([1.0, 2.0, 3.0], dtype='float32'), 'float32', + np.array([1, 2, 3], dtype='i4'), pa.int32()), + (np.array([1.0, 2.0, 3.0], dtype='float64'), 'float64', + np.array([1, 2, 3], dtype='i4'), pa.int32()), + (np.array([-10.0, 20.0, -30.0], dtype='float64'), 'float64', + np.array([-10, 20, -30], dtype='i4'), pa.int32()), + ] + for case in safe_cases: + _check_cast_case(case, safe=True) + + +def test_floating_point_truncate_unsafe(): + unsafe_cases = [ + (np.array([1.1, 2.2, 3.3], dtype='float32'), 'float32', + np.array([1, 2, 3], dtype='i4'), pa.int32()), + (np.array([1.1, 2.2, 3.3], dtype='float64'), 'float64', + np.array([1, 2, 3], dtype='i4'), pa.int32()), + (np.array([-10.1, 20.2, -30.3], dtype='float64'), 'float64', + np.array([-10, 20, -30], dtype='i4'), pa.int32()), + ] + for case in unsafe_cases: + # test safe casting raises + with pytest.raises(pa.ArrowInvalid, match='truncated'): + _check_cast_case(case, safe=True) + + # test unsafe casting truncates + _check_cast_case(case, safe=False) + + +def test_decimal_to_int_safe(): + safe_cases = [ + ( + [decimal.Decimal("123456"), None, decimal.Decimal("-912345")], + pa.decimal128(32, 5), + [123456, None, -912345], + pa.int32() + ), + ( + [decimal.Decimal("1234"), None, decimal.Decimal("-9123")], + pa.decimal128(19, 10), + [1234, None, -9123], + pa.int16() + ), + ( + [decimal.Decimal("123"), None, decimal.Decimal("-91")], + 
pa.decimal128(19, 10), + [123, None, -91], + pa.int8() + ), + ] + for case in safe_cases: + _check_cast_case(case) + _check_cast_case(case, safe=True) + + +def test_decimal_to_int_value_out_of_bounds(): + out_of_bounds_cases = [ + ( + np.array([ + decimal.Decimal("1234567890123"), + None, + decimal.Decimal("-912345678901234") + ]), + pa.decimal128(32, 5), + [1912276171, None, -135950322], + pa.int32() + ), + ( + [decimal.Decimal("123456"), None, decimal.Decimal("-912345678")], + pa.decimal128(32, 5), + [-7616, None, -19022], + pa.int16() + ), + ( + [decimal.Decimal("1234"), None, decimal.Decimal("-9123")], + pa.decimal128(32, 5), + [-46, None, 93], + pa.int8() + ), + ] + + for case in out_of_bounds_cases: + # test safe casting raises + with pytest.raises(pa.ArrowInvalid, + match='Integer value out of bounds'): + _check_cast_case(case) + + # XXX `safe=False` can be ignored when constructing an array + # from a sequence of Python objects (ARROW-8567) + _check_cast_case(case, safe=False, check_array_construction=False) + + +def test_decimal_to_int_non_integer(): + non_integer_cases = [ + ( + [ + decimal.Decimal("123456.21"), + None, + decimal.Decimal("-912345.13") + ], + pa.decimal128(32, 5), + [123456, None, -912345], + pa.int32() + ), + ( + [decimal.Decimal("1234.134"), None, decimal.Decimal("-9123.1")], + pa.decimal128(19, 10), + [1234, None, -9123], + pa.int16() + ), + ( + [decimal.Decimal("123.1451"), None, decimal.Decimal("-91.21")], + pa.decimal128(19, 10), + [123, None, -91], + pa.int8() + ), + ] + + for case in non_integer_cases: + # test safe casting raises + msg_regexp = 'Rescaling Decimal128 value would cause data loss' + with pytest.raises(pa.ArrowInvalid, match=msg_regexp): + _check_cast_case(case) + + _check_cast_case(case, safe=False) + + +def test_decimal_to_decimal(): + arr = pa.array( + [decimal.Decimal("1234.12"), None], + type=pa.decimal128(19, 10) + ) + result = arr.cast(pa.decimal128(15, 6)) + expected = pa.array( + [decimal.Decimal("1234.12"), None], + type=pa.decimal128(15, 6) + ) + assert result.equals(expected) + + msg_regexp = 'Rescaling Decimal128 value would cause data loss' + with pytest.raises(pa.ArrowInvalid, match=msg_regexp): + result = arr.cast(pa.decimal128(9, 1)) + + result = arr.cast(pa.decimal128(9, 1), safe=False) + expected = pa.array( + [decimal.Decimal("1234.1"), None], + type=pa.decimal128(9, 1) + ) + assert result.equals(expected) + + with pytest.raises(pa.ArrowInvalid, + match='Decimal value does not fit in precision'): + result = arr.cast(pa.decimal128(5, 2)) + + +def test_safe_cast_nan_to_int_raises(): + arr = pa.array([np.nan, 1.]) + + with pytest.raises(pa.ArrowInvalid, match='truncated'): + arr.cast(pa.int64(), safe=True) + + +def test_cast_signed_to_unsigned(): + safe_cases = [ + (np.array([0, 1, 2, 3], dtype='i1'), pa.uint8(), + np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()), + (np.array([0, 1, 2, 3], dtype='i2'), pa.uint16(), + np.array([0, 1, 2, 3], dtype='u2'), pa.uint16()) + ] + + for case in safe_cases: + _check_cast_case(case) + + +def test_cast_from_null(): + in_data = [None] * 3 + in_type = pa.null() + out_types = [ + pa.null(), + pa.uint8(), + pa.float16(), + pa.utf8(), + pa.binary(), + pa.binary(10), + pa.list_(pa.int16()), + pa.list_(pa.int32(), 4), + pa.large_list(pa.uint8()), + pa.decimal128(19, 4), + pa.timestamp('us'), + pa.timestamp('us', tz='UTC'), + pa.timestamp('us', tz='Europe/Paris'), + pa.duration('us'), + pa.month_day_nano_interval(), + pa.struct([pa.field('a', pa.int32()), + pa.field('b', pa.list_(pa.int8())), + 
pa.field('c', pa.string())]), + pa.dictionary(pa.int32(), pa.string()), + ] + for out_type in out_types: + _check_cast_case((in_data, in_type, in_data, out_type)) + + out_types = [ + + pa.union([pa.field('a', pa.binary(10)), + pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE), + pa.union([pa.field('a', pa.binary(10)), + pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE), + ] + in_arr = pa.array(in_data, type=pa.null()) + for out_type in out_types: + with pytest.raises(NotImplementedError): + in_arr.cast(out_type) + + +def test_cast_string_to_number_roundtrip(): + cases = [ + (pa.array(["1", "127", "-128"]), + pa.array([1, 127, -128], type=pa.int8())), + (pa.array([None, "18446744073709551615"]), + pa.array([None, 18446744073709551615], type=pa.uint64())), + ] + for in_arr, expected in cases: + casted = in_arr.cast(expected.type, safe=True) + casted.validate(full=True) + assert casted.equals(expected) + casted_back = casted.cast(in_arr.type, safe=True) + casted_back.validate(full=True) + assert casted_back.equals(in_arr) + + +def test_cast_dictionary(): + # cast to the value type + arr = pa.array( + ["foo", "bar", None], + type=pa.dictionary(pa.int64(), pa.string()) + ) + expected = pa.array(["foo", "bar", None]) + assert arr.type == pa.dictionary(pa.int64(), pa.string()) + assert arr.cast(pa.string()) == expected + + # cast to a different key type + for key_type in [pa.int8(), pa.int16(), pa.int32()]: + typ = pa.dictionary(key_type, pa.string()) + expected = pa.array( + ["foo", "bar", None], + type=pa.dictionary(key_type, pa.string()) + ) + assert arr.cast(typ) == expected + + # shouldn't crash (ARROW-7077) + with pytest.raises(pa.ArrowInvalid): + arr.cast(pa.int32()) + + +def test_view(): + # ARROW-5992 + arr = pa.array(['foo', 'bar', 'baz'], type=pa.utf8()) + expected = pa.array(['foo', 'bar', 'baz'], type=pa.binary()) + + assert arr.view(pa.binary()).equals(expected) + assert arr.view('binary').equals(expected) + + +def test_unique_simple(): + cases = [ + (pa.array([1, 2, 3, 1, 2, 3]), pa.array([1, 2, 3])), + (pa.array(['foo', None, 'bar', 'foo']), + pa.array(['foo', None, 'bar'])), + (pa.array(['foo', None, 'bar', 'foo'], pa.large_binary()), + pa.array(['foo', None, 'bar'], pa.large_binary())), + ] + for arr, expected in cases: + result = arr.unique() + assert result.equals(expected) + result = pa.chunked_array([arr]).unique() + assert result.equals(expected) + + +def test_value_counts_simple(): + cases = [ + (pa.array([1, 2, 3, 1, 2, 3]), + pa.array([1, 2, 3]), + pa.array([2, 2, 2], type=pa.int64())), + (pa.array(['foo', None, 'bar', 'foo']), + pa.array(['foo', None, 'bar']), + pa.array([2, 1, 1], type=pa.int64())), + (pa.array(['foo', None, 'bar', 'foo'], pa.large_binary()), + pa.array(['foo', None, 'bar'], pa.large_binary()), + pa.array([2, 1, 1], type=pa.int64())), + ] + for arr, expected_values, expected_counts in cases: + for arr_in in (arr, pa.chunked_array([arr])): + result = arr_in.value_counts() + assert result.type.equals( + pa.struct([pa.field("values", arr.type), + pa.field("counts", pa.int64())])) + assert result.field("values").equals(expected_values) + assert result.field("counts").equals(expected_counts) + + +def test_unique_value_counts_dictionary_type(): + indices = pa.array([3, 0, 0, 0, 1, 1, 3, 0, 1, 3, 0, 1]) + dictionary = pa.array(['foo', 'bar', 'baz', 'qux']) + + arr = pa.DictionaryArray.from_arrays(indices, dictionary) + + unique_result = arr.unique() + expected = pa.DictionaryArray.from_arrays(indices.unique(), dictionary) + assert 
unique_result.equals(expected) + + result = arr.value_counts() + assert result.field('values').equals(unique_result) + assert result.field('counts').equals(pa.array([3, 5, 4], type='int64')) + + arr = pa.DictionaryArray.from_arrays( + pa.array([], type='int64'), dictionary) + unique_result = arr.unique() + expected = pa.DictionaryArray.from_arrays(pa.array([], type='int64'), + pa.array([], type='utf8')) + assert unique_result.equals(expected) + + result = arr.value_counts() + assert result.field('values').equals(unique_result) + assert result.field('counts').equals(pa.array([], type='int64')) + + +def test_dictionary_encode_simple(): + cases = [ + (pa.array([1, 2, 3, None, 1, 2, 3]), + pa.DictionaryArray.from_arrays( + pa.array([0, 1, 2, None, 0, 1, 2], type='int32'), + [1, 2, 3])), + (pa.array(['foo', None, 'bar', 'foo']), + pa.DictionaryArray.from_arrays( + pa.array([0, None, 1, 0], type='int32'), + ['foo', 'bar'])), + (pa.array(['foo', None, 'bar', 'foo'], type=pa.large_binary()), + pa.DictionaryArray.from_arrays( + pa.array([0, None, 1, 0], type='int32'), + pa.array(['foo', 'bar'], type=pa.large_binary()))), + ] + for arr, expected in cases: + result = arr.dictionary_encode() + assert result.equals(expected) + result = pa.chunked_array([arr]).dictionary_encode() + assert result.num_chunks == 1 + assert result.chunk(0).equals(expected) + result = pa.chunked_array([], type=arr.type).dictionary_encode() + assert result.num_chunks == 0 + assert result.type == expected.type + + +def test_dictionary_encode_sliced(): + cases = [ + (pa.array([1, 2, 3, None, 1, 2, 3])[1:-1], + pa.DictionaryArray.from_arrays( + pa.array([0, 1, None, 2, 0], type='int32'), + [2, 3, 1])), + (pa.array([None, 'foo', 'bar', 'foo', 'xyzzy'])[1:-1], + pa.DictionaryArray.from_arrays( + pa.array([0, 1, 0], type='int32'), + ['foo', 'bar'])), + (pa.array([None, 'foo', 'bar', 'foo', 'xyzzy'], + type=pa.large_string())[1:-1], + pa.DictionaryArray.from_arrays( + pa.array([0, 1, 0], type='int32'), + pa.array(['foo', 'bar'], type=pa.large_string()))), + ] + for arr, expected in cases: + result = arr.dictionary_encode() + assert result.equals(expected) + result = pa.chunked_array([arr]).dictionary_encode() + assert result.num_chunks == 1 + assert result.type == expected.type + assert result.chunk(0).equals(expected) + result = pa.chunked_array([], type=arr.type).dictionary_encode() + assert result.num_chunks == 0 + assert result.type == expected.type + + # ARROW-9143 dictionary_encode after slice was segfaulting + array = pa.array(['foo', 'bar', 'baz']) + array.slice(1).dictionary_encode() + + +def test_dictionary_encode_zero_length(): + # User-facing experience of ARROW-7008 + arr = pa.array([], type=pa.string()) + encoded = arr.dictionary_encode() + assert len(encoded.dictionary) == 0 + encoded.validate(full=True) + + +def test_dictionary_decode(): + cases = [ + (pa.array([1, 2, 3, None, 1, 2, 3]), + pa.DictionaryArray.from_arrays( + pa.array([0, 1, 2, None, 0, 1, 2], type='int32'), + [1, 2, 3])), + (pa.array(['foo', None, 'bar', 'foo']), + pa.DictionaryArray.from_arrays( + pa.array([0, None, 1, 0], type='int32'), + ['foo', 'bar'])), + (pa.array(['foo', None, 'bar', 'foo'], type=pa.large_binary()), + pa.DictionaryArray.from_arrays( + pa.array([0, None, 1, 0], type='int32'), + pa.array(['foo', 'bar'], type=pa.large_binary()))), + ] + for expected, arr in cases: + result = arr.dictionary_decode() + assert result.equals(expected) + + +def test_cast_time32_to_int(): + arr = pa.array(np.array([0, 1, 2], dtype='int32'), + 
type=pa.time32('s')) + expected = pa.array([0, 1, 2], type='i4') + + result = arr.cast('i4') + assert result.equals(expected) + + +def test_cast_time64_to_int(): + arr = pa.array(np.array([0, 1, 2], dtype='int64'), + type=pa.time64('us')) + expected = pa.array([0, 1, 2], type='i8') + + result = arr.cast('i8') + assert result.equals(expected) + + +def test_cast_timestamp_to_int(): + arr = pa.array(np.array([0, 1, 2], dtype='int64'), + type=pa.timestamp('us')) + expected = pa.array([0, 1, 2], type='i8') + + result = arr.cast('i8') + assert result.equals(expected) + + +def test_cast_date32_to_int(): + arr = pa.array([0, 1, 2], type='i4') + + result1 = arr.cast('date32') + result2 = result1.cast('i4') + + expected1 = pa.array([ + datetime.date(1970, 1, 1), + datetime.date(1970, 1, 2), + datetime.date(1970, 1, 3) + ]).cast('date32') + + assert result1.equals(expected1) + assert result2.equals(arr) + + +def test_cast_duration_to_int(): + arr = pa.array(np.array([0, 1, 2], dtype='int64'), + type=pa.duration('us')) + expected = pa.array([0, 1, 2], type='i8') + + result = arr.cast('i8') + assert result.equals(expected) + + +def test_cast_binary_to_utf8(): + binary_arr = pa.array([b'foo', b'bar', b'baz'], type=pa.binary()) + utf8_arr = binary_arr.cast(pa.utf8()) + expected = pa.array(['foo', 'bar', 'baz'], type=pa.utf8()) + + assert utf8_arr.equals(expected) + + non_utf8_values = [('mañana').encode('utf-16-le')] + non_utf8_binary = pa.array(non_utf8_values) + assert non_utf8_binary.type == pa.binary() + with pytest.raises(ValueError): + non_utf8_binary.cast(pa.string()) + + non_utf8_all_null = pa.array(non_utf8_values, mask=np.array([True]), + type=pa.binary()) + # No error + casted = non_utf8_all_null.cast(pa.string()) + assert casted.null_count == 1 + + +def test_cast_date64_to_int(): + arr = pa.array(np.array([0, 1, 2], dtype='int64'), + type=pa.date64()) + expected = pa.array([0, 1, 2], type='i8') + + result = arr.cast('i8') + + assert result.equals(expected) + + +def test_date64_from_builtin_datetime(): + val1 = datetime.datetime(2000, 1, 1, 12, 34, 56, 123456) + val2 = datetime.datetime(2000, 1, 1) + result = pa.array([val1, val2], type='date64') + result2 = pa.array([val1.date(), val2.date()], type='date64') + + assert result.equals(result2) + + as_i8 = result.view('int64') + assert as_i8[0].as_py() == as_i8[1].as_py() + + +@pytest.mark.parametrize(('ty', 'values'), [ + ('bool', [True, False, True]), + ('uint8', range(0, 255)), + ('int8', range(0, 128)), + ('uint16', range(0, 10)), + ('int16', range(0, 10)), + ('uint32', range(0, 10)), + ('int32', range(0, 10)), + ('uint64', range(0, 10)), + ('int64', range(0, 10)), + ('float', [0.0, 0.1, 0.2]), + ('double', [0.0, 0.1, 0.2]), + ('string', ['a', 'b', 'c']), + ('binary', [b'a', b'b', b'c']), + (pa.binary(3), [b'abc', b'bcd', b'cde']) +]) +def test_cast_identities(ty, values): + arr = pa.array(values, type=ty) + assert arr.cast(ty).equals(arr) + + +pickle_test_parametrize = pytest.mark.parametrize( + ('data', 'typ'), + [ + ([True, False, True, True], pa.bool_()), + ([1, 2, 4, 6], pa.int64()), + ([1.0, 2.5, None], pa.float64()), + (['a', None, 'b'], pa.string()), + ([], None), + ([[1, 2], [3]], pa.list_(pa.int64())), + ([[4, 5], [6]], pa.large_list(pa.int16())), + ([['a'], None, ['b', 'c']], pa.list_(pa.string())), + ([[1, 2], [3]], pa.list_view(pa.int64())), + ([[4, 5], [6]], pa.large_list_view(pa.int16())), + ([['a'], None, ['b', 'c']], pa.list_view(pa.string())), + ([(1, 'a'), (2, 'c'), None], + pa.struct([pa.field('a', pa.int64()), 
pa.field('b', pa.string())])) + ] +) + + +@pickle_test_parametrize +def test_array_pickle(data, typ, pickle_module): + # Allocate here so that we don't have any Arrow data allocated. + # This is needed to ensure that allocator tests can be reliable. + array = pa.array(data, type=typ) + for proto in range(0, pickle_module.HIGHEST_PROTOCOL + 1): + result = pickle_module.loads(pickle_module.dumps(array, proto)) + assert array.equals(result) + + +def test_array_pickle_dictionary(pickle_module): + # not included in the above as dictionary array cannot be created with + # the pa.array function + array = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1], ['a', 'b', 'c']) + for proto in range(0, pickle_module.HIGHEST_PROTOCOL + 1): + result = pickle_module.loads(pickle_module.dumps(array, proto)) + assert array.equals(result) + + +@h.settings(suppress_health_check=(h.HealthCheck.too_slow,)) +@h.given( + past.arrays( + past.all_types, + size=st.integers(min_value=0, max_value=10) + ) +) +def test_pickling(pickle_module, arr): + data = pickle_module.dumps(arr) + restored = pickle_module.loads(data) + assert arr.equals(restored) + + +@pickle_test_parametrize +def test_array_pickle_protocol5(data, typ, pickle_module): + # Test zero-copy pickling with protocol 5 (PEP 574) + array = pa.array(data, type=typ) + addresses = [buf.address if buf is not None else 0 + for buf in array.buffers()] + + for proto in range(5, pickle_module.HIGHEST_PROTOCOL + 1): + buffers = [] + pickled = pickle_module.dumps(array, proto, buffer_callback=buffers.append) + result = pickle_module.loads(pickled, buffers=buffers) + assert array.equals(result) + + result_addresses = [buf.address if buf is not None else 0 + for buf in result.buffers()] + assert result_addresses == addresses + + +@pytest.mark.parametrize( + 'narr', + [ + np.arange(10, dtype=np.int64), + np.arange(10, dtype=np.int32), + np.arange(10, dtype=np.int16), + np.arange(10, dtype=np.int8), + np.arange(10, dtype=np.uint64), + np.arange(10, dtype=np.uint32), + np.arange(10, dtype=np.uint16), + np.arange(10, dtype=np.uint8), + np.arange(10, dtype=np.float64), + np.arange(10, dtype=np.float32), + np.arange(10, dtype=np.float16), + ] +) +def test_to_numpy_roundtrip(narr): + arr = pa.array(narr) + assert narr.dtype == arr.to_numpy().dtype + np.testing.assert_array_equal(narr, arr.to_numpy()) + np.testing.assert_array_equal(narr[:6], arr[:6].to_numpy()) + np.testing.assert_array_equal(narr[2:], arr[2:].to_numpy()) + np.testing.assert_array_equal(narr[2:6], arr[2:6].to_numpy()) + + +def test_array_uint64_from_py_over_range(): + arr = pa.array([2 ** 63], type=pa.uint64()) + expected = pa.array(np.array([2 ** 63], dtype='u8')) + assert arr.equals(expected) + + +def test_array_conversions_no_sentinel_values(): + arr = np.array([1, 2, 3, 4], dtype='int8') + refcount = sys.getrefcount(arr) + arr2 = pa.array(arr) # noqa + assert sys.getrefcount(arr) == (refcount + 1) + + assert arr2.type == 'int8' + + arr3 = pa.array(np.array([1, np.nan, 2, 3, np.nan, 4], dtype='float32'), + type='float32') + assert arr3.type == 'float32' + assert arr3.null_count == 0 + + +def test_time32_time64_from_integer(): + # ARROW-4111 + result = pa.array([1, 2, None], type=pa.time32('s')) + expected = pa.array([datetime.time(second=1), + datetime.time(second=2), None], + type=pa.time32('s')) + assert result.equals(expected) + + result = pa.array([1, 2, None], type=pa.time32('ms')) + expected = pa.array([datetime.time(microsecond=1000), + datetime.time(microsecond=2000), None], + type=pa.time32('ms')) + 
assert result.equals(expected) + + result = pa.array([1, 2, None], type=pa.time64('us')) + expected = pa.array([datetime.time(microsecond=1), + datetime.time(microsecond=2), None], + type=pa.time64('us')) + assert result.equals(expected) + + result = pa.array([1000, 2000, None], type=pa.time64('ns')) + expected = pa.array([datetime.time(microsecond=1), + datetime.time(microsecond=2), None], + type=pa.time64('ns')) + assert result.equals(expected) + + +def test_binary_string_pandas_null_sentinels(): + # ARROW-6227 + def _check_case(ty): + arr = pa.array(['string', np.nan], type=ty, from_pandas=True) + expected = pa.array(['string', None], type=ty) + assert arr.equals(expected) + _check_case('binary') + _check_case('utf8') + + +def test_pandas_null_sentinels_raise_error(): + # ARROW-6227 + cases = [ + ([None, np.nan], 'null'), + (['string', np.nan], 'binary'), + (['string', np.nan], 'utf8'), + (['string', np.nan], 'large_binary'), + (['string', np.nan], 'large_utf8'), + ([b'string', np.nan], pa.binary(6)), + ([True, np.nan], pa.bool_()), + ([decimal.Decimal('0'), np.nan], pa.decimal128(12, 2)), + ([0, np.nan], pa.date32()), + ([0, np.nan], pa.date32()), + ([0, np.nan], pa.date64()), + ([0, np.nan], pa.time32('s')), + ([0, np.nan], pa.time64('us')), + ([0, np.nan], pa.timestamp('us')), + ([0, np.nan], pa.duration('us')), + ] + for case, ty in cases: + # Both types of exceptions are raised. May want to clean that up + with pytest.raises((ValueError, TypeError)): + pa.array(case, type=ty) + + # from_pandas option suppresses failure + result = pa.array(case, type=ty, from_pandas=True) + assert result.null_count == (1 if ty != 'null' else 2) + + +@pytest.mark.pandas +def test_pandas_null_sentinels_index(): + # ARROW-7023 - ensure that when passing a pandas Index, "from_pandas" + # semantics are used + import pandas as pd + idx = pd.Index([1, 2, np.nan], dtype=object) + result = pa.array(idx) + expected = pa.array([1, 2, np.nan], from_pandas=True) + assert result.equals(expected) + + +def test_array_roundtrip_from_numpy_datetimeD(): + arr = np.array([None, datetime.date(2017, 4, 4)], dtype='datetime64[D]') + + result = pa.array(arr) + expected = pa.array([None, datetime.date(2017, 4, 4)], type=pa.date32()) + assert result.equals(expected) + result = result.to_numpy(zero_copy_only=False) + np.testing.assert_array_equal(result, arr) + assert result.dtype == arr.dtype + + +def test_array_from_naive_datetimes(): + arr = pa.array([ + None, + datetime.datetime(2017, 4, 4, 12, 11, 10), + datetime.datetime(2018, 1, 1, 0, 2, 0) + ]) + assert arr.type == pa.timestamp('us', tz=None) + + +@pytest.mark.parametrize(('dtype', 'type'), [ + ('datetime64[s]', pa.timestamp('s')), + ('datetime64[ms]', pa.timestamp('ms')), + ('datetime64[us]', pa.timestamp('us')), + ('datetime64[ns]', pa.timestamp('ns')) +]) +def test_array_from_numpy_datetime(dtype, type): + data = [ + None, + datetime.datetime(2017, 4, 4, 12, 11, 10), + datetime.datetime(2018, 1, 1, 0, 2, 0) + ] + + # from numpy array + arr = pa.array(np.array(data, dtype=dtype)) + expected = pa.array(data, type=type) + assert arr.equals(expected) + + # from list of numpy scalars + arr = pa.array(list(np.array(data, dtype=dtype))) + assert arr.equals(expected) + + +def test_array_from_different_numpy_datetime_units_raises(): + data = [ + None, + datetime.datetime(2017, 4, 4, 12, 11, 10), + datetime.datetime(2018, 1, 1, 0, 2, 0) + ] + s = np.array(data, dtype='datetime64[s]') + ms = np.array(data, dtype='datetime64[ms]') + data = list(s[:2]) + list(ms[2:]) + + with 
pytest.raises(pa.ArrowNotImplementedError): + pa.array(data) + + +@pytest.mark.parametrize('unit', ['ns', 'us', 'ms', 's']) +def test_array_from_list_of_timestamps(unit): + n = np.datetime64('NaT', unit) + x = np.datetime64('2017-01-01 01:01:01.111111111', unit) + y = np.datetime64('2018-11-22 12:24:48.111111111', unit) + + a1 = pa.array([n, x, y]) + a2 = pa.array([n, x, y], type=pa.timestamp(unit)) + + assert a1.type == a2.type + assert a1.type.unit == unit + assert a1[0] == a2[0] + + +def test_array_from_timestamp_with_generic_unit(): + n = np.datetime64('NaT') + x = np.datetime64('2017-01-01 01:01:01.111111111') + y = np.datetime64('2018-11-22 12:24:48.111111111') + + with pytest.raises(pa.ArrowNotImplementedError, + match='Unbound or generic datetime64 time unit'): + pa.array([n, x, y]) + + +@pytest.mark.parametrize(('dtype', 'type'), [ + ('timedelta64[s]', pa.duration('s')), + ('timedelta64[ms]', pa.duration('ms')), + ('timedelta64[us]', pa.duration('us')), + ('timedelta64[ns]', pa.duration('ns')) +]) +def test_array_from_numpy_timedelta(dtype, type): + data = [ + None, + datetime.timedelta(1), + datetime.timedelta(0, 1) + ] + + # from numpy array + np_arr = np.array(data, dtype=dtype) + arr = pa.array(np_arr) + assert isinstance(arr, pa.DurationArray) + assert arr.type == type + expected = pa.array(data, type=type) + assert arr.equals(expected) + assert arr.to_pylist() == data + + # from list of numpy scalars + arr = pa.array(list(np.array(data, dtype=dtype))) + assert arr.equals(expected) + assert arr.to_pylist() == data + + +def test_array_from_numpy_timedelta_incorrect_unit(): + # generic (no unit) + td = np.timedelta64(1) + + for data in [[td], np.array([td])]: + with pytest.raises(NotImplementedError): + pa.array(data) + + # unsupported unit + td = np.timedelta64(1, 'M') + for data in [[td], np.array([td])]: + with pytest.raises(NotImplementedError): + pa.array(data) + + +def test_array_from_numpy_ascii(): + arr = np.array(['abcde', 'abc', ''], dtype='|S5') + + arrow_arr = pa.array(arr) + assert arrow_arr.type == 'binary' + expected = pa.array(['abcde', 'abc', ''], type='binary') + assert arrow_arr.equals(expected) + + mask = np.array([False, True, False]) + arrow_arr = pa.array(arr, mask=mask) + expected = pa.array(['abcde', None, ''], type='binary') + assert arrow_arr.equals(expected) + + # Strided variant + arr = np.array(['abcde', 'abc', ''] * 5, dtype='|S5')[::2] + mask = np.array([False, True, False] * 5)[::2] + arrow_arr = pa.array(arr, mask=mask) + + expected = pa.array(['abcde', '', None, 'abcde', '', None, 'abcde', ''], + type='binary') + assert arrow_arr.equals(expected) + + # 0 itemsize + arr = np.array(['', '', ''], dtype='|S0') + arrow_arr = pa.array(arr) + expected = pa.array(['', '', ''], type='binary') + assert arrow_arr.equals(expected) + + +def test_interval_array_from_timedelta(): + data = [ + None, + datetime.timedelta(days=1, seconds=1, microseconds=1, + milliseconds=1, minutes=1, hours=1, weeks=1)] + + # From timedelta (explicit type required) + arr = pa.array(data, pa.month_day_nano_interval()) + assert isinstance(arr, pa.MonthDayNanoIntervalArray) + assert arr.type == pa.month_day_nano_interval() + expected_list = [ + None, + pa.MonthDayNano([0, 8, + (datetime.timedelta(seconds=1, microseconds=1, + milliseconds=1, minutes=1, + hours=1) // + datetime.timedelta(microseconds=1)) * 1000])] + expected = pa.array(expected_list) + assert arr.equals(expected) + assert arr.to_pylist() == expected_list + + +@pytest.mark.pandas +def 
test_interval_array_from_relativedelta(): + # dateutil is dependency of pandas + from dateutil.relativedelta import relativedelta + from pandas import DateOffset + data = [ + None, + relativedelta(years=1, months=1, + days=1, seconds=1, microseconds=1, + minutes=1, hours=1, weeks=1, leapdays=1)] + # Note leapdays are ignored. + + # From relativedelta + arr = pa.array(data) + assert isinstance(arr, pa.MonthDayNanoIntervalArray) + assert arr.type == pa.month_day_nano_interval() + expected_list = [ + None, + pa.MonthDayNano([13, 8, + (datetime.timedelta(seconds=1, microseconds=1, + minutes=1, hours=1) // + datetime.timedelta(microseconds=1)) * 1000])] + expected = pa.array(expected_list) + assert arr.equals(expected) + assert arr.to_pandas().tolist() == [ + None, DateOffset(months=13, days=8, + microseconds=( + datetime.timedelta(seconds=1, microseconds=1, + minutes=1, hours=1) // + datetime.timedelta(microseconds=1)), + nanoseconds=0)] + with pytest.raises(ValueError): + pa.array([DateOffset(years=((1 << 32) // 12), months=100)]) + with pytest.raises(ValueError): + pa.array([DateOffset(weeks=((1 << 32) // 7), days=100)]) + with pytest.raises(ValueError): + pa.array([DateOffset(seconds=((1 << 64) // 1000000000), + nanoseconds=1)]) + with pytest.raises(ValueError): + pa.array([DateOffset(microseconds=((1 << 64) // 100))]) + + +def test_interval_array_from_tuple(): + data = [None, (1, 2, -3)] + + # From timedelta (explicit type required) + arr = pa.array(data, pa.month_day_nano_interval()) + assert isinstance(arr, pa.MonthDayNanoIntervalArray) + assert arr.type == pa.month_day_nano_interval() + expected_list = [ + None, + pa.MonthDayNano([1, 2, -3])] + expected = pa.array(expected_list) + assert arr.equals(expected) + assert arr.to_pylist() == expected_list + + +@pytest.mark.pandas +def test_interval_array_from_dateoffset(): + from pandas.tseries.offsets import DateOffset + data = [ + None, + DateOffset(years=1, months=1, + days=1, seconds=1, microseconds=1, + minutes=1, hours=1, weeks=1, nanoseconds=1), + DateOffset()] + + arr = pa.array(data) + assert isinstance(arr, pa.MonthDayNanoIntervalArray) + assert arr.type == pa.month_day_nano_interval() + expected_list = [ + None, + pa.MonthDayNano([13, 8, 3661000001001]), + pa.MonthDayNano([0, 0, 0])] + expected = pa.array(expected_list) + assert arr.equals(expected) + expected_from_pandas = [ + None, DateOffset(months=13, days=8, + microseconds=( + datetime.timedelta(seconds=1, microseconds=1, + minutes=1, hours=1) // + datetime.timedelta(microseconds=1)), + nanoseconds=1), + DateOffset(months=0, days=0, microseconds=0, nanoseconds=0)] + + assert arr.to_pandas().tolist() == expected_from_pandas + + # nested list array conversion + actual_list = pa.array([data]).to_pandas().tolist() + assert len(actual_list) == 1 + assert list(actual_list[0]) == expected_from_pandas + + +def test_array_from_numpy_unicode(): + dtypes = ['U5'] + + for dtype in dtypes: + arr = np.array(['abcde', 'abc', ''], dtype=dtype) + + arrow_arr = pa.array(arr) + assert arrow_arr.type == 'utf8' + expected = pa.array(['abcde', 'abc', ''], type='utf8') + assert arrow_arr.equals(expected) + + mask = np.array([False, True, False]) + arrow_arr = pa.array(arr, mask=mask) + expected = pa.array(['abcde', None, ''], type='utf8') + assert arrow_arr.equals(expected) + + # Strided variant + arr = np.array(['abcde', 'abc', ''] * 5, dtype=dtype)[::2] + mask = np.array([False, True, False] * 5)[::2] + arrow_arr = pa.array(arr, mask=mask) + + expected = pa.array(['abcde', '', None, 'abcde', '', 
None, + 'abcde', ''], type='utf8') + assert arrow_arr.equals(expected) + + # 0 itemsize + arr = np.array(['', '', ''], dtype='<U0') + arrow_arr = pa.array(arr) + expected = pa.array(['', '', ''], type='utf8') + assert arrow_arr.equals(expected) + + +def test_nbytes_sizeof(): + a = pa.array(np.array([4, 5, 6], dtype='int64')) + assert a.nbytes == 8 * 3 + assert a.get_total_buffer_size() == 8 * 3 + assert sys.getsizeof(a) >= object.__sizeof__(a) + a.nbytes + a = pa.array([1, None, 3], type='int64') + assert a.nbytes == 8*3 + 1 + assert a.get_total_buffer_size() == 8*3 + 1 + assert sys.getsizeof(a) >= object.__sizeof__(a) + a.nbytes + a = pa.array([[1, 2], None, [3, None, 4, 5]], type=pa.list_(pa.int64())) + assert a.nbytes == 62 + assert a.get_total_buffer_size() == 1 + 4 * 4 + 1 + 6 * 8 + assert sys.getsizeof(a) >= object.__sizeof__(a) + a.nbytes + a = pa.array([[[5, 6, 7]], [[9, 10]]], type=pa.list_(pa.list_(pa.int8()))) + assert a.get_total_buffer_size() == (4 * 3) + (4 * 3) + (1 * 5) + assert a.nbytes == 21 + a = pa.array([[[1, 2], [3, 4]], [[5, 6, 7], None, [8]], [[9, 10]]], + type=pa.list_(pa.list_(pa.int8()))) + a1 = a.slice(1, 2) + assert a1.nbytes == (4 * 2) + 1 + (4 * 4) + (1 * 6) + assert a1.get_total_buffer_size() == (4 * 4) + 1 + (4 * 7) + (1 * 10) + + +def test_nbytes_size(): + a = pa.chunked_array([pa.array([1, None, 3], type=pa.int16()), + pa.array([4, 5, 6], type=pa.int16())]) + assert a.nbytes == 13 + + +def test_invalid_tensor_constructor_repr(): + # ARROW-2638: prevent calling extension class constructors directly + with pytest.raises(TypeError): + repr(pa.Tensor([1])) + + +def test_invalid_tensor_construction(): + with pytest.raises(TypeError): + pa.Tensor() + + +@pytest.mark.parametrize(('offset_type', 'list_type_factory'), + [(pa.int32(), pa.list_), (pa.int64(), pa.large_list)]) +def test_list_array_flatten(offset_type, list_type_factory): + typ2 = list_type_factory( + list_type_factory( + pa.int64() + ) + ) + arr2 = pa.array([ + None, + [ + [1, None, 2], + None, + [3, 4] + ], + [], + [ + [], + [5, 6], + None + ], + [ + [7, 8] + ] + ], type=typ2) + offsets2 = pa.array([0, 0, 3, 3, 6, 7], type=offset_type) + + typ1 = list_type_factory(pa.int64()) + arr1 = pa.array([ + [1, None, 2], + None, + [3, 4], + [], + [5, 6], + None, + [7, 8] + ], type=typ1) + offsets1 = pa.array([0, 3, 3, 5, 5, 7, 7, 9], type=offset_type) + + arr0 = pa.array([ + 1, None, 2, + 3, 4, + 5, 6, + 7, 8 + ], type=pa.int64()) + + assert arr2.flatten().equals(arr1) + assert arr2.offsets.equals(offsets2) + assert arr2.values.equals(arr1) + assert arr1.flatten().equals(arr0) + assert arr1.offsets.equals(offsets1) + assert arr1.values.equals(arr0) + assert arr2.flatten().flatten().equals(arr0) + assert arr2.values.values.equals(arr0) + + +@pytest.mark.parametrize('list_type', [ + pa.list_(pa.int32()), + pa.list_(pa.int32(), list_size=2), + pa.large_list(pa.int32())]) +def test_list_value_parent_indices(list_type): + arr = pa.array( + [ + [0, 1], + None, + [None, None], + [3, 4] + ], type=list_type) + expected = pa.array([0, 0, 2, 2, 3, 3], type=pa.int64()) + assert arr.value_parent_indices().equals(expected) + + +@pytest.mark.parametrize(('offset_type', 'list_type'), + [(pa.int32(), pa.list_(pa.int32())), + (pa.int32(), pa.list_(pa.int32(), list_size=2)), + (pa.int64(), pa.large_list(pa.int32()))]) +def test_list_value_lengths(offset_type, list_type): + + # FixedSizeListArray needs fixed list sizes + if getattr(list_type, "list_size", None): + arr = pa.array( + [ + [0, 1], + None, + [None, None], + [3, 4] + ], type=list_type) + expected = pa.array([2, None, 2, 2], type=offset_type) + + # Otherwise create variable list sizes + else: + arr = pa.array( + [ + [0, 1, 2], + None, + [], + [3, 4] + ], type=list_type) + expected = pa.array([3, None, 0, 2], type=offset_type) + assert
arr.value_lengths().equals(expected) + + +@pytest.mark.parametrize('list_type_factory', [pa.list_, pa.large_list]) +def test_list_array_flatten_non_canonical(list_type_factory): + # Non-canonical list array (null elements backed by non-empty sublists) + typ = list_type_factory(pa.int64()) + arr = pa.array([[1], [2, 3], [4, 5, 6]], type=typ) + buffers = arr.buffers()[:2] + buffers[0] = pa.py_buffer(b"\x05") # validity bitmap + arr = arr.from_buffers(arr.type, len(arr), buffers, children=[arr.values]) + assert arr.to_pylist() == [[1], None, [4, 5, 6]] + assert arr.offsets.to_pylist() == [0, 1, 3, 6] + + flattened = arr.flatten() + flattened.validate(full=True) + assert flattened.type == typ.value_type + assert flattened.to_pylist() == [1, 4, 5, 6] + + # .values is the physical values array (including masked elements) + assert arr.values.to_pylist() == [1, 2, 3, 4, 5, 6] + + +@pytest.mark.parametrize('klass', [pa.ListArray, pa.LargeListArray]) +def test_list_array_values_offsets_sliced(klass): + # ARROW-7301 + arr = klass.from_arrays(offsets=[0, 3, 4, 6], values=[1, 2, 3, 4, 5, 6]) + assert arr.values.to_pylist() == [1, 2, 3, 4, 5, 6] + assert arr.offsets.to_pylist() == [0, 3, 4, 6] + + # sliced -> values keeps referring to full values buffer, but offsets is + # sliced as well so the offsets correctly point into the full values array + # sliced -> flatten() will return the sliced value array. + arr2 = arr[1:] + assert arr2.values.to_pylist() == [1, 2, 3, 4, 5, 6] + assert arr2.offsets.to_pylist() == [3, 4, 6] + assert arr2.flatten().to_pylist() == [4, 5, 6] + i = arr2.offsets[0].as_py() + j = arr2.offsets[1].as_py() + assert arr2[0].as_py() == arr2.values[i:j].to_pylist() == [4] + + +def test_fixed_size_list_array_flatten(): + typ2 = pa.list_(pa.list_(pa.int64(), 2), 3) + arr2 = pa.array([ + [ + [1, 2], + [3, 4], + [5, 6], + ], + None, + [ + [7, None], + None, + [8, 9] + ], + ], type=typ2) + assert arr2.type.equals(typ2) + + typ1 = pa.list_(pa.int64(), 2) + arr1 = pa.array([ + [1, 2], [3, 4], [5, 6], + [7, None], None, [8, 9] + ], type=typ1) + assert arr1.type.equals(typ1) + assert arr2.flatten().equals(arr1) + + typ0 = pa.int64() + arr0 = pa.array([ + 1, 2, 3, 4, 5, 6, 7, None, 8, 9, + ], type=typ0) + assert arr0.type.equals(typ0) + assert arr1.flatten().equals(arr0) + assert arr2.flatten().flatten().equals(arr0) + + +def test_fixed_size_list_array_flatten_with_slice(): + array = pa.array([[1], [2], [3]], + type=pa.list_(pa.float64(), list_size=1)) + assert array[2:].flatten() == pa.array([3], type=pa.float64()) + + +def test_map_array_values_offsets(): + ty = pa.map_(pa.utf8(), pa.int32()) + ty_values = pa.struct([pa.field("key", pa.utf8(), nullable=False), + pa.field("value", pa.int32())]) + a = pa.array([[('a', 1), ('b', 2)], [('c', 3)]], type=ty) + + assert a.values.type.equals(ty_values) + assert a.values == pa.array([ + {'key': 'a', 'value': 1}, + {'key': 'b', 'value': 2}, + {'key': 'c', 'value': 3}, + ], type=ty_values) + assert a.keys.equals(pa.array(['a', 'b', 'c'])) + assert a.items.equals(pa.array([1, 2, 3], type=pa.int32())) + + assert pa.ListArray.from_arrays(a.offsets, a.keys).equals( + pa.array([['a', 'b'], ['c']])) + assert pa.ListArray.from_arrays(a.offsets, a.items).equals( + pa.array([[1, 2], [3]], type=pa.list_(pa.int32()))) + + with pytest.raises(NotImplementedError): + a.flatten() + + +def test_struct_array_flatten(): + ty = pa.struct([pa.field('x', pa.int16()), + pa.field('y', pa.float32())]) + a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty) + xs, ys = 
a.flatten() + assert xs.type == pa.int16() + assert ys.type == pa.float32() + assert xs.to_pylist() == [1, 3, 5] + assert ys.to_pylist() == [2.5, 4.5, 6.5] + xs, ys = a[1:].flatten() + assert xs.to_pylist() == [3, 5] + assert ys.to_pylist() == [4.5, 6.5] + + a = pa.array([(1, 2.5), None, (3, 4.5)], type=ty) + xs, ys = a.flatten() + assert xs.to_pylist() == [1, None, 3] + assert ys.to_pylist() == [2.5, None, 4.5] + xs, ys = a[1:].flatten() + assert xs.to_pylist() == [None, 3] + assert ys.to_pylist() == [None, 4.5] + + a = pa.array([(1, None), (2, 3.5), (None, 4.5)], type=ty) + xs, ys = a.flatten() + assert xs.to_pylist() == [1, 2, None] + assert ys.to_pylist() == [None, 3.5, 4.5] + xs, ys = a[1:].flatten() + assert xs.to_pylist() == [2, None] + assert ys.to_pylist() == [3.5, 4.5] + + a = pa.array([(1, None), None, (None, 2.5)], type=ty) + xs, ys = a.flatten() + assert xs.to_pylist() == [1, None, None] + assert ys.to_pylist() == [None, None, 2.5] + xs, ys = a[1:].flatten() + assert xs.to_pylist() == [None, None] + assert ys.to_pylist() == [None, 2.5] + + +def test_struct_array_field(): + ty = pa.struct([pa.field('x', pa.int16()), + pa.field('y', pa.float32())]) + a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty) + + x0 = a.field(0) + y0 = a.field(1) + x1 = a.field(-2) + y1 = a.field(-1) + x2 = a.field('x') + y2 = a.field('y') + + assert isinstance(x0, pa.lib.Int16Array) + assert isinstance(y1, pa.lib.FloatArray) + assert x0.equals(pa.array([1, 3, 5], type=pa.int16())) + assert y0.equals(pa.array([2.5, 4.5, 6.5], type=pa.float32())) + assert x0.equals(x1) + assert x0.equals(x2) + assert y0.equals(y1) + assert y0.equals(y2) + + for invalid_index in [None, pa.int16()]: + with pytest.raises(TypeError): + a.field(invalid_index) + + for invalid_index in [3, -3]: + with pytest.raises(IndexError): + a.field(invalid_index) + + for invalid_name in ['z', '']: + with pytest.raises(KeyError): + a.field(invalid_name) + + +def test_struct_array_flattened_field(): + ty = pa.struct([pa.field('x', pa.int16()), + pa.field('y', pa.float32())]) + a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty, + mask=pa.array([False, True, False])) + + x0 = a._flattened_field(0) + y0 = a._flattened_field(1) + x1 = a._flattened_field(-2) + y1 = a._flattened_field(-1) + x2 = a._flattened_field('x') + y2 = a._flattened_field('y') + + assert isinstance(x0, pa.lib.Int16Array) + assert isinstance(y1, pa.lib.FloatArray) + assert x0.equals(pa.array([1, None, 5], type=pa.int16())) + assert y0.equals(pa.array([2.5, None, 6.5], type=pa.float32())) + assert x0.equals(x1) + assert x0.equals(x2) + assert y0.equals(y1) + assert y0.equals(y2) + + for invalid_index in [None, pa.int16()]: + with pytest.raises(TypeError): + a._flattened_field(invalid_index) + + for invalid_index in [3, -3]: + with pytest.raises(IndexError): + a._flattened_field(invalid_index) + + for invalid_name in ['z', '']: + with pytest.raises(KeyError): + a._flattened_field(invalid_name) + + +def test_empty_cast(): + types = [ + pa.null(), + pa.bool_(), + pa.int8(), + pa.int16(), + pa.int32(), + pa.int64(), + pa.uint8(), + pa.uint16(), + pa.uint32(), + pa.uint64(), + pa.float16(), + pa.float32(), + pa.float64(), + pa.date32(), + pa.date64(), + pa.binary(), + pa.binary(length=4), + pa.string(), + ] + + for (t1, t2) in itertools.product(types, types): + try: + # ARROW-4766: Ensure that supported types conversion don't segfault + # on empty arrays of common types + pa.array([], type=t1).cast(t2) + except (pa.lib.ArrowNotImplementedError, pa.ArrowInvalid): + continue + 
+ +def test_nested_dictionary_array(): + dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b']) + list_arr = pa.ListArray.from_arrays([0, 2, 3], dict_arr) + assert list_arr.to_pylist() == [['a', 'b'], ['a']] + + dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b']) + dict_arr2 = pa.DictionaryArray.from_arrays([0, 1, 2, 1, 0], dict_arr) + assert dict_arr2.to_pylist() == ['a', 'b', 'a', 'b', 'a'] + + +def test_array_from_numpy_str_utf8(): + # ARROW-3890 -- in Python 3, NPY_UNICODE arrays are produced, but in Python + # 2 they are NPY_STRING (binary), so we must do UTF-8 validation + vec = np.array(["toto", "tata"]) + vec2 = np.array(["toto", "tata"], dtype=object) + + arr = pa.array(vec, pa.string()) + arr2 = pa.array(vec2, pa.string()) + expected = pa.array(["toto", "tata"]) + assert arr.equals(expected) + assert arr2.equals(expected) + + # with mask, separate code path + mask = np.array([False, False], dtype=bool) + arr = pa.array(vec, pa.string(), mask=mask) + assert arr.equals(expected) + + # UTF8 validation failures + vec = np.array([('mañana').encode('utf-16-le')]) + with pytest.raises(ValueError): + pa.array(vec, pa.string()) + + with pytest.raises(ValueError): + pa.array(vec, pa.string(), mask=np.array([False])) + + +@pytest.mark.slow +@pytest.mark.large_memory +def test_numpy_binary_overflow_to_chunked(): + # ARROW-3762, ARROW-5966 + + # 2^31 + 1 bytes + values = [b'x'] + unicode_values = ['x'] + + # Make 10 unique 1MB strings then repeat then 2048 times + unique_strings = { + i: b'x' * ((1 << 20) - 1) + str(i % 10).encode('utf8') + for i in range(10) + } + unicode_unique_strings = {i: x.decode('utf8') + for i, x in unique_strings.items()} + values += [unique_strings[i % 10] for i in range(1 << 11)] + unicode_values += [unicode_unique_strings[i % 10] + for i in range(1 << 11)] + + for case, ex_type in [(values, pa.binary()), + (unicode_values, pa.utf8())]: + arr = np.array(case) + arrow_arr = pa.array(arr) + arr = None + + assert isinstance(arrow_arr, pa.ChunkedArray) + assert arrow_arr.type == ex_type + + # Split up into 16MB chunks. 
128 * 16 = 2048, so 129 + assert arrow_arr.num_chunks == 129 + + value_index = 0 + for i in range(arrow_arr.num_chunks): + chunk = arrow_arr.chunk(i) + for val in chunk: + assert val.as_py() == case[value_index] + value_index += 1 + + +@pytest.mark.large_memory +def test_list_child_overflow_to_chunked(): + kilobyte_string = 'x' * 1024 + two_mega = 2**21 + + vals = [[kilobyte_string]] * (two_mega - 1) + arr = pa.array(vals) + assert isinstance(arr, pa.Array) + assert len(arr) == two_mega - 1 + + vals = [[kilobyte_string]] * two_mega + arr = pa.array(vals) + assert isinstance(arr, pa.ChunkedArray) + assert len(arr) == two_mega + assert len(arr.chunk(0)) == two_mega - 1 + assert len(arr.chunk(1)) == 1 + + +def test_infer_type_masked(): + # ARROW-5208 + ty = pa.infer_type(['foo', 'bar', None, 2], + mask=[False, False, False, True]) + assert ty == pa.utf8() + + # all masked + ty = pa.infer_type(['foo', 'bar', None, 2], + mask=np.array([True, True, True, True])) + assert ty == pa.null() + + # length 0 + assert pa.infer_type([], mask=[]) == pa.null() + + +def test_array_masked(): + # ARROW-5208 + arr = pa.array([4, None, 4, 3.], + mask=np.array([False, True, False, True])) + assert arr.type == pa.int64() + + # ndarray dtype=object argument + arr = pa.array(np.array([4, None, 4, 3.], dtype="O"), + mask=np.array([False, True, False, True])) + assert arr.type == pa.int64() + + +def test_array_supported_masks(): + # ARROW-13883 + arr = pa.array([4, None, 4, 3.], + mask=np.array([False, True, False, True])) + assert arr.to_pylist() == [4, None, 4, None] + + arr = pa.array([4, None, 4, 3], + mask=pa.array([False, True, False, True])) + assert arr.to_pylist() == [4, None, 4, None] + + arr = pa.array([4, None, 4, 3], + mask=[False, True, False, True]) + assert arr.to_pylist() == [4, None, 4, None] + + arr = pa.array([4, 3, None, 3], + mask=[False, True, False, True]) + assert arr.to_pylist() == [4, None, None, None] + + # Non boolean values + with pytest.raises(pa.ArrowTypeError): + arr = pa.array([4, None, 4, 3], + mask=pa.array([1.0, 2.0, 3.0, 4.0])) + + with pytest.raises(pa.ArrowTypeError): + arr = pa.array([4, None, 4, 3], + mask=[1.0, 2.0, 3.0, 4.0]) + + with pytest.raises(pa.ArrowTypeError): + arr = pa.array([4, None, 4, 3], + mask=np.array([1.0, 2.0, 3.0, 4.0])) + + with pytest.raises(pa.ArrowTypeError): + arr = pa.array([4, None, 4, 3], + mask=pa.array([False, True, False, True], + mask=pa.array([True, True, True, True]))) + + with pytest.raises(pa.ArrowTypeError): + arr = pa.array([4, None, 4, 3], + mask=pa.array([False, None, False, True])) + + # Numpy arrays only accepts numpy masks + with pytest.raises(TypeError): + arr = pa.array(np.array([4, None, 4, 3.]), + mask=[True, False, True, False]) + + with pytest.raises(TypeError): + arr = pa.array(np.array([4, None, 4, 3.]), + mask=pa.array([True, False, True, False])) + + +@pytest.mark.pandas +def test_array_supported_pandas_masks(): + import pandas + arr = pa.array(pandas.Series([0, 1], name="a", dtype="int64"), + mask=pandas.Series([True, False], dtype='bool')) + assert arr.to_pylist() == [None, 1] + + +def test_binary_array_masked(): + # ARROW-12431 + masked_basic = pa.array([b'\x05'], type=pa.binary(1), + mask=np.array([False])) + assert [b'\x05'] == masked_basic.to_pylist() + + # Fixed Length Binary + masked = pa.array(np.array([b'\x05']), type=pa.binary(1), + mask=np.array([False])) + assert [b'\x05'] == masked.to_pylist() + + masked_nulls = pa.array(np.array([b'\x05']), type=pa.binary(1), + mask=np.array([True])) + assert [None] == 
masked_nulls.to_pylist() + + # Variable Length Binary + masked = pa.array(np.array([b'\x05']), type=pa.binary(), + mask=np.array([False])) + assert [b'\x05'] == masked.to_pylist() + + masked_nulls = pa.array(np.array([b'\x05']), type=pa.binary(), + mask=np.array([True])) + assert [None] == masked_nulls.to_pylist() + + # Fixed Length Binary, copy + npa = np.array([b'aaa', b'bbb', b'ccc']*10) + arrow_array = pa.array(npa, type=pa.binary(3), + mask=np.array([False, False, False]*10)) + npa[npa == b"bbb"] = b"XXX" + assert ([b'aaa', b'bbb', b'ccc']*10) == arrow_array.to_pylist() + + +def test_binary_array_strided(): + # Masked + nparray = np.array([b"ab", b"cd", b"ef"]) + arrow_array = pa.array(nparray[::2], pa.binary(2), + mask=np.array([False, False])) + assert [b"ab", b"ef"] == arrow_array.to_pylist() + + # Unmasked + nparray = np.array([b"ab", b"cd", b"ef"]) + arrow_array = pa.array(nparray[::2], pa.binary(2)) + assert [b"ab", b"ef"] == arrow_array.to_pylist() + + +def test_array_invalid_mask_raises(): + # ARROW-10742 + cases = [ + ([1, 2], np.array([False, False], dtype="O"), + TypeError, "must be boolean dtype"), + + ([1, 2], np.array([[False], [False]]), + pa.ArrowInvalid, "must be 1D array"), + + ([1, 2, 3], np.array([False, False]), + pa.ArrowInvalid, "different length"), + + (np.array([1, 2]), np.array([False, False], dtype="O"), + TypeError, "must be boolean dtype"), + + (np.array([1, 2]), np.array([[False], [False]]), + ValueError, "must be 1D array"), + + (np.array([1, 2, 3]), np.array([False, False]), + ValueError, "different length"), + ] + for obj, mask, ex, msg in cases: + with pytest.raises(ex, match=msg): + pa.array(obj, mask=mask) + + +def test_array_from_large_pyints(): + # ARROW-5430 + with pytest.raises(OverflowError): + # too large for int64 so dtype must be explicitly provided + pa.array([int(2 ** 63)]) + + +def test_numpy_array_protocol(): + # test the __array__ method on pyarrow.Array + arr = pa.array([1, 2, 3]) + result = np.asarray(arr) + expected = np.array([1, 2, 3], dtype="int64") + np.testing.assert_array_equal(result, expected) + + # this should not raise a deprecation warning with numpy 2.0+ + result = np.array(arr, copy=False) + np.testing.assert_array_equal(result, expected) + + result = np.array(arr, dtype="int64", copy=False) + np.testing.assert_array_equal(result, expected) + + # no zero-copy is possible + arr = pa.array([1, 2, None]) + expected = np.array([1, 2, np.nan], dtype="float64") + result = np.asarray(arr) + np.testing.assert_array_equal(result, expected) + + if Version(np.__version__) < Version("2.0"): + # copy keyword is not strict and not passed down to __array__ + result = np.array(arr, copy=False) + np.testing.assert_array_equal(result, expected) + + result = np.array(arr, dtype="float64", copy=False) + np.testing.assert_array_equal(result, expected) + else: + # starting with numpy 2.0, the copy=False keyword is assumed to be strict + with pytest.raises(ValueError, match="Unable to avoid a copy"): + np.array(arr, copy=False) + + arr = pa.array([1, 2, 3]) + with pytest.raises(ValueError): + np.array(arr, dtype="float64", copy=False) + + # copy=True -> not yet passed by numpy, so we have to call this directly to test + arr = pa.array([1, 2, 3]) + result = arr.__array__(copy=True) + assert result.flags.writeable + + arr = pa.array([1, 2, 3]) + result = arr.__array__(dtype=np.dtype("float64"), copy=True) + assert result.dtype == "float64" + + +def test_array_protocol(): + + class MyArray: + def __init__(self, data): + self.data = data + + def 
__arrow_array__(self, type=None): + return pa.array(self.data, type=type) + + arr = MyArray(np.array([1, 2, 3], dtype='int64')) + result = pa.array(arr) + expected = pa.array([1, 2, 3], type=pa.int64()) + assert result.equals(expected) + result = pa.array(arr, type=pa.int64()) + expected = pa.array([1, 2, 3], type=pa.int64()) + assert result.equals(expected) + result = pa.array(arr, type=pa.float64()) + expected = pa.array([1, 2, 3], type=pa.float64()) + assert result.equals(expected) + + # raise error when passing size or mask keywords + with pytest.raises(ValueError): + pa.array(arr, mask=np.array([True, False, True])) + with pytest.raises(ValueError): + pa.array(arr, size=3) + + # ensure the return value is an Array + class MyArrayInvalid: + def __init__(self, data): + self.data = data + + def __arrow_array__(self, type=None): + return np.array(self.data) + + arr = MyArrayInvalid(np.array([1, 2, 3], dtype='int64')) + with pytest.raises(TypeError): + pa.array(arr) + + # ARROW-7066 - allow ChunkedArray output + # GH-33727 - if num_chunks=1 return Array + class MyArray2: + def __init__(self, data): + self.data = data + + def __arrow_array__(self, type=None): + return pa.chunked_array([self.data], type=type) + + arr = MyArray2(np.array([1, 2, 3], dtype='int64')) + result = pa.array(arr) + expected = pa.array([1, 2, 3], type=pa.int64()) + assert result.equals(expected) + + class MyArray3: + def __init__(self, data1, data2): + self.data1 = data1 + self.data2 = data2 + + def __arrow_array__(self, type=None): + return pa.chunked_array([self.data1, self.data2], type=type) + + np_arr = np.array([1, 2, 3], dtype='int64') + arr = MyArray3(np_arr, np_arr) + result = pa.array(arr) + expected = pa.chunked_array([[1, 2, 3], [1, 2, 3]], type=pa.int64()) + assert result.equals(expected) + + +def test_c_array_protocol(): + class ArrayWrapper: + def __init__(self, data): + self.data = data + + def __arrow_c_array__(self, requested_schema=None): + return self.data.__arrow_c_array__(requested_schema) + + # Can roundtrip through the C array protocol + arr = ArrayWrapper(pa.array([1, 2, 3], type=pa.int64())) + result = pa.array(arr) + assert result == arr.data + + # Will cast to requested type + result = pa.array(arr, type=pa.int32()) + assert result == pa.array([1, 2, 3], type=pa.int32()) + + +def test_concat_array(): + concatenated = pa.concat_arrays( + [pa.array([1, 2]), pa.array([3, 4])]) + assert concatenated.equals(pa.array([1, 2, 3, 4])) + + +def test_concat_array_different_types(): + with pytest.raises(pa.ArrowInvalid): + pa.concat_arrays([pa.array([1]), pa.array([2.])]) + + +def test_concat_array_invalid_type(): + # ARROW-9920 - do not segfault on non-array input + + with pytest.raises(TypeError, match="should contain Array objects"): + pa.concat_arrays([None]) + + arr = pa.chunked_array([[0, 1], [3, 4]]) + with pytest.raises(TypeError, match="should contain Array objects"): + pa.concat_arrays(arr) + + +@pytest.mark.pandas +def test_to_pandas_timezone(): + # https://issues.apache.org/jira/browse/ARROW-6652 + arr = pa.array([1, 2, 3], type=pa.timestamp('s', tz='Europe/Brussels')) + s = arr.to_pandas() + assert s.dt.tz is not None + arr = pa.chunked_array([arr]) + s = arr.to_pandas() + assert s.dt.tz is not None + + +@pytest.mark.pandas +def test_to_pandas_float16_list(): + # https://github.com/apache/arrow/issues/36168 + expected = [[np.float16(1)], [np.float16(2)], [np.float16(3)]] + arr = pa.array(expected) + result = arr.to_pandas() + assert result[0].dtype == "float16" + assert result.tolist() == 
expected + + +def test_array_sort(): + arr = pa.array([5, 7, 35], type=pa.int64()) + sorted_arr = arr.sort("descending") + assert sorted_arr.to_pylist() == [35, 7, 5] + + arr = pa.chunked_array([[1, 2, 3], [4, 5, 6]]) + sorted_arr = arr.sort("descending") + assert sorted_arr.to_pylist() == [6, 5, 4, 3, 2, 1] + + arr = pa.array([5, 7, 35, None], type=pa.int64()) + sorted_arr = arr.sort("descending", null_placement="at_end") + assert sorted_arr.to_pylist() == [35, 7, 5, None] + sorted_arr = arr.sort("descending", null_placement="at_start") + assert sorted_arr.to_pylist() == [None, 35, 7, 5] + + +def test_struct_array_sort(): + arr = pa.StructArray.from_arrays([ + pa.array([5, 7, 7, 35], type=pa.int64()), + pa.array(["foo", "car", "bar", "foobar"]) + ], names=["a", "b"]) + + sorted_arr = arr.sort("descending", by="a") + assert sorted_arr.to_pylist() == [ + {"a": 35, "b": "foobar"}, + {"a": 7, "b": "car"}, + {"a": 7, "b": "bar"}, + {"a": 5, "b": "foo"}, + ] + + arr_with_nulls = pa.StructArray.from_arrays([ + pa.array([5, 7, 7, 35], type=pa.int64()), + pa.array(["foo", "car", "bar", "foobar"]) + ], names=["a", "b"], mask=pa.array([False, False, True, False])) + + sorted_arr = arr_with_nulls.sort( + "descending", by="a", null_placement="at_start") + assert sorted_arr.to_pylist() == [ + None, + {"a": 35, "b": "foobar"}, + {"a": 7, "b": "car"}, + {"a": 5, "b": "foo"}, + ] + + sorted_arr = arr_with_nulls.sort( + "descending", by="a", null_placement="at_end") + assert sorted_arr.to_pylist() == [ + {"a": 35, "b": "foobar"}, + {"a": 7, "b": "car"}, + {"a": 5, "b": "foo"}, + None + ] + + +def test_array_accepts_pyarrow_array(): + arr = pa.array([1, 2, 3]) + result = pa.array(arr) + assert arr == result + + # Test casting to a different type + result = pa.array(arr, type=pa.uint8()) + expected = pa.array([1, 2, 3], type=pa.uint8()) + assert expected == result + assert expected.type == pa.uint8() + + # Test casting with safe keyword + arr = pa.array([2 ** 63 - 1], type=pa.int64()) + + with pytest.raises(pa.ArrowInvalid): + pa.array(arr, type=pa.int32()) + + expected = pa.array([-1], type=pa.int32()) + result = pa.array(arr, type=pa.int32(), safe=False) + assert result == expected + + # Test memory_pool keyword is accepted + result = pa.array(arr, memory_pool=pa.default_memory_pool()) + assert arr == result + + +def check_run_end_encoded(ree_array, run_ends, values, logical_length, physical_length, + physical_offset): + assert ree_array.run_ends.to_pylist() == run_ends + assert ree_array.values.to_pylist() == values + assert len(ree_array) == logical_length + assert ree_array.find_physical_length() == physical_length + assert ree_array.find_physical_offset() == physical_offset + + +def check_run_end_encoded_from_arrays_with_type(ree_type=None): + run_ends = [3, 5, 10, 19] + values = [1, 2, 1, 3] + ree_array = pa.RunEndEncodedArray.from_arrays(run_ends, values, ree_type) + check_run_end_encoded(ree_array, run_ends, values, 19, 4, 0) + + +def test_run_end_encoded_from_arrays(): + check_run_end_encoded_from_arrays_with_type() + for run_end_type in [pa.int16(), pa.int32(), pa.int64()]: + for value_type in [pa.uint32(), pa.int32(), pa.uint64(), pa.int64()]: + ree_type = pa.run_end_encoded(run_end_type, value_type) + check_run_end_encoded_from_arrays_with_type(ree_type) + + +def test_run_end_encoded_from_buffers(): + run_ends = [3, 5, 10, 19] + values = [1, 2, 1, 3] + + ree_type = pa.run_end_encoded(run_end_type=pa.int32(), value_type=pa.uint8()) + length = 19 + buffers = [None] + null_count = 0 + offset = 0 + 
children = [run_ends, values] + + ree_array = pa.RunEndEncodedArray.from_buffers(ree_type, length, buffers, + null_count, offset, + children) + check_run_end_encoded(ree_array, run_ends, values, 19, 4, 0) + # buffers = [] + ree_array = pa.RunEndEncodedArray.from_buffers(ree_type, length, [], + null_count, offset, + children) + check_run_end_encoded(ree_array, run_ends, values, 19, 4, 0) + # null_count = -1 + ree_array = pa.RunEndEncodedArray.from_buffers(ree_type, length, buffers, + -1, offset, + children) + check_run_end_encoded(ree_array, run_ends, values, 19, 4, 0) + # offset = 4 + ree_array = pa.RunEndEncodedArray.from_buffers(ree_type, length - 4, buffers, + null_count, 4, children) + check_run_end_encoded(ree_array, run_ends, values, length - 4, 3, 1) + # buffers = [None, None] + with pytest.raises(ValueError): + pa.RunEndEncodedArray.from_buffers(ree_type, length, [None, None], + null_count, offset, children) + # children = None + with pytest.raises(ValueError): + pa.RunEndEncodedArray.from_buffers(ree_type, length, buffers, + null_count, offset, None) + # len(children) == 1 + with pytest.raises(ValueError): + pa.RunEndEncodedArray.from_buffers(ree_type, length, buffers, + null_count, offset, [run_ends]) + # null_count = 1 + with pytest.raises(ValueError): + pa.RunEndEncodedArray.from_buffers(ree_type, length, buffers, + 1, offset, children) + + +def test_run_end_encoded_from_array_with_type(): + run_ends = [1, 3, 6] + values = [1, 2, 3] + ree_type = pa.run_end_encoded(pa.int32(), pa.int64()) + expected = pa.RunEndEncodedArray.from_arrays(run_ends, values, + ree_type) + + arr = [1, 2, 2, 3, 3, 3] + result = pa.array(arr, type=ree_type) + assert result.equals(expected) + result = pa.array(np.array(arr), type=ree_type) + assert result.equals(expected) + + ree_type_2 = pa.run_end_encoded(pa.int16(), pa.float32()) + result = pa.array(arr, type=ree_type_2) + assert not result.equals(expected) + expected_2 = pa.RunEndEncodedArray.from_arrays(run_ends, values, + ree_type_2) + assert result.equals(expected_2) + + run_ends = [1, 3, 5, 6] + values = [1, 2, 3, None] + expected = pa.RunEndEncodedArray.from_arrays(run_ends, values, + ree_type) + + arr = [1, 2, 2, 3, 3, None] + result = pa.array(arr, type=ree_type) + assert result.equals(expected) + + run_ends = [1, 3, 4, 5, 6] + values = [1, 2, None, 3, None] + expected = pa.RunEndEncodedArray.from_arrays(run_ends, values, + ree_type) + + mask = pa.array([False, False, False, True, False, True]) + result = pa.array(arr, type=ree_type, mask=mask) + assert result.equals(expected) + + +def test_run_end_encoded_to_numpy(): + arr = [1, 2, 2, 3, 3, 3] + ree_array = pa.array(arr, pa.run_end_encoded(pa.int32(), pa.int64())) + expected = np.array(arr) + + np.testing.assert_array_equal(ree_array.to_numpy(zero_copy_only=False), expected) + + with pytest.raises(pa.ArrowInvalid): + ree_array.to_numpy() + + +@pytest.mark.pandas +def test_run_end_encoded_to_pandas(): + arr = [1, 2, 2, 3, 3, 3] + ree_array = pa.array(arr, pa.run_end_encoded(pa.int32(), pa.int64())) + + assert ree_array.to_pandas().tolist() == arr + + with pytest.raises(pa.ArrowInvalid): + ree_array.to_pandas(zero_copy_only=True) + + +@pytest.mark.parametrize(('list_array_type', 'list_type_factory'), + [(pa.ListViewArray, pa.list_view), + (pa.LargeListViewArray, pa.large_list_view)]) +def test_list_view_from_arrays(list_array_type, list_type_factory): + # test in order offsets, similar to ListArray representation + values = [1, 2, 3, 4, 5, 6, None, 7] + offsets = [0, 2, 4, 6] + sizes = [2, 2, 2, 
2] + array = list_array_type.from_arrays(offsets, sizes, values) + + assert array.to_pylist() == [[1, 2], [3, 4], [5, 6], [None, 7]] + assert array.values.to_pylist() == values + assert array.offsets.to_pylist() == offsets + assert array.sizes.to_pylist() == sizes + + # with specified type + typ = list_type_factory(pa.field("name", pa.int64())) + result = list_array_type.from_arrays(offsets, sizes, values, typ) + assert result.type == typ + assert result.type.value_field.name == "name" + + # with mismatching type + typ = list_type_factory(pa.binary()) + with pytest.raises(TypeError): + list_array_type.from_arrays(offsets, sizes, values, type=typ) + + # test out of order offsets with overlapping values + values = [1, 2, 3, 4] + offsets = [2, 1, 0] + sizes = [2, 2, 2] + array = list_array_type.from_arrays(offsets, sizes, values) + + assert array.to_pylist() == [[3, 4], [2, 3], [1, 2]] + assert array.values.to_pylist() == values + assert array.offsets.to_pylist() == offsets + assert array.sizes.to_pylist() == sizes + + # test null offsets and empty list values + values = [] + offsets = [0, None] + sizes = [0, 0] + array = list_array_type.from_arrays(offsets, sizes, values) + + assert array.to_pylist() == [[], None] + assert array.values.to_pylist() == values + assert array.offsets.to_pylist() == [0, 0] + assert array.sizes.to_pylist() == sizes + + # test null sizes and empty list values + values = [] + offsets = [0, 0] + sizes = [None, 0] + array = list_array_type.from_arrays(offsets, sizes, values) + + assert array.to_pylist() == [None, []] + assert array.values.to_pylist() == values + assert array.offsets.to_pylist() == offsets + assert array.sizes.to_pylist() == [0, 0] + + # test null bitmask + values = [1, 2] + offsets = [0, 0, 1] + sizes = [1, 0, 1] + mask = pa.array([False, True, False]) + array = list_array_type.from_arrays(offsets, sizes, values, mask=mask) + + assert array.to_pylist() == [[1], None, [2]] + assert array.values.to_pylist() == values + assert array.offsets.to_pylist() == offsets + assert array.sizes.to_pylist() == sizes + + +@pytest.mark.parametrize(('list_array_type', 'list_type_factory'), + [(pa.ListViewArray, pa.list_view), + (pa.LargeListViewArray, pa.large_list_view)]) +def test_list_view_from_arrays_fails(list_array_type, list_type_factory): + values = [1, 2] + offsets = [0, 1, None] + sizes = [1, 1, 0] + mask = pa.array([False, False, True]) + + # Ambiguous to specify both validity map and offsets or sizes with nulls + with pytest.raises(pa.lib.ArrowInvalid): + list_array_type.from_arrays(offsets, sizes, values, mask=mask) + + offsets = [0, 1, 1] + array = list_array_type.from_arrays(offsets, sizes, values, mask=mask) + array_slice = array[1:] + + # List offsets and sizes must not be slices if a validity map is specified + with pytest.raises(pa.lib.ArrowInvalid): + list_array_type.from_arrays( + array_slice.offsets, array_slice.sizes, + array_slice.values, mask=array_slice.is_null()) + + +@pytest.mark.parametrize(('list_array_type', 'list_type_factory', 'offset_type'), + [(pa.ListViewArray, pa.list_view, pa.int32()), + (pa.LargeListViewArray, pa.large_list_view, pa.int64())]) +def test_list_view_flatten(list_array_type, list_type_factory, offset_type): + arr0 = pa.array([ + 1, None, 2, + 3, 4, + 5, 6, + 7, 8 + ], type=pa.int64()) + + typ1 = list_type_factory(pa.int64()) + arr1 = pa.array([ + [1, None, 2], + None, + [3, 4], + [], + [5, 6], + None, + [7, 8] + ], type=typ1) + offsets1 = pa.array([0, 3, 3, 5, 5, 7, 7], type=offset_type) + sizes1 = pa.array([3, 0, 2, 
0, 2, 0, 2], type=offset_type) + + typ2 = list_type_factory( + list_type_factory( + pa.int64() + ) + ) + arr2 = pa.array([ + None, + [ + [1, None, 2], + None, + [3, 4] + ], + [], + [ + [], + [5, 6], + None + ], + [ + [7, 8] + ] + ], type=typ2) + offsets2 = pa.array([0, 0, 3, 3, 6], type=offset_type) + sizes2 = pa.array([0, 3, 0, 3, 1], type=offset_type) + + assert arr1.flatten().equals(arr0) + assert arr1.offsets.equals(offsets1) + assert arr1.sizes.equals(sizes1) + assert arr1.values.equals(arr0) + assert arr2.flatten().equals(arr1) + assert arr2.offsets.equals(offsets2) + assert arr2.sizes.equals(sizes2) + assert arr2.values.equals(arr1) + assert arr2.flatten().flatten().equals(arr0) + assert arr2.values.values.equals(arr0) + + # test out of order offsets + values = [1, 2, 3, 4] + offsets = [3, 2, 1, 0] + sizes = [1, 1, 1, 1] + array = list_array_type.from_arrays(offsets, sizes, values) + + assert array.flatten().to_pylist() == [4, 3, 2, 1] + + # test null elements backed by non-empty sublists + mask = pa.array([False, False, False, True]) + array = list_array_type.from_arrays(offsets, sizes, values, mask=mask) + + assert array.flatten().to_pylist() == [4, 3, 2] + assert array.values.to_pylist() == [1, 2, 3, 4] + + +@pytest.mark.parametrize('list_view_type', [pa.ListViewArray, pa.LargeListViewArray]) +def test_list_view_slice(list_view_type): + # sliced -> values keeps referring to full values buffer, but offsets is + # sliced as well so the offsets correctly point into the full values array + # sliced -> flatten() will return the sliced value array. + + array = list_view_type.from_arrays(offsets=[0, 3, 4], sizes=[ + 3, 1, 2], values=[1, 2, 3, 4, 5, 6]) + sliced_array = array[1:] + + assert sliced_array.values.to_pylist() == [1, 2, 3, 4, 5, 6] + assert sliced_array.offsets.to_pylist() == [3, 4] + assert sliced_array.flatten().to_pylist() == [4, 5, 6] + + i = sliced_array.offsets[0].as_py() + j = sliced_array.offsets[1].as_py() + + assert sliced_array[0].as_py() == sliced_array.values[i:j].to_pylist() == [4] diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_flight_async.py b/llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_flight_async.py new file mode 100644 index 0000000000000000000000000000000000000000..f3cd1bbb58e2fc43f8c35c32e3a53b3240a9e9ca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_flight_async.py @@ -0,0 +1,93 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import asyncio + +import pytest + +import pyarrow + +flight = pytest.importorskip("pyarrow.flight") +pytestmark = pytest.mark.flight + + +class ExampleServer(flight.FlightServerBase): + simple_info = flight.FlightInfo( + pyarrow.schema([("a", "int32")]), + flight.FlightDescriptor.for_command(b"simple"), + [], + -1, + -1, + ) + + def get_flight_info(self, context, descriptor): + if descriptor.command == b"simple": + return self.simple_info + elif descriptor.command == b"unknown": + raise NotImplementedError("Unknown command") + + raise NotImplementedError("Unknown descriptor") + + +def async_or_skip(client): + if not client.supports_async: + # Use async error message as skip message + with pytest.raises(NotImplementedError) as e: + client.as_async() + pytest.skip(str(e.value)) + + +@pytest.fixture(scope="module") +def flight_client(): + with ExampleServer() as server: + with flight.connect(f"grpc://localhost:{server.port}") as client: + yield client + + +@pytest.fixture(scope="module") +def async_client(flight_client): + async_or_skip(flight_client) + yield flight_client.as_async() + + +def test_async_support_property(flight_client): + assert isinstance(flight_client.supports_async, bool) + if flight_client.supports_async: + flight_client.as_async() + else: + with pytest.raises(NotImplementedError): + flight_client.as_async() + + +def test_get_flight_info(async_client): + async def _test(): + descriptor = flight.FlightDescriptor.for_command(b"simple") + info = await async_client.get_flight_info(descriptor) + assert info == ExampleServer.simple_info + + asyncio.run(_test()) + + +def test_get_flight_info_error(async_client): + async def _test(): + descriptor = flight.FlightDescriptor.for_command(b"unknown") + with pytest.raises(NotImplementedError) as excinfo: + await async_client.get_flight_info(descriptor) + + assert "Unknown command" in repr(excinfo.value) + + asyncio.run(_test()) diff --git a/llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_io.py b/llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_io.py new file mode 100644 index 0000000000000000000000000000000000000000..17eab871a25754c78f908373785cbce5843075ff --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_io.py @@ -0,0 +1,2121 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import bz2 +from contextlib import contextmanager +from io import (BytesIO, StringIO, TextIOWrapper, BufferedIOBase, IOBase) +import itertools +import gc +import gzip +import math +import os +import pathlib +import pytest +import sys +import tempfile +import weakref + +import numpy as np + +from pyarrow.util import guid +from pyarrow import Codec +import pyarrow as pa + + +def check_large_seeks(file_factory, expected_error=None): + if sys.platform in ('win32', 'darwin'): + pytest.skip("need sparse file support") + try: + filename = tempfile.mktemp(prefix='test_io') + with open(filename, 'wb') as f: + f.truncate(2 ** 32 + 10) + f.seek(2 ** 32 + 5) + f.write(b'mark\n') + if expected_error: + with expected_error: + file_factory(filename) + else: + with file_factory(filename) as f: + assert f.size() == 2 ** 32 + 10 + assert f.seek(2 ** 32 + 5) == 2 ** 32 + 5 + assert f.tell() == 2 ** 32 + 5 + assert f.read(5) == b'mark\n' + assert f.tell() == 2 ** 32 + 10 + finally: + os.unlink(filename) + + +@contextmanager +def assert_file_not_found(): + with pytest.raises(FileNotFoundError): + yield + + +# ---------------------------------------------------------------------- +# Python file-like objects + + +def test_python_file_write(): + buf = BytesIO() + + f = pa.PythonFile(buf) + + assert f.tell() == 0 + + s1 = b'enga\xc3\xb1ado' + s2 = b'foobar' + + f.write(s1) + assert f.tell() == len(s1) + + f.write(s2) + + expected = s1 + s2 + + result = buf.getvalue() + assert result == expected + + assert not f.closed + f.close() + assert f.closed + + with pytest.raises(TypeError, match="binary file expected"): + pa.PythonFile(StringIO()) + + +def test_python_file_read(): + data = b'some sample data' + + buf = BytesIO(data) + f = pa.PythonFile(buf, mode='r') + + assert f.size() == len(data) + + assert f.tell() == 0 + + assert f.read(4) == b'some' + assert f.tell() == 4 + + f.seek(0) + assert f.tell() == 0 + + f.seek(5) + assert f.tell() == 5 + + v = f.read(50) + assert v == b'sample data' + assert len(v) == 11 + + assert f.size() == len(data) + + assert not f.closed + f.close() + assert f.closed + + with pytest.raises(TypeError, match="binary file expected"): + pa.PythonFile(StringIO(), mode='r') + + +@pytest.mark.parametrize("nbytes", (-1, 0, 1, 5, 100)) +@pytest.mark.parametrize("file_offset", (-1, 0, 5, 100)) +def test_python_file_get_stream(nbytes, file_offset): + + data = b'data1data2data3data4data5' + + f = pa.PythonFile(BytesIO(data), mode='r') + + # negative nbytes or offsets don't make sense here, raise ValueError + if nbytes < 0 or file_offset < 0: + with pytest.raises(pa.ArrowInvalid, + match="should be a positive value"): + f.get_stream(file_offset=file_offset, nbytes=nbytes) + f.close() + return + else: + stream = f.get_stream(file_offset=file_offset, nbytes=nbytes) + + # Subsequent calls to 'read' should match behavior if same + # data passed to BytesIO where get_stream should handle if + # nbytes/file_offset results in no bytes b/c out of bounds. 
+ start = min(file_offset, len(data)) + end = min(file_offset + nbytes, len(data)) + buf = BytesIO(data[start:end]) + + # read some chunks + assert stream.read(nbytes=4) == buf.read(4) + assert stream.read(nbytes=6) == buf.read(6) + + # Read to end of each stream + assert stream.read() == buf.read() + + # Try reading past the stream + n = len(data) * 2 + assert stream.read(n) == buf.read(n) + + # NativeFile[CInputStream] is not seekable + with pytest.raises(OSError, match="seekable"): + stream.seek(0) + + stream.close() + assert stream.closed + + +def test_python_file_read_at(): + data = b'some sample data' + + buf = BytesIO(data) + f = pa.PythonFile(buf, mode='r') + + # test simple read at + v = f.read_at(nbytes=5, offset=3) + assert v == b'e sam' + assert len(v) == 5 + + # test reading entire file when nbytes > len(file) + w = f.read_at(nbytes=50, offset=0) + assert w == data + assert len(w) == 16 + + +def test_python_file_readall(): + data = b'some sample data' + + buf = BytesIO(data) + with pa.PythonFile(buf, mode='r') as f: + assert f.readall() == data + + +def test_python_file_readinto(): + length = 10 + data = b'some sample data longer than 10' + dst_buf = bytearray(length) + src_buf = BytesIO(data) + + with pa.PythonFile(src_buf, mode='r') as f: + assert f.readinto(dst_buf) == 10 + + assert dst_buf[:length] == data[:length] + assert len(dst_buf) == length + + +def test_python_file_read_buffer(): + length = 10 + data = b'0123456798' + dst_buf = bytearray(data) + + class DuckReader: + def close(self): + pass + + @property + def closed(self): + return False + + def read_buffer(self, nbytes): + assert nbytes == length + return memoryview(dst_buf)[:nbytes] + + duck_reader = DuckReader() + with pa.PythonFile(duck_reader, mode='r') as f: + buf = f.read_buffer(length) + assert len(buf) == length + assert memoryview(buf).tobytes() == dst_buf[:length] + # buf should point to the same memory, so modifying it + memoryview(buf)[0] = ord(b'x') + # should modify the original + assert dst_buf[0] == ord(b'x') + + +def test_python_file_correct_abc(): + with pa.PythonFile(BytesIO(b''), mode='r') as f: + assert isinstance(f, BufferedIOBase) + assert isinstance(f, IOBase) + + +def test_python_file_iterable(): + data = b'''line1 + line2 + line3 + ''' + + buf = BytesIO(data) + buf2 = BytesIO(data) + + with pa.PythonFile(buf, mode='r') as f: + for read, expected in zip(f, buf2): + assert read == expected + + +def test_python_file_large_seeks(): + def factory(filename): + return pa.PythonFile(open(filename, 'rb')) + + check_large_seeks(factory) + + +def test_bytes_reader(): + # Like a BytesIO, but zero-copy underneath for C++ consumers + data = b'some sample data' + f = pa.BufferReader(data) + assert f.tell() == 0 + + assert f.size() == len(data) + + assert f.read(4) == b'some' + assert f.tell() == 4 + + f.seek(0) + assert f.tell() == 0 + + f.seek(0, 2) + assert f.tell() == len(data) + + f.seek(5) + assert f.tell() == 5 + + assert f.read(50) == b'sample data' + + assert not f.closed + f.close() + assert f.closed + + +def test_bytes_reader_non_bytes(): + with pytest.raises(TypeError): + pa.BufferReader('some sample data') + + +def test_bytes_reader_retains_parent_reference(): + import gc + + # ARROW-421 + def get_buffer(): + data = b'some sample data' * 1000 + reader = pa.BufferReader(data) + reader.seek(5) + return reader.read_buffer(6) + + buf = get_buffer() + gc.collect() + assert buf.to_pybytes() == b'sample' + assert buf.parent is not None + + +def test_python_file_implicit_mode(tmpdir): + path = 
os.path.join(str(tmpdir), 'foo.txt') + with open(path, 'wb') as f: + pf = pa.PythonFile(f) + assert pf.writable() + assert not pf.readable() + assert not pf.seekable() # PyOutputStream isn't seekable + f.write(b'foobar\n') + + with open(path, 'rb') as f: + pf = pa.PythonFile(f) + assert pf.readable() + assert not pf.writable() + assert pf.seekable() + assert pf.read() == b'foobar\n' + + bio = BytesIO() + pf = pa.PythonFile(bio) + assert pf.writable() + assert not pf.readable() + assert not pf.seekable() + pf.write(b'foobar\n') + assert bio.getvalue() == b'foobar\n' + + +def test_python_file_writelines(tmpdir): + lines = [b'line1\n', b'line2\n' b'line3'] + path = os.path.join(str(tmpdir), 'foo.txt') + with open(path, 'wb') as f: + try: + f = pa.PythonFile(f, mode='w') + assert f.writable() + f.writelines(lines) + finally: + f.close() + + with open(path, 'rb') as f: + try: + f = pa.PythonFile(f, mode='r') + assert f.readable() + assert f.read() == b''.join(lines) + finally: + f.close() + + +def test_python_file_closing(): + bio = BytesIO() + pf = pa.PythonFile(bio) + wr = weakref.ref(pf) + del pf + assert wr() is None # object was destroyed + assert not bio.closed + pf = pa.PythonFile(bio) + pf.close() + assert bio.closed + + +# ---------------------------------------------------------------------- +# Buffers + + +def check_buffer_pickling(buf, pickler): + # Check that buffer survives a pickle roundtrip + for protocol in range(0, pickler.HIGHEST_PROTOCOL + 1): + result = pickler.loads(pickler.dumps(buf, protocol=protocol)) + assert len(result) == len(buf) + assert memoryview(result) == memoryview(buf) + assert result.to_pybytes() == buf.to_pybytes() + assert result.is_mutable == buf.is_mutable + + +def test_buffer_bytes(pickle_module): + val = b'some data' + + buf = pa.py_buffer(val) + assert isinstance(buf, pa.Buffer) + assert not buf.is_mutable + assert buf.is_cpu + + result = buf.to_pybytes() + assert result == val + + check_buffer_pickling(buf, pickle_module) + + +def test_buffer_null_data(pickle_module): + null_buff = pa.foreign_buffer(address=0, size=0) + assert null_buff.to_pybytes() == b"" + assert null_buff.address == 0 + # ARROW-16048: we shouldn't expose a NULL address through the Python + # buffer protocol. 
+ m = memoryview(null_buff) + assert m.tobytes() == b"" + assert pa.py_buffer(m).address != 0 + + check_buffer_pickling(null_buff, pickle_module) + + +def test_buffer_memoryview(pickle_module): + val = b'some data' + + buf = pa.py_buffer(val) + assert isinstance(buf, pa.Buffer) + assert not buf.is_mutable + assert buf.is_cpu + + result = memoryview(buf) + assert result == val + + check_buffer_pickling(buf, pickle_module) + + +def test_buffer_bytearray(pickle_module): + val = bytearray(b'some data') + + buf = pa.py_buffer(val) + assert isinstance(buf, pa.Buffer) + assert buf.is_mutable + assert buf.is_cpu + + result = bytearray(buf) + assert result == val + + check_buffer_pickling(buf, pickle_module) + + +def test_buffer_invalid(): + with pytest.raises(TypeError, + match="(bytes-like object|buffer interface)"): + pa.py_buffer(None) + + +def test_buffer_weakref(): + buf = pa.py_buffer(b'some data') + wr = weakref.ref(buf) + assert wr() is not None + del buf + assert wr() is None + + +@pytest.mark.parametrize('val, expected_hex_buffer', + [(b'check', b'636865636B'), + (b'\a0', b'0730'), + (b'', b'')]) +def test_buffer_hex(val, expected_hex_buffer): + buf = pa.py_buffer(val) + assert buf.hex() == expected_hex_buffer + + +def test_buffer_to_numpy(): + # Make sure creating a numpy array from an arrow buffer works + byte_array = bytearray(20) + byte_array[0] = 42 + buf = pa.py_buffer(byte_array) + array = np.frombuffer(buf, dtype="uint8") + assert array[0] == byte_array[0] + byte_array[0] += 1 + assert array[0] == byte_array[0] + assert array.base == buf + + +def test_buffer_from_numpy(): + # C-contiguous + arr = np.arange(12, dtype=np.int8).reshape((3, 4)) + buf = pa.py_buffer(arr) + assert buf.is_cpu + assert buf.is_mutable + assert buf.to_pybytes() == arr.tobytes() + # F-contiguous; note strides information is lost + buf = pa.py_buffer(arr.T) + assert buf.is_cpu + assert buf.is_mutable + assert buf.to_pybytes() == arr.tobytes() + # Non-contiguous + with pytest.raises(ValueError, match="not contiguous"): + buf = pa.py_buffer(arr.T[::2]) + + +def test_buffer_address(): + b1 = b'some data!' + b2 = bytearray(b1) + b3 = bytearray(b1) + + buf1 = pa.py_buffer(b1) + buf2 = pa.py_buffer(b1) + buf3 = pa.py_buffer(b2) + buf4 = pa.py_buffer(b3) + + assert buf1.address > 0 + assert buf1.address == buf2.address + assert buf3.address != buf2.address + assert buf4.address != buf3.address + + arr = np.arange(5) + buf = pa.py_buffer(arr) + assert buf.address == arr.ctypes.data + + +def test_buffer_equals(): + # Buffer.equals() returns true iff the buffers have the same contents + def eq(a, b): + assert a.equals(b) + assert a == b + assert not (a != b) + + def ne(a, b): + assert not a.equals(b) + assert not (a == b) + assert a != b + + b1 = b'some data!' 
+ b2 = bytearray(b1) + b3 = bytearray(b1) + b3[0] = 42 + buf1 = pa.py_buffer(b1) + buf2 = pa.py_buffer(b2) + buf3 = pa.py_buffer(b2) + buf4 = pa.py_buffer(b3) + buf5 = pa.py_buffer(np.frombuffer(b2, dtype=np.int16)) + eq(buf1, buf1) + eq(buf1, buf2) + eq(buf2, buf3) + ne(buf2, buf4) + # Data type is indifferent + eq(buf2, buf5) + + +def test_buffer_eq_bytes(): + buf = pa.py_buffer(b'some data') + assert buf == b'some data' + assert buf == bytearray(b'some data') + assert buf != b'some dat1' + + with pytest.raises(TypeError): + buf == 'some data' + + +def test_buffer_getitem(): + data = bytearray(b'some data!') + buf = pa.py_buffer(data) + + n = len(data) + for ix in range(-n, n - 1): + assert buf[ix] == data[ix] + + with pytest.raises(IndexError): + buf[n] + + with pytest.raises(IndexError): + buf[-n - 1] + + +def test_buffer_slicing(): + data = b'some data!' + buf = pa.py_buffer(data) + + sliced = buf.slice(2) + expected = pa.py_buffer(b'me data!') + assert sliced.equals(expected) + + sliced2 = buf.slice(2, 4) + expected2 = pa.py_buffer(b'me d') + assert sliced2.equals(expected2) + + # 0 offset + assert buf.slice(0).equals(buf) + + # Slice past end of buffer + assert len(buf.slice(len(buf))) == 0 + + with pytest.raises(IndexError): + buf.slice(-1) + + with pytest.raises(IndexError): + buf.slice(len(buf) + 1) + assert buf[11:].to_pybytes() == b"" + + # Slice stop exceeds buffer length + with pytest.raises(IndexError): + buf.slice(1, len(buf)) + assert buf[1:11].to_pybytes() == buf.to_pybytes()[1:] + + # Negative length + with pytest.raises(IndexError): + buf.slice(1, -1) + + # Test slice notation + assert buf[2:].equals(buf.slice(2)) + assert buf[2:5].equals(buf.slice(2, 3)) + assert buf[-5:].equals(buf.slice(len(buf) - 5)) + assert buf[-5:-2].equals(buf.slice(len(buf) - 5, 3)) + + with pytest.raises(IndexError): + buf[::-1] + with pytest.raises(IndexError): + buf[::2] + + n = len(buf) + for start in range(-n * 2, n * 2): + for stop in range(-n * 2, n * 2): + assert buf[start:stop].to_pybytes() == buf.to_pybytes()[start:stop] + + +def test_buffer_hashing(): + # Buffers are unhashable + with pytest.raises(TypeError, match="unhashable"): + hash(pa.py_buffer(b'123')) + + +def test_buffer_protocol_respects_immutability(): + # ARROW-3228; NumPy's frombuffer ctor determines whether a buffer-like + # object is mutable by first attempting to get a mutable buffer using + # PyObject_FromBuffer. 
If that fails, it assumes that the object is + # immutable + a = b'12345' + arrow_ref = pa.py_buffer(a) + numpy_ref = np.frombuffer(arrow_ref, dtype=np.uint8) + assert not numpy_ref.flags.writeable + + +def test_foreign_buffer(): + obj = np.array([1, 2], dtype=np.int32) + addr = obj.__array_interface__["data"][0] + size = obj.nbytes + buf = pa.foreign_buffer(addr, size, obj) + wr = weakref.ref(obj) + del obj + assert np.frombuffer(buf, dtype=np.int32).tolist() == [1, 2] + assert wr() is not None + del buf + assert wr() is None + + +def test_allocate_buffer(): + buf = pa.allocate_buffer(100) + assert buf.size == 100 + assert buf.is_mutable + assert buf.parent is None + + bit = b'abcde' + writer = pa.FixedSizeBufferWriter(buf) + writer.write(bit) + + assert buf.to_pybytes()[:5] == bit + + +def test_allocate_buffer_resizable(): + buf = pa.allocate_buffer(100, resizable=True) + assert isinstance(buf, pa.ResizableBuffer) + + buf.resize(200) + assert buf.size == 200 + + +def test_cache_options(): + opts1 = pa.CacheOptions() + opts2 = pa.CacheOptions(hole_size_limit=1024) + opts3 = pa.CacheOptions(hole_size_limit=4096, range_size_limit=8192) + opts4 = pa.CacheOptions(hole_size_limit=4096, + range_size_limit=8192, prefetch_limit=5) + opts5 = pa.CacheOptions(hole_size_limit=4096, + range_size_limit=8192, lazy=False) + opts6 = pa.CacheOptions.from_network_metrics(time_to_first_byte_millis=100, + transfer_bandwidth_mib_per_sec=200, + ideal_bandwidth_utilization_frac=0.9, + max_ideal_request_size_mib=64) + + assert opts1.hole_size_limit == 8192 + assert opts1.range_size_limit == 32 * 1024 * 1024 + assert opts1.lazy is True + assert opts1.prefetch_limit == 0 + + assert opts2.hole_size_limit == 1024 + assert opts2.range_size_limit == 32 * 1024 * 1024 + assert opts2.lazy is True + assert opts2.prefetch_limit == 0 + + assert opts3.hole_size_limit == 4096 + assert opts3.range_size_limit == 8192 + assert opts3.lazy is True + assert opts3.prefetch_limit == 0 + + assert opts4.hole_size_limit == 4096 + assert opts4.range_size_limit == 8192 + assert opts4.lazy is True + assert opts4.prefetch_limit == 5 + + assert opts5.hole_size_limit == 4096 + assert opts5.range_size_limit == 8192 + assert opts5.lazy is False + assert opts5.prefetch_limit == 0 + + assert opts6.lazy is False + + assert opts1 == opts1 + assert opts1 != opts2 + assert opts2 != opts3 + assert opts3 != opts4 + assert opts4 != opts5 + assert opts6 != opts1 + + +def test_cache_options_pickling(pickle_module): + options = [ + pa.CacheOptions(), + pa.CacheOptions(hole_size_limit=4096, range_size_limit=8192, + lazy=True, prefetch_limit=5), + ] + + for option in options: + assert pickle_module.loads(pickle_module.dumps(option)) == option + + +@pytest.mark.parametrize("compression", [ + pytest.param( + "bz2", marks=pytest.mark.xfail(raises=pa.lib.ArrowNotImplementedError) + ), + "brotli", + "gzip", + "lz4", + "zstd", + "snappy" +]) +def test_compress_decompress(compression): + if not Codec.is_available(compression): + pytest.skip("{} support is not built".format(compression)) + + INPUT_SIZE = 10000 + test_data = (np.random.randint(0, 255, size=INPUT_SIZE) + .astype(np.uint8) + .tobytes()) + test_buf = pa.py_buffer(test_data) + + compressed_buf = pa.compress(test_buf, codec=compression) + compressed_bytes = pa.compress(test_data, codec=compression, + asbytes=True) + + assert isinstance(compressed_bytes, bytes) + + decompressed_buf = pa.decompress(compressed_buf, INPUT_SIZE, + codec=compression) + decompressed_bytes = pa.decompress(compressed_bytes, 
INPUT_SIZE, + codec=compression, asbytes=True) + + assert isinstance(decompressed_bytes, bytes) + + assert decompressed_buf.equals(test_buf) + assert decompressed_bytes == test_data + + with pytest.raises(ValueError): + pa.decompress(compressed_bytes, codec=compression) + + +@pytest.mark.parametrize("compression", [ + pytest.param( + "bz2", marks=pytest.mark.xfail(raises=pa.lib.ArrowNotImplementedError) + ), + "brotli", + "gzip", + "lz4", + "zstd", + "snappy" +]) +def test_compression_level(compression): + if not Codec.is_available(compression): + pytest.skip("{} support is not built".format(compression)) + + codec = Codec(compression) + if codec.name == "snappy": + assert codec.compression_level is None + else: + assert isinstance(codec.compression_level, int) + + # These codecs do not support a compression level + no_level = ['snappy'] + if compression in no_level: + assert not Codec.supports_compression_level(compression) + with pytest.raises(ValueError): + Codec(compression, 0) + with pytest.raises(ValueError): + Codec.minimum_compression_level(compression) + with pytest.raises(ValueError): + Codec.maximum_compression_level(compression) + with pytest.raises(ValueError): + Codec.default_compression_level(compression) + return + + INPUT_SIZE = 10000 + test_data = (np.random.randint(0, 255, size=INPUT_SIZE) + .astype(np.uint8) + .tobytes()) + test_buf = pa.py_buffer(test_data) + + min_level = Codec.minimum_compression_level(compression) + max_level = Codec.maximum_compression_level(compression) + default_level = Codec.default_compression_level(compression) + + assert min_level < max_level + assert default_level >= min_level + assert default_level <= max_level + + for compression_level in range(min_level, max_level+1): + codec = Codec(compression, compression_level) + compressed_buf = codec.compress(test_buf) + compressed_bytes = codec.compress(test_data, asbytes=True) + assert isinstance(compressed_bytes, bytes) + decompressed_buf = codec.decompress(compressed_buf, INPUT_SIZE) + decompressed_bytes = codec.decompress(compressed_bytes, INPUT_SIZE, + asbytes=True) + + assert isinstance(decompressed_bytes, bytes) + + assert decompressed_buf.equals(test_buf) + assert decompressed_bytes == test_data + + with pytest.raises(ValueError): + codec.decompress(compressed_bytes) + + # The ability to set a seed this way is not present on older versions of + # numpy (currently in our python 3.6 CI build). 
Some inputs might just + # happen to compress the same between the two levels so using seeded + # random numbers is necessary to help get more reliable results + # + # The goal of this part is to ensure the compression_level is being + # passed down to the C++ layer, not to verify the compression algs + # themselves + if not hasattr(np.random, 'default_rng'): + pytest.skip('Requires newer version of numpy') + rng = np.random.default_rng(seed=42) + values = rng.integers(0, 100, 1000) + arr = pa.array(values) + hard_to_compress_buffer = arr.buffers()[1] + + weak_codec = Codec(compression, min_level) + weakly_compressed_buf = weak_codec.compress(hard_to_compress_buffer) + + strong_codec = Codec(compression, max_level) + strongly_compressed_buf = strong_codec.compress(hard_to_compress_buffer) + + assert len(weakly_compressed_buf) > len(strongly_compressed_buf) + + +def test_buffer_memoryview_is_immutable(): + val = b'some data' + + buf = pa.py_buffer(val) + assert not buf.is_mutable + assert isinstance(buf, pa.Buffer) + + result = memoryview(buf) + assert result.readonly + + with pytest.raises(TypeError) as exc: + result[0] = b'h' + assert 'cannot modify read-only' in str(exc.value) + + b = bytes(buf) + with pytest.raises(TypeError) as exc: + b[0] = b'h' + assert 'cannot modify read-only' in str(exc.value) + + +def test_uninitialized_buffer(): + # ARROW-2039: calling Buffer() directly creates an uninitialized object + # ARROW-2638: prevent calling extension class constructors directly + with pytest.raises(TypeError): + pa.Buffer() + + +def test_memory_output_stream(): + # 10 bytes + val = b'dataabcdef' + f = pa.BufferOutputStream() + + K = 1000 + for i in range(K): + f.write(val) + + buf = f.getvalue() + assert len(buf) == len(val) * K + assert buf.to_pybytes() == val * K + + +def test_inmemory_write_after_closed(): + f = pa.BufferOutputStream() + f.write(b'ok') + assert not f.closed + f.getvalue() + assert f.closed + + with pytest.raises(ValueError): + f.write(b'not ok') + + +def test_buffer_protocol_ref_counting(): + def make_buffer(bytes_obj): + return bytearray(pa.py_buffer(bytes_obj)) + + buf = make_buffer(b'foo') + gc.collect() + assert buf == b'foo' + + # ARROW-1053 + val = b'foo' + refcount_before = sys.getrefcount(val) + for i in range(10): + make_buffer(val) + gc.collect() + assert refcount_before == sys.getrefcount(val) + + +def test_nativefile_write_memoryview(): + f = pa.BufferOutputStream() + data = b'ok' + + arr = np.frombuffer(data, dtype='S1') + + f.write(arr) + f.write(bytearray(data)) + f.write(pa.py_buffer(data)) + with pytest.raises(TypeError): + f.write(data.decode('utf8')) + + buf = f.getvalue() + + assert buf.to_pybytes() == data * 3 + + +# ---------------------------------------------------------------------- +# Mock output stream + + +def test_mock_output_stream(): + # Make sure that the MockOutputStream and the BufferOutputStream record the + # same size + + # 10 bytes + val = b'dataabcdef' + + f1 = pa.MockOutputStream() + f2 = pa.BufferOutputStream() + + K = 1000 + for i in range(K): + f1.write(val) + f2.write(val) + + assert f1.size() == len(f2.getvalue()) + + # Do the same test with a table + record_batch = pa.RecordBatch.from_arrays([pa.array([1, 2, 3])], ['a']) + + f1 = pa.MockOutputStream() + f2 = pa.BufferOutputStream() + + stream_writer1 = pa.RecordBatchStreamWriter(f1, record_batch.schema) + stream_writer2 = pa.RecordBatchStreamWriter(f2, record_batch.schema) + + stream_writer1.write_batch(record_batch) + stream_writer2.write_batch(record_batch) + 
stream_writer1.close() + stream_writer2.close() + + assert f1.size() == len(f2.getvalue()) + + +# ---------------------------------------------------------------------- +# OS files and memory maps + + +@pytest.fixture +def sample_disk_data(request, tmpdir): + SIZE = 4096 + arr = np.random.randint(0, 256, size=SIZE).astype('u1') + data = arr.tobytes()[:SIZE] + + path = os.path.join(str(tmpdir), guid()) + + with open(path, 'wb') as f: + f.write(data) + + def teardown(): + _try_delete(path) + + request.addfinalizer(teardown) + return path, data + + +def _check_native_file_reader(FACTORY, sample_data, + allow_read_out_of_bounds=True): + path, data = sample_data + + f = FACTORY(path, mode='r') + + assert f.read(10) == data[:10] + assert f.read(0) == b'' + assert f.tell() == 10 + + assert f.read() == data[10:] + + assert f.size() == len(data) + + f.seek(0) + assert f.tell() == 0 + + # Seeking past end of file not supported in memory maps + if allow_read_out_of_bounds: + f.seek(len(data) + 1) + assert f.tell() == len(data) + 1 + assert f.read(5) == b'' + + # Test whence argument of seek, ARROW-1287 + assert f.seek(3) == 3 + assert f.seek(3, os.SEEK_CUR) == 6 + assert f.tell() == 6 + + ex_length = len(data) - 2 + assert f.seek(-2, os.SEEK_END) == ex_length + assert f.tell() == ex_length + + +def test_memory_map_reader(sample_disk_data): + _check_native_file_reader(pa.memory_map, sample_disk_data, + allow_read_out_of_bounds=False) + + +def test_memory_map_retain_buffer_reference(sample_disk_data): + path, data = sample_disk_data + + cases = [] + with pa.memory_map(path, 'rb') as f: + cases.append((f.read_buffer(100), data[:100])) + cases.append((f.read_buffer(100), data[100:200])) + cases.append((f.read_buffer(100), data[200:300])) + + # Call gc.collect() for good measure + gc.collect() + + for buf, expected in cases: + assert buf.to_pybytes() == expected + + +def test_os_file_reader(sample_disk_data): + _check_native_file_reader(pa.OSFile, sample_disk_data) + + +def test_os_file_large_seeks(): + check_large_seeks(pa.OSFile) + + +def _try_delete(path): + try: + os.remove(path) + except os.error: + pass + + +def test_memory_map_writer(tmpdir): + SIZE = 4096 + arr = np.random.randint(0, 256, size=SIZE).astype('u1') + data = arr.tobytes()[:SIZE] + + path = os.path.join(str(tmpdir), guid()) + with open(path, 'wb') as f: + f.write(data) + + f = pa.memory_map(path, mode='r+b') + + f.seek(10) + f.write(b'peekaboo') + assert f.tell() == 18 + + f.seek(10) + assert f.read(8) == b'peekaboo' + + f2 = pa.memory_map(path, mode='r+b') + + f2.seek(10) + f2.write(b'booapeak') + f2.seek(10) + + f.seek(10) + assert f.read(8) == b'booapeak' + + # Does not truncate file + f3 = pa.memory_map(path, mode='w') + f3.write(b'foo') + + with pa.memory_map(path) as f4: + assert f4.size() == SIZE + + with pytest.raises(IOError): + f3.read(5) + + f.seek(0) + assert f.read(3) == b'foo' + + +def test_memory_map_resize(tmpdir): + SIZE = 4096 + arr = np.random.randint(0, 256, size=SIZE).astype(np.uint8) + data1 = arr.tobytes()[:(SIZE // 2)] + data2 = arr.tobytes()[(SIZE // 2):] + + path = os.path.join(str(tmpdir), guid()) + + mmap = pa.create_memory_map(path, SIZE / 2) + mmap.write(data1) + + mmap.resize(SIZE) + mmap.write(data2) + + mmap.close() + + with open(path, 'rb') as f: + assert f.read() == arr.tobytes() + + +def test_memory_zero_length(tmpdir): + path = os.path.join(str(tmpdir), guid()) + f = open(path, 'wb') + f.close() + with pa.memory_map(path, mode='r+b') as memory_map: + assert memory_map.size() == 0 + + +def 
test_memory_map_large_seeks(): + if sys.maxsize >= 2**32: + expected_error = None + else: + expected_error = pytest.raises( + pa.ArrowCapacityError, + match="Requested memory map length 4294967306 " + "does not fit in a C size_t") + check_large_seeks(pa.memory_map, expected_error=expected_error) + + +def test_memory_map_close_remove(tmpdir): + # ARROW-6740: should be able to delete closed memory-mapped file (Windows) + path = os.path.join(str(tmpdir), guid()) + mmap = pa.create_memory_map(path, 4096) + mmap.close() + assert mmap.closed + os.remove(path) # Shouldn't fail + + +def test_memory_map_deref_remove(tmpdir): + path = os.path.join(str(tmpdir), guid()) + pa.create_memory_map(path, 4096) + os.remove(path) # Shouldn't fail + + +def test_os_file_writer(tmpdir): + SIZE = 4096 + arr = np.random.randint(0, 256, size=SIZE).astype('u1') + data = arr.tobytes()[:SIZE] + + path = os.path.join(str(tmpdir), guid()) + with open(path, 'wb') as f: + f.write(data) + + # Truncates file + f2 = pa.OSFile(path, mode='w') + f2.write(b'foo') + + with pa.OSFile(path) as f3: + assert f3.size() == 3 + + with pytest.raises(IOError): + f2.read(5) + f2.close() + + # Append + with pa.OSFile(path, mode='ab') as f4: + f4.write(b'bar') + with pa.OSFile(path) as f5: + assert f5.size() == 6 # foo + bar + + +def test_native_file_write_reject_unicode(): + # ARROW-3227 + nf = pa.BufferOutputStream() + with pytest.raises(TypeError): + nf.write('foo') + + +def test_native_file_modes(tmpdir): + path = os.path.join(str(tmpdir), guid()) + with open(path, 'wb') as f: + f.write(b'foooo') + + with pa.OSFile(path, mode='r') as f: + assert f.mode == 'rb' + assert f.readable() + assert not f.writable() + assert f.seekable() + + with pa.OSFile(path, mode='rb') as f: + assert f.mode == 'rb' + assert f.readable() + assert not f.writable() + assert f.seekable() + + with pa.OSFile(path, mode='w') as f: + assert f.mode == 'wb' + assert not f.readable() + assert f.writable() + assert not f.seekable() + + with pa.OSFile(path, mode='wb') as f: + assert f.mode == 'wb' + assert not f.readable() + assert f.writable() + assert not f.seekable() + + with pa.OSFile(path, mode='ab') as f: + assert f.mode == 'ab' + assert not f.readable() + assert f.writable() + assert not f.seekable() + + with pa.OSFile(path, mode='a') as f: + assert f.mode == 'ab' + assert not f.readable() + assert f.writable() + assert not f.seekable() + + with open(path, 'wb') as f: + f.write(b'foooo') + + with pa.memory_map(path, 'r') as f: + assert f.mode == 'rb' + assert f.readable() + assert not f.writable() + assert f.seekable() + + with pa.memory_map(path, 'r+') as f: + assert f.mode == 'rb+' + assert f.readable() + assert f.writable() + assert f.seekable() + + with pa.memory_map(path, 'r+b') as f: + assert f.mode == 'rb+' + assert f.readable() + assert f.writable() + assert f.seekable() + + +def test_native_file_permissions(tmpdir): + # ARROW-10124: permissions of created files should follow umask + cur_umask = os.umask(0o002) + os.umask(cur_umask) + + path = os.path.join(str(tmpdir), guid()) + with pa.OSFile(path, mode='w'): + pass + assert os.stat(path).st_mode & 0o777 == 0o666 & ~cur_umask + + path = os.path.join(str(tmpdir), guid()) + with pa.memory_map(path, 'w'): + pass + assert os.stat(path).st_mode & 0o777 == 0o666 & ~cur_umask + + +def test_native_file_raises_ValueError_after_close(tmpdir): + path = os.path.join(str(tmpdir), guid()) + with open(path, 'wb') as f: + f.write(b'foooo') + + with pa.OSFile(path, mode='rb') as os_file: + assert not os_file.closed + 
assert os_file.closed + + with pa.memory_map(path, mode='rb') as mmap_file: + assert not mmap_file.closed + assert mmap_file.closed + + files = [os_file, + mmap_file] + + methods = [('tell', ()), + ('seek', (0,)), + ('size', ()), + ('flush', ()), + ('readable', ()), + ('writable', ()), + ('seekable', ())] + + for f in files: + for method, args in methods: + with pytest.raises(ValueError): + getattr(f, method)(*args) + + +def test_native_file_TextIOWrapper(tmpdir): + data = ('foooo\n' + 'barrr\n' + 'bazzz\n') + + path = os.path.join(str(tmpdir), guid()) + with open(path, 'wb') as f: + f.write(data.encode('utf-8')) + + with TextIOWrapper(pa.OSFile(path, mode='rb')) as fil: + assert fil.readable() + res = fil.read() + assert res == data + assert fil.closed + + with TextIOWrapper(pa.OSFile(path, mode='rb')) as fil: + # Iteration works + lines = list(fil) + assert ''.join(lines) == data + + # Writing + path2 = os.path.join(str(tmpdir), guid()) + with TextIOWrapper(pa.OSFile(path2, mode='wb')) as fil: + assert fil.writable() + fil.write(data) + + with TextIOWrapper(pa.OSFile(path2, mode='rb')) as fil: + res = fil.read() + assert res == data + + +def test_native_file_TextIOWrapper_perf(tmpdir): + # ARROW-16272: TextIOWrapper.readline() shouldn't exhaust a large + # Arrow input stream. + data = b'foo\nquux\n' + path = str(tmpdir / 'largefile.txt') + with open(path, 'wb') as f: + f.write(data * 100_000) + + binary_file = pa.OSFile(path, mode='rb') + with TextIOWrapper(binary_file) as f: + assert binary_file.tell() == 0 + nbytes = 20_000 + lines = f.readlines(nbytes) + assert len(lines) == math.ceil(2 * nbytes / len(data)) + assert nbytes <= binary_file.tell() <= nbytes * 2 + + +def test_native_file_read1(tmpdir): + # ARROW-16272: read1() should not exhaust the input stream if there + # is a large amount of data remaining. + data = b'123\n' * 1_000_000 + path = str(tmpdir / 'largefile.txt') + with open(path, 'wb') as f: + f.write(data) + + chunks = [] + with pa.OSFile(path, mode='rb') as f: + while True: + b = f.read1() + assert len(b) < len(data) + chunks.append(b) + b = f.read1(30_000) + assert len(b) <= 30_000 + chunks.append(b) + if not b: + break + + assert b"".join(chunks) == data + + +@pytest.mark.pandas +def test_native_file_pandas_text_reader(tmpdir): + # ARROW-16272: Pandas' read_csv() should not exhaust an Arrow + # input stream when a small nrows is passed. 
+ import pandas as pd + import pandas.testing as tm + data = b'a,b\n' * 10_000_000 + path = str(tmpdir / 'largefile.txt') + with open(path, 'wb') as f: + f.write(data) + + with pa.OSFile(path, mode='rb') as f: + df = pd.read_csv(f, nrows=10) + expected = pd.DataFrame({'a': ['a'] * 10, 'b': ['b'] * 10}) + tm.assert_frame_equal(df, expected) + # Some readahead occurred, but not up to the end of file + assert f.tell() <= 256 * 1024 + + +def test_native_file_open_error(): + with assert_file_not_found(): + pa.OSFile('non_existent_file', 'rb') + with assert_file_not_found(): + pa.memory_map('non_existent_file', 'rb') + + +# ---------------------------------------------------------------------- +# Buffered streams + +def test_buffered_input_stream(): + raw = pa.BufferReader(b"123456789") + f = pa.BufferedInputStream(raw, buffer_size=4) + assert f.read(2) == b"12" + assert raw.tell() == 4 + f.close() + assert f.closed + assert raw.closed + + +def test_buffered_input_stream_detach_seekable(): + # detach() to a seekable file (io::RandomAccessFile in C++) + f = pa.BufferedInputStream(pa.BufferReader(b"123456789"), buffer_size=4) + assert f.read(2) == b"12" + raw = f.detach() + assert f.closed + assert not raw.closed + assert raw.seekable() + assert raw.read(4) == b"5678" + raw.seek(2) + assert raw.read(4) == b"3456" + + +def test_buffered_input_stream_detach_non_seekable(): + # detach() to a non-seekable file (io::InputStream in C++) + f = pa.BufferedInputStream( + pa.BufferedInputStream(pa.BufferReader(b"123456789"), buffer_size=4), + buffer_size=4) + assert f.read(2) == b"12" + raw = f.detach() + assert f.closed + assert not raw.closed + assert not raw.seekable() + assert raw.read(4) == b"5678" + with pytest.raises(EnvironmentError): + raw.seek(2) + + +def test_buffered_output_stream(): + np_buf = np.zeros(100, dtype=np.int8) # zero-initialized buffer + buf = pa.py_buffer(np_buf) + + raw = pa.FixedSizeBufferWriter(buf) + f = pa.BufferedOutputStream(raw, buffer_size=4) + f.write(b"12") + assert np_buf[:4].tobytes() == b'\0\0\0\0' + f.flush() + assert np_buf[:4].tobytes() == b'12\0\0' + f.write(b"3456789") + f.close() + assert f.closed + assert raw.closed + assert np_buf[:10].tobytes() == b'123456789\0' + + +def test_buffered_output_stream_detach(): + np_buf = np.zeros(100, dtype=np.int8) # zero-initialized buffer + buf = pa.py_buffer(np_buf) + + f = pa.BufferedOutputStream(pa.FixedSizeBufferWriter(buf), buffer_size=4) + f.write(b"12") + assert np_buf[:4].tobytes() == b'\0\0\0\0' + raw = f.detach() + assert f.closed + assert not raw.closed + assert np_buf[:4].tobytes() == b'12\0\0' + + +# ---------------------------------------------------------------------- +# Compressed input and output streams + +def check_compressed_input(data, fn, compression): + raw = pa.OSFile(fn, mode="rb") + with pa.CompressedInputStream(raw, compression) as compressed: + assert not compressed.closed + assert compressed.readable() + assert not compressed.writable() + assert not compressed.seekable() + got = compressed.read() + assert got == data + assert compressed.closed + assert raw.closed + + # Same with read_buffer() + raw = pa.OSFile(fn, mode="rb") + with pa.CompressedInputStream(raw, compression) as compressed: + buf = compressed.read_buffer() + assert isinstance(buf, pa.Buffer) + assert buf.to_pybytes() == data + + +@pytest.mark.gzip +def test_compressed_input_gzip(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "compressed_input_test.gz") + with gzip.open(fn, "wb") as f: + f.write(data) + 
check_compressed_input(data, fn, "gzip") + + +def test_compressed_input_bz2(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "compressed_input_test.bz2") + with bz2.BZ2File(fn, "w") as f: + f.write(data) + try: + check_compressed_input(data, fn, "bz2") + except NotImplementedError as e: + pytest.skip(str(e)) + + +@pytest.mark.gzip +def test_compressed_input_openfile(tmpdir): + if not Codec.is_available("gzip"): + pytest.skip("gzip support is not built") + + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "test_compressed_input_openfile.gz") + with gzip.open(fn, "wb") as f: + f.write(data) + + with pa.CompressedInputStream(fn, "gzip") as compressed: + buf = compressed.read_buffer() + assert buf.to_pybytes() == data + assert compressed.closed + + with pa.CompressedInputStream(pathlib.Path(fn), "gzip") as compressed: + buf = compressed.read_buffer() + assert buf.to_pybytes() == data + assert compressed.closed + + f = open(fn, "rb") + with pa.CompressedInputStream(f, "gzip") as compressed: + buf = compressed.read_buffer() + assert buf.to_pybytes() == data + assert f.closed + + +def check_compressed_concatenated(data, fn, compression): + raw = pa.OSFile(fn, mode="rb") + with pa.CompressedInputStream(raw, compression) as compressed: + got = compressed.read() + assert got == data + + +@pytest.mark.gzip +def test_compressed_concatenated_gzip(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "compressed_input_test2.gz") + with gzip.open(fn, "wb") as f: + f.write(data[:50]) + with gzip.open(fn, "ab") as f: + f.write(data[50:]) + check_compressed_concatenated(data, fn, "gzip") + + +@pytest.mark.gzip +def test_compressed_input_invalid(): + data = b"foo" * 10 + raw = pa.BufferReader(data) + with pytest.raises(ValueError): + pa.CompressedInputStream(raw, "unknown_compression") + with pytest.raises(TypeError): + pa.CompressedInputStream(raw, None) + + with pa.CompressedInputStream(raw, "gzip") as compressed: + with pytest.raises(IOError, match="zlib inflate failed"): + compressed.read() + + +def make_compressed_output(data, fn, compression): + raw = pa.BufferOutputStream() + with pa.CompressedOutputStream(raw, compression) as compressed: + assert not compressed.closed + assert not compressed.readable() + assert compressed.writable() + assert not compressed.seekable() + compressed.write(data) + assert compressed.closed + assert raw.closed + with open(fn, "wb") as f: + f.write(raw.getvalue()) + + +@pytest.mark.gzip +def test_compressed_output_gzip(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "compressed_output_test.gz") + make_compressed_output(data, fn, "gzip") + with gzip.open(fn, "rb") as f: + got = f.read() + assert got == data + + +def test_compressed_output_bz2(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "compressed_output_test.bz2") + try: + make_compressed_output(data, fn, "bz2") + except NotImplementedError as e: + pytest.skip(str(e)) + with bz2.BZ2File(fn, "r") as f: + got = f.read() + assert got == data + + +def test_output_stream_constructor(tmpdir): + if not Codec.is_available("gzip"): + pytest.skip("gzip support is not built") + with pa.CompressedOutputStream(tmpdir / "ctor.gz", "gzip") as stream: + stream.write(b"test") + with (tmpdir / "ctor2.gz").open("wb") as f: + with pa.CompressedOutputStream(f, "gzip") as stream: + stream.write(b"test") + + +@pytest.mark.parametrize(("path", "expected_compression"), [ + ("file.bz2", "bz2"), + ("file.lz4", "lz4"), + 
(pathlib.Path("file.gz"), "gzip"), + (pathlib.Path("path/to/file.zst"), "zstd"), +]) +def test_compression_detection(path, expected_compression): + if not Codec.is_available(expected_compression): + with pytest.raises(pa.lib.ArrowNotImplementedError): + Codec.detect(path) + else: + codec = Codec.detect(path) + assert isinstance(codec, Codec) + assert codec.name == expected_compression + + +def test_unknown_compression_raises(): + with pytest.raises(ValueError): + Codec.is_available('unknown') + with pytest.raises(TypeError): + Codec(None) + with pytest.raises(ValueError): + Codec('unknown') + + +@pytest.mark.parametrize("compression", [ + "bz2", + "brotli", + "gzip", + "lz4", + "zstd", + pytest.param( + "snappy", + marks=pytest.mark.xfail(raises=pa.lib.ArrowNotImplementedError) + ) +]) +def test_compressed_roundtrip(compression): + if not Codec.is_available(compression): + pytest.skip("{} support is not built".format(compression)) + + data = b"some test data\n" * 10 + b"eof\n" + raw = pa.BufferOutputStream() + with pa.CompressedOutputStream(raw, compression) as compressed: + compressed.write(data) + + cdata = raw.getvalue() + assert len(cdata) < len(data) + raw = pa.BufferReader(cdata) + with pa.CompressedInputStream(raw, compression) as compressed: + got = compressed.read() + assert got == data + + +@pytest.mark.parametrize( + "compression", + ["bz2", "brotli", "gzip", "lz4", "zstd"] +) +def test_compressed_recordbatch_stream(compression): + if not Codec.is_available(compression): + pytest.skip("{} support is not built".format(compression)) + + # ARROW-4836: roundtrip a RecordBatch through a compressed stream + table = pa.Table.from_arrays([pa.array([1, 2, 3, 4, 5])], ['a']) + raw = pa.BufferOutputStream() + stream = pa.CompressedOutputStream(raw, compression) + writer = pa.RecordBatchStreamWriter(stream, table.schema) + writer.write_table(table, max_chunksize=3) + writer.close() + stream.close() # Flush data + buf = raw.getvalue() + stream = pa.CompressedInputStream(pa.BufferReader(buf), compression) + got_table = pa.RecordBatchStreamReader(stream).read_all() + assert got_table == table + + +# ---------------------------------------------------------------------- +# Transform input streams + +unicode_transcoding_example = ( + "Dès Noël où un zéphyr haï me vêt de glaçons würmiens " + "je dîne d’exquis rôtis de bœuf au kir à l’aÿ d’âge mûr & cætera !" 
+) + + +def check_transcoding(data, src_encoding, dest_encoding, chunk_sizes): + chunk_sizes = iter(chunk_sizes) + stream = pa.transcoding_input_stream( + pa.BufferReader(data.encode(src_encoding)), + src_encoding, dest_encoding) + out = [] + while True: + buf = stream.read(next(chunk_sizes)) + out.append(buf) + if not buf: + break + out = b''.join(out) + assert out.decode(dest_encoding) == data + + +@pytest.mark.parametrize('src_encoding, dest_encoding', + [('utf-8', 'utf-16'), + ('utf-16', 'utf-8'), + ('utf-8', 'utf-32-le'), + ('utf-8', 'utf-32-be'), + ]) +def test_transcoding_input_stream(src_encoding, dest_encoding): + # All at once + check_transcoding(unicode_transcoding_example, + src_encoding, dest_encoding, [1000, 0]) + # Incremental + check_transcoding(unicode_transcoding_example, + src_encoding, dest_encoding, + itertools.cycle([1, 2, 3, 5])) + + +@pytest.mark.parametrize('src_encoding, dest_encoding', + [('utf-8', 'utf-8'), + ('utf-8', 'UTF8')]) +def test_transcoding_no_ops(src_encoding, dest_encoding): + # No indirection is wasted when a trivial transcoding is requested + stream = pa.BufferReader(b"abc123") + assert pa.transcoding_input_stream( + stream, src_encoding, dest_encoding) is stream + + +@pytest.mark.parametrize('src_encoding, dest_encoding', + [('utf-8', 'ascii'), + ('utf-8', 'latin-1'), + ]) +def test_transcoding_encoding_error(src_encoding, dest_encoding): + # Character \u0100 cannot be represented in the destination encoding + stream = pa.transcoding_input_stream( + pa.BufferReader("\u0100".encode(src_encoding)), + src_encoding, + dest_encoding) + with pytest.raises(UnicodeEncodeError): + stream.read(1) + + +@pytest.mark.parametrize('src_encoding, dest_encoding', + [('utf-8', 'utf-16'), + ('utf-16', 'utf-8'), + ]) +def test_transcoding_decoding_error(src_encoding, dest_encoding): + # The given bytestring is not valid in the source encoding + stream = pa.transcoding_input_stream( + pa.BufferReader(b"\xff\xff\xff\xff"), + src_encoding, + dest_encoding) + with pytest.raises(UnicodeError): + stream.read(1) + + +# ---------------------------------------------------------------------- +# High-level API + +@pytest.mark.gzip +def test_input_stream_buffer(): + data = b"some test data\n" * 10 + b"eof\n" + for arg in [pa.py_buffer(data), memoryview(data)]: + stream = pa.input_stream(arg) + assert stream.read() == data + + gz_data = gzip.compress(data) + stream = pa.input_stream(memoryview(gz_data)) + assert stream.read() == gz_data + stream = pa.input_stream(memoryview(gz_data), compression='gzip') + assert stream.read() == data + + +def test_input_stream_duck_typing(): + # Accept objects having the right file-like methods... 
+ class DuckReader: + + def close(self): + pass + + @property + def closed(self): + return False + + def read(self, nbytes=None): + return b'hello' + + stream = pa.input_stream(DuckReader()) + assert stream.read(5) == b'hello' + + +def test_input_stream_file_path(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + file_path = tmpdir / 'input_stream' + with open(str(file_path), 'wb') as f: + f.write(data) + + stream = pa.input_stream(file_path) + assert stream.read() == data + stream = pa.input_stream(str(file_path)) + assert stream.read() == data + stream = pa.input_stream(pathlib.Path(str(file_path))) + assert stream.read() == data + + +@pytest.mark.gzip +def test_input_stream_file_path_compressed(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + gz_data = gzip.compress(data) + file_path = tmpdir / 'input_stream.gz' + with open(str(file_path), 'wb') as f: + f.write(gz_data) + + stream = pa.input_stream(file_path) + assert stream.read() == data + stream = pa.input_stream(str(file_path)) + assert stream.read() == data + stream = pa.input_stream(pathlib.Path(str(file_path))) + assert stream.read() == data + + stream = pa.input_stream(file_path, compression='gzip') + assert stream.read() == data + stream = pa.input_stream(file_path, compression=None) + assert stream.read() == gz_data + + +def test_input_stream_file_path_buffered(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + file_path = tmpdir / 'input_stream.buffered' + with open(str(file_path), 'wb') as f: + f.write(data) + + stream = pa.input_stream(file_path, buffer_size=32) + assert isinstance(stream, pa.BufferedInputStream) + assert stream.read() == data + stream = pa.input_stream(str(file_path), buffer_size=64) + assert isinstance(stream, pa.BufferedInputStream) + assert stream.read() == data + stream = pa.input_stream(pathlib.Path(str(file_path)), buffer_size=1024) + assert isinstance(stream, pa.BufferedInputStream) + assert stream.read() == data + + unbuffered_stream = pa.input_stream(file_path, buffer_size=0) + assert isinstance(unbuffered_stream, pa.OSFile) + + msg = 'Buffer size must be larger than zero' + with pytest.raises(ValueError, match=msg): + pa.input_stream(file_path, buffer_size=-1) + with pytest.raises(TypeError): + pa.input_stream(file_path, buffer_size='million') + + +@pytest.mark.gzip +def test_input_stream_file_path_compressed_and_buffered(tmpdir): + data = b"some test data\n" * 100 + b"eof\n" + gz_data = gzip.compress(data) + file_path = tmpdir / 'input_stream_compressed_and_buffered.gz' + with open(str(file_path), 'wb') as f: + f.write(gz_data) + + stream = pa.input_stream(file_path, buffer_size=32, compression='gzip') + assert stream.read() == data + stream = pa.input_stream(str(file_path), buffer_size=64) + assert stream.read() == data + stream = pa.input_stream(pathlib.Path(str(file_path)), buffer_size=1024) + assert stream.read() == data + + +@pytest.mark.gzip +def test_input_stream_python_file(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + bio = BytesIO(data) + + stream = pa.input_stream(bio) + assert stream.read() == data + + gz_data = gzip.compress(data) + bio = BytesIO(gz_data) + stream = pa.input_stream(bio) + assert stream.read() == gz_data + bio.seek(0) + stream = pa.input_stream(bio, compression='gzip') + assert stream.read() == data + + file_path = tmpdir / 'input_stream' + with open(str(file_path), 'wb') as f: + f.write(data) + with open(str(file_path), 'rb') as f: + stream = pa.input_stream(f) + assert stream.read() == data + + +@pytest.mark.gzip +def 
test_input_stream_native_file(): + data = b"some test data\n" * 10 + b"eof\n" + gz_data = gzip.compress(data) + reader = pa.BufferReader(gz_data) + stream = pa.input_stream(reader) + assert stream is reader + reader = pa.BufferReader(gz_data) + stream = pa.input_stream(reader, compression='gzip') + assert stream.read() == data + + +def test_input_stream_errors(tmpdir): + buf = memoryview(b"") + with pytest.raises(ValueError): + pa.input_stream(buf, compression="foo") + + for arg in [bytearray(), StringIO()]: + with pytest.raises(TypeError): + pa.input_stream(arg) + + with assert_file_not_found(): + pa.input_stream("non_existent_file") + + with open(str(tmpdir / 'new_file'), 'wb') as f: + with pytest.raises(TypeError, match="readable file expected"): + pa.input_stream(f) + + +def test_output_stream_buffer(): + data = b"some test data\n" * 10 + b"eof\n" + buf = bytearray(len(data)) + stream = pa.output_stream(pa.py_buffer(buf)) + stream.write(data) + assert buf == data + + buf = bytearray(len(data)) + stream = pa.output_stream(memoryview(buf)) + stream.write(data) + assert buf == data + + +def test_output_stream_duck_typing(): + # Accept objects having the right file-like methods... + class DuckWriter: + def __init__(self): + self.buf = pa.BufferOutputStream() + + def close(self): + pass + + @property + def closed(self): + return False + + def write(self, data): + self.buf.write(data) + + duck_writer = DuckWriter() + stream = pa.output_stream(duck_writer) + assert stream.write(b'hello') + assert duck_writer.buf.getvalue().to_pybytes() == b'hello' + + +def test_output_stream_file_path(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + file_path = tmpdir / 'output_stream' + + def check_data(file_path, data): + with pa.output_stream(file_path) as stream: + stream.write(data) + with open(str(file_path), 'rb') as f: + assert f.read() == data + + check_data(file_path, data) + check_data(str(file_path), data) + check_data(pathlib.Path(str(file_path)), data) + + +@pytest.mark.gzip +def test_output_stream_file_path_compressed(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + file_path = tmpdir / 'output_stream.gz' + + def check_data(file_path, data, **kwargs): + with pa.output_stream(file_path, **kwargs) as stream: + stream.write(data) + with open(str(file_path), 'rb') as f: + return f.read() + + assert gzip.decompress(check_data(file_path, data)) == data + assert gzip.decompress(check_data(str(file_path), data)) == data + assert gzip.decompress( + check_data(pathlib.Path(str(file_path)), data)) == data + + assert gzip.decompress( + check_data(file_path, data, compression='gzip')) == data + assert check_data(file_path, data, compression=None) == data + + with pytest.raises(ValueError, match='Invalid value for compression'): + assert check_data(file_path, data, compression='rabbit') == data + + +def test_output_stream_file_path_buffered(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + file_path = tmpdir / 'output_stream.buffered' + + def check_data(file_path, data, **kwargs): + with pa.output_stream(file_path, **kwargs) as stream: + if kwargs.get('buffer_size', 0) > 0: + assert isinstance(stream, pa.BufferedOutputStream) + stream.write(data) + with open(str(file_path), 'rb') as f: + return f.read() + + unbuffered_stream = pa.output_stream(file_path, buffer_size=0) + assert isinstance(unbuffered_stream, pa.OSFile) + + msg = 'Buffer size must be larger than zero' + with pytest.raises(ValueError, match=msg): + assert check_data(file_path, data, buffer_size=-128) == data + + assert 
check_data(file_path, data, buffer_size=32) == data + assert check_data(file_path, data, buffer_size=1024) == data + assert check_data(str(file_path), data, buffer_size=32) == data + + result = check_data(pathlib.Path(str(file_path)), data, buffer_size=32) + assert result == data + + +@pytest.mark.gzip +def test_output_stream_file_path_compressed_and_buffered(tmpdir): + data = b"some test data\n" * 100 + b"eof\n" + file_path = tmpdir / 'output_stream_compressed_and_buffered.gz' + + def check_data(file_path, data, **kwargs): + with pa.output_stream(file_path, **kwargs) as stream: + stream.write(data) + with open(str(file_path), 'rb') as f: + return f.read() + + result = check_data(file_path, data, buffer_size=32) + assert gzip.decompress(result) == data + + result = check_data(file_path, data, buffer_size=1024) + assert gzip.decompress(result) == data + + result = check_data(file_path, data, buffer_size=1024, compression='gzip') + assert gzip.decompress(result) == data + + +def test_output_stream_destructor(tmpdir): + # The wrapper returned by pa.output_stream() should respect Python + # file semantics, i.e. destroying it should close the underlying + # file cleanly. + data = b"some test data\n" + file_path = tmpdir / 'output_stream.buffered' + + def check_data(file_path, data, **kwargs): + stream = pa.output_stream(file_path, **kwargs) + stream.write(data) + del stream + gc.collect() + with open(str(file_path), 'rb') as f: + return f.read() + + assert check_data(file_path, data, buffer_size=0) == data + assert check_data(file_path, data, buffer_size=1024) == data + + +@pytest.mark.gzip +def test_output_stream_python_file(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + + def check_data(data, **kwargs): + # XXX cannot use BytesIO because stream.close() is necessary + # to finish writing compressed data, but it will also close the + # underlying BytesIO + fn = str(tmpdir / 'output_stream_file') + with open(fn, 'wb') as f: + with pa.output_stream(f, **kwargs) as stream: + stream.write(data) + with open(fn, 'rb') as f: + return f.read() + + assert check_data(data) == data + assert gzip.decompress(check_data(data, compression='gzip')) == data + + +def test_output_stream_errors(tmpdir): + buf = memoryview(bytearray()) + with pytest.raises(ValueError): + pa.output_stream(buf, compression="foo") + + for arg in [bytearray(), StringIO()]: + with pytest.raises(TypeError): + pa.output_stream(arg) + + fn = str(tmpdir / 'new_file') + with open(fn, 'wb') as f: + pass + with open(fn, 'rb') as f: + with pytest.raises(TypeError, match="writable file expected"): + pa.output_stream(f)
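# --- Editor's note (illustrative sketch, not part of the committed file) ---
# Summary of the high-level helpers tested above: pa.output_stream() and
# pa.input_stream() accept paths, Python file objects, or buffers, infer gzip
# compression from a ".gz" suffix, and can add buffering via buffer_size.
import gzip
import pathlib
import tempfile
import pyarrow as pa

def _sketch_stream_helpers():
    data = b"some test data\n" * 10 + b"eof\n"
    with tempfile.TemporaryDirectory() as d:
        path = pathlib.Path(d) / "example.gz"
        with pa.output_stream(path) as out:       # written gzip-compressed
            out.write(data)
        assert gzip.decompress(path.read_bytes()) == data
        with pa.input_stream(path, buffer_size=64) as stream:
            assert stream.read() == data          # transparently decompressed

if __name__ == "__main__":
    _sketch_stream_helpers()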