applied-ai-018 committed
Commit bcec0a2 · verified · 1 Parent(s): 5dee18c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/acero.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/benchmark.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/cffi.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/compute.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/conftest.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/csv.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/cuda.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/dataset.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/feather.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/flight.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/fs.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/ipc.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/json.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/pandas_compat.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/substrait.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/types.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/util.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h +86 -0
  21. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum.h +154 -0
  22. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/cached-powers.h +66 -0
  23. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/diy-fp.h +139 -0
  24. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-conversion.h +34 -0
  25. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-to-string.h +472 -0
  26. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/ieee.h +449 -0
  27. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/string-to-double.h +240 -0
  28. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/strtod.h +66 -0
  29. llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/utils.h +420 -0
  30. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/io.h +20 -0
  31. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/reader.h +35 -0
  32. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/schema.h +21 -0
  33. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/writer.h +25 -0
  34. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/reader.h +379 -0
  35. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/schema.h +184 -0
  36. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/test_util.h +524 -0
  37. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/writer.h +180 -0
  38. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter.h +363 -0
  39. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/crypto_factory.h +152 -0
  40. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/encryption.h +510 -0
  41. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_material_store.h +57 -0
  42. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_unwrapper.h +94 -0
  43. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_wrapper.h +84 -0
  44. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_system_key_material_store.h +89 -0
  45. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_encryption_key.h +57 -0
  46. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_material.h +129 -0
  47. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_metadata.h +91 -0
  48. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_toolkit.h +106 -0
  49. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client.h +93 -0
  50. llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client_factory.h +38 -0
.gitattributes CHANGED
@@ -78,3 +78,4 @@ llmeval-env/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 filter=lfs d
  llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 filter=lfs diff=lfs merge=lfs -text
  llmeval-env/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 filter=lfs diff=lfs merge=lfs -text
+ llmeval-env/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (15.2 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/acero.cpython-310.pyc ADDED
Binary file (8.91 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/benchmark.cpython-310.pyc ADDED
Binary file (245 Bytes).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/cffi.cpython-310.pyc ADDED
Binary file (1.74 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/compute.cpython-310.pyc ADDED
Binary file (19.2 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (5.46 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/csv.cpython-310.pyc ADDED
Binary file (440 Bytes).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/cuda.cpython-310.pyc ADDED
Binary file (442 Bytes).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/dataset.cpython-310.pyc ADDED
Binary file (33.1 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/feather.cpython-310.pyc ADDED
Binary file (8.2 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/flight.cpython-310.pyc ADDED
Binary file (1.59 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/fs.cpython-310.pyc ADDED
Binary file (11.5 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/ipc.cpython-310.pyc ADDED
Binary file (9.06 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/json.cpython-310.pyc ADDED
Binary file (268 Bytes).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/pandas_compat.cpython-310.pyc ADDED
Binary file (26.5 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/substrait.cpython-310.pyc ADDED
Binary file (528 Bytes).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/types.cpython-310.pyc ADDED
Binary file (8.79 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/util.cpython-310.pyc ADDED
Binary file (7.06 kB).
 
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h ADDED
@@ -0,0 +1,86 @@
+ // Copyright 2010 the V8 project authors. All rights reserved.
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following
+ // disclaimer in the documentation and/or other materials provided
+ // with the distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived
+ // from this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #ifndef DOUBLE_CONVERSION_BIGNUM_DTOA_H_
+ #define DOUBLE_CONVERSION_BIGNUM_DTOA_H_
+
+ #include "utils.h"
+
+ namespace arrow_vendored {
+ namespace double_conversion {
+
+ enum BignumDtoaMode {
+ // Return the shortest correct representation.
+ // For example the output of 0.299999999999999988897 is (the less accurate but
+ // correct) 0.3.
+ BIGNUM_DTOA_SHORTEST,
+ // Same as BIGNUM_DTOA_SHORTEST but for single-precision floats.
+ BIGNUM_DTOA_SHORTEST_SINGLE,
+ // Return a fixed number of digits after the decimal point.
+ // For instance fixed(0.1, 4) becomes 0.1000
+ // If the input number is big, the output will be big.
+ BIGNUM_DTOA_FIXED,
+ // Return a fixed number of digits, no matter what the exponent is.
+ BIGNUM_DTOA_PRECISION
+ };
+
+ // Converts the given double 'v' to ascii.
+ // The result should be interpreted as buffer * 10^(point-length).
+ // The buffer will be null-terminated.
+ //
+ // The input v must be > 0 and different from NaN, and Infinity.
+ //
+ // The output depends on the given mode:
+ // - SHORTEST: produce the least amount of digits for which the internal
+ // identity requirement is still satisfied. If the digits are printed
+ // (together with the correct exponent) then reading this number will give
+ // 'v' again. The buffer will choose the representation that is closest to
+ // 'v'. If there are two at the same distance, than the number is round up.
+ // In this mode the 'requested_digits' parameter is ignored.
+ // - FIXED: produces digits necessary to print a given number with
+ // 'requested_digits' digits after the decimal point. The produced digits
+ // might be too short in which case the caller has to fill the gaps with '0's.
+ // Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
+ // Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns
+ // buffer="2", point=0.
+ // Note: the length of the returned buffer has no meaning wrt the significance
+ // of its digits. That is, just because it contains '0's does not mean that
+ // any other digit would not satisfy the internal identity requirement.
+ // - PRECISION: produces 'requested_digits' where the first digit is not '0'.
+ // Even though the length of produced digits usually equals
+ // 'requested_digits', the function is allowed to return fewer digits, in
+ // which case the caller has to fill the missing digits with '0's.
+ // Halfway cases are again rounded up.
+ // 'BignumDtoa' expects the given buffer to be big enough to hold all digits
+ // and a terminating null-character.
+ void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
+ Vector<char> buffer, int* length, int* point);
+
+ } // namespace double_conversion
+ } // namespace arrow_vendored
+
+ #endif // DOUBLE_CONVERSION_BIGNUM_DTOA_H_
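For orientation, a minimal sketch of how the BignumDtoa entry point declared above might be called. The Vector<char> helper is assumed to come from the vendored utils.h in this same upload (constructed from a raw pointer and a length), the include path mirrors where these headers are installed in the wheel, and the matching bignum-dtoa.cc implementation is assumed to be linked in via libarrow.

// Sketch only: Vector<char> and the include path are assumptions; the
// BignumDtoa declaration itself is taken from the header above.
#include <cstdio>
#include "arrow/vendored/double-conversion/bignum-dtoa.h"

using arrow_vendored::double_conversion::BignumDtoa;
using arrow_vendored::double_conversion::BIGNUM_DTOA_SHORTEST;
using arrow_vendored::double_conversion::Vector;

int main() {
  char digits[32];
  int length = 0;
  int point = 0;
  // Shortest digits of 0.3: buffer="3", length=1, point=0,
  // i.e. 3 * 10^(point - length) = 3 * 10^-1 = 0.3.
  BignumDtoa(0.3, BIGNUM_DTOA_SHORTEST, /*requested_digits=*/0,
             Vector<char>(digits, static_cast<int>(sizeof(digits))),
             &length, &point);
  std::printf("digits=%s length=%d point=%d\n", digits, length, point);
  return 0;
}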
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/bignum.h ADDED
@@ -0,0 +1,154 @@
+ // Copyright 2010 the V8 project authors. All rights reserved.
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following
+ // disclaimer in the documentation and/or other materials provided
+ // with the distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived
+ // from this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #ifndef DOUBLE_CONVERSION_BIGNUM_H_
+ #define DOUBLE_CONVERSION_BIGNUM_H_
+
+ #include "utils.h"
+
+ namespace arrow_vendored {
+ namespace double_conversion {
+
+ class Bignum {
+ public:
+ // 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately.
+ // This bignum can encode much bigger numbers, since it contains an
+ // exponent.
+ static const int kMaxSignificantBits = 3584;
+
+ Bignum() : used_bigits_(0), exponent_(0) {}
+
+ void AssignUInt16(const uint16_t value);
+ void AssignUInt64(uint64_t value);
+ void AssignBignum(const Bignum& other);
+
+ void AssignDecimalString(const Vector<const char> value);
+ void AssignHexString(const Vector<const char> value);
+
+ void AssignPowerUInt16(uint16_t base, const int exponent);
+
+ void AddUInt64(const uint64_t operand);
+ void AddBignum(const Bignum& other);
+ // Precondition: this >= other.
+ void SubtractBignum(const Bignum& other);
+
+ void Square();
+ void ShiftLeft(const int shift_amount);
+ void MultiplyByUInt32(const uint32_t factor);
+ void MultiplyByUInt64(const uint64_t factor);
+ void MultiplyByPowerOfTen(const int exponent);
+ void Times10() { return MultiplyByUInt32(10); }
+ // Pseudocode:
+ // int result = this / other;
+ // this = this % other;
+ // In the worst case this function is in O(this/other).
+ uint16_t DivideModuloIntBignum(const Bignum& other);
+
+ bool ToHexString(char* buffer, const int buffer_size) const;
+
+ // Returns
+ // -1 if a < b,
+ // 0 if a == b, and
+ // +1 if a > b.
+ static int Compare(const Bignum& a, const Bignum& b);
+ static bool Equal(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) == 0;
+ }
+ static bool LessEqual(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) <= 0;
+ }
+ static bool Less(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) < 0;
+ }
+ // Returns Compare(a + b, c);
+ static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c);
+ // Returns a + b == c
+ static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) == 0;
+ }
+ // Returns a + b <= c
+ static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) <= 0;
+ }
+ // Returns a + b < c
+ static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) < 0;
+ }
+ private:
+ typedef uint32_t Chunk;
+ typedef uint64_t DoubleChunk;
+
+ static const int kChunkSize = sizeof(Chunk) * 8;
+ static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8;
+ // With bigit size of 28 we loose some bits, but a double still fits easily
+ // into two chunks, and more importantly we can use the Comba multiplication.
+ static const int kBigitSize = 28;
+ static const Chunk kBigitMask = (1 << kBigitSize) - 1;
+ // Every instance allocates kBigitLength chunks on the stack. Bignums cannot
+ // grow. There are no checks if the stack-allocated space is sufficient.
+ static const int kBigitCapacity = kMaxSignificantBits / kBigitSize;
+
+ static void EnsureCapacity(const int size) {
+ if (size > kBigitCapacity) {
+ DOUBLE_CONVERSION_UNREACHABLE();
+ }
+ }
+ void Align(const Bignum& other);
+ void Clamp();
+ bool IsClamped() const {
+ return used_bigits_ == 0 || RawBigit(used_bigits_ - 1) != 0;
+ }
+ void Zero() {
+ used_bigits_ = 0;
+ exponent_ = 0;
+ }
+ // Requires this to have enough capacity (no tests done).
+ // Updates used_bigits_ if necessary.
+ // shift_amount must be < kBigitSize.
+ void BigitsShiftLeft(const int shift_amount);
+ // BigitLength includes the "hidden" bigits encoded in the exponent.
+ int BigitLength() const { return used_bigits_ + exponent_; }
+ Chunk& RawBigit(const int index);
+ const Chunk& RawBigit(const int index) const;
+ Chunk BigitOrZero(const int index) const;
+ void SubtractTimes(const Bignum& other, const int factor);
+
+ // The Bignum's value is value(bigits_buffer_) * 2^(exponent_ * kBigitSize),
+ // where the value of the buffer consists of the lower kBigitSize bits of
+ // the first used_bigits_ Chunks in bigits_buffer_, first chunk has lowest
+ // significant bits.
+ int16_t used_bigits_;
+ int16_t exponent_;
+ Chunk bigits_buffer_[kBigitCapacity];
+
+ DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Bignum);
+ };
+
+ } // namespace double_conversion
+ } // namespace arrow_vendored
+
+ #endif // DOUBLE_CONVERSION_BIGNUM_H_
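A small sketch of the Bignum API above, checking that 5^12 shifted left by 12 bits equals 10^12. It assumes the vendored bignum.cc (compiled into libarrow) supplies the out-of-line definitions and that the include path matches the installed header layout.

// Sketch only: exercises AssignPowerUInt16/ShiftLeft/Compare as declared above.
#include <cassert>
#include "arrow/vendored/double-conversion/bignum.h"

using arrow_vendored::double_conversion::Bignum;

int main() {
  Bignum lhs;
  lhs.AssignPowerUInt16(5, 12);   // 5^12
  lhs.ShiftLeft(12);              // * 2^12, so lhs == 10^12

  Bignum rhs;
  rhs.AssignPowerUInt16(10, 12);  // 10^12

  assert(Bignum::Equal(lhs, rhs));   // Compare(lhs, rhs) == 0
  assert(!Bignum::Less(lhs, rhs));
  return 0;
}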
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/cached-powers.h ADDED
@@ -0,0 +1,66 @@
+ // Copyright 2010 the V8 project authors. All rights reserved.
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following
+ // disclaimer in the documentation and/or other materials provided
+ // with the distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived
+ // from this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #ifndef DOUBLE_CONVERSION_CACHED_POWERS_H_
+ #define DOUBLE_CONVERSION_CACHED_POWERS_H_
+
+ #include "diy-fp.h"
+
+ namespace arrow_vendored {
+ namespace double_conversion {
+
+ namespace PowersOfTenCache {
+
+ // Not all powers of ten are cached. The decimal exponent of two neighboring
+ // cached numbers will differ by kDecimalExponentDistance.
+ static const int kDecimalExponentDistance = 8;
+
+ static const int kMinDecimalExponent = -348;
+ static const int kMaxDecimalExponent = 340;
+
+ // Returns a cached power-of-ten with a binary exponent in the range
+ // [min_exponent; max_exponent] (boundaries included).
+ void GetCachedPowerForBinaryExponentRange(int min_exponent,
+ int max_exponent,
+ DiyFp* power,
+ int* decimal_exponent);
+
+ // Returns a cached power of ten x ~= 10^k such that
+ // k <= decimal_exponent < k + kCachedPowersDecimalDistance.
+ // The given decimal_exponent must satisfy
+ // kMinDecimalExponent <= requested_exponent, and
+ // requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance.
+ void GetCachedPowerForDecimalExponent(int requested_exponent,
+ DiyFp* power,
+ int* found_exponent);
+
+ } // namespace PowersOfTenCache
+
+ } // namespace double_conversion
+ } // namespace arrow_vendored
+
+ #endif // DOUBLE_CONVERSION_CACHED_POWERS_H_
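A sketch of the cache lookup declared above: it asks for a cached power of ten near 10^20 and prints the DiyFp it gets back. The include path and the presence of the compiled cached-powers.cc inside libarrow are assumptions.

// Sketch only: DiyFp comes from the diy-fp.h header included above.
#include <cstdio>
#include "arrow/vendored/double-conversion/cached-powers.h"

namespace dc = arrow_vendored::double_conversion;

int main() {
  dc::DiyFp power;
  int found_exponent = 0;
  // Returns some cached 10^k with k <= 20 < k + kDecimalExponentDistance.
  dc::PowersOfTenCache::GetCachedPowerForDecimalExponent(20, &power,
                                                         &found_exponent);
  std::printf("cached 10^%d: f=%llu e=%d\n", found_exponent,
              static_cast<unsigned long long>(power.f()), power.e());
  return 0;
}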
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/diy-fp.h ADDED
@@ -0,0 +1,139 @@
+ // Copyright 2010 the V8 project authors. All rights reserved.
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following
+ // disclaimer in the documentation and/or other materials provided
+ // with the distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived
+ // from this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #ifndef DOUBLE_CONVERSION_DIY_FP_H_
+ #define DOUBLE_CONVERSION_DIY_FP_H_
+
+ #include "utils.h"
+
+ namespace arrow_vendored {
+ namespace double_conversion {
+
+ // This "Do It Yourself Floating Point" class implements a floating-point number
+ // with a uint64 significand and an int exponent. Normalized DiyFp numbers will
+ // have the most significant bit of the significand set.
+ // Multiplication and Subtraction do not normalize their results.
+ // DiyFp store only non-negative numbers and are not designed to contain special
+ // doubles (NaN and Infinity).
+ class DiyFp {
+ public:
+ static const int kSignificandSize = 64;
+
+ DiyFp() : f_(0), e_(0) {}
+ DiyFp(const uint64_t significand, const int32_t exponent) : f_(significand), e_(exponent) {}
+
+ // this -= other.
+ // The exponents of both numbers must be the same and the significand of this
+ // must be greater or equal than the significand of other.
+ // The result will not be normalized.
+ void Subtract(const DiyFp& other) {
+ DOUBLE_CONVERSION_ASSERT(e_ == other.e_);
+ DOUBLE_CONVERSION_ASSERT(f_ >= other.f_);
+ f_ -= other.f_;
+ }
+
+ // Returns a - b.
+ // The exponents of both numbers must be the same and a must be greater
+ // or equal than b. The result will not be normalized.
+ static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
+ DiyFp result = a;
+ result.Subtract(b);
+ return result;
+ }
+
+ // this *= other.
+ void Multiply(const DiyFp& other) {
+ // Simply "emulates" a 128 bit multiplication.
+ // However: the resulting number only contains 64 bits. The least
+ // significant 64 bits are only used for rounding the most significant 64
+ // bits.
+ const uint64_t kM32 = 0xFFFFFFFFU;
+ const uint64_t a = f_ >> 32;
+ const uint64_t b = f_ & kM32;
+ const uint64_t c = other.f_ >> 32;
+ const uint64_t d = other.f_ & kM32;
+ const uint64_t ac = a * c;
+ const uint64_t bc = b * c;
+ const uint64_t ad = a * d;
+ const uint64_t bd = b * d;
+ // By adding 1U << 31 to tmp we round the final result.
+ // Halfway cases will be rounded up.
+ const uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32) + (1U << 31);
+ e_ += other.e_ + 64;
+ f_ = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
+ }
+
+ // returns a * b;
+ static DiyFp Times(const DiyFp& a, const DiyFp& b) {
+ DiyFp result = a;
+ result.Multiply(b);
+ return result;
+ }
+
+ void Normalize() {
+ DOUBLE_CONVERSION_ASSERT(f_ != 0);
+ uint64_t significand = f_;
+ int32_t exponent = e_;
+
+ // This method is mainly called for normalizing boundaries. In general,
+ // boundaries need to be shifted by 10 bits, and we optimize for this case.
+ const uint64_t k10MSBits = DOUBLE_CONVERSION_UINT64_2PART_C(0xFFC00000, 00000000);
+ while ((significand & k10MSBits) == 0) {
+ significand <<= 10;
+ exponent -= 10;
+ }
+ while ((significand & kUint64MSB) == 0) {
+ significand <<= 1;
+ exponent--;
+ }
+ f_ = significand;
+ e_ = exponent;
+ }
+
+ static DiyFp Normalize(const DiyFp& a) {
+ DiyFp result = a;
+ result.Normalize();
+ return result;
+ }
+
+ uint64_t f() const { return f_; }
+ int32_t e() const { return e_; }
+
+ void set_f(uint64_t new_value) { f_ = new_value; }
+ void set_e(int32_t new_value) { e_ = new_value; }
+
+ private:
+ static const uint64_t kUint64MSB = DOUBLE_CONVERSION_UINT64_2PART_C(0x80000000, 00000000);
+
+ uint64_t f_;
+ int32_t e_;
+ };
+
+ } // namespace double_conversion
+ } // namespace arrow_vendored
+
+ #endif // DOUBLE_CONVERSION_DIY_FP_H_
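Since DiyFp is defined entirely inline above, it can be exercised on its own; a minimal sketch (the include path is an assumption about the installed layout):

#include <cstdio>
#include "arrow/vendored/double-conversion/diy-fp.h"

using arrow_vendored::double_conversion::DiyFp;

int main() {
  // 3 * 2^0 and 5 * 2^0, normalized so the top bit of the significand is set.
  DiyFp a = DiyFp::Normalize(DiyFp(3, 0));
  DiyFp b = DiyFp::Normalize(DiyFp(5, 0));

  // Times() emulates a 128-bit multiply, keeps the upper 64 bits of the
  // product and adds 64 to the exponent; the result is not re-normalized.
  DiyFp product = DiyFp::Times(a, b);
  std::printf("f=%llu e=%d  (f * 2^e is approximately 15)\n",
              static_cast<unsigned long long>(product.f()), product.e());
  return 0;
}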
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-conversion.h ADDED
@@ -0,0 +1,34 @@
+ // Copyright 2012 the V8 project authors. All rights reserved.
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following
+ // disclaimer in the documentation and/or other materials provided
+ // with the distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived
+ // from this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #ifndef DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
+ #define DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
+
+ #include "string-to-double.h"
+ #include "double-to-string.h"
+
+ #endif // DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
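The umbrella header above pulls in both directions of the conversion. string-to-double.h is part of this upload but its contents are not shown in this 50-file view, so the StringToDoubleConverter constructor and StringToDouble signature used in the sketch below are assumptions carried over from the upstream double-conversion library; the include path and linking against libarrow are likewise assumed.

#include <cstdio>
#include "arrow/vendored/double-conversion/double-conversion.h"

namespace dc = arrow_vendored::double_conversion;

int main() {
  // Parse "2.5e-3" and report how many characters were consumed.
  dc::StringToDoubleConverter parse(dc::StringToDoubleConverter::NO_FLAGS,
                                    /*empty_string_value=*/0.0,
                                    /*junk_string_value=*/0.0,
                                    "Infinity", "NaN");
  int processed = 0;
  const char text[] = "2.5e-3";
  const double value =
      parse.StringToDouble(text, static_cast<int>(sizeof(text)) - 1, &processed);
  std::printf("%s -> %g (consumed %d chars)\n", text, value, processed);
  return 0;
}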
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/double-to-string.h ADDED
@@ -0,0 +1,472 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2012 the V8 project authors. All rights reserved.
2
+ // Redistribution and use in source and binary forms, with or without
3
+ // modification, are permitted provided that the following conditions are
4
+ // met:
5
+ //
6
+ // * Redistributions of source code must retain the above copyright
7
+ // notice, this list of conditions and the following disclaimer.
8
+ // * Redistributions in binary form must reproduce the above
9
+ // copyright notice, this list of conditions and the following
10
+ // disclaimer in the documentation and/or other materials provided
11
+ // with the distribution.
12
+ // * Neither the name of Google Inc. nor the names of its
13
+ // contributors may be used to endorse or promote products derived
14
+ // from this software without specific prior written permission.
15
+ //
16
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+
28
+ #ifndef DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_
29
+ #define DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_
30
+
31
+ #include "utils.h"
32
+
33
+ namespace arrow_vendored {
34
+ namespace double_conversion {
35
+
36
+ class DoubleToStringConverter {
37
+ public:
38
+ // When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint
39
+ // or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the
40
+ // function returns false.
41
+ static const int kMaxFixedDigitsBeforePoint = 60;
42
+ static const int kMaxFixedDigitsAfterPoint = 100;
43
+
44
+ // When calling ToExponential with a requested_digits
45
+ // parameter > kMaxExponentialDigits then the function returns false.
46
+ static const int kMaxExponentialDigits = 120;
47
+
48
+ // When calling ToPrecision with a requested_digits
49
+ // parameter < kMinPrecisionDigits or requested_digits > kMaxPrecisionDigits
50
+ // then the function returns false.
51
+ static const int kMinPrecisionDigits = 1;
52
+ static const int kMaxPrecisionDigits = 120;
53
+
54
+ // The maximal number of digits that are needed to emit a double in base 10.
55
+ // A higher precision can be achieved by using more digits, but the shortest
56
+ // accurate representation of any double will never use more digits than
57
+ // kBase10MaximalLength.
58
+ // Note that DoubleToAscii null-terminates its input. So the given buffer
59
+ // should be at least kBase10MaximalLength + 1 characters long.
60
+ static const int kBase10MaximalLength = 17;
61
+
62
+ // The maximal number of digits that are needed to emit a single in base 10.
63
+ // A higher precision can be achieved by using more digits, but the shortest
64
+ // accurate representation of any single will never use more digits than
65
+ // kBase10MaximalLengthSingle.
66
+ static const int kBase10MaximalLengthSingle = 9;
67
+
68
+ // The length of the longest string that 'ToShortest' can produce when the
69
+ // converter is instantiated with EcmaScript defaults (see
70
+ // 'EcmaScriptConverter')
71
+ // This value does not include the trailing '\0' character.
72
+ // This amount of characters is needed for negative values that hit the
73
+ // 'decimal_in_shortest_low' limit. For example: "-0.0000033333333333333333"
74
+ static const int kMaxCharsEcmaScriptShortest = 25;
75
+
76
+ enum Flags {
77
+ NO_FLAGS = 0,
78
+ EMIT_POSITIVE_EXPONENT_SIGN = 1,
79
+ EMIT_TRAILING_DECIMAL_POINT = 2,
80
+ EMIT_TRAILING_ZERO_AFTER_POINT = 4,
81
+ UNIQUE_ZERO = 8,
82
+ NO_TRAILING_ZERO = 16,
83
+ EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL = 32,
84
+ EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL = 64
85
+ };
86
+
87
+ // Flags should be a bit-or combination of the possible Flags-enum.
88
+ // - NO_FLAGS: no special flags.
89
+ // - EMIT_POSITIVE_EXPONENT_SIGN: when the number is converted into exponent
90
+ // form, emits a '+' for positive exponents. Example: 1.2e+2.
91
+ // - EMIT_TRAILING_DECIMAL_POINT: when the input number is an integer and is
92
+ // converted into decimal format then a trailing decimal point is appended.
93
+ // Example: 2345.0 is converted to "2345.".
94
+ // - EMIT_TRAILING_ZERO_AFTER_POINT: in addition to a trailing decimal point
95
+ // emits a trailing '0'-character. This flag requires the
96
+ // EMIT_TRAILING_DECIMAL_POINT flag.
97
+ // Example: 2345.0 is converted to "2345.0".
98
+ // - UNIQUE_ZERO: "-0.0" is converted to "0.0".
99
+ // - NO_TRAILING_ZERO: Trailing zeros are removed from the fractional portion
100
+ // of the result in precision mode. Matches printf's %g.
101
+ // When EMIT_TRAILING_ZERO_AFTER_POINT is also given, one trailing zero is
102
+ // preserved.
103
+ // - EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL: when the input number has
104
+ // exactly one significant digit and is converted into exponent form then a
105
+ // trailing decimal point is appended to the significand in shortest mode
106
+ // or in precision mode with one requested digit.
107
+ // - EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL: in addition to a trailing
108
+ // decimal point emits a trailing '0'-character. This flag requires the
109
+ // EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL flag.
110
+ //
111
+ // Infinity symbol and nan_symbol provide the string representation for these
112
+ // special values. If the string is NULL and the special value is encountered
113
+ // then the conversion functions return false.
114
+ //
115
+ // The exponent_character is used in exponential representations. It is
116
+ // usually 'e' or 'E'.
117
+ //
118
+ // When converting to the shortest representation the converter will
119
+ // represent input numbers in decimal format if they are in the interval
120
+ // [10^decimal_in_shortest_low; 10^decimal_in_shortest_high[
121
+ // (lower boundary included, greater boundary excluded).
122
+ // Example: with decimal_in_shortest_low = -6 and
123
+ // decimal_in_shortest_high = 21:
124
+ // ToShortest(0.000001) -> "0.000001"
125
+ // ToShortest(0.0000001) -> "1e-7"
126
+ // ToShortest(111111111111111111111.0) -> "111111111111111110000"
127
+ // ToShortest(100000000000000000000.0) -> "100000000000000000000"
128
+ // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21"
129
+ //
130
+ // When converting to precision mode the converter may add
131
+ // max_leading_padding_zeroes before returning the number in exponential
132
+ // format.
133
+ // Example with max_leading_padding_zeroes_in_precision_mode = 6.
134
+ // ToPrecision(0.0000012345, 2) -> "0.0000012"
135
+ // ToPrecision(0.00000012345, 2) -> "1.2e-7"
136
+ // Similarly the converter may add up to
137
+ // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid
138
+ // returning an exponential representation. A zero added by the
139
+ // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit.
140
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 1:
141
+ // ToPrecision(230.0, 2) -> "230"
142
+ // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT.
143
+ // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT.
144
+ //
145
+ // When converting numbers with exactly one significant digit to exponent
146
+ // form in shortest mode or in precision mode with one requested digit, the
147
+ // EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT flags have
148
+ // no effect. Use the EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL flag to
149
+ // append a decimal point in this case and the
150
+ // EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL flag to also append a
151
+ // '0'-character in this case.
152
+ // Example with decimal_in_shortest_low = 0:
153
+ // ToShortest(0.0009) -> "9e-4"
154
+ // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL deactivated.
155
+ // ToShortest(0.0009) -> "9.e-4"
156
+ // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL activated.
157
+ // ToShortest(0.0009) -> "9.0e-4"
158
+ // with EMIT_TRAILING_DECIMAL_POINT_IN_EXPONENTIAL activated and
159
+ // EMIT_TRAILING_ZERO_AFTER_POINT_IN_EXPONENTIAL activated.
160
+ //
161
+ // The min_exponent_width is used for exponential representations.
162
+ // The converter adds leading '0's to the exponent until the exponent
163
+ // is at least min_exponent_width digits long.
164
+ // The min_exponent_width is clamped to 5.
165
+ // As such, the exponent may never have more than 5 digits in total.
166
+ DoubleToStringConverter(int flags,
167
+ const char* infinity_symbol,
168
+ const char* nan_symbol,
169
+ char exponent_character,
170
+ int decimal_in_shortest_low,
171
+ int decimal_in_shortest_high,
172
+ int max_leading_padding_zeroes_in_precision_mode,
173
+ int max_trailing_padding_zeroes_in_precision_mode,
174
+ int min_exponent_width = 0)
175
+ : flags_(flags),
176
+ infinity_symbol_(infinity_symbol),
177
+ nan_symbol_(nan_symbol),
178
+ exponent_character_(exponent_character),
179
+ decimal_in_shortest_low_(decimal_in_shortest_low),
180
+ decimal_in_shortest_high_(decimal_in_shortest_high),
181
+ max_leading_padding_zeroes_in_precision_mode_(
182
+ max_leading_padding_zeroes_in_precision_mode),
183
+ max_trailing_padding_zeroes_in_precision_mode_(
184
+ max_trailing_padding_zeroes_in_precision_mode),
185
+ min_exponent_width_(min_exponent_width) {
186
+ // When 'trailing zero after the point' is set, then 'trailing point'
187
+ // must be set too.
188
+ DOUBLE_CONVERSION_ASSERT(((flags & EMIT_TRAILING_DECIMAL_POINT) != 0) ||
189
+ !((flags & EMIT_TRAILING_ZERO_AFTER_POINT) != 0));
190
+ }
191
+
192
+ // Returns a converter following the EcmaScript specification.
193
+ //
194
+ // Flags: UNIQUE_ZERO and EMIT_POSITIVE_EXPONENT_SIGN.
195
+ // Special values: "Infinity" and "NaN".
196
+ // Lower case 'e' for exponential values.
197
+ // decimal_in_shortest_low: -6
198
+ // decimal_in_shortest_high: 21
199
+ // max_leading_padding_zeroes_in_precision_mode: 6
200
+ // max_trailing_padding_zeroes_in_precision_mode: 0
201
+ static const DoubleToStringConverter& EcmaScriptConverter();
202
+
203
+ // Computes the shortest string of digits that correctly represent the input
204
+ // number. Depending on decimal_in_shortest_low and decimal_in_shortest_high
205
+ // (see constructor) it then either returns a decimal representation, or an
206
+ // exponential representation.
207
+ // Example with decimal_in_shortest_low = -6,
208
+ // decimal_in_shortest_high = 21,
209
+ // EMIT_POSITIVE_EXPONENT_SIGN activated, and
210
+ // EMIT_TRAILING_DECIMAL_POINT deactivated:
211
+ // ToShortest(0.000001) -> "0.000001"
212
+ // ToShortest(0.0000001) -> "1e-7"
213
+ // ToShortest(111111111111111111111.0) -> "111111111111111110000"
214
+ // ToShortest(100000000000000000000.0) -> "100000000000000000000"
215
+ // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21"
216
+ //
217
+ // Note: the conversion may round the output if the returned string
218
+ // is accurate enough to uniquely identify the input-number.
219
+ // For example the most precise representation of the double 9e59 equals
220
+ // "899999999999999918767229449717619953810131273674690656206848", but
221
+ // the converter will return the shorter (but still correct) "9e59".
222
+ //
223
+ // Returns true if the conversion succeeds. The conversion always succeeds
224
+ // except when the input value is special and no infinity_symbol or
225
+ // nan_symbol has been given to the constructor.
226
+ //
227
+ // The length of the longest result is the maximum of the length of the
228
+ // following string representations (each with possible examples):
229
+ // - NaN and negative infinity: "NaN", "-Infinity", "-inf".
230
+ // - -10^(decimal_in_shortest_high - 1):
231
+ // "-100000000000000000000", "-1000000000000000.0"
232
+ // - the longest string in range [0; -10^decimal_in_shortest_low]. Generally,
233
+ // this string is 3 + kBase10MaximalLength - decimal_in_shortest_low.
234
+ // (Sign, '0', decimal point, padding zeroes for decimal_in_shortest_low,
235
+ // and the significant digits).
236
+ // "-0.0000033333333333333333", "-0.0012345678901234567"
237
+ // - the longest exponential representation. (A negative number with
238
+ // kBase10MaximalLength significant digits).
239
+ // "-1.7976931348623157e+308", "-1.7976931348623157E308"
240
+ // In addition, the buffer must be able to hold the trailing '\0' character.
241
+ bool ToShortest(double value, StringBuilder* result_builder) const {
242
+ return ToShortestIeeeNumber(value, result_builder, SHORTEST);
243
+ }
244
+
245
+ // Same as ToShortest, but for single-precision floats.
246
+ bool ToShortestSingle(float value, StringBuilder* result_builder) const {
247
+ return ToShortestIeeeNumber(value, result_builder, SHORTEST_SINGLE);
248
+ }
249
+
250
+
251
+ // Computes a decimal representation with a fixed number of digits after the
252
+ // decimal point. The last emitted digit is rounded.
253
+ //
254
+ // Examples:
255
+ // ToFixed(3.12, 1) -> "3.1"
256
+ // ToFixed(3.1415, 3) -> "3.142"
257
+ // ToFixed(1234.56789, 4) -> "1234.5679"
258
+ // ToFixed(1.23, 5) -> "1.23000"
259
+ // ToFixed(0.1, 4) -> "0.1000"
260
+ // ToFixed(1e30, 2) -> "1000000000000000019884624838656.00"
261
+ // ToFixed(0.1, 30) -> "0.100000000000000005551115123126"
262
+ // ToFixed(0.1, 17) -> "0.10000000000000001"
263
+ //
264
+ // If requested_digits equals 0, then the tail of the result depends on
265
+ // the EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT.
266
+ // Examples, for requested_digits == 0,
267
+ // let EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT be
268
+ // - false and false: then 123.45 -> 123
269
+ // 0.678 -> 1
270
+ // - true and false: then 123.45 -> 123.
271
+ // 0.678 -> 1.
272
+ // - true and true: then 123.45 -> 123.0
273
+ // 0.678 -> 1.0
274
+ //
275
+ // Returns true if the conversion succeeds. The conversion always succeeds
276
+ // except for the following cases:
277
+ // - the input value is special and no infinity_symbol or nan_symbol has
278
+ // been provided to the constructor,
279
+ // - 'value' > 10^kMaxFixedDigitsBeforePoint, or
280
+ // - 'requested_digits' > kMaxFixedDigitsAfterPoint.
281
+ // The last two conditions imply that the result for non-special values never
282
+ // contains more than
283
+ // 1 + kMaxFixedDigitsBeforePoint + 1 + kMaxFixedDigitsAfterPoint characters
284
+ // (one additional character for the sign, and one for the decimal point).
285
+ // In addition, the buffer must be able to hold the trailing '\0' character.
286
+ bool ToFixed(double value,
287
+ int requested_digits,
288
+ StringBuilder* result_builder) const;
289
+
290
+ // Computes a representation in exponential format with requested_digits
291
+ // after the decimal point. The last emitted digit is rounded.
292
+ // If requested_digits equals -1, then the shortest exponential representation
293
+ // is computed.
294
+ //
295
+ // Examples with EMIT_POSITIVE_EXPONENT_SIGN deactivated, and
296
+ // exponent_character set to 'e'.
297
+ // ToExponential(3.12, 1) -> "3.1e0"
298
+ // ToExponential(5.0, 3) -> "5.000e0"
299
+ // ToExponential(0.001, 2) -> "1.00e-3"
300
+ // ToExponential(3.1415, -1) -> "3.1415e0"
301
+ // ToExponential(3.1415, 4) -> "3.1415e0"
302
+ // ToExponential(3.1415, 3) -> "3.142e0"
303
+ // ToExponential(123456789000000, 3) -> "1.235e14"
304
+ // ToExponential(1000000000000000019884624838656.0, -1) -> "1e30"
305
+ // ToExponential(1000000000000000019884624838656.0, 32) ->
306
+ // "1.00000000000000001988462483865600e30"
307
+ // ToExponential(1234, 0) -> "1e3"
308
+ //
309
+ // Returns true if the conversion succeeds. The conversion always succeeds
310
+ // except for the following cases:
311
+ // - the input value is special and no infinity_symbol or nan_symbol has
312
+ // been provided to the constructor,
313
+ // - 'requested_digits' > kMaxExponentialDigits.
314
+ //
315
+ // The last condition implies that the result never contains more than
316
+ // kMaxExponentialDigits + 8 characters (the sign, the digit before the
317
+ // decimal point, the decimal point, the exponent character, the
318
+ // exponent's sign, and at most 3 exponent digits).
319
+ // In addition, the buffer must be able to hold the trailing '\0' character.
320
+ bool ToExponential(double value,
321
+ int requested_digits,
322
+ StringBuilder* result_builder) const;
323
+
324
+
325
+ // Computes 'precision' leading digits of the given 'value' and returns them
326
+ // either in exponential or decimal format, depending on
327
+ // max_{leading|trailing}_padding_zeroes_in_precision_mode (given to the
328
+ // constructor).
329
+ // The last computed digit is rounded.
330
+ //
331
+ // Example with max_leading_padding_zeroes_in_precision_mode = 6.
332
+ // ToPrecision(0.0000012345, 2) -> "0.0000012"
333
+ // ToPrecision(0.00000012345, 2) -> "1.2e-7"
334
+ // Similarly the converter may add up to
335
+ // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid
336
+ // returning an exponential representation. A zero added by the
337
+ // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit.
338
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 1:
339
+ // ToPrecision(230.0, 2) -> "230"
340
+ // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT.
341
+ // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT.
342
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 3, and no
343
+ // EMIT_TRAILING_ZERO_AFTER_POINT:
344
+ // ToPrecision(123450.0, 6) -> "123450"
345
+ // ToPrecision(123450.0, 5) -> "123450"
346
+ // ToPrecision(123450.0, 4) -> "123500"
347
+ // ToPrecision(123450.0, 3) -> "123000"
348
+ // ToPrecision(123450.0, 2) -> "1.2e5"
349
+ //
350
+ // Returns true if the conversion succeeds. The conversion always succeeds
351
+ // except for the following cases:
352
+ // - the input value is special and no infinity_symbol or nan_symbol has
353
+ // been provided to the constructor,
354
+ // - precision < kMinPericisionDigits
355
+ // - precision > kMaxPrecisionDigits
356
+ //
357
+ // The last condition implies that the result never contains more than
358
+ // kMaxPrecisionDigits + 7 characters (the sign, the decimal point, the
359
+ // exponent character, the exponent's sign, and at most 3 exponent digits).
360
+ // In addition, the buffer must be able to hold the trailing '\0' character.
361
+ bool ToPrecision(double value,
362
+ int precision,
363
+ StringBuilder* result_builder) const;
364
+
365
+ enum DtoaMode {
366
+ // Produce the shortest correct representation.
367
+ // For example the output of 0.299999999999999988897 is (the less accurate
368
+ // but correct) 0.3.
369
+ SHORTEST,
370
+ // Same as SHORTEST, but for single-precision floats.
371
+ SHORTEST_SINGLE,
372
+ // Produce a fixed number of digits after the decimal point.
373
+ // For instance fixed(0.1, 4) becomes 0.1000
374
+ // If the input number is big, the output will be big.
375
+ FIXED,
376
+ // Fixed number of digits (independent of the decimal point).
377
+ PRECISION
378
+ };
379
+
380
+ // Converts the given double 'v' to digit characters. 'v' must not be NaN,
381
+ // +Infinity, or -Infinity. In SHORTEST_SINGLE-mode this restriction also
382
+ // applies to 'v' after it has been casted to a single-precision float. That
383
+ // is, in this mode static_cast<float>(v) must not be NaN, +Infinity or
384
+ // -Infinity.
385
+ //
386
+ // The result should be interpreted as buffer * 10^(point-length).
387
+ //
388
+ // The digits are written to the buffer in the platform's charset, which is
389
+ // often UTF-8 (with ASCII-range digits) but may be another charset, such
390
+ // as EBCDIC.
391
+ //
392
+ // The output depends on the given mode:
393
+ // - SHORTEST: produce the least amount of digits for which the internal
394
+ // identity requirement is still satisfied. If the digits are printed
395
+ // (together with the correct exponent) then reading this number will give
396
+ // 'v' again. The buffer will choose the representation that is closest to
397
+ // 'v'. If there are two at the same distance, than the one farther away
398
+ // from 0 is chosen (halfway cases - ending with 5 - are rounded up).
399
+ // In this mode the 'requested_digits' parameter is ignored.
400
+ // - SHORTEST_SINGLE: same as SHORTEST but with single-precision.
401
+ // - FIXED: produces digits necessary to print a given number with
402
+ // 'requested_digits' digits after the decimal point. The produced digits
403
+ // might be too short in which case the caller has to fill the remainder
404
+ // with '0's.
405
+ // Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
406
+ // Halfway cases are rounded towards +/-Infinity (away from 0). The call
407
+ // toFixed(0.15, 2) thus returns buffer="2", point=0.
408
+ // The returned buffer may contain digits that would be truncated from the
409
+ // shortest representation of the input.
410
+ // - PRECISION: produces 'requested_digits' where the first digit is not '0'.
411
+ // Even though the length of produced digits usually equals
412
+ // 'requested_digits', the function is allowed to return fewer digits, in
413
+ // which case the caller has to fill the missing digits with '0's.
414
+ // Halfway cases are again rounded away from 0.
415
+ // DoubleToAscii expects the given buffer to be big enough to hold all
416
+ // digits and a terminating null-character. In SHORTEST-mode it expects a
417
+ // buffer of at least kBase10MaximalLength + 1. In all other modes the
418
+ // requested_digits parameter and the padding-zeroes limit the size of the
419
+ // output. Don't forget the decimal point, the exponent character and the
420
+ // terminating null-character when computing the maximal output size.
421
+ // The given length is only used in debug mode to ensure the buffer is big
422
+ // enough.
423
+ static void DoubleToAscii(double v,
424
+ DtoaMode mode,
425
+ int requested_digits,
426
+ char* buffer,
427
+ int buffer_length,
428
+ bool* sign,
429
+ int* length,
430
+ int* point);
431
+
432
+ private:
433
+ // Implementation for ToShortest and ToShortestSingle.
434
+ bool ToShortestIeeeNumber(double value,
435
+ StringBuilder* result_builder,
436
+ DtoaMode mode) const;
437
+
438
+ // If the value is a special value (NaN or Infinity) constructs the
439
+ // corresponding string using the configured infinity/nan-symbol.
440
+ // If either of them is NULL or the value is not special then the
441
+ // function returns false.
442
+ bool HandleSpecialValues(double value, StringBuilder* result_builder) const;
443
+ // Constructs an exponential representation (i.e. 1.234e56).
444
+ // The given exponent assumes a decimal point after the first decimal digit.
445
+ void CreateExponentialRepresentation(const char* decimal_digits,
446
+ int length,
447
+ int exponent,
448
+ StringBuilder* result_builder) const;
449
+ // Creates a decimal representation (i.e 1234.5678).
450
+ void CreateDecimalRepresentation(const char* decimal_digits,
451
+ int length,
452
+ int decimal_point,
453
+ int digits_after_point,
454
+ StringBuilder* result_builder) const;
455
+
456
+ const int flags_;
457
+ const char* const infinity_symbol_;
458
+ const char* const nan_symbol_;
459
+ const char exponent_character_;
460
+ const int decimal_in_shortest_low_;
461
+ const int decimal_in_shortest_high_;
462
+ const int max_leading_padding_zeroes_in_precision_mode_;
463
+ const int max_trailing_padding_zeroes_in_precision_mode_;
464
+ const int min_exponent_width_;
465
+
466
+ DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(DoubleToStringConverter);
467
+ };
468
+
469
+ } // namespace double_conversion
470
+ } // namespace arrow_vendored
471
+
472
+ #endif // DOUBLE_CONVERSION_DOUBLE_TO_STRING_H_
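
The DoubleToAscii() contract described in the comments above is easiest to see in a small driver. This is only a sketch, not part of the commit: it assumes the vendored header is reachable as "arrow/vendored/double-conversion/double-to-string.h" and that kBase10MaximalLength and the DtoaMode constants (SHORTEST, FIXED, PRECISION) are members of DoubleToStringConverter, as in upstream double-conversion.

    #include <cstdio>
    #include "arrow/vendored/double-conversion/double-to-string.h"

    using arrow_vendored::double_conversion::DoubleToStringConverter;

    int main() {
      // SHORTEST mode needs room for kBase10MaximalLength digits plus '\0'.
      char digits[DoubleToStringConverter::kBase10MaximalLength + 1];
      bool sign;
      int length, point;
      DoubleToStringConverter::DoubleToAscii(
          0.15, DoubleToStringConverter::SHORTEST, /*requested_digits=*/0,
          digits, static_cast<int>(sizeof(digits)), &sign, &length, &point);
      // Expected for 0.15: the shortest digits "15" with decimal point position 0.
      std::printf("%s%s  length=%d point=%d\n", sign ? "-" : "", digits, length, point);
      return 0;
    }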
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/ieee.h ADDED
@@ -0,0 +1,449 @@
1
+ // Copyright 2012 the V8 project authors. All rights reserved.
2
+ // Redistribution and use in source and binary forms, with or without
3
+ // modification, are permitted provided that the following conditions are
4
+ // met:
5
+ //
6
+ // * Redistributions of source code must retain the above copyright
7
+ // notice, this list of conditions and the following disclaimer.
8
+ // * Redistributions in binary form must reproduce the above
9
+ // copyright notice, this list of conditions and the following
10
+ // disclaimer in the documentation and/or other materials provided
11
+ // with the distribution.
12
+ // * Neither the name of Google Inc. nor the names of its
13
+ // contributors may be used to endorse or promote products derived
14
+ // from this software without specific prior written permission.
15
+ //
16
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+
28
+ #ifndef DOUBLE_CONVERSION_DOUBLE_H_
29
+ #define DOUBLE_CONVERSION_DOUBLE_H_
30
+
31
+ #include "diy-fp.h"
32
+
33
+ namespace arrow_vendored {
34
+ namespace double_conversion {
35
+
36
+ // We assume that doubles and uint64_t have the same endianness.
37
+ static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
38
+ static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
39
+ static uint32_t float_to_uint32(float f) { return BitCast<uint32_t>(f); }
40
+ static float uint32_to_float(uint32_t d32) { return BitCast<float>(d32); }
41
+
42
+ // Helper functions for doubles.
43
+ class Double {
44
+ public:
45
+ static const uint64_t kSignMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x80000000, 00000000);
46
+ static const uint64_t kExponentMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF00000, 00000000);
47
+ static const uint64_t kSignificandMask = DOUBLE_CONVERSION_UINT64_2PART_C(0x000FFFFF, FFFFFFFF);
48
+ static const uint64_t kHiddenBit = DOUBLE_CONVERSION_UINT64_2PART_C(0x00100000, 00000000);
49
+ static const uint64_t kQuietNanBit = DOUBLE_CONVERSION_UINT64_2PART_C(0x00080000, 00000000);
50
+ static const int kPhysicalSignificandSize = 52; // Excludes the hidden bit.
51
+ static const int kSignificandSize = 53;
52
+ static const int kExponentBias = 0x3FF + kPhysicalSignificandSize;
53
+ static const int kMaxExponent = 0x7FF - kExponentBias;
54
+
55
+ Double() : d64_(0) {}
56
+ explicit Double(double d) : d64_(double_to_uint64(d)) {}
57
+ explicit Double(uint64_t d64) : d64_(d64) {}
58
+ explicit Double(DiyFp diy_fp)
59
+ : d64_(DiyFpToUint64(diy_fp)) {}
60
+
61
+ // The value encoded by this Double must be greater or equal to +0.0.
62
+ // It must not be special (infinity, or NaN).
63
+ DiyFp AsDiyFp() const {
64
+ DOUBLE_CONVERSION_ASSERT(Sign() > 0);
65
+ DOUBLE_CONVERSION_ASSERT(!IsSpecial());
66
+ return DiyFp(Significand(), Exponent());
67
+ }
68
+
69
+ // The value encoded by this Double must be strictly greater than 0.
70
+ DiyFp AsNormalizedDiyFp() const {
71
+ DOUBLE_CONVERSION_ASSERT(value() > 0.0);
72
+ uint64_t f = Significand();
73
+ int e = Exponent();
74
+
75
+ // The current double could be a denormal.
76
+ while ((f & kHiddenBit) == 0) {
77
+ f <<= 1;
78
+ e--;
79
+ }
80
+ // Do the final shifts in one go.
81
+ f <<= DiyFp::kSignificandSize - kSignificandSize;
82
+ e -= DiyFp::kSignificandSize - kSignificandSize;
83
+ return DiyFp(f, e);
84
+ }
85
+
86
+ // Returns the double's bits as a uint64.
87
+ uint64_t AsUint64() const {
88
+ return d64_;
89
+ }
90
+
91
+ // Returns the next greater double. Returns +infinity on input +infinity.
92
+ double NextDouble() const {
93
+ if (d64_ == kInfinity) return Double(kInfinity).value();
94
+ if (Sign() < 0 && Significand() == 0) {
95
+ // -0.0
96
+ return 0.0;
97
+ }
98
+ if (Sign() < 0) {
99
+ return Double(d64_ - 1).value();
100
+ } else {
101
+ return Double(d64_ + 1).value();
102
+ }
103
+ }
104
+
105
+ double PreviousDouble() const {
106
+ if (d64_ == (kInfinity | kSignMask)) return -Infinity();
107
+ if (Sign() < 0) {
108
+ return Double(d64_ + 1).value();
109
+ } else {
110
+ if (Significand() == 0) return -0.0;
111
+ return Double(d64_ - 1).value();
112
+ }
113
+ }
114
+
115
+ int Exponent() const {
116
+ if (IsDenormal()) return kDenormalExponent;
117
+
118
+ uint64_t d64 = AsUint64();
119
+ int biased_e =
120
+ static_cast<int>((d64 & kExponentMask) >> kPhysicalSignificandSize);
121
+ return biased_e - kExponentBias;
122
+ }
123
+
124
+ uint64_t Significand() const {
125
+ uint64_t d64 = AsUint64();
126
+ uint64_t significand = d64 & kSignificandMask;
127
+ if (!IsDenormal()) {
128
+ return significand + kHiddenBit;
129
+ } else {
130
+ return significand;
131
+ }
132
+ }
133
+
134
+ // Returns true if the double is a denormal.
135
+ bool IsDenormal() const {
136
+ uint64_t d64 = AsUint64();
137
+ return (d64 & kExponentMask) == 0;
138
+ }
139
+
140
+ // We consider denormals not to be special.
141
+ // Hence only Infinity and NaN are special.
142
+ bool IsSpecial() const {
143
+ uint64_t d64 = AsUint64();
144
+ return (d64 & kExponentMask) == kExponentMask;
145
+ }
146
+
147
+ bool IsNan() const {
148
+ uint64_t d64 = AsUint64();
149
+ return ((d64 & kExponentMask) == kExponentMask) &&
150
+ ((d64 & kSignificandMask) != 0);
151
+ }
152
+
153
+ bool IsQuietNan() const {
154
+ #if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
155
+ return IsNan() && ((AsUint64() & kQuietNanBit) == 0);
156
+ #else
157
+ return IsNan() && ((AsUint64() & kQuietNanBit) != 0);
158
+ #endif
159
+ }
160
+
161
+ bool IsSignalingNan() const {
162
+ #if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
163
+ return IsNan() && ((AsUint64() & kQuietNanBit) != 0);
164
+ #else
165
+ return IsNan() && ((AsUint64() & kQuietNanBit) == 0);
166
+ #endif
167
+ }
168
+
169
+
170
+ bool IsInfinite() const {
171
+ uint64_t d64 = AsUint64();
172
+ return ((d64 & kExponentMask) == kExponentMask) &&
173
+ ((d64 & kSignificandMask) == 0);
174
+ }
175
+
176
+ int Sign() const {
177
+ uint64_t d64 = AsUint64();
178
+ return (d64 & kSignMask) == 0? 1: -1;
179
+ }
180
+
181
+ // Precondition: the value encoded by this Double must be greater or equal
182
+ // to +0.0.
183
+ DiyFp UpperBoundary() const {
184
+ DOUBLE_CONVERSION_ASSERT(Sign() > 0);
185
+ return DiyFp(Significand() * 2 + 1, Exponent() - 1);
186
+ }
187
+
188
+ // Computes the two boundaries of this.
189
+ // The bigger boundary (m_plus) is normalized. The lower boundary has the same
190
+ // exponent as m_plus.
191
+ // Precondition: the value encoded by this Double must be greater than 0.
192
+ void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
193
+ DOUBLE_CONVERSION_ASSERT(value() > 0.0);
194
+ DiyFp v = this->AsDiyFp();
195
+ DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
196
+ DiyFp m_minus;
197
+ if (LowerBoundaryIsCloser()) {
198
+ m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
199
+ } else {
200
+ m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
201
+ }
202
+ m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
203
+ m_minus.set_e(m_plus.e());
204
+ *out_m_plus = m_plus;
205
+ *out_m_minus = m_minus;
206
+ }
207
+
208
+ bool LowerBoundaryIsCloser() const {
209
+ // If the significand is of the form f == 2^p-1 then
210
+ // the lower boundary is closer.
211
+ // Think of v = 1000e10 and v- = 9999e9.
212
+ // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
213
+ // at a distance of 1e8.
214
+ // The only exception is for the smallest normal: the largest denormal is
215
+ // at the same distance as its successor.
216
+ // Note: denormals have the same exponent as the smallest normals.
217
+ bool physical_significand_is_zero = ((AsUint64() & kSignificandMask) == 0);
218
+ return physical_significand_is_zero && (Exponent() != kDenormalExponent);
219
+ }
220
+
221
+ double value() const { return uint64_to_double(d64_); }
222
+
223
+ // Returns the significand size for a given order of magnitude.
224
+ // If v = f*2^e with 2^p-1 <= f <= 2^p then p+e is v's order of magnitude.
225
+ // This function returns the number of significant binary digits v will have
226
+ // once it's encoded into a double. In almost all cases this is equal to
227
+ // kSignificandSize. The only exceptions are denormals. They start with
228
+ // leading zeroes and their effective significand-size is hence smaller.
229
+ static int SignificandSizeForOrderOfMagnitude(int order) {
230
+ if (order >= (kDenormalExponent + kSignificandSize)) {
231
+ return kSignificandSize;
232
+ }
233
+ if (order <= kDenormalExponent) return 0;
234
+ return order - kDenormalExponent;
235
+ }
236
+
237
+ static double Infinity() {
238
+ return Double(kInfinity).value();
239
+ }
240
+
241
+ static double NaN() {
242
+ return Double(kNaN).value();
243
+ }
244
+
245
+ private:
246
+ static const int kDenormalExponent = -kExponentBias + 1;
247
+ static const uint64_t kInfinity = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF00000, 00000000);
248
+ #if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
249
+ static const uint64_t kNaN = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF7FFFF, FFFFFFFF);
250
+ #else
251
+ static const uint64_t kNaN = DOUBLE_CONVERSION_UINT64_2PART_C(0x7FF80000, 00000000);
252
+ #endif
253
+
254
+
255
+ const uint64_t d64_;
256
+
257
+ static uint64_t DiyFpToUint64(DiyFp diy_fp) {
258
+ uint64_t significand = diy_fp.f();
259
+ int exponent = diy_fp.e();
260
+ while (significand > kHiddenBit + kSignificandMask) {
261
+ significand >>= 1;
262
+ exponent++;
263
+ }
264
+ if (exponent >= kMaxExponent) {
265
+ return kInfinity;
266
+ }
267
+ if (exponent < kDenormalExponent) {
268
+ return 0;
269
+ }
270
+ while (exponent > kDenormalExponent && (significand & kHiddenBit) == 0) {
271
+ significand <<= 1;
272
+ exponent--;
273
+ }
274
+ uint64_t biased_exponent;
275
+ if (exponent == kDenormalExponent && (significand & kHiddenBit) == 0) {
276
+ biased_exponent = 0;
277
+ } else {
278
+ biased_exponent = static_cast<uint64_t>(exponent + kExponentBias);
279
+ }
280
+ return (significand & kSignificandMask) |
281
+ (biased_exponent << kPhysicalSignificandSize);
282
+ }
283
+
284
+ DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Double);
285
+ };
286
+
287
+ class Single {
288
+ public:
289
+ static const uint32_t kSignMask = 0x80000000;
290
+ static const uint32_t kExponentMask = 0x7F800000;
291
+ static const uint32_t kSignificandMask = 0x007FFFFF;
292
+ static const uint32_t kHiddenBit = 0x00800000;
293
+ static const uint32_t kQuietNanBit = 0x00400000;
294
+ static const int kPhysicalSignificandSize = 23; // Excludes the hidden bit.
295
+ static const int kSignificandSize = 24;
296
+
297
+ Single() : d32_(0) {}
298
+ explicit Single(float f) : d32_(float_to_uint32(f)) {}
299
+ explicit Single(uint32_t d32) : d32_(d32) {}
300
+
301
+ // The value encoded by this Single must be greater or equal to +0.0.
302
+ // It must not be special (infinity, or NaN).
303
+ DiyFp AsDiyFp() const {
304
+ DOUBLE_CONVERSION_ASSERT(Sign() > 0);
305
+ DOUBLE_CONVERSION_ASSERT(!IsSpecial());
306
+ return DiyFp(Significand(), Exponent());
307
+ }
308
+
309
+ // Returns the single's bits as a uint32.
310
+ uint32_t AsUint32() const {
311
+ return d32_;
312
+ }
313
+
314
+ int Exponent() const {
315
+ if (IsDenormal()) return kDenormalExponent;
316
+
317
+ uint32_t d32 = AsUint32();
318
+ int biased_e =
319
+ static_cast<int>((d32 & kExponentMask) >> kPhysicalSignificandSize);
320
+ return biased_e - kExponentBias;
321
+ }
322
+
323
+ uint32_t Significand() const {
324
+ uint32_t d32 = AsUint32();
325
+ uint32_t significand = d32 & kSignificandMask;
326
+ if (!IsDenormal()) {
327
+ return significand + kHiddenBit;
328
+ } else {
329
+ return significand;
330
+ }
331
+ }
332
+
333
+ // Returns true if the single is a denormal.
334
+ bool IsDenormal() const {
335
+ uint32_t d32 = AsUint32();
336
+ return (d32 & kExponentMask) == 0;
337
+ }
338
+
339
+ // We consider denormals not to be special.
340
+ // Hence only Infinity and NaN are special.
341
+ bool IsSpecial() const {
342
+ uint32_t d32 = AsUint32();
343
+ return (d32 & kExponentMask) == kExponentMask;
344
+ }
345
+
346
+ bool IsNan() const {
347
+ uint32_t d32 = AsUint32();
348
+ return ((d32 & kExponentMask) == kExponentMask) &&
349
+ ((d32 & kSignificandMask) != 0);
350
+ }
351
+
352
+ bool IsQuietNan() const {
353
+ #if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
354
+ return IsNan() && ((AsUint32() & kQuietNanBit) == 0);
355
+ #else
356
+ return IsNan() && ((AsUint32() & kQuietNanBit) != 0);
357
+ #endif
358
+ }
359
+
360
+ bool IsSignalingNan() const {
361
+ #if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
362
+ return IsNan() && ((AsUint32() & kQuietNanBit) != 0);
363
+ #else
364
+ return IsNan() && ((AsUint32() & kQuietNanBit) == 0);
365
+ #endif
366
+ }
367
+
368
+
369
+ bool IsInfinite() const {
370
+ uint32_t d32 = AsUint32();
371
+ return ((d32 & kExponentMask) == kExponentMask) &&
372
+ ((d32 & kSignificandMask) == 0);
373
+ }
374
+
375
+ int Sign() const {
376
+ uint32_t d32 = AsUint32();
377
+ return (d32 & kSignMask) == 0? 1: -1;
378
+ }
379
+
380
+ // Computes the two boundaries of this.
381
+ // The bigger boundary (m_plus) is normalized. The lower boundary has the same
382
+ // exponent as m_plus.
383
+ // Precondition: the value encoded by this Single must be greater than 0.
384
+ void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
385
+ DOUBLE_CONVERSION_ASSERT(value() > 0.0);
386
+ DiyFp v = this->AsDiyFp();
387
+ DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
388
+ DiyFp m_minus;
389
+ if (LowerBoundaryIsCloser()) {
390
+ m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
391
+ } else {
392
+ m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
393
+ }
394
+ m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
395
+ m_minus.set_e(m_plus.e());
396
+ *out_m_plus = m_plus;
397
+ *out_m_minus = m_minus;
398
+ }
399
+
400
+ // Precondition: the value encoded by this Single must be greater or equal
401
+ // to +0.0.
402
+ DiyFp UpperBoundary() const {
403
+ DOUBLE_CONVERSION_ASSERT(Sign() > 0);
404
+ return DiyFp(Significand() * 2 + 1, Exponent() - 1);
405
+ }
406
+
407
+ bool LowerBoundaryIsCloser() const {
408
+ // If the significand is of the form f == 2^p-1 then
409
+ // the lower boundary is closer.
410
+ // Think of v = 1000e10 and v- = 9999e9.
411
+ // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
412
+ // at a distance of 1e8.
413
+ // The only exception is for the smallest normal: the largest denormal is
414
+ // at the same distance as its successor.
415
+ // Note: denormals have the same exponent as the smallest normals.
416
+ bool physical_significand_is_zero = ((AsUint32() & kSignificandMask) == 0);
417
+ return physical_significand_is_zero && (Exponent() != kDenormalExponent);
418
+ }
419
+
420
+ float value() const { return uint32_to_float(d32_); }
421
+
422
+ static float Infinity() {
423
+ return Single(kInfinity).value();
424
+ }
425
+
426
+ static float NaN() {
427
+ return Single(kNaN).value();
428
+ }
429
+
430
+ private:
431
+ static const int kExponentBias = 0x7F + kPhysicalSignificandSize;
432
+ static const int kDenormalExponent = -kExponentBias + 1;
433
+ static const int kMaxExponent = 0xFF - kExponentBias;
434
+ static const uint32_t kInfinity = 0x7F800000;
435
+ #if (defined(__mips__) && !defined(__mips_nan2008)) || defined(__hppa__)
436
+ static const uint32_t kNaN = 0x7FBFFFFF;
437
+ #else
438
+ static const uint32_t kNaN = 0x7FC00000;
439
+ #endif
440
+
441
+ const uint32_t d32_;
442
+
443
+ DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(Single);
444
+ };
445
+
446
+ } // namespace double_conversion
447
+ } // namespace arrow_vendored
448
+
449
+ #endif // DOUBLE_CONVERSION_DOUBLE_H_
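
As a quick illustration of the Double helpers declared above, the following sketch (not part of the commit; it assumes the header is reachable under the same vendored include prefix) inspects the bit pattern of 1.0 and steps to the next representable value:

    #include <cstdio>
    #include "arrow/vendored/double-conversion/ieee.h"

    using arrow_vendored::double_conversion::Double;

    int main() {
      Double d(1.0);
      // 1.0 is a normal number, so IsDenormal() is false and the hidden bit is set
      // in Significand(); Exponent() is reported relative to kExponentBias.
      std::printf("bits=%016llx exponent=%d denormal=%d\n",
                  static_cast<unsigned long long>(d.AsUint64()),
                  d.Exponent(), d.IsDenormal() ? 1 : 0);
      // NextDouble() returns the adjacent representable value, here 1.0 + 2^-52.
      std::printf("next=%.17g\n", d.NextDouble());
      return 0;
    }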
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/string-to-double.h ADDED
@@ -0,0 +1,240 @@
1
+ // Copyright 2012 the V8 project authors. All rights reserved.
2
+ // Redistribution and use in source and binary forms, with or without
3
+ // modification, are permitted provided that the following conditions are
4
+ // met:
5
+ //
6
+ // * Redistributions of source code must retain the above copyright
7
+ // notice, this list of conditions and the following disclaimer.
8
+ // * Redistributions in binary form must reproduce the above
9
+ // copyright notice, this list of conditions and the following
10
+ // disclaimer in the documentation and/or other materials provided
11
+ // with the distribution.
12
+ // * Neither the name of Google Inc. nor the names of its
13
+ // contributors may be used to endorse or promote products derived
14
+ // from this software without specific prior written permission.
15
+ //
16
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+
28
+ #ifndef DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_
29
+ #define DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_
30
+
31
+ #include "utils.h"
32
+
33
+ namespace arrow_vendored {
34
+ namespace double_conversion {
35
+
36
+ class StringToDoubleConverter {
37
+ public:
38
+ // Enumeration for allowing octals and ignoring junk when converting
39
+ // strings to numbers.
40
+ enum Flags {
41
+ NO_FLAGS = 0,
42
+ ALLOW_HEX = 1,
43
+ ALLOW_OCTALS = 2,
44
+ ALLOW_TRAILING_JUNK = 4,
45
+ ALLOW_LEADING_SPACES = 8,
46
+ ALLOW_TRAILING_SPACES = 16,
47
+ ALLOW_SPACES_AFTER_SIGN = 32,
48
+ ALLOW_CASE_INSENSITIVITY = 64,
49
+ ALLOW_CASE_INSENSIBILITY = 64, // Deprecated
50
+ ALLOW_HEX_FLOATS = 128,
51
+ };
52
+
53
+ static const uc16 kNoSeparator = '\0';
54
+
55
+ // Flags should be a bit-or combination of the possible Flags-enum.
56
+ // - NO_FLAGS: no special flags.
57
+ // - ALLOW_HEX: recognizes the prefix "0x". Hex numbers may only be integers.
58
+ // Ex: StringToDouble("0x1234") -> 4660.0
59
+ // In StringToDouble("0x1234.56") the characters ".56" are trailing
60
+ // junk. The result of the call is hence dependent on
61
+ // the ALLOW_TRAILING_JUNK flag and/or the junk value.
62
+ // With this flag "0x" is a junk-string. Even with ALLOW_TRAILING_JUNK,
63
+ // the string will not be parsed as "0" followed by junk.
64
+ //
65
+ // - ALLOW_OCTALS: recognizes the prefix "0" for octals:
66
+ // If a sequence of octal digits starts with '0', then the number is
67
+ // read as octal integer. Octal numbers may only be integers.
68
+ // Ex: StringToDouble("01234") -> 668.0
69
+ // StringToDouble("012349") -> 12349.0 // Not a sequence of octal
70
+ // // digits.
71
+ // In StringToDouble("01234.56") the characters ".56" are trailing
72
+ // junk. The result of the call is hence dependent on
73
+ // the ALLOW_TRAILING_JUNK flag and/or the junk value.
74
+ // In StringToDouble("01234e56") the characters "e56" are trailing
75
+ // junk, too.
76
+ // - ALLOW_TRAILING_JUNK: ignore trailing characters that are not part of
77
+ // a double literal.
78
+ // - ALLOW_LEADING_SPACES: skip over leading whitespace, including spaces,
79
+ // new-lines, and tabs.
80
+ // - ALLOW_TRAILING_SPACES: ignore trailing whitespace.
81
+ // - ALLOW_SPACES_AFTER_SIGN: ignore whitespace after the sign.
82
+ // Ex: StringToDouble("- 123.2") -> -123.2.
83
+ // StringToDouble("+ 123.2") -> 123.2
84
+ // - ALLOW_CASE_INSENSITIVITY: ignore case of characters for special values:
85
+ // infinity and nan.
86
+ // - ALLOW_HEX_FLOATS: allows hexadecimal float literals.
87
+ // This *must* start with "0x" and separate the exponent with "p".
88
+ // Examples: 0x1.2p3 == 9.0
89
+ // 0x10.1p0 == 16.0625
90
+ // ALLOW_HEX and ALLOW_HEX_FLOATS are indented.
91
+ //
92
+ // empty_string_value is returned when an empty string is given as input.
93
+ // If ALLOW_LEADING_SPACES or ALLOW_TRAILING_SPACES are set, then a string
94
+ // containing only spaces is converted to the 'empty_string_value', too.
95
+ //
96
+ // junk_string_value is returned when
97
+ // a) ALLOW_TRAILING_JUNK is not set, and a junk character (a character not
98
+ // part of a double-literal) is found.
99
+ // b) ALLOW_TRAILING_JUNK is set, but the string does not start with a
100
+ // double literal.
101
+ //
102
+ // infinity_symbol and nan_symbol are strings that are used to detect
103
+ // inputs that represent infinity and NaN. They can be null, in which case
104
+ // they are ignored.
105
+ // The conversion routine first reads any possible signs. Then it compares the
106
+ // following character of the input-string with the first character of
107
+ // the infinity, and nan-symbol. If either matches, the function assumes, that
108
+ // a match has been found, and expects the following input characters to match
109
+ // the remaining characters of the special-value symbol.
110
+ // This means that the following restrictions apply to special-value symbols:
111
+ // - they must not start with signs ('+', or '-'),
112
+ // - they must not have the same first character.
113
+ // - they must not start with digits.
114
+ //
115
+ // If the separator character is not kNoSeparator, then that specific
116
+ // character is ignored when in between two valid digits of the significant.
117
+ // It is not allowed to appear in the exponent.
118
+ // It is not allowed to lead or trail the number.
119
+ // It is not allowed to appear twice next to each other.
120
+ //
121
+ // Examples:
122
+ // flags = ALLOW_HEX | ALLOW_TRAILING_JUNK,
123
+ // empty_string_value = 0.0,
124
+ // junk_string_value = NaN,
125
+ // infinity_symbol = "infinity",
126
+ // nan_symbol = "nan":
127
+ // StringToDouble("0x1234") -> 4660.0.
128
+ // StringToDouble("0x1234K") -> 4660.0.
129
+ // StringToDouble("") -> 0.0 // empty_string_value.
130
+ // StringToDouble(" ") -> NaN // junk_string_value.
131
+ // StringToDouble(" 1") -> NaN // junk_string_value.
132
+ // StringToDouble("0x") -> NaN // junk_string_value.
133
+ // StringToDouble("-123.45") -> -123.45.
134
+ // StringToDouble("--123.45") -> NaN // junk_string_value.
135
+ // StringToDouble("123e45") -> 123e45.
136
+ // StringToDouble("123E45") -> 123e45.
137
+ // StringToDouble("123e+45") -> 123e45.
138
+ // StringToDouble("123E-45") -> 123e-45.
139
+ // StringToDouble("123e") -> 123.0 // trailing junk ignored.
140
+ // StringToDouble("123e-") -> 123.0 // trailing junk ignored.
141
+ // StringToDouble("+NaN") -> NaN // NaN string literal.
142
+ // StringToDouble("-infinity") -> -inf. // infinity literal.
143
+ // StringToDouble("Infinity") -> NaN // junk_string_value.
144
+ //
145
+ // flags = ALLOW_OCTAL | ALLOW_LEADING_SPACES,
146
+ // empty_string_value = 0.0,
147
+ // junk_string_value = NaN,
148
+ // infinity_symbol = NULL,
149
+ // nan_symbol = NULL:
150
+ // StringToDouble("0x1234") -> NaN // junk_string_value.
151
+ // StringToDouble("01234") -> 668.0.
152
+ // StringToDouble("") -> 0.0 // empty_string_value.
153
+ // StringToDouble(" ") -> 0.0 // empty_string_value.
154
+ // StringToDouble(" 1") -> 1.0
155
+ // StringToDouble("0x") -> NaN // junk_string_value.
156
+ // StringToDouble("0123e45") -> NaN // junk_string_value.
157
+ // StringToDouble("01239E45") -> 1239e45.
158
+ // StringToDouble("-infinity") -> NaN // junk_string_value.
159
+ // StringToDouble("NaN") -> NaN // junk_string_value.
160
+ //
161
+ // flags = NO_FLAGS,
162
+ // separator = ' ':
163
+ // StringToDouble("1 2 3 4") -> 1234.0
164
+ // StringToDouble("1 2") -> NaN // junk_string_value
165
+ // StringToDouble("1 000 000.0") -> 1000000.0
166
+ // StringToDouble("1.000 000") -> 1.0
167
+ // StringToDouble("1.0e1 000") -> NaN // junk_string_value
168
+ StringToDoubleConverter(int flags,
169
+ double empty_string_value,
170
+ double junk_string_value,
171
+ const char* infinity_symbol,
172
+ const char* nan_symbol,
173
+ uc16 separator = kNoSeparator)
174
+ : flags_(flags),
175
+ empty_string_value_(empty_string_value),
176
+ junk_string_value_(junk_string_value),
177
+ infinity_symbol_(infinity_symbol),
178
+ nan_symbol_(nan_symbol),
179
+ separator_(separator) {
180
+ }
181
+
182
+ // Performs the conversion.
183
+ // The output parameter 'processed_characters_count' is set to the number
184
+ // of characters that have been processed to read the number.
185
+ // Spaces than are processed with ALLOW_{LEADING|TRAILING}_SPACES are included
186
+ // in the 'processed_characters_count'. Trailing junk is never included.
187
+ double StringToDouble(const char* buffer,
188
+ int length,
189
+ int* processed_characters_count) const;
190
+
191
+ // Same as StringToDouble above but for 16 bit characters.
192
+ double StringToDouble(const uc16* buffer,
193
+ int length,
194
+ int* processed_characters_count) const;
195
+
196
+ // Same as StringToDouble but reads a float.
197
+ // Note that this is not equivalent to static_cast<float>(StringToDouble(...))
198
+ // due to potential double-rounding.
199
+ float StringToFloat(const char* buffer,
200
+ int length,
201
+ int* processed_characters_count) const;
202
+
203
+ // Same as StringToFloat above but for 16 bit characters.
204
+ float StringToFloat(const uc16* buffer,
205
+ int length,
206
+ int* processed_characters_count) const;
207
+
208
+ // Same as StringToDouble for T = double, and StringToFloat for T = float.
209
+ template <typename T>
210
+ T StringTo(const char* buffer,
211
+ int length,
212
+ int* processed_characters_count) const;
213
+
214
+ // Same as StringTo above but for 16 bit characters.
215
+ template <typename T>
216
+ T StringTo(const uc16* buffer,
217
+ int length,
218
+ int* processed_characters_count) const;
219
+
220
+ private:
221
+ const int flags_;
222
+ const double empty_string_value_;
223
+ const double junk_string_value_;
224
+ const char* const infinity_symbol_;
225
+ const char* const nan_symbol_;
226
+ const uc16 separator_;
227
+
228
+ template <class Iterator>
229
+ double StringToIeee(Iterator start_pointer,
230
+ int length,
231
+ bool read_as_double,
232
+ int* processed_characters_count) const;
233
+
234
+ DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter);
235
+ };
236
+
237
+ } // namespace double_conversion
238
+ } // namespace arrow_vendored
239
+
240
+ #endif // DOUBLE_CONVERSION_STRING_TO_DOUBLE_H_
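
To make the flag descriptions and example tables above concrete, here is a hedged usage sketch (again assuming the vendored include prefix); it mirrors the "0x1234K" example from the comments:

    #include <cmath>
    #include <cstdio>
    #include <cstring>
    #include "arrow/vendored/double-conversion/string-to-double.h"

    using arrow_vendored::double_conversion::StringToDoubleConverter;

    int main() {
      // Accept hex integers and tolerate trailing junk; empty input -> 0.0, junk -> NaN.
      StringToDoubleConverter converter(
          StringToDoubleConverter::ALLOW_HEX | StringToDoubleConverter::ALLOW_TRAILING_JUNK,
          /*empty_string_value=*/0.0, /*junk_string_value=*/std::nan(""),
          "infinity", "nan");
      const char* input = "0x1234K";
      int processed = 0;
      double value = converter.StringToDouble(
          input, static_cast<int>(std::strlen(input)), &processed);
      // Per the examples above this should yield 4660.0, with the trailing 'K'
      // not counted in 'processed'.
      std::printf("value=%g processed=%d\n", value, processed);
      return 0;
    }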
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/strtod.h ADDED
@@ -0,0 +1,66 @@
1
+ // Copyright 2010 the V8 project authors. All rights reserved.
2
+ // Redistribution and use in source and binary forms, with or without
3
+ // modification, are permitted provided that the following conditions are
4
+ // met:
5
+ //
6
+ // * Redistributions of source code must retain the above copyright
7
+ // notice, this list of conditions and the following disclaimer.
8
+ // * Redistributions in binary form must reproduce the above
9
+ // copyright notice, this list of conditions and the following
10
+ // disclaimer in the documentation and/or other materials provided
11
+ // with the distribution.
12
+ // * Neither the name of Google Inc. nor the names of its
13
+ // contributors may be used to endorse or promote products derived
14
+ // from this software without specific prior written permission.
15
+ //
16
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+
28
+ #ifndef DOUBLE_CONVERSION_STRTOD_H_
29
+ #define DOUBLE_CONVERSION_STRTOD_H_
30
+
31
+ #include "utils.h"
32
+
33
+ namespace arrow_vendored {
34
+ namespace double_conversion {
35
+
36
+ // The buffer must only contain digits in the range [0-9]. It must not
37
+ // contain a dot or a sign. It must not start with '0', and must not be empty.
38
+ double Strtod(Vector<const char> buffer, int exponent);
39
+
40
+ // The buffer must only contain digits in the range [0-9]. It must not
41
+ // contain a dot or a sign. It must not start with '0', and must not be empty.
42
+ float Strtof(Vector<const char> buffer, int exponent);
43
+
44
+ // Same as Strtod, but assumes that 'trimmed' is already trimmed, as if run
45
+ // through TrimAndCut. That is, 'trimmed' must have no leading or trailing
46
+ // zeros, must not be a lone zero, and must not have 'too many' digits.
47
+ double StrtodTrimmed(Vector<const char> trimmed, int exponent);
48
+
49
+ // Same as Strtof, but assumes that 'trimmed' is already trimmed, as if run
50
+ // through TrimAndCut. That is, 'trimmed' must have no leading or trailing
51
+ // zeros, must not be a lone zero, and must not have 'too many' digits.
52
+ float StrtofTrimmed(Vector<const char> trimmed, int exponent);
53
+
54
+ inline Vector<const char> TrimTrailingZeros(Vector<const char> buffer) {
55
+ for (int i = buffer.length() - 1; i >= 0; --i) {
56
+ if (buffer[i] != '0') {
57
+ return buffer.SubVector(0, i + 1);
58
+ }
59
+ }
60
+ return Vector<const char>(buffer.start(), 0);
61
+ }
62
+
63
+ } // namespace double_conversion
64
+ } // namespace arrow_vendored
65
+
66
+ #endif // DOUBLE_CONVERSION_STRTOD_H_
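
The buffer contract for Strtod()/Strtof() above (decimal digits only, no sign, no dot, no leading zero, non-empty) is easy to get wrong, so a minimal sketch may help; the Vector type comes from the vendored utils.h shown further below:

    #include <cstdio>
    #include "arrow/vendored/double-conversion/strtod.h"

    using arrow_vendored::double_conversion::Strtod;
    using arrow_vendored::double_conversion::Vector;

    int main() {
      // Parses 89255e-22, the constant discussed in the utils.h FPU-correctness note.
      const char digits[] = "89255";
      double v = Strtod(Vector<const char>(digits, 5), -22);
      std::printf("%.17g\n", v);
      return 0;
    }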
llmeval-env/lib/python3.10/site-packages/pyarrow/include/arrow/vendored/double-conversion/utils.h ADDED
@@ -0,0 +1,420 @@
1
+ // Copyright 2010 the V8 project authors. All rights reserved.
2
+ // Redistribution and use in source and binary forms, with or without
3
+ // modification, are permitted provided that the following conditions are
4
+ // met:
5
+ //
6
+ // * Redistributions of source code must retain the above copyright
7
+ // notice, this list of conditions and the following disclaimer.
8
+ // * Redistributions in binary form must reproduce the above
9
+ // copyright notice, this list of conditions and the following
10
+ // disclaimer in the documentation and/or other materials provided
11
+ // with the distribution.
12
+ // * Neither the name of Google Inc. nor the names of its
13
+ // contributors may be used to endorse or promote products derived
14
+ // from this software without specific prior written permission.
15
+ //
16
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+
28
+ #ifndef DOUBLE_CONVERSION_UTILS_H_
29
+ #define DOUBLE_CONVERSION_UTILS_H_
30
+
31
+ // Use DOUBLE_CONVERSION_NON_PREFIXED_MACROS to get unprefixed macros as was
32
+ // the case in double-conversion releases prior to 3.1.6
33
+
34
+ #include <cstdlib>
35
+ #include <cstring>
36
+
37
+ // For pre-C++11 compatibility
38
+ #if __cplusplus >= 201103L
39
+ #define DOUBLE_CONVERSION_NULLPTR nullptr
40
+ #else
41
+ #define DOUBLE_CONVERSION_NULLPTR NULL
42
+ #endif
43
+
44
+ #include <cassert>
45
+ #ifndef DOUBLE_CONVERSION_ASSERT
46
+ #define DOUBLE_CONVERSION_ASSERT(condition) \
47
+ assert(condition)
48
+ #endif
49
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(ASSERT)
50
+ #define ASSERT DOUBLE_CONVERSION_ASSERT
51
+ #endif
52
+
53
+ #ifndef DOUBLE_CONVERSION_UNIMPLEMENTED
54
+ #define DOUBLE_CONVERSION_UNIMPLEMENTED() (abort())
55
+ #endif
56
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNIMPLEMENTED)
57
+ #define UNIMPLEMENTED DOUBLE_CONVERSION_UNIMPLEMENTED
58
+ #endif
59
+
60
+ #ifndef DOUBLE_CONVERSION_NO_RETURN
61
+ #ifdef _MSC_VER
62
+ #define DOUBLE_CONVERSION_NO_RETURN __declspec(noreturn)
63
+ #else
64
+ #define DOUBLE_CONVERSION_NO_RETURN __attribute__((noreturn))
65
+ #endif
66
+ #endif
67
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(NO_RETURN)
68
+ #define NO_RETURN DOUBLE_CONVERSION_NO_RETURN
69
+ #endif
70
+
71
+ #ifndef DOUBLE_CONVERSION_UNREACHABLE
72
+ #ifdef _MSC_VER
73
+ void DOUBLE_CONVERSION_NO_RETURN abort_noreturn();
74
+ inline void abort_noreturn() { abort(); }
75
+ #define DOUBLE_CONVERSION_UNREACHABLE() (abort_noreturn())
76
+ #else
77
+ #define DOUBLE_CONVERSION_UNREACHABLE() (abort())
78
+ #endif
79
+ #endif
80
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNREACHABLE)
81
+ #define UNREACHABLE DOUBLE_CONVERSION_UNREACHABLE
82
+ #endif
83
+
84
+ // Not all compilers support __has_attribute and combining a check for both
85
+ // ifdef and __has_attribute on the same preprocessor line isn't portable.
86
+ #ifdef __has_attribute
87
+ # define DOUBLE_CONVERSION_HAS_ATTRIBUTE(x) __has_attribute(x)
88
+ #else
89
+ # define DOUBLE_CONVERSION_HAS_ATTRIBUTE(x) 0
90
+ #endif
91
+
92
+ #ifndef DOUBLE_CONVERSION_UNUSED
93
+ #if DOUBLE_CONVERSION_HAS_ATTRIBUTE(unused)
94
+ #define DOUBLE_CONVERSION_UNUSED __attribute__((unused))
95
+ #else
96
+ #define DOUBLE_CONVERSION_UNUSED
97
+ #endif
98
+ #endif
99
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UNUSED)
100
+ #define UNUSED DOUBLE_CONVERSION_UNUSED
101
+ #endif
102
+
103
+ #if DOUBLE_CONVERSION_HAS_ATTRIBUTE(uninitialized)
104
+ #define DOUBLE_CONVERSION_STACK_UNINITIALIZED __attribute__((uninitialized))
105
+ #else
106
+ #define DOUBLE_CONVERSION_STACK_UNINITIALIZED
107
+ #endif
108
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(STACK_UNINITIALIZED)
109
+ #define STACK_UNINITIALIZED DOUBLE_CONVERSION_STACK_UNINITIALIZED
110
+ #endif
111
+
112
+ // Double operations detection based on target architecture.
113
+ // Linux uses a 80bit wide floating point stack on x86. This induces double
114
+ // rounding, which in turn leads to wrong results.
115
+ // An easy way to test if the floating-point operations are correct is to
116
+ // evaluate: 89255.0/1e22. If the floating-point stack is 64 bits wide then
117
+ // the result is equal to 89255e-22.
118
+ // The best way to test this, is to create a division-function and to compare
119
+ // the output of the division with the expected result. (Inlining must be
120
+ // disabled.)
121
+ // On Linux,x86 89255e-22 != Div_double(89255.0/1e22)
122
+ //
123
+ // For example:
124
+ /*
125
+ // -- in div.c
126
+ double Div_double(double x, double y) { return x / y; }
127
+
128
+ // -- in main.c
129
+ double Div_double(double x, double y); // Forward declaration.
130
+
131
+ int main(int argc, char** argv) {
132
+ return Div_double(89255.0, 1e22) == 89255e-22;
133
+ }
134
+ */
135
+ // Run as follows ./main || echo "correct"
136
+ //
137
+ // If it prints "correct" then the architecture should be here, in the "correct" section.
138
+ #if defined(_M_X64) || defined(__x86_64__) || \
139
+ defined(__ARMEL__) || defined(__avr32__) || defined(_M_ARM) || defined(_M_ARM64) || \
140
+ defined(__hppa__) || defined(__ia64__) || \
141
+ defined(__mips__) || \
142
+ defined(__loongarch__) || \
143
+ defined(__nios2__) || defined(__ghs) || \
144
+ defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \
145
+ defined(_POWER) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
146
+ defined(__sparc__) || defined(__sparc) || defined(__s390__) || \
147
+ defined(__SH4__) || defined(__alpha__) || \
148
+ defined(_MIPS_ARCH_MIPS32R2) || defined(__ARMEB__) ||\
149
+ defined(__AARCH64EL__) || defined(__aarch64__) || defined(__AARCH64EB__) || \
150
+ defined(__riscv) || defined(__e2k__) || \
151
+ defined(__or1k__) || defined(__arc__) || defined(__ARC64__) || \
152
+ defined(__microblaze__) || defined(__XTENSA__) || \
153
+ defined(__EMSCRIPTEN__) || defined(__wasm32__)
154
+ #define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
155
+ #elif defined(__mc68000__) || \
156
+ defined(__pnacl__) || defined(__native_client__)
157
+ #undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
158
+ #elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
159
+ #if defined(_WIN32)
160
+ // Windows uses a 64bit wide floating point stack.
161
+ #define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
162
+ #else
163
+ #undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
164
+ #endif // _WIN32
165
+ #else
166
+ #error Target architecture was not detected as supported by Double-Conversion.
167
+ #endif
168
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(CORRECT_DOUBLE_OPERATIONS)
169
+ #define CORRECT_DOUBLE_OPERATIONS DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
170
+ #endif
171
+
172
+ #if defined(_WIN32) && !defined(__MINGW32__)
173
+
174
+ typedef signed char int8_t;
175
+ typedef unsigned char uint8_t;
176
+ typedef short int16_t; // NOLINT
177
+ typedef unsigned short uint16_t; // NOLINT
178
+ typedef int int32_t;
179
+ typedef unsigned int uint32_t;
180
+ typedef __int64 int64_t;
181
+ typedef unsigned __int64 uint64_t;
182
+ // intptr_t and friends are defined in crtdefs.h through stdio.h.
183
+
184
+ #else
185
+
186
+ #include <stdint.h>
187
+
188
+ #endif
189
+
190
+ typedef uint16_t uc16;
191
+
192
+ // The following macro works on both 32 and 64-bit platforms.
193
+ // Usage: instead of writing 0x1234567890123456
194
+ // write DOUBLE_CONVERSION_UINT64_2PART_C(0x12345678,90123456);
195
+ #define DOUBLE_CONVERSION_UINT64_2PART_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
196
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(UINT64_2PART_C)
197
+ #define UINT64_2PART_C DOUBLE_CONVERSION_UINT64_2PART_C
198
+ #endif
199
+
200
+ // The expression DOUBLE_CONVERSION_ARRAY_SIZE(a) is a compile-time constant of type
201
+ // size_t which represents the number of elements of the given
202
+ // array. You should only use DOUBLE_CONVERSION_ARRAY_SIZE on statically allocated
203
+ // arrays.
204
+ #ifndef DOUBLE_CONVERSION_ARRAY_SIZE
205
+ #define DOUBLE_CONVERSION_ARRAY_SIZE(a) \
206
+ ((sizeof(a) / sizeof(*(a))) / \
207
+ static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
208
+ #endif
209
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(ARRAY_SIZE)
210
+ #define ARRAY_SIZE DOUBLE_CONVERSION_ARRAY_SIZE
211
+ #endif
212
+
213
+ // A macro to disallow the evil copy constructor and operator= functions
214
+ // This should be used in the private: declarations for a class
215
+ #ifndef DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN
216
+ #define DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(TypeName) \
217
+ TypeName(const TypeName&); \
218
+ void operator=(const TypeName&)
219
+ #endif
220
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(DC_DISALLOW_COPY_AND_ASSIGN)
221
+ #define DC_DISALLOW_COPY_AND_ASSIGN DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN
222
+ #endif
223
+
224
+ // A macro to disallow all the implicit constructors, namely the
225
+ // default constructor, copy constructor and operator= functions.
226
+ //
227
+ // This should be used in the private: declarations for a class
228
+ // that wants to prevent anyone from instantiating it. This is
229
+ // especially useful for classes containing only static methods.
230
+ #ifndef DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS
231
+ #define DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
232
+ TypeName(); \
233
+ DOUBLE_CONVERSION_DISALLOW_COPY_AND_ASSIGN(TypeName)
234
+ #endif
235
+ #if defined(DOUBLE_CONVERSION_NON_PREFIXED_MACROS) && !defined(DC_DISALLOW_IMPLICIT_CONSTRUCTORS)
236
+ #define DC_DISALLOW_IMPLICIT_CONSTRUCTORS DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS
237
+ #endif
238
+
239
+ namespace arrow_vendored {
240
+ namespace double_conversion {
241
+
242
+ inline int StrLength(const char* string) {
243
+ size_t length = strlen(string);
244
+ DOUBLE_CONVERSION_ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
245
+ return static_cast<int>(length);
246
+ }
247
+
248
+ // This is a simplified version of V8's Vector class.
249
+ template <typename T>
250
+ class Vector {
251
+ public:
252
+ Vector() : start_(DOUBLE_CONVERSION_NULLPTR), length_(0) {}
253
+ Vector(T* data, int len) : start_(data), length_(len) {
254
+ DOUBLE_CONVERSION_ASSERT(len == 0 || (len > 0 && data != DOUBLE_CONVERSION_NULLPTR));
255
+ }
256
+
257
+ // Returns a vector using the same backing storage as this one,
258
+ // spanning from and including 'from', to but not including 'to'.
259
+ Vector<T> SubVector(int from, int to) {
260
+ DOUBLE_CONVERSION_ASSERT(to <= length_);
261
+ DOUBLE_CONVERSION_ASSERT(from < to);
262
+ DOUBLE_CONVERSION_ASSERT(0 <= from);
263
+ return Vector<T>(start() + from, to - from);
264
+ }
265
+
266
+ // Returns the length of the vector.
267
+ int length() const { return length_; }
268
+
269
+ // Returns whether or not the vector is empty.
270
+ bool is_empty() const { return length_ == 0; }
271
+
272
+ // Returns the pointer to the start of the data in the vector.
273
+ T* start() const { return start_; }
274
+
275
+ // Access individual vector elements - checks bounds in debug mode.
276
+ T& operator[](int index) const {
277
+ DOUBLE_CONVERSION_ASSERT(0 <= index && index < length_);
278
+ return start_[index];
279
+ }
280
+
281
+ T& first() { return start_[0]; }
282
+
283
+ T& last() { return start_[length_ - 1]; }
284
+
285
+ void pop_back() {
286
+ DOUBLE_CONVERSION_ASSERT(!is_empty());
287
+ --length_;
288
+ }
289
+
290
+ private:
291
+ T* start_;
292
+ int length_;
293
+ };
294
+
295
+
296
+ // Helper class for building result strings in a character buffer. The
297
+ // purpose of the class is to use safe operations that checks the
298
+ // buffer bounds on all operations in debug mode.
299
+ class StringBuilder {
300
+ public:
301
+ StringBuilder(char* buffer, int buffer_size)
302
+ : buffer_(buffer, buffer_size), position_(0) { }
303
+
304
+ ~StringBuilder() { if (!is_finalized()) Finalize(); }
305
+
306
+ int size() const { return buffer_.length(); }
307
+
308
+ // Get the current position in the builder.
309
+ int position() const {
310
+ DOUBLE_CONVERSION_ASSERT(!is_finalized());
311
+ return position_;
312
+ }
313
+
314
+ // Reset the position.
315
+ void Reset() { position_ = 0; }
316
+
317
+ // Add a single character to the builder. It is not allowed to add
318
+ // 0-characters; use the Finalize() method to terminate the string
319
+ // instead.
320
+ void AddCharacter(char c) {
321
+ DOUBLE_CONVERSION_ASSERT(c != '\0');
322
+ DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ < buffer_.length());
323
+ buffer_[position_++] = c;
324
+ }
325
+
326
+ // Add an entire string to the builder. Uses strlen() internally to
327
+ // compute the length of the input string.
328
+ void AddString(const char* s) {
329
+ AddSubstring(s, StrLength(s));
330
+ }
331
+
332
+ // Add the first 'n' characters of the given string 's' to the
333
+ // builder. The input string must have enough characters.
334
+ void AddSubstring(const char* s, int n) {
335
+ DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ + n < buffer_.length());
336
+ DOUBLE_CONVERSION_ASSERT(static_cast<size_t>(n) <= strlen(s));
337
+ memmove(&buffer_[position_], s, static_cast<size_t>(n));
338
+ position_ += n;
339
+ }
340
+
341
+
342
+ // Add character padding to the builder. If count is non-positive,
343
+ // nothing is added to the builder.
344
+ void AddPadding(char c, int count) {
345
+ for (int i = 0; i < count; i++) {
346
+ AddCharacter(c);
347
+ }
348
+ }
349
+
350
+ // Finalize the string by 0-terminating it and returning the buffer.
351
+ char* Finalize() {
352
+ DOUBLE_CONVERSION_ASSERT(!is_finalized() && position_ < buffer_.length());
353
+ buffer_[position_] = '\0';
354
+ // Make sure nobody managed to add a 0-character to the
355
+ // buffer while building the string.
356
+ DOUBLE_CONVERSION_ASSERT(strlen(buffer_.start()) == static_cast<size_t>(position_));
357
+ position_ = -1;
358
+ DOUBLE_CONVERSION_ASSERT(is_finalized());
359
+ return buffer_.start();
360
+ }
361
+
362
+ private:
363
+ Vector<char> buffer_;
364
+ int position_;
365
+
366
+ bool is_finalized() const { return position_ < 0; }
367
+
368
+ DOUBLE_CONVERSION_DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
369
+ };
370
+
371
+ // The type-based aliasing rule allows the compiler to assume that pointers of
372
+ // different types (for some definition of different) never alias each other.
373
+ // Thus the following code does not work:
374
+ //
375
+ // float f = foo();
376
+ // int fbits = *(int*)(&f);
377
+ //
378
+ // The compiler 'knows' that the int pointer can't refer to f since the types
379
+ // don't match, so the compiler may cache f in a register, leaving random data
380
+ // in fbits. Using C++ style casts makes no difference, however a pointer to
381
+ // char data is assumed to alias any other pointer. This is the 'memcpy
382
+ // exception'.
383
+ //
384
+ // Bit_cast uses the memcpy exception to move the bits from a variable of one
385
+ // type of a variable of another type. Of course the end result is likely to
386
+ // be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
387
+ // will completely optimize BitCast away.
388
+ //
389
+ // There is an additional use for BitCast.
390
+ // Recent gccs will warn when they see casts that may result in breakage due to
391
+ // the type-based aliasing rule. If you have checked that there is no breakage
392
+ // you can use BitCast to cast one pointer type to another. This confuses gcc
393
+ // enough that it can no longer see that you have cast one pointer type to
394
+ // another thus avoiding the warning.
395
+ template <class Dest, class Source>
396
+ Dest BitCast(const Source& source) {
397
+ // Compile time assertion: sizeof(Dest) == sizeof(Source)
398
+ // A compile error here means your Dest and Source have different sizes.
399
+ #if __cplusplus >= 201103L
400
+ static_assert(sizeof(Dest) == sizeof(Source),
401
+ "source and destination size mismatch");
402
+ #else
403
+ DOUBLE_CONVERSION_UNUSED
404
+ typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
405
+ #endif
406
+
407
+ Dest dest;
408
+ memmove(&dest, &source, sizeof(dest));
409
+ return dest;
410
+ }
411
+
412
+ template <class Dest, class Source>
413
+ Dest BitCast(Source* source) {
414
+ return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
415
+ }
416
+
417
+ } // namespace double_conversion
418
+ } // namespace arrow_vendored
419
+
420
+ #endif // DOUBLE_CONVERSION_UTILS_H_
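
Two of the utilities above, StringBuilder and BitCast, are used throughout the other vendored headers; a small sketch of both (assuming the same include prefix, not part of the commit):

    #include <cstdio>
    #include "arrow/vendored/double-conversion/utils.h"

    using arrow_vendored::double_conversion::BitCast;
    using arrow_vendored::double_conversion::StringBuilder;

    int main() {
      // StringBuilder writes into caller-provided storage and 0-terminates on Finalize().
      char storage[32];
      StringBuilder builder(storage, static_cast<int>(sizeof(storage)));
      builder.AddString("3");
      builder.AddCharacter('.');
      builder.AddPadding('1', 4);
      std::printf("%s\n", builder.Finalize());  // "3.1111"

      // BitCast moves the bits of a double into a same-sized integer without
      // violating the strict-aliasing rule (it goes through memmove internally).
      uint64_t bits = BitCast<uint64_t>(1.0);
      std::printf("%016llx\n", static_cast<unsigned long long>(bits));  // 3ff0000000000000
      return 0;
    }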
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/io.h ADDED
@@ -0,0 +1,20 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "parquet/exception.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/reader.h ADDED
@@ -0,0 +1,35 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ // Column reader API
21
+ #include "parquet/column_reader.h"
22
+ #include "parquet/column_scanner.h"
23
+ #include "parquet/exception.h"
24
+ #include "parquet/file_reader.h"
25
+ #include "parquet/metadata.h"
26
+ #include "parquet/platform.h"
27
+ #include "parquet/printer.h"
28
+ #include "parquet/properties.h"
29
+ #include "parquet/statistics.h"
30
+
31
+ // Schemas
32
+ #include "parquet/api/schema.h"
33
+
34
+ // IO
35
+ #include "parquet/api/io.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/schema.h ADDED
@@ -0,0 +1,21 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ // Schemas
21
+ #include "parquet/schema.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/api/writer.h ADDED
@@ -0,0 +1,25 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "parquet/api/io.h"
21
+ #include "parquet/api/schema.h"
22
+ #include "parquet/column_writer.h"
23
+ #include "parquet/exception.h"
24
+ #include "parquet/file_writer.h"
25
+ #include "parquet/statistics.h"
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/reader.h ADDED
@@ -0,0 +1,379 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ // N.B. we don't include async_generator.h as it's relatively heavy
22
+ #include <functional>
23
+ #include <memory>
24
+ #include <vector>
25
+
26
+ #include "parquet/file_reader.h"
27
+ #include "parquet/platform.h"
28
+ #include "parquet/properties.h"
29
+
30
+ namespace arrow {
31
+
32
+ class ChunkedArray;
33
+ class KeyValueMetadata;
34
+ class RecordBatchReader;
35
+ struct Scalar;
36
+ class Schema;
37
+ class Table;
38
+ class RecordBatch;
39
+
40
+ } // namespace arrow
41
+
42
+ namespace parquet {
43
+
44
+ class FileMetaData;
45
+ class SchemaDescriptor;
46
+
47
+ namespace arrow {
48
+
49
+ class ColumnChunkReader;
50
+ class ColumnReader;
51
+ struct SchemaManifest;
52
+ class RowGroupReader;
53
+
54
+ /// \brief Arrow read adapter class for deserializing Parquet files as Arrow row batches.
55
+ ///
56
+ /// This interface caters to different use cases and thus provides different
57
+ /// interfaces. In its most simplistic form, we cater to a user that wants to
58
+ /// read the whole Parquet at once with the `FileReader::ReadTable` method.
59
+ ///
60
+ /// More advanced users who also want to implement parallelism on top of each
61
+ /// single Parquet file should do this at the RowGroup level. For this, they can
62
+ /// call `FileReader::RowGroup(i)->ReadTable` to receive only the specified
63
+ /// RowGroup as a table.
64
+ ///
65
+ /// In the most advanced situation, where a consumer wants to independently read
66
+ /// RowGroups in parallel and consume each column individually, they can call
67
+ /// `FileReader::RowGroup(i)->Column(j)->Read` and receive an `arrow::Column`
68
+ /// instance.
69
+ ///
70
+ /// Finally, one can also get a stream of record batches using
71
+ /// `FileReader::GetRecordBatchReader()`. This can internally decode columns
72
+ /// in parallel if use_threads was enabled in the ArrowReaderProperties.
73
+ ///
74
+ /// The parquet format supports an optional integer field_id which can be assigned
75
+ /// to a field. Arrow will convert these field IDs to a metadata key named
76
+ /// PARQUET:field_id on the appropriate field.
77
+ // TODO(wesm): nested data does not always make sense with this user
78
+ // interface unless you are only reading a single leaf node from a branch of
79
+ // a table. For example:
80
+ //
81
+ // repeated group data {
82
+ // optional group record {
83
+ // optional int32 val1;
84
+ // optional byte_array val2;
85
+ // optional bool val3;
86
+ // }
87
+ // optional int32 val4;
88
+ // }
89
+ //
90
+ // In the Parquet file, there are 4 leaf nodes:
91
+ //
92
+ // * data.record.val1
93
+ // * data.record.val2
94
+ // * data.record.val3
95
+ // * data.val4
96
+ //
97
+ // When materializing this data in an Arrow array, we would have:
98
+ //
99
+ // data: list<struct<
100
+ // record: struct<
101
+ // val1: int32,
102
+ // val2: string (= list<uint8>),
103
+ // val3: bool,
104
+ // >,
105
+ // val4: int32
106
+ // >>
107
+ //
108
+ // However, in the Parquet format, each leaf node has its own repetition and
109
+ // definition levels describing the structure of the intermediate nodes in
110
+ // this array structure. Thus, we will need to scan the leaf data for a group
111
+ // of leaf nodes part of the same type tree to create a single result Arrow
112
+ // nested array structure.
113
+ //
114
+ // This is additionally complicated by "chunky" repeated fields or very large byte
115
+ // arrays
116
+ class PARQUET_EXPORT FileReader {
117
+ public:
118
+ /// Factory function to create a FileReader from a ParquetFileReader and properties
119
+ static ::arrow::Status Make(::arrow::MemoryPool* pool,
120
+ std::unique_ptr<ParquetFileReader> reader,
121
+ const ArrowReaderProperties& properties,
122
+ std::unique_ptr<FileReader>* out);
123
+
124
+ /// Factory function to create a FileReader from a ParquetFileReader
125
+ static ::arrow::Status Make(::arrow::MemoryPool* pool,
126
+ std::unique_ptr<ParquetFileReader> reader,
127
+ std::unique_ptr<FileReader>* out);
128
+
129
+ // Since the distribution of columns amongst a Parquet file's row groups may
130
+ // be uneven (the number of values in each column chunk can be different), we
131
+ // provide a column-oriented read interface. The ColumnReader hides the
132
+ // details of paging through the file's row groups and yielding
133
+ // fully-materialized arrow::Array instances
134
+ //
135
+ // Returns error status if the column of interest is not flat.
136
+ // The indicated column index is relative to the schema
137
+ virtual ::arrow::Status GetColumn(int i, std::unique_ptr<ColumnReader>* out) = 0;
138
+
139
+ /// \brief Return arrow schema for all the columns.
140
+ virtual ::arrow::Status GetSchema(std::shared_ptr<::arrow::Schema>* out) = 0;
141
+
142
+ /// \brief Read column as a whole into a chunked array.
143
+ ///
144
+ /// The index i refers to the index of the top level schema field, which may
145
+ /// be nested or flat - e.g.
146
+ ///
147
+ /// 0 foo.bar
148
+ /// foo.bar.baz
149
+ /// foo.qux
150
+ /// 1 foo2
151
+ /// 2 foo3
152
+ ///
153
+ /// i=0 will read the entire foo struct, i=1 the foo2 primitive column etc
154
+ virtual ::arrow::Status ReadColumn(int i,
155
+ std::shared_ptr<::arrow::ChunkedArray>* out) = 0;
156
+
157
+ /// \brief Return a RecordBatchReader of all row groups and columns.
158
+ virtual ::arrow::Status GetRecordBatchReader(
159
+ std::unique_ptr<::arrow::RecordBatchReader>* out) = 0;
160
+
161
+ /// \brief Return a RecordBatchReader of row groups selected from row_group_indices.
162
+ ///
163
+ /// Note that the ordering in row_group_indices matters. FileReaders must outlive
164
+ /// their RecordBatchReaders.
165
+ ///
166
+ /// \returns error Status if row_group_indices contains an invalid index
167
+ virtual ::arrow::Status GetRecordBatchReader(
168
+ const std::vector<int>& row_group_indices,
169
+ std::unique_ptr<::arrow::RecordBatchReader>* out) = 0;
170
+
171
+ /// \brief Return a RecordBatchReader of row groups selected from
172
+ /// row_group_indices, whose columns are selected by column_indices.
173
+ ///
174
+ /// Note that the ordering in row_group_indices and column_indices
175
+ /// matters. FileReaders must outlive their RecordBatchReaders.
176
+ ///
177
+ /// \returns error Status if either row_group_indices or column_indices
178
+ /// contains an invalid index
179
+ virtual ::arrow::Status GetRecordBatchReader(
180
+ const std::vector<int>& row_group_indices, const std::vector<int>& column_indices,
181
+ std::unique_ptr<::arrow::RecordBatchReader>* out) = 0;
182
+
183
+ /// \brief Return a RecordBatchReader of row groups selected from
184
+ /// row_group_indices, whose columns are selected by column_indices.
185
+ ///
186
+ /// Note that the ordering in row_group_indices and column_indices
187
+ /// matters. FileReaders must outlive their RecordBatchReaders.
188
+ ///
189
+ /// \param row_group_indices which row groups to read (order determines read order).
190
+ /// \param column_indices which columns to read (order determines output schema).
191
+ /// \param[out] out record batch stream from parquet data.
192
+ ///
193
+ /// \returns error Status if either row_group_indices or column_indices
194
+ /// contains an invalid index
195
+ ::arrow::Status GetRecordBatchReader(const std::vector<int>& row_group_indices,
196
+ const std::vector<int>& column_indices,
197
+ std::shared_ptr<::arrow::RecordBatchReader>* out);
198
+ ::arrow::Status GetRecordBatchReader(const std::vector<int>& row_group_indices,
199
+ std::shared_ptr<::arrow::RecordBatchReader>* out);
200
+ ::arrow::Status GetRecordBatchReader(std::shared_ptr<::arrow::RecordBatchReader>* out);
201
+
202
+ /// \brief Return a generator of record batches.
203
+ ///
204
+ /// The FileReader must outlive the generator, so this requires that you pass in a
205
+ /// shared_ptr.
206
+ ///
207
+ /// \returns error Result if either row_group_indices or column_indices contains an
208
+ /// invalid index
209
+ virtual ::arrow::Result<
210
+ std::function<::arrow::Future<std::shared_ptr<::arrow::RecordBatch>>()>>
211
+ GetRecordBatchGenerator(std::shared_ptr<FileReader> reader,
212
+ const std::vector<int> row_group_indices,
213
+ const std::vector<int> column_indices,
214
+ ::arrow::internal::Executor* cpu_executor = NULLPTR,
215
+ int64_t rows_to_readahead = 0) = 0;
216
+
217
+ /// Read all columns into a Table
218
+ virtual ::arrow::Status ReadTable(std::shared_ptr<::arrow::Table>* out) = 0;
219
+
220
+ /// \brief Read the given columns into a Table
221
+ ///
222
+ /// The indicated column indices are relative to the internal representation
223
+ /// of the parquet table. For instance :
224
+ /// 0 foo.bar
225
+ /// foo.bar.baz 0
226
+ /// foo.bar.baz2 1
227
+ /// foo.qux 2
228
+ /// 1 foo2 3
229
+ /// 2 foo3 4
230
+ ///
231
+ /// i=0 will read foo.bar.baz, i=1 will read only foo.bar.baz2 and so on.
232
+ /// Only leaf fields have indices; foo itself doesn't have an index.
233
+ /// To get the index for a particular leaf field, one can use
234
+ /// manifest().schema_fields to get the top level fields, and then walk the
235
+ /// tree to identify the relevant leaf fields and access their column_index.
236
+ /// To get the total number of leaf fields, use FileMetadata.num_columns().
237
+ virtual ::arrow::Status ReadTable(const std::vector<int>& column_indices,
238
+ std::shared_ptr<::arrow::Table>* out) = 0;
239
+
240
+ virtual ::arrow::Status ReadRowGroup(int i, const std::vector<int>& column_indices,
241
+ std::shared_ptr<::arrow::Table>* out) = 0;
242
+
243
+ virtual ::arrow::Status ReadRowGroup(int i, std::shared_ptr<::arrow::Table>* out) = 0;
244
+
245
+ virtual ::arrow::Status ReadRowGroups(const std::vector<int>& row_groups,
246
+ const std::vector<int>& column_indices,
247
+ std::shared_ptr<::arrow::Table>* out) = 0;
248
+
249
+ virtual ::arrow::Status ReadRowGroups(const std::vector<int>& row_groups,
250
+ std::shared_ptr<::arrow::Table>* out) = 0;
251
+
252
+ /// \brief Scan file contents with one thread, return number of rows
253
+ virtual ::arrow::Status ScanContents(std::vector<int> columns,
254
+ const int32_t column_batch_size,
255
+ int64_t* num_rows) = 0;
256
+
257
+ /// \brief Return a reader for the RowGroup, this object must not outlive the
258
+ /// FileReader.
259
+ virtual std::shared_ptr<RowGroupReader> RowGroup(int row_group_index) = 0;
260
+
261
+ /// \brief The number of row groups in the file
262
+ virtual int num_row_groups() const = 0;
263
+
264
+ virtual ParquetFileReader* parquet_reader() const = 0;
265
+
266
+ /// Set whether to use multiple threads during reads of multiple columns.
267
+ /// By default only one thread is used.
268
+ virtual void set_use_threads(bool use_threads) = 0;
269
+
270
+ /// Set number of records to read per batch for the RecordBatchReader.
271
+ virtual void set_batch_size(int64_t batch_size) = 0;
272
+
273
+ virtual const ArrowReaderProperties& properties() const = 0;
274
+
275
+ virtual const SchemaManifest& manifest() const = 0;
276
+
277
+ virtual ~FileReader() = default;
278
+ };
279
+
280
+ class RowGroupReader {
281
+ public:
282
+ virtual ~RowGroupReader() = default;
283
+ virtual std::shared_ptr<ColumnChunkReader> Column(int column_index) = 0;
284
+ virtual ::arrow::Status ReadTable(const std::vector<int>& column_indices,
285
+ std::shared_ptr<::arrow::Table>* out) = 0;
286
+ virtual ::arrow::Status ReadTable(std::shared_ptr<::arrow::Table>* out) = 0;
287
+
288
+ private:
289
+ struct Iterator;
290
+ };
291
+
292
+ class ColumnChunkReader {
293
+ public:
294
+ virtual ~ColumnChunkReader() = default;
295
+ virtual ::arrow::Status Read(std::shared_ptr<::arrow::ChunkedArray>* out) = 0;
296
+ };
297
+
298
+ // At this point, the column reader is a stream iterator. It only knows how to
299
+ // read the next batch of values for a particular column from the file until it
300
+ // runs out.
301
+ //
302
+ // We also do not expose any internal Parquet details, such as row groups. This
303
+ // might change in the future.
304
+ class PARQUET_EXPORT ColumnReader {
305
+ public:
306
+ virtual ~ColumnReader() = default;
307
+
308
+ // Scan the next array of the indicated size. The actual size of the
309
+ // returned array may be less than the passed size depending how much data is
310
+ // available in the file.
311
+ //
312
+ // When all the data in the file has been exhausted, the result is set to
313
+ // nullptr.
314
+ //
315
+ // Returns Status::OK on a successful read, including if you have exhausted
316
+ // the data available in the file.
317
+ virtual ::arrow::Status NextBatch(int64_t batch_size,
318
+ std::shared_ptr<::arrow::ChunkedArray>* out) = 0;
319
+ };
320
+
321
+ /// \brief Experimental helper class for bindings (like Python) that struggle
322
+ /// either with std::move or C++ exceptions
323
+ class PARQUET_EXPORT FileReaderBuilder {
324
+ public:
325
+ FileReaderBuilder();
326
+
327
+ /// Create FileReaderBuilder from Arrow file and optional properties / metadata
328
+ ::arrow::Status Open(std::shared_ptr<::arrow::io::RandomAccessFile> file,
329
+ const ReaderProperties& properties = default_reader_properties(),
330
+ std::shared_ptr<FileMetaData> metadata = NULLPTR);
331
+
332
+ /// Create FileReaderBuilder from file path and optional properties / metadata
333
+ ::arrow::Status OpenFile(const std::string& path, bool memory_map = false,
334
+ const ReaderProperties& props = default_reader_properties(),
335
+ std::shared_ptr<FileMetaData> metadata = NULLPTR);
336
+
337
+ ParquetFileReader* raw_reader() { return raw_reader_.get(); }
338
+
339
+ /// Set Arrow MemoryPool for memory allocation
340
+ FileReaderBuilder* memory_pool(::arrow::MemoryPool* pool);
341
+ /// Set Arrow reader properties
342
+ FileReaderBuilder* properties(const ArrowReaderProperties& arg_properties);
343
+ /// Build FileReader instance
344
+ ::arrow::Status Build(std::unique_ptr<FileReader>* out);
345
+ ::arrow::Result<std::unique_ptr<FileReader>> Build();
346
+
347
+ private:
348
+ ::arrow::MemoryPool* pool_;
349
+ ArrowReaderProperties properties_;
350
+ std::unique_ptr<ParquetFileReader> raw_reader_;
351
+ };
352
+
353
+ /// \defgroup parquet-arrow-reader-factories Factory functions for Parquet Arrow readers
354
+ ///
355
+ /// @{
356
+
357
+ /// \brief Build FileReader from Arrow file and MemoryPool
358
+ ///
359
+ /// Advanced settings are supported through the FileReaderBuilder class.
360
+ PARQUET_EXPORT
361
+ ::arrow::Status OpenFile(std::shared_ptr<::arrow::io::RandomAccessFile>,
362
+ ::arrow::MemoryPool* allocator,
363
+ std::unique_ptr<FileReader>* reader);
364
+
365
+ /// @}
366
+
367
+ PARQUET_EXPORT
368
+ ::arrow::Status StatisticsAsScalars(const Statistics& Statistics,
369
+ std::shared_ptr<::arrow::Scalar>* min,
370
+ std::shared_ptr<::arrow::Scalar>* max);
371
+
372
+ namespace internal {
373
+
374
+ PARQUET_EXPORT
375
+ ::arrow::Status FuzzReader(const uint8_t* data, int64_t size);
376
+
377
+ } // namespace internal
378
+ } // namespace arrow
379
+ } // namespace parquet
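Note: a short usage sketch of the entry points declared in this header, covering the three levels of granularity described in the class comment (whole table, single row group, record batch stream). The file name and function name are placeholders, not part of the API.

    #include <memory>

    #include "arrow/io/file.h"
    #include "arrow/memory_pool.h"
    #include "arrow/record_batch.h"
    #include "arrow/result.h"
    #include "arrow/status.h"
    #include "arrow/table.h"
    #include "parquet/arrow/reader.h"

    ::arrow::Status ReadExamples() {
      ARROW_ASSIGN_OR_RAISE(auto infile,
                            ::arrow::io::ReadableFile::Open("example.parquet"));

      std::unique_ptr<parquet::arrow::FileReader> reader;
      ARROW_RETURN_NOT_OK(parquet::arrow::OpenFile(
          infile, ::arrow::default_memory_pool(), &reader));

      // 1. Read the whole file into one Table.
      std::shared_ptr<::arrow::Table> table;
      ARROW_RETURN_NOT_OK(reader->ReadTable(&table));

      // 2. Read a single row group (useful when parallelizing per row group).
      std::shared_ptr<::arrow::Table> one_row_group;
      ARROW_RETURN_NOT_OK(reader->RowGroup(0)->ReadTable(&one_row_group));

      // 3. Stream record batches; columns may be decoded in parallel when
      //    use_threads is enabled in the ArrowReaderProperties.
      reader->set_use_threads(true);
      std::unique_ptr<::arrow::RecordBatchReader> batches;
      ARROW_RETURN_NOT_OK(reader->GetRecordBatchReader(&batches));

      return ::arrow::Status::OK();
    }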
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/schema.h ADDED
@@ -0,0 +1,184 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cassert>
21
+ #include <memory>
22
+ #include <unordered_map>
23
+ #include <unordered_set>
24
+ #include <vector>
25
+
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/type.h"
29
+ #include "arrow/type_fwd.h"
30
+
31
+ #include "parquet/level_conversion.h"
32
+ #include "parquet/platform.h"
33
+ #include "parquet/schema.h"
34
+
35
+ namespace parquet {
36
+
37
+ class ArrowReaderProperties;
38
+ class ArrowWriterProperties;
39
+ class WriterProperties;
40
+
41
+ namespace arrow {
42
+
43
+ /// \defgroup arrow-to-parquet-schema-conversion Functions to convert an Arrow
44
+ /// schema into a Parquet schema.
45
+ ///
46
+ /// @{
47
+
48
+ PARQUET_EXPORT
49
+ ::arrow::Status FieldToNode(const std::shared_ptr<::arrow::Field>& field,
50
+ const WriterProperties& properties,
51
+ const ArrowWriterProperties& arrow_properties,
52
+ schema::NodePtr* out);
53
+
54
+ PARQUET_EXPORT
55
+ ::arrow::Status ToParquetSchema(const ::arrow::Schema* arrow_schema,
56
+ const WriterProperties& properties,
57
+ const ArrowWriterProperties& arrow_properties,
58
+ std::shared_ptr<SchemaDescriptor>* out);
59
+
60
+ PARQUET_EXPORT
61
+ ::arrow::Status ToParquetSchema(const ::arrow::Schema* arrow_schema,
62
+ const WriterProperties& properties,
63
+ std::shared_ptr<SchemaDescriptor>* out);
64
+
65
+ /// @}
66
+
67
+ /// \defgroup parquet-to-arrow-schema-conversion Functions to convert a Parquet
68
+ /// schema into an Arrow schema.
69
+ ///
70
+ /// @{
71
+
72
+ PARQUET_EXPORT
73
+ ::arrow::Status FromParquetSchema(
74
+ const SchemaDescriptor* parquet_schema, const ArrowReaderProperties& properties,
75
+ const std::shared_ptr<const ::arrow::KeyValueMetadata>& key_value_metadata,
76
+ std::shared_ptr<::arrow::Schema>* out);
77
+
78
+ PARQUET_EXPORT
79
+ ::arrow::Status FromParquetSchema(const SchemaDescriptor* parquet_schema,
80
+ const ArrowReaderProperties& properties,
81
+ std::shared_ptr<::arrow::Schema>* out);
82
+
83
+ PARQUET_EXPORT
84
+ ::arrow::Status FromParquetSchema(const SchemaDescriptor* parquet_schema,
85
+ std::shared_ptr<::arrow::Schema>* out);
86
+
87
+ /// @}
88
+
89
+ /// \brief Bridge between an arrow::Field and parquet column indices.
90
+ struct PARQUET_EXPORT SchemaField {
91
+ std::shared_ptr<::arrow::Field> field;
92
+ std::vector<SchemaField> children;
93
+
94
+ // Only set for leaf nodes
95
+ int column_index = -1;
96
+
97
+ parquet::internal::LevelInfo level_info;
98
+
99
+ bool is_leaf() const { return column_index != -1; }
100
+ };
101
+
102
+ /// \brief Bridge between a parquet Schema and an arrow Schema.
103
+ ///
104
+ /// Exposes parquet columns as a tree structure. Useful for traversing and linking
105
+ /// between arrow's Schema and parquet's Schema.
106
+ struct PARQUET_EXPORT SchemaManifest {
107
+ static ::arrow::Status Make(
108
+ const SchemaDescriptor* schema,
109
+ const std::shared_ptr<const ::arrow::KeyValueMetadata>& metadata,
110
+ const ArrowReaderProperties& properties, SchemaManifest* manifest);
111
+
112
+ const SchemaDescriptor* descr;
113
+ std::shared_ptr<::arrow::Schema> origin_schema;
114
+ std::shared_ptr<const ::arrow::KeyValueMetadata> schema_metadata;
115
+ std::vector<SchemaField> schema_fields;
116
+
117
+ std::unordered_map<int, const SchemaField*> column_index_to_field;
118
+ std::unordered_map<const SchemaField*, const SchemaField*> child_to_parent;
119
+
120
+ ::arrow::Status GetColumnField(int column_index, const SchemaField** out) const {
121
+ auto it = column_index_to_field.find(column_index);
122
+ if (it == column_index_to_field.end()) {
123
+ return ::arrow::Status::KeyError("Column index ", column_index,
124
+ " not found in schema manifest, may be malformed");
125
+ }
126
+ *out = it->second;
127
+ return ::arrow::Status::OK();
128
+ }
129
+
130
+ const SchemaField* GetParent(const SchemaField* field) const {
131
+ // Returns nullptr also if not found
132
+ auto it = child_to_parent.find(field);
133
+ if (it == child_to_parent.end()) {
134
+ return NULLPTR;
135
+ }
136
+ return it->second;
137
+ }
138
+
139
+ /// Coalesce a list of field indices (relative to the equivalent arrow::Schema) which
140
+ /// correspond to the column root (first node below the parquet schema's root group) of
141
+ /// each leaf referenced in column_indices.
142
+ ///
143
+ /// For example, for leaves `a.b.c`, `a.b.d.e`, and `i.j.k` (column_indices=[0,1,3])
144
+ /// the roots are `a` and `i` (return=[0,2]).
145
+ ///
146
+ /// root
147
+ /// -- a <------
148
+ /// -- -- b | |
149
+ /// -- -- -- c |
150
+ /// -- -- -- d |
151
+ /// -- -- -- -- e
152
+ /// -- f
153
+ /// -- -- g
154
+ /// -- -- -- h
155
+ /// -- i <---
156
+ /// -- -- j |
157
+ /// -- -- -- k
158
+ ::arrow::Result<std::vector<int>> GetFieldIndices(
159
+ const std::vector<int>& column_indices) const {
160
+ const schema::GroupNode* group = descr->group_node();
161
+ std::unordered_set<int> already_added;
162
+
163
+ std::vector<int> out;
164
+ for (int column_idx : column_indices) {
165
+ if (column_idx < 0 || column_idx >= descr->num_columns()) {
166
+ return ::arrow::Status::IndexError("Column index ", column_idx, " is not valid");
167
+ }
168
+
169
+ auto field_node = descr->GetColumnRoot(column_idx);
170
+ auto field_idx = group->FieldIndex(*field_node);
171
+ if (field_idx == -1) {
172
+ return ::arrow::Status::IndexError("Column index ", column_idx, " is not valid");
173
+ }
174
+
175
+ if (already_added.insert(field_idx).second) {
176
+ out.push_back(field_idx);
177
+ }
178
+ }
179
+ return out;
180
+ }
181
+ };
182
+
183
+ } // namespace arrow
184
+ } // namespace parquet
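Note: a hedged sketch of how SchemaManifest is typically built from a file's SchemaDescriptor and then used for the leaf-to-root mapping documented above. It assumes an already opened parquet::arrow::FileReader (see reader.h) and uses placeholder column indices; the function name is illustrative.

    #include <vector>

    #include "parquet/arrow/reader.h"
    #include "parquet/arrow/schema.h"
    #include "parquet/properties.h"

    ::arrow::Status InspectManifest(parquet::arrow::FileReader* reader) {
      const parquet::SchemaDescriptor* descr =
          reader->parquet_reader()->metadata()->schema();

      parquet::arrow::SchemaManifest manifest;
      ARROW_RETURN_NOT_OK(parquet::arrow::SchemaManifest::Make(
          descr, /*metadata=*/nullptr, parquet::default_arrow_reader_properties(),
          &manifest));

      // Leaf column 0: which Arrow field does it belong to?
      const parquet::arrow::SchemaField* field = nullptr;
      ARROW_RETURN_NOT_OK(manifest.GetColumnField(0, &field));

      // Coalesce leaf columns 0 and 2 into their top-level field indices,
      // as in the tree example above.
      ARROW_ASSIGN_OR_RAISE(std::vector<int> roots, manifest.GetFieldIndices({0, 2}));

      (void)field;
      (void)roots;
      return ::arrow::Status::OK();
    }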
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/test_util.h ADDED
@@ -0,0 +1,524 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <limits>
21
+ #include <memory>
22
+ #include <random>
23
+ #include <string>
24
+ #include <utility>
25
+ #include <vector>
26
+
27
+ #include "arrow/array.h"
28
+ #include "arrow/array/builder_binary.h"
29
+ #include "arrow/array/builder_decimal.h"
30
+ #include "arrow/array/builder_primitive.h"
31
+ #include "arrow/testing/gtest_util.h"
32
+ #include "arrow/testing/random.h"
33
+ #include "arrow/type_fwd.h"
34
+ #include "arrow/type_traits.h"
35
+ #include "arrow/util/decimal.h"
36
+ #include "arrow/util/float16.h"
37
+ #include "parquet/column_reader.h"
38
+ #include "parquet/test_util.h"
39
+
40
+ namespace parquet {
41
+
42
+ using internal::RecordReader;
43
+
44
+ namespace arrow {
45
+
46
+ using ::arrow::Array;
47
+ using ::arrow::ChunkedArray;
48
+ using ::arrow::Status;
49
+
50
+ template <int32_t PRECISION>
51
+ struct DecimalWithPrecisionAndScale {
52
+ static_assert(PRECISION >= 1 && PRECISION <= 38, "Invalid precision value");
53
+
54
+ using type = ::arrow::Decimal128Type;
55
+ static constexpr ::arrow::Type::type type_id = ::arrow::Decimal128Type::type_id;
56
+ static constexpr int32_t precision = PRECISION;
57
+ static constexpr int32_t scale = PRECISION - 1;
58
+ };
59
+
60
+ template <int32_t PRECISION>
61
+ struct Decimal256WithPrecisionAndScale {
62
+ static_assert(PRECISION >= 1 && PRECISION <= 76, "Invalid precision value");
63
+
64
+ using type = ::arrow::Decimal256Type;
65
+ static constexpr ::arrow::Type::type type_id = ::arrow::Decimal256Type::type_id;
66
+ static constexpr int32_t precision = PRECISION;
67
+ static constexpr int32_t scale = PRECISION - 1;
68
+ };
69
+
70
+ template <class ArrowType>
71
+ ::arrow::enable_if_floating_point<ArrowType, Status> NonNullArray(
72
+ size_t size, std::shared_ptr<Array>* out) {
73
+ using c_type = typename ArrowType::c_type;
74
+ std::vector<c_type> values;
75
+ if constexpr (::arrow::is_half_float_type<ArrowType>::value) {
76
+ values.resize(size);
77
+ test::random_float16_numbers(static_cast<int>(size), 0, ::arrow::util::Float16(0.0f),
78
+ ::arrow::util::Float16(1.0f), values.data());
79
+ } else {
80
+ ::arrow::random_real(size, 0, static_cast<c_type>(0), static_cast<c_type>(1),
81
+ &values);
82
+ }
83
+ ::arrow::NumericBuilder<ArrowType> builder;
84
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size()));
85
+ return builder.Finish(out);
86
+ }
87
+
88
+ template <class ArrowType>
89
+ ::arrow::enable_if_integer<ArrowType, Status> NonNullArray(size_t size,
90
+ std::shared_ptr<Array>* out) {
91
+ std::vector<typename ArrowType::c_type> values;
92
+ ::arrow::randint(size, 0, 64, &values);
93
+
94
+ // Passing data type so this will work with TimestampType too
95
+ ::arrow::NumericBuilder<ArrowType> builder(std::make_shared<ArrowType>(),
96
+ ::arrow::default_memory_pool());
97
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size()));
98
+ return builder.Finish(out);
99
+ }
100
+
101
+ template <class ArrowType>
102
+ ::arrow::enable_if_date<ArrowType, Status> NonNullArray(size_t size,
103
+ std::shared_ptr<Array>* out) {
104
+ std::vector<typename ArrowType::c_type> values;
105
+ ::arrow::randint(size, 0, 24, &values);
106
+ for (size_t i = 0; i < size; i++) {
107
+ values[i] *= 86400000;
108
+ }
109
+
110
+ // Passing data type so this will work with TimestampType too
111
+ ::arrow::NumericBuilder<ArrowType> builder(std::make_shared<ArrowType>(),
112
+ ::arrow::default_memory_pool());
113
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size()));
114
+ return builder.Finish(out);
115
+ }
116
+
117
+ template <class ArrowType>
118
+ ::arrow::enable_if_base_binary<ArrowType, Status> NonNullArray(
119
+ size_t size, std::shared_ptr<Array>* out) {
120
+ using BuilderType = typename ::arrow::TypeTraits<ArrowType>::BuilderType;
121
+ BuilderType builder;
122
+ for (size_t i = 0; i < size; i++) {
123
+ RETURN_NOT_OK(builder.Append("test-string"));
124
+ }
125
+ return builder.Finish(out);
126
+ }
127
+
128
+ template <typename ArrowType>
129
+ ::arrow::enable_if_fixed_size_binary<ArrowType, Status> NonNullArray(
130
+ size_t size, std::shared_ptr<Array>* out) {
131
+ using BuilderType = typename ::arrow::TypeTraits<ArrowType>::BuilderType;
132
+ // set byte_width to the length of "fixed": 5
133
+ // todo: find a way to generate test data with more diversity.
134
+ BuilderType builder(::arrow::fixed_size_binary(5));
135
+ for (size_t i = 0; i < size; i++) {
136
+ RETURN_NOT_OK(builder.Append("fixed"));
137
+ }
138
+ return builder.Finish(out);
139
+ }
140
+
141
+ template <int32_t byte_width>
142
+ static void random_decimals(int64_t n, uint32_t seed, int32_t precision, uint8_t* out) {
143
+ auto gen = ::arrow::random::RandomArrayGenerator(seed);
144
+ std::shared_ptr<Array> decimals;
145
+ if constexpr (byte_width == 16) {
146
+ decimals = gen.Decimal128(::arrow::decimal128(precision, 0), n);
147
+ } else {
148
+ decimals = gen.Decimal256(::arrow::decimal256(precision, 0), n);
149
+ }
150
+ std::memcpy(out, decimals->data()->GetValues<uint8_t>(1, 0), byte_width * n);
151
+ }
152
+
153
+ template <typename ArrowType, int32_t precision = ArrowType::precision>
154
+ ::arrow::enable_if_t<
155
+ std::is_same<ArrowType, DecimalWithPrecisionAndScale<precision>>::value, Status>
156
+ NonNullArray(size_t size, std::shared_ptr<Array>* out) {
157
+ constexpr int32_t kDecimalPrecision = precision;
158
+ constexpr int32_t kDecimalScale = DecimalWithPrecisionAndScale<precision>::scale;
159
+
160
+ const auto type = ::arrow::decimal(kDecimalPrecision, kDecimalScale);
161
+ ::arrow::Decimal128Builder builder(type);
162
+ const int32_t byte_width =
163
+ static_cast<const ::arrow::Decimal128Type&>(*type).byte_width();
164
+
165
+ constexpr int32_t seed = 0;
166
+
167
+ ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width));
168
+ random_decimals<::arrow::Decimal128Type::kByteWidth>(size, seed, kDecimalPrecision,
169
+ out_buf->mutable_data());
170
+
171
+ RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size));
172
+ return builder.Finish(out);
173
+ }
174
+
175
+ template <typename ArrowType, int32_t precision = ArrowType::precision>
176
+ ::arrow::enable_if_t<
177
+ std::is_same<ArrowType, Decimal256WithPrecisionAndScale<precision>>::value, Status>
178
+ NonNullArray(size_t size, std::shared_ptr<Array>* out) {
179
+ constexpr int32_t kDecimalPrecision = precision;
180
+ constexpr int32_t kDecimalScale = Decimal256WithPrecisionAndScale<precision>::scale;
181
+
182
+ const auto type = ::arrow::decimal256(kDecimalPrecision, kDecimalScale);
183
+ ::arrow::Decimal256Builder builder(type);
184
+ const int32_t byte_width =
185
+ static_cast<const ::arrow::Decimal256Type&>(*type).byte_width();
186
+
187
+ constexpr int32_t seed = 0;
188
+
189
+ ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width));
190
+ random_decimals<::arrow::Decimal256Type::kByteWidth>(size, seed, kDecimalPrecision,
191
+ out_buf->mutable_data());
192
+
193
+ RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size));
194
+ return builder.Finish(out);
195
+ }
196
+
197
+ template <class ArrowType>
198
+ ::arrow::enable_if_boolean<ArrowType, Status> NonNullArray(size_t size,
199
+ std::shared_ptr<Array>* out) {
200
+ std::vector<uint8_t> values;
201
+ ::arrow::randint(size, 0, 1, &values);
202
+ ::arrow::BooleanBuilder builder;
203
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size()));
204
+ return builder.Finish(out);
205
+ }
206
+
207
+ // This helper function only supports (size/2) nulls.
208
+ template <typename ArrowType>
209
+ ::arrow::enable_if_floating_point<ArrowType, Status> NullableArray(
210
+ size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr<Array>* out) {
211
+ using c_type = typename ArrowType::c_type;
212
+ std::vector<c_type> values;
213
+ if constexpr (::arrow::is_half_float_type<ArrowType>::value) {
214
+ values.resize(size);
215
+ test::random_float16_numbers(static_cast<int>(size), 0, ::arrow::util::Float16(-1e4f),
216
+ ::arrow::util::Float16(1e4f), values.data());
217
+ } else {
218
+ ::arrow::random_real(size, seed, static_cast<c_type>(-1e10),
219
+ static_cast<c_type>(1e10), &values);
220
+ }
221
+ std::vector<uint8_t> valid_bytes(size, 1);
222
+
223
+ for (size_t i = 0; i < num_nulls; i++) {
224
+ valid_bytes[i * 2] = 0;
225
+ }
226
+
227
+ ::arrow::NumericBuilder<ArrowType> builder;
228
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data()));
229
+ return builder.Finish(out);
230
+ }
231
+
232
+ // This helper function only supports (size/2) nulls.
233
+ template <typename ArrowType>
234
+ ::arrow::enable_if_integer<ArrowType, Status> NullableArray(size_t size, size_t num_nulls,
235
+ uint32_t seed,
236
+ std::shared_ptr<Array>* out) {
237
+ std::vector<typename ArrowType::c_type> values;
238
+
239
+ // Seed is random in Arrow right now
240
+ (void)seed;
241
+ ::arrow::randint(size, 0, 64, &values);
242
+ std::vector<uint8_t> valid_bytes(size, 1);
243
+
244
+ for (size_t i = 0; i < num_nulls; i++) {
245
+ valid_bytes[i * 2] = 0;
246
+ }
247
+
248
+ // Passing data type so this will work with TimestampType too
249
+ ::arrow::NumericBuilder<ArrowType> builder(std::make_shared<ArrowType>(),
250
+ ::arrow::default_memory_pool());
251
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data()));
252
+ return builder.Finish(out);
253
+ }
254
+
255
+ template <typename ArrowType>
256
+ ::arrow::enable_if_date<ArrowType, Status> NullableArray(size_t size, size_t num_nulls,
257
+ uint32_t seed,
258
+ std::shared_ptr<Array>* out) {
259
+ std::vector<typename ArrowType::c_type> values;
260
+
261
+ // Seed is random in Arrow right now
262
+ (void)seed;
263
+ ::arrow::randint(size, 0, 24, &values);
264
+ for (size_t i = 0; i < size; i++) {
265
+ values[i] *= 86400000;
266
+ }
267
+ std::vector<uint8_t> valid_bytes(size, 1);
268
+
269
+ for (size_t i = 0; i < num_nulls; i++) {
270
+ valid_bytes[i * 2] = 0;
271
+ }
272
+
273
+ // Passing data type so this will work with TimestampType too
274
+ ::arrow::NumericBuilder<ArrowType> builder(std::make_shared<ArrowType>(),
275
+ ::arrow::default_memory_pool());
276
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data()));
277
+ return builder.Finish(out);
278
+ }
279
+
280
+ // This helper function currently only supports (size/2) nulls.
281
+ template <typename ArrowType>
282
+ ::arrow::enable_if_base_binary<ArrowType, Status> NullableArray(
283
+ size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr<::arrow::Array>* out) {
284
+ std::vector<uint8_t> valid_bytes(size, 1);
285
+
286
+ for (size_t i = 0; i < num_nulls; i++) {
287
+ valid_bytes[i * 2] = 0;
288
+ }
289
+
290
+ using BuilderType = typename ::arrow::TypeTraits<ArrowType>::BuilderType;
291
+ BuilderType builder;
292
+
293
+ const int kBufferSize = 10;
294
+ uint8_t buffer[kBufferSize];
295
+ for (size_t i = 0; i < size; i++) {
296
+ if (!valid_bytes[i]) {
297
+ RETURN_NOT_OK(builder.AppendNull());
298
+ } else {
299
+ ::arrow::random_bytes(kBufferSize, seed + static_cast<uint32_t>(i), buffer);
300
+ if (ArrowType::is_utf8) {
301
+ // Trivially force data to be valid UTF8 by making it all ASCII
302
+ for (auto& byte : buffer) {
303
+ byte &= 0x7f;
304
+ }
305
+ }
306
+ RETURN_NOT_OK(builder.Append(buffer, kBufferSize));
307
+ }
308
+ }
309
+ return builder.Finish(out);
310
+ }
311
+
312
+ // This helper function currently only supports (size/2) nulls,
313
+ // same as NullableArray<String|Binary>(..)
314
+ template <typename ArrowType>
315
+ ::arrow::enable_if_fixed_size_binary<ArrowType, Status> NullableArray(
316
+ size_t size, size_t num_nulls, uint32_t seed, std::shared_ptr<::arrow::Array>* out) {
317
+ std::vector<uint8_t> valid_bytes(size, 1);
318
+
319
+ for (size_t i = 0; i < num_nulls; i++) {
320
+ valid_bytes[i * 2] = 0;
321
+ }
322
+
323
+ using BuilderType = typename ::arrow::TypeTraits<ArrowType>::BuilderType;
324
+ const int byte_width = 10;
325
+ BuilderType builder(::arrow::fixed_size_binary(byte_width));
326
+
327
+ const int kBufferSize = byte_width;
328
+ uint8_t buffer[kBufferSize];
329
+ for (size_t i = 0; i < size; i++) {
330
+ if (!valid_bytes[i]) {
331
+ RETURN_NOT_OK(builder.AppendNull());
332
+ } else {
333
+ ::arrow::random_bytes(kBufferSize, seed + static_cast<uint32_t>(i), buffer);
334
+ RETURN_NOT_OK(builder.Append(buffer));
335
+ }
336
+ }
337
+ return builder.Finish(out);
338
+ }
339
+
340
+ template <typename ArrowType, int32_t precision = ArrowType::precision>
341
+ ::arrow::enable_if_t<
342
+ std::is_same<ArrowType, DecimalWithPrecisionAndScale<precision>>::value, Status>
343
+ NullableArray(size_t size, size_t num_nulls, uint32_t seed,
344
+ std::shared_ptr<::arrow::Array>* out) {
345
+ std::vector<uint8_t> valid_bytes(size, '\1');
346
+
347
+ for (size_t i = 0; i < num_nulls; ++i) {
348
+ valid_bytes[i * 2] = '\0';
349
+ }
350
+
351
+ constexpr int32_t kDecimalPrecision = precision;
352
+ constexpr int32_t kDecimalScale = DecimalWithPrecisionAndScale<precision>::scale;
353
+ const auto type = ::arrow::decimal(kDecimalPrecision, kDecimalScale);
354
+ const int32_t byte_width =
355
+ static_cast<const ::arrow::Decimal128Type&>(*type).byte_width();
356
+
357
+ ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width));
358
+
359
+ random_decimals<::arrow::Decimal128Type::kByteWidth>(size, seed, precision,
360
+ out_buf->mutable_data());
361
+
362
+ ::arrow::Decimal128Builder builder(type);
363
+ RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size, valid_bytes.data()));
364
+ return builder.Finish(out);
365
+ }
366
+
367
+ template <typename ArrowType, int32_t precision = ArrowType::precision>
368
+ ::arrow::enable_if_t<
369
+ std::is_same<ArrowType, Decimal256WithPrecisionAndScale<precision>>::value, Status>
370
+ NullableArray(size_t size, size_t num_nulls, uint32_t seed,
371
+ std::shared_ptr<::arrow::Array>* out) {
372
+ std::vector<uint8_t> valid_bytes(size, '\1');
373
+
374
+ for (size_t i = 0; i < num_nulls; ++i) {
375
+ valid_bytes[i * 2] = '\0';
376
+ }
377
+
378
+ constexpr int32_t kDecimalPrecision = precision;
379
+ constexpr int32_t kDecimalScale = Decimal256WithPrecisionAndScale<precision>::scale;
380
+ const auto type = ::arrow::decimal256(kDecimalPrecision, kDecimalScale);
381
+ const int32_t byte_width =
382
+ static_cast<const ::arrow::Decimal256Type&>(*type).byte_width();
383
+
384
+ ARROW_ASSIGN_OR_RAISE(auto out_buf, ::arrow::AllocateBuffer(size * byte_width));
385
+
386
+ random_decimals<::arrow::Decimal256Type::kByteWidth>(size, seed, precision,
387
+ out_buf->mutable_data());
388
+
389
+ ::arrow::Decimal256Builder builder(type);
390
+ RETURN_NOT_OK(builder.AppendValues(out_buf->data(), size, valid_bytes.data()));
391
+ return builder.Finish(out);
392
+ }
393
+
394
+ // This helper function currently only supports (size/2) nulls.
395
+ template <class ArrowType>
396
+ ::arrow::enable_if_boolean<ArrowType, Status> NullableArray(size_t size, size_t num_nulls,
397
+ uint32_t seed,
398
+ std::shared_ptr<Array>* out) {
399
+ std::vector<uint8_t> values;
400
+
401
+ // Seed is random in Arrow right now
402
+ (void)seed;
403
+
404
+ ::arrow::randint(size, 0, 1, &values);
405
+ std::vector<uint8_t> valid_bytes(size, 1);
406
+
407
+ for (size_t i = 0; i < num_nulls; i++) {
408
+ valid_bytes[i * 2] = 0;
409
+ }
410
+
411
+ ::arrow::BooleanBuilder builder;
412
+ RETURN_NOT_OK(builder.AppendValues(values.data(), values.size(), valid_bytes.data()));
413
+ return builder.Finish(out);
414
+ }
415
+
416
+ /// Wrap an Array into a ListArray by splitting it up into size lists.
417
+ ///
418
+ /// This helper function only supports (size/2) nulls.
419
+ Status MakeListArray(const std::shared_ptr<Array>& values, int64_t size,
420
+ int64_t null_count, const std::string& item_name,
421
+ bool nullable_values, std::shared_ptr<::arrow::ListArray>* out) {
422
+ // We always include an empty list
423
+ int64_t non_null_entries = size - null_count - 1;
424
+ int64_t length_per_entry = values->length() / non_null_entries;
425
+
426
+ auto offsets = AllocateBuffer();
427
+ RETURN_NOT_OK(offsets->Resize((size + 1) * sizeof(int32_t)));
428
+ int32_t* offsets_ptr = reinterpret_cast<int32_t*>(offsets->mutable_data());
429
+
430
+ auto null_bitmap = AllocateBuffer();
431
+ int64_t bitmap_size = ::arrow::bit_util::BytesForBits(size);
432
+ RETURN_NOT_OK(null_bitmap->Resize(bitmap_size));
433
+ uint8_t* null_bitmap_ptr = null_bitmap->mutable_data();
434
+ memset(null_bitmap_ptr, 0, bitmap_size);
435
+
436
+ int32_t current_offset = 0;
437
+ for (int64_t i = 0; i < size; i++) {
438
+ offsets_ptr[i] = current_offset;
439
+ if (!(((i % 2) == 0) && ((i / 2) < null_count))) {
440
+ // Non-null list (list with index 1 is always empty).
441
+ ::arrow::bit_util::SetBit(null_bitmap_ptr, i);
442
+ if (i != 1) {
443
+ current_offset += static_cast<int32_t>(length_per_entry);
444
+ }
445
+ }
446
+ }
447
+ offsets_ptr[size] = static_cast<int32_t>(values->length());
448
+
449
+ auto value_field = ::arrow::field(item_name, values->type(), nullable_values);
450
+ *out = std::make_shared<::arrow::ListArray>(::arrow::list(value_field), size, offsets,
451
+ values, null_bitmap, null_count);
452
+
453
+ return Status::OK();
454
+ }
455
+
456
+ // Make an array containing only empty lists, with a null values array
457
+ Status MakeEmptyListsArray(int64_t size, std::shared_ptr<Array>* out_array) {
458
+ // Allocate an offsets buffer containing only zeroes
459
+ const int64_t offsets_nbytes = (size + 1) * sizeof(int32_t);
460
+ ARROW_ASSIGN_OR_RAISE(auto offsets_buffer, ::arrow::AllocateBuffer(offsets_nbytes));
461
+ memset(offsets_buffer->mutable_data(), 0, offsets_nbytes);
462
+
463
+ auto value_field =
464
+ ::arrow::field("item", ::arrow::float64(), false /* nullable_values */);
465
+ auto list_type = ::arrow::list(value_field);
466
+
467
+ std::vector<std::shared_ptr<Buffer>> child_buffers = {nullptr /* null bitmap */,
468
+ nullptr /* values */};
469
+ auto child_data =
470
+ ::arrow::ArrayData::Make(value_field->type(), 0, std::move(child_buffers));
471
+
472
+ std::vector<std::shared_ptr<Buffer>> buffers = {nullptr /* bitmap */,
473
+ std::move(offsets_buffer)};
474
+ auto array_data = ::arrow::ArrayData::Make(list_type, size, std::move(buffers));
475
+ array_data->child_data.push_back(child_data);
476
+
477
+ *out_array = ::arrow::MakeArray(array_data);
478
+ return Status::OK();
479
+ }
480
+
481
+ std::shared_ptr<::arrow::Table> MakeSimpleTable(
482
+ const std::shared_ptr<ChunkedArray>& values, bool nullable) {
483
+ auto schema = ::arrow::schema({::arrow::field("col", values->type(), nullable)});
484
+ return ::arrow::Table::Make(schema, {values});
485
+ }
486
+
487
+ std::shared_ptr<::arrow::Table> MakeSimpleTable(const std::shared_ptr<Array>& values,
488
+ bool nullable) {
489
+ auto carr = std::make_shared<::arrow::ChunkedArray>(values);
490
+ return MakeSimpleTable(carr, nullable);
491
+ }
492
+
493
+ template <typename T>
494
+ void ExpectArray(T* expected, Array* result) {
495
+ auto p_array = static_cast<::arrow::PrimitiveArray*>(result);
496
+ for (int i = 0; i < result->length(); i++) {
497
+ EXPECT_EQ(expected[i], reinterpret_cast<const T*>(p_array->values()->data())[i]);
498
+ }
499
+ }
500
+
501
+ template <typename ArrowType>
502
+ void ExpectArrayT(void* expected, Array* result) {
503
+ ::arrow::PrimitiveArray* p_array = static_cast<::arrow::PrimitiveArray*>(result);
504
+ for (int64_t i = 0; i < result->length(); i++) {
505
+ EXPECT_EQ(reinterpret_cast<typename ArrowType::c_type*>(expected)[i],
506
+ reinterpret_cast<const typename ArrowType::c_type*>(
507
+ p_array->values()->data())[i]);
508
+ }
509
+ }
510
+
511
+ template <>
512
+ void ExpectArrayT<::arrow::BooleanType>(void* expected, Array* result) {
513
+ ::arrow::BooleanBuilder builder;
514
+ ARROW_EXPECT_OK(
515
+ builder.AppendValues(reinterpret_cast<uint8_t*>(expected), result->length()));
516
+
517
+ std::shared_ptr<Array> expected_array;
518
+ ARROW_EXPECT_OK(builder.Finish(&expected_array));
519
+ EXPECT_TRUE(result->Equals(*expected_array));
520
+ }
521
+
522
+ } // namespace arrow
523
+
524
+ } // namespace parquet
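Note: a sketch of how these test helpers are commonly combined inside a GTest case; the test name and parameters are illustrative only.

    #include "gtest/gtest.h"

    #include "arrow/table.h"
    #include "parquet/arrow/test_util.h"

    TEST(TestUtilSketch, BuildSimpleTable) {
      // 100 float64 values with 10 nulls spread over the even positions.
      std::shared_ptr<::arrow::Array> values;
      ASSERT_OK(parquet::arrow::NullableArray<::arrow::DoubleType>(
          /*size=*/100, /*num_nulls=*/10, /*seed=*/0, &values));

      // Wrap into a one-column table, e.g. for a write/read round trip.
      std::shared_ptr<::arrow::Table> table =
          parquet::arrow::MakeSimpleTable(values, /*nullable=*/true);
      ASSERT_EQ(table->num_columns(), 1);
      ASSERT_EQ(table->num_rows(), 100);
    }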
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/arrow/writer.h ADDED
@@ -0,0 +1,180 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+
23
+ #include "parquet/platform.h"
24
+ #include "parquet/properties.h"
25
+
26
+ namespace arrow {
27
+
28
+ class Array;
29
+ class ChunkedArray;
30
+ class RecordBatch;
31
+ class Schema;
32
+ class Table;
33
+
34
+ } // namespace arrow
35
+
36
+ namespace parquet {
37
+
38
+ class FileMetaData;
39
+ class ParquetFileWriter;
40
+
41
+ namespace arrow {
42
+
43
+ /// \brief Iterative FileWriter class
44
+ ///
45
+ /// For basic usage, you can write one Table at a time, creating one or more row
46
+ /// groups per write call.
47
+ ///
48
+ /// For advanced usage, you can write column-by-column: start a new RowGroup or
49
+ /// Chunk with NewRowGroup, then write each column chunk in full, one column at a time.
50
+ ///
51
+ /// If PARQUET:field_id is present as a metadata key on a field, and the corresponding
52
+ /// value is a nonnegative integer, then it will be used as the field_id in the parquet
53
+ /// file.
54
+ class PARQUET_EXPORT FileWriter {
55
+ public:
56
+ static ::arrow::Status Make(MemoryPool* pool, std::unique_ptr<ParquetFileWriter> writer,
57
+ std::shared_ptr<::arrow::Schema> schema,
58
+ std::shared_ptr<ArrowWriterProperties> arrow_properties,
59
+ std::unique_ptr<FileWriter>* out);
60
+
61
+ /// \brief Try to create an Arrow to Parquet file writer.
62
+ ///
63
+ /// \param schema schema of data that will be passed.
64
+ /// \param pool memory pool to use.
65
+ /// \param sink output stream to write Parquet data.
66
+ /// \param properties general Parquet writer properties.
67
+ /// \param arrow_properties Arrow-specific writer properties.
68
+ ///
69
+ /// \since 11.0.0
70
+ static ::arrow::Result<std::unique_ptr<FileWriter>> Open(
71
+ const ::arrow::Schema& schema, MemoryPool* pool,
72
+ std::shared_ptr<::arrow::io::OutputStream> sink,
73
+ std::shared_ptr<WriterProperties> properties = default_writer_properties(),
74
+ std::shared_ptr<ArrowWriterProperties> arrow_properties =
75
+ default_arrow_writer_properties());
76
+
77
+ ARROW_DEPRECATED("Deprecated in 11.0.0. Use Result-returning variants instead.")
78
+ static ::arrow::Status Open(const ::arrow::Schema& schema, MemoryPool* pool,
79
+ std::shared_ptr<::arrow::io::OutputStream> sink,
80
+ std::shared_ptr<WriterProperties> properties,
81
+ std::unique_ptr<FileWriter>* writer);
82
+ ARROW_DEPRECATED("Deprecated in 11.0.0. Use Result-returning variants instead.")
83
+ static ::arrow::Status Open(const ::arrow::Schema& schema, MemoryPool* pool,
84
+ std::shared_ptr<::arrow::io::OutputStream> sink,
85
+ std::shared_ptr<WriterProperties> properties,
86
+ std::shared_ptr<ArrowWriterProperties> arrow_properties,
87
+ std::unique_ptr<FileWriter>* writer);
88
+
89
+ /// Return the Arrow schema to be written to.
90
+ virtual std::shared_ptr<::arrow::Schema> schema() const = 0;
91
+
92
+ /// \brief Write a Table to Parquet.
93
+ ///
94
+ /// \param table Arrow table to write.
95
+ /// \param chunk_size maximum number of rows to write per row group.
96
+ virtual ::arrow::Status WriteTable(
97
+ const ::arrow::Table& table, int64_t chunk_size = DEFAULT_MAX_ROW_GROUP_LENGTH) = 0;
98
+
99
+ /// \brief Start a new row group.
100
+ ///
101
+ /// Returns an error if not all columns have been written.
102
+ ///
103
+ /// \param chunk_size the number of rows in the next row group.
104
+ virtual ::arrow::Status NewRowGroup(int64_t chunk_size) = 0;
105
+
106
+ /// \brief Write ColumnChunk in row group using an array.
107
+ virtual ::arrow::Status WriteColumnChunk(const ::arrow::Array& data) = 0;
108
+
109
+ /// \brief Write ColumnChunk in row group using slice of a ChunkedArray
110
+ virtual ::arrow::Status WriteColumnChunk(
111
+ const std::shared_ptr<::arrow::ChunkedArray>& data, int64_t offset,
112
+ int64_t size) = 0;
113
+
114
+ /// \brief Write ColumnChunk in a row group using a ChunkedArray
115
+ virtual ::arrow::Status WriteColumnChunk(
116
+ const std::shared_ptr<::arrow::ChunkedArray>& data) = 0;
117
+
118
+ /// \brief Start a new buffered row group.
119
+ ///
120
+ /// Returns an error if not all columns have been written.
121
+ virtual ::arrow::Status NewBufferedRowGroup() = 0;
122
+
123
+ /// \brief Write a RecordBatch into the buffered row group.
124
+ ///
125
+ /// Multiple RecordBatches can be written into the same row group
126
+ /// through this method.
127
+ ///
128
+ /// WriterProperties.max_row_group_length() is respected and a new
129
+ /// row group will be created if the current row group exceeds the
130
+ /// limit.
131
+ ///
132
+ /// Batches get flushed to the output stream once NewBufferedRowGroup()
133
+ /// or Close() is called.
134
+ ///
135
+ /// WARNING: If you are writing multiple files in parallel in the same
136
+ /// executor, deadlock may occur if ArrowWriterProperties::use_threads
137
+ /// is set to true to write columns in parallel. Please disable use_threads
138
+ /// option in this case.
139
+ virtual ::arrow::Status WriteRecordBatch(const ::arrow::RecordBatch& batch) = 0;
140
+
141
+ /// \brief Write the footer and close the file.
142
+ virtual ::arrow::Status Close() = 0;
143
+ virtual ~FileWriter();
144
+
145
+ virtual MemoryPool* memory_pool() const = 0;
146
+ /// \brief Return the file metadata, only available after calling Close().
147
+ virtual const std::shared_ptr<FileMetaData> metadata() const = 0;
148
+ };
149
+
150
+ /// \brief Write Parquet file metadata only to indicated Arrow OutputStream
151
+ PARQUET_EXPORT
152
+ ::arrow::Status WriteFileMetaData(const FileMetaData& file_metadata,
153
+ ::arrow::io::OutputStream* sink);
154
+
155
+ /// \brief Write metadata-only Parquet file to indicated Arrow OutputStream
156
+ PARQUET_EXPORT
157
+ ::arrow::Status WriteMetaDataFile(const FileMetaData& file_metadata,
158
+ ::arrow::io::OutputStream* sink);
159
+
160
+ /// \brief Write a Table to Parquet.
161
+ ///
162
+ /// This writes one table in a single shot. To write a Parquet file with
163
+ /// multiple tables iteratively, see parquet::arrow::FileWriter.
164
+ ///
165
+ /// \param table Table to write.
166
+ /// \param pool memory pool to use.
167
+ /// \param sink output stream to write Parquet data.
168
+ /// \param chunk_size maximum number of rows to write per row group.
169
+ /// \param properties general Parquet writer properties.
170
+ /// \param arrow_properties Arrow-specific writer properties.
171
+ ::arrow::Status PARQUET_EXPORT
172
+ WriteTable(const ::arrow::Table& table, MemoryPool* pool,
173
+ std::shared_ptr<::arrow::io::OutputStream> sink,
174
+ int64_t chunk_size = DEFAULT_MAX_ROW_GROUP_LENGTH,
175
+ std::shared_ptr<WriterProperties> properties = default_writer_properties(),
176
+ std::shared_ptr<ArrowWriterProperties> arrow_properties =
177
+ default_arrow_writer_properties());
178
+
179
+ } // namespace arrow
180
+ } // namespace parquet
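Note: a usage sketch of the two write paths declared above: the one-shot WriteTable helper and the iterative, buffered FileWriter fed by record batches. File names, chunk size, and the function name are placeholders.

    #include <memory>

    #include "arrow/io/file.h"
    #include "arrow/memory_pool.h"
    #include "arrow/record_batch.h"
    #include "arrow/result.h"
    #include "arrow/status.h"
    #include "arrow/table.h"
    #include "parquet/arrow/writer.h"

    ::arrow::Status WriteExamples(const std::shared_ptr<::arrow::Table>& table,
                                  const std::shared_ptr<::arrow::RecordBatch>& batch) {
      // One-shot: one Table, one or more row groups per call.
      ARROW_ASSIGN_OR_RAISE(auto sink,
                            ::arrow::io::FileOutputStream::Open("table.parquet"));
      ARROW_RETURN_NOT_OK(parquet::arrow::WriteTable(
          *table, ::arrow::default_memory_pool(), sink, /*chunk_size=*/64 * 1024));

      // Iterative: stream RecordBatches into buffered row groups.
      ARROW_ASSIGN_OR_RAISE(auto sink2,
                            ::arrow::io::FileOutputStream::Open("batches.parquet"));
      ARROW_ASSIGN_OR_RAISE(std::unique_ptr<parquet::arrow::FileWriter> writer,
                            parquet::arrow::FileWriter::Open(
                                *batch->schema(), ::arrow::default_memory_pool(), sink2));
      ARROW_RETURN_NOT_OK(writer->NewBufferedRowGroup());
      ARROW_RETURN_NOT_OK(writer->WriteRecordBatch(*batch));
      return writer->Close();
    }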
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/bloom_filter.h ADDED
@@ -0,0 +1,363 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cmath>
21
+ #include <cstdint>
22
+ #include <memory>
23
+
24
+ #include "arrow/util/bit_util.h"
25
+ #include "arrow/util/logging.h"
26
+ #include "parquet/hasher.h"
27
+ #include "parquet/platform.h"
28
+ #include "parquet/types.h"
29
+
30
+ namespace parquet {
31
+
32
+ // A Bloom filter is a compact structure to indicate whether an item is not in a set or
33
+ // probably in a set. The Bloom filter usually consists of a bit set that represents a
34
+ // set of elements, a hash strategy and a Bloom filter algorithm.
35
+ class PARQUET_EXPORT BloomFilter {
36
+ public:
37
+ // Maximum Bloom filter size; it is set to the HDFS default block size of 128MB.
38
+ // This value will be reconsidered when implementing Bloom filter producer.
39
+ static constexpr uint32_t kMaximumBloomFilterBytes = 128 * 1024 * 1024;
40
+
41
+ /// Determine whether an element exists in the set or not.
42
+ ///
43
+ /// @param hash the hash of the element to look up.
44
+ /// @return false if the value is definitely not in the set; true means it is
45
+ /// PROBABLY in the set.
46
+ virtual bool FindHash(uint64_t hash) const = 0;
47
+
48
+ /// Insert element to set represented by Bloom filter bitset.
49
+ /// @param hash the hash of value to insert into Bloom filter.
50
+ virtual void InsertHash(uint64_t hash) = 0;
51
+
52
+ /// Insert elements to set represented by Bloom filter bitset.
53
+ /// @param hashes the hash values to insert into Bloom filter.
54
+ /// @param num_values the number of hash values to insert.
55
+ virtual void InsertHashes(const uint64_t* hashes, int num_values) = 0;
56
+
57
+ /// Write this Bloom filter to an output stream. A Bloom filter structure should
58
+ /// include bitset length, hash strategy, algorithm, and bitset.
59
+ ///
60
+ /// @param sink the output stream to write
61
+ virtual void WriteTo(ArrowOutputStream* sink) const = 0;
62
+
63
+ /// Get the number of bytes of bitset
64
+ virtual uint32_t GetBitsetSize() const = 0;
65
+
66
+ /// Compute hash for 32 bits value by using its plain encoding result.
67
+ ///
68
+ /// @param value the value to hash.
69
+ /// @return hash result.
70
+ virtual uint64_t Hash(int32_t value) const = 0;
71
+
72
+ /// Compute hash for 64 bits value by using its plain encoding result.
73
+ ///
74
+ /// @param value the value to hash.
75
+ /// @return hash result.
76
+ virtual uint64_t Hash(int64_t value) const = 0;
77
+
78
+ /// Compute hash for float value by using its plain encoding result.
79
+ ///
80
+ /// @param value the value to hash.
81
+ /// @return hash result.
82
+ virtual uint64_t Hash(float value) const = 0;
83
+
84
+ /// Compute hash for double value by using its plain encoding result.
85
+ ///
86
+ /// @param value the value to hash.
87
+ /// @return hash result.
88
+ virtual uint64_t Hash(double value) const = 0;
89
+
90
+ /// Compute hash for Int96 value by using its plain encoding result.
91
+ ///
92
+ /// @param value the value to hash.
93
+ /// @return hash result.
94
+ virtual uint64_t Hash(const Int96* value) const = 0;
95
+
96
+ /// Compute hash for ByteArray value by using its plain encoding result.
97
+ ///
98
+ /// @param value the value to hash.
99
+ /// @return hash result.
100
+ virtual uint64_t Hash(const ByteArray* value) const = 0;
101
+
102
+ /// Compute hash for fixed byte array value by using its plain encoding result.
103
+ ///
104
+ /// @param value the value address.
105
+ /// @param len the value length.
106
+ /// @return hash result.
107
+ virtual uint64_t Hash(const FLBA* value, uint32_t len) const = 0;
108
+
109
+ /// Batch compute hashes for 32 bits values by using its plain encoding result.
110
+ ///
111
+ /// @param values a pointer to the values to hash.
112
+ /// @param num_values the number of values to hash.
113
+ /// @param hashes a pointer to the output hash values, its length should be equal to
114
+ /// num_values.
115
+ virtual void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const = 0;
116
+
117
+ /// Batch compute hashes for 64 bits values by using its plain encoding result.
118
+ ///
119
+ /// @param values a pointer to the values to hash.
120
+ /// @param num_values the number of values to hash.
121
+ /// @param hashes a pointer to the output hash values, its length should be equal to
122
+ /// num_values.
123
+ virtual void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const = 0;
124
+
125
+ /// Batch compute hashes for float values by using its plain encoding result.
126
+ ///
127
+ /// @param values a pointer to the values to hash.
128
+ /// @param num_values the number of values to hash.
129
+ /// @param hashes a pointer to the output hash values, its length should be equal to
130
+ /// num_values.
131
+ virtual void Hashes(const float* values, int num_values, uint64_t* hashes) const = 0;
132
+
133
+ /// Batch compute hashes for double values by using its plain encoding result.
134
+ ///
135
+ /// @param values a pointer to the values to hash.
136
+ /// @param num_values the number of values to hash.
137
+ /// @param hashes a pointer to the output hash values, its length should be equal to
138
+ /// num_values.
139
+ virtual void Hashes(const double* values, int num_values, uint64_t* hashes) const = 0;
140
+
141
+ /// Batch compute hashes for Int96 values by using its plain encoding result.
142
+ ///
143
+ /// @param values a pointer to the values to hash.
144
+ /// @param num_values the number of values to hash.
145
+ /// @param hashes a pointer to the output hash values, its length should be equal to
146
+ /// num_values.
147
+ virtual void Hashes(const Int96* values, int num_values, uint64_t* hashes) const = 0;
148
+
149
+ /// Batch compute hashes for ByteArray values by using its plain encoding result.
150
+ ///
151
+ /// @param values a pointer to the values to hash.
152
+ /// @param num_values the number of values to hash.
153
+ /// @param hashes a pointer to the output hash values, its length should be equal to
154
+ /// num_values.
155
+ virtual void Hashes(const ByteArray* values, int num_values,
156
+ uint64_t* hashes) const = 0;
157
+
158
+ /// Batch compute hashes for fixed byte array values by using its plain encoding result.
159
+ ///
160
+ /// @param values a pointer to the values to hash.
161
+ /// @param type_len the value length.
162
+ /// @param num_values the number of values to hash.
163
+ /// @param hashes a pointer to the output hash values, its length should be equal to
164
+ /// num_values.
165
+ virtual void Hashes(const FLBA* values, uint32_t type_len, int num_values,
166
+ uint64_t* hashes) const = 0;
167
+
168
+ virtual ~BloomFilter() = default;
169
+
170
+ protected:
171
+ // Hash strategy available for Bloom filter.
172
+ enum class HashStrategy : uint32_t { XXHASH = 0 };
173
+
174
+ // Bloom filter algorithm.
175
+ enum class Algorithm : uint32_t { BLOCK = 0 };
176
+
177
+ enum class CompressionStrategy : uint32_t { UNCOMPRESSED = 0 };
178
+ };
179
+
180
+ /// The BlockSplitBloomFilter is implemented using block-based Bloom filters from
181
+ /// Putze et al.'s "Cache-, Hash- and Space-Efficient Bloom Filters". The basic idea is to
182
+ /// hash the item into a tiny Bloom filter whose size fits in a single cache line or smaller.
183
+ ///
184
+ /// This implementation sets 8 bits in each tiny Bloom filter. Each tiny Bloom
185
+ /// filter is 32 bytes to take advantage of 32-byte SIMD instructions.
186
+ class PARQUET_EXPORT BlockSplitBloomFilter : public BloomFilter {
187
+ public:
188
+ /// The constructor of BlockSplitBloomFilter. It uses XXH64 as hash function.
189
+ ///
190
+ /// \param pool memory pool to use.
191
+ explicit BlockSplitBloomFilter(
192
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
193
+
194
+ /// Initialize the BlockSplitBloomFilter. num_bytes should be within
195
+ /// [kMinimumBloomFilterBytes, kMaximumBloomFilterBytes]; if it is out of range it will be
196
+ /// clamped to the nearest bound, and it will also
197
+ /// be rounded up to a power of 2.
198
+ ///
199
+ /// @param num_bytes The number of bytes to store Bloom filter bitset.
200
+ void Init(uint32_t num_bytes);
201
+
202
+ /// Initialize the BlockSplitBloomFilter. It copies the bitset as underlying
203
+ /// bitset because the given bitset may not satisfy the 32-byte alignment requirement
204
+ /// which may lead to segfault when performing SIMD instructions. It is the caller's
205
+ /// responsibility to free the bitset passed in. This is used when reconstructing
206
+ /// a Bloom filter from a parquet file.
207
+ ///
208
+ /// @param bitset The given bitset to initialize the Bloom filter.
209
+ /// @param num_bytes The number of bytes of given bitset.
210
+ void Init(const uint8_t* bitset, uint32_t num_bytes);
211
+
212
+ /// Minimum Bloom filter size; it is set to 32 bytes to fit a tiny Bloom filter.
213
+ static constexpr uint32_t kMinimumBloomFilterBytes = 32;
214
+
215
+ /// Calculate optimal size according to the number of distinct values and false
216
+ /// positive probability.
217
+ ///
218
+ /// @param ndv The number of distinct values.
219
+ /// @param fpp The false positive probability.
220
+ /// @return a value between kMinimumBloomFilterBytes and
221
+ /// kMaximumBloomFilterBytes; the return value is always a power of 2.
222
+ static uint32_t OptimalNumOfBytes(uint32_t ndv, double fpp) {
223
+ uint32_t optimal_num_of_bits = OptimalNumOfBits(ndv, fpp);
224
+ DCHECK(::arrow::bit_util::IsMultipleOf8(optimal_num_of_bits));
225
+ return optimal_num_of_bits >> 3;
226
+ }
227
+
228
+ /// Calculate optimal size according to the number of distinct values and false
229
+ /// positive probability.
230
+ ///
231
+ /// @param ndv The number of distinct values.
232
+ /// @param fpp The false positive probability.
233
+ /// @return a value between kMinimumBloomFilterBytes * 8 and
234
+ /// kMaximumBloomFilterBytes * 8; the return value is always a power of 2.
235
+ static uint32_t OptimalNumOfBits(uint32_t ndv, double fpp) {
236
+ DCHECK(fpp > 0.0 && fpp < 1.0);
237
+ const double m = -8.0 * ndv / log(1 - pow(fpp, 1.0 / 8));
238
+ uint32_t num_bits;
239
+
240
+ // Handle overflow.
241
+ if (m < 0 || m > kMaximumBloomFilterBytes << 3) {
242
+ num_bits = static_cast<uint32_t>(kMaximumBloomFilterBytes << 3);
243
+ } else {
244
+ num_bits = static_cast<uint32_t>(m);
245
+ }
246
+
247
+ // Round up to lower bound
248
+ if (num_bits < kMinimumBloomFilterBytes << 3) {
249
+ num_bits = kMinimumBloomFilterBytes << 3;
250
+ }
251
+
252
+ // Get next power of 2 if bits is not power of 2.
253
+ if ((num_bits & (num_bits - 1)) != 0) {
254
+ num_bits = static_cast<uint32_t>(::arrow::bit_util::NextPower2(num_bits));
255
+ }
256
+
257
+ // Round down to upper bound
258
+ if (num_bits > kMaximumBloomFilterBytes << 3) {
259
+ num_bits = kMaximumBloomFilterBytes << 3;
260
+ }
261
+
262
+ return num_bits;
263
+ }
264
+
265
+ bool FindHash(uint64_t hash) const override;
266
+ void InsertHash(uint64_t hash) override;
267
+ void InsertHashes(const uint64_t* hashes, int num_values) override;
268
+ void WriteTo(ArrowOutputStream* sink) const override;
269
+ uint32_t GetBitsetSize() const override { return num_bytes_; }
270
+
271
+ uint64_t Hash(int32_t value) const override { return hasher_->Hash(value); }
272
+ uint64_t Hash(int64_t value) const override { return hasher_->Hash(value); }
273
+ uint64_t Hash(float value) const override { return hasher_->Hash(value); }
274
+ uint64_t Hash(double value) const override { return hasher_->Hash(value); }
275
+ uint64_t Hash(const Int96* value) const override { return hasher_->Hash(value); }
276
+ uint64_t Hash(const ByteArray* value) const override { return hasher_->Hash(value); }
277
+ uint64_t Hash(const FLBA* value, uint32_t len) const override {
278
+ return hasher_->Hash(value, len);
279
+ }
280
+
281
+ void Hashes(const int32_t* values, int num_values, uint64_t* hashes) const override {
282
+ hasher_->Hashes(values, num_values, hashes);
283
+ }
284
+ void Hashes(const int64_t* values, int num_values, uint64_t* hashes) const override {
285
+ hasher_->Hashes(values, num_values, hashes);
286
+ }
287
+ void Hashes(const float* values, int num_values, uint64_t* hashes) const override {
288
+ hasher_->Hashes(values, num_values, hashes);
289
+ }
290
+ void Hashes(const double* values, int num_values, uint64_t* hashes) const override {
291
+ hasher_->Hashes(values, num_values, hashes);
292
+ }
293
+ void Hashes(const Int96* values, int num_values, uint64_t* hashes) const override {
294
+ hasher_->Hashes(values, num_values, hashes);
295
+ }
296
+ void Hashes(const ByteArray* values, int num_values, uint64_t* hashes) const override {
297
+ hasher_->Hashes(values, num_values, hashes);
298
+ }
299
+ void Hashes(const FLBA* values, uint32_t type_len, int num_values,
300
+ uint64_t* hashes) const override {
301
+ hasher_->Hashes(values, type_len, num_values, hashes);
302
+ }
303
+
304
+ uint64_t Hash(const int32_t* value) const { return hasher_->Hash(*value); }
305
+ uint64_t Hash(const int64_t* value) const { return hasher_->Hash(*value); }
306
+ uint64_t Hash(const float* value) const { return hasher_->Hash(*value); }
307
+ uint64_t Hash(const double* value) const { return hasher_->Hash(*value); }
308
+
309
+ /// Deserialize the Bloom filter from an input stream. It is used when reconstructing
310
+ /// a Bloom filter from a parquet file.
311
+ ///
312
+ /// @param properties The parquet reader properties.
313
+ /// @param input_stream The input stream from which to construct the bloom filter.
314
+ /// @param bloom_filter_length The length of the serialized bloom filter including
315
+ /// header.
316
+ /// @return The BlockSplitBloomFilter.
317
+ static BlockSplitBloomFilter Deserialize(
318
+ const ReaderProperties& properties, ArrowInputStream* input_stream,
319
+ std::optional<int64_t> bloom_filter_length = std::nullopt);
320
+
321
+ private:
322
+ inline void InsertHashImpl(uint64_t hash);
323
+
324
+ // Bytes in a tiny Bloom filter block.
325
+ static constexpr int kBytesPerFilterBlock = 32;
326
+
327
+ // The number of bits to be set in each tiny Bloom filter
328
+ static constexpr int kBitsSetPerBlock = 8;
329
+
330
+ // A mask structure used to set bits in each tiny Bloom filter.
331
+ struct BlockMask {
332
+ uint32_t item[kBitsSetPerBlock];
333
+ };
334
+
335
+ // The block-based algorithm needs eight odd SALT values to calculate eight indexes
336
+ // of bit to set, one bit in each 32-bit word.
337
+ static constexpr uint32_t SALT[kBitsSetPerBlock] = {
338
+ 0x47b6137bU, 0x44974d91U, 0x8824ad5bU, 0xa2b7289dU,
339
+ 0x705495c7U, 0x2df1424bU, 0x9efc4947U, 0x5c6bfb31U};
340
+
341
+ // Memory pool to allocate aligned buffer for bitset
342
+ ::arrow::MemoryPool* pool_;
343
+
344
+ // The underlying buffer of bitset.
345
+ std::shared_ptr<Buffer> data_;
346
+
347
+ // The number of bytes of Bloom filter bitset.
348
+ uint32_t num_bytes_;
349
+
350
+ // Hash strategy used in this Bloom filter.
351
+ HashStrategy hash_strategy_;
352
+
353
+ // Algorithm used in this Bloom filter.
354
+ Algorithm algorithm_;
355
+
356
+ // Compression used in this Bloom filter.
357
+ CompressionStrategy compression_strategy_;
358
+
359
+ // The hash pointer points to actual hash class used.
360
+ std::unique_ptr<Hasher> hasher_;
361
+ };
362
+
363
+ } // namespace parquet
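As a quick illustration of the interface above, the sketch below sizes a BlockSplitBloomFilter for an expected number of distinct values, inserts one hash, and probes for it. The ndv/fpp figures and the sample value are arbitrary examples.

#include <cstdint>

#include "parquet/bloom_filter.h"

bool BloomFilterSketch() {
  parquet::BlockSplitBloomFilter filter;
  // Size the bitset for ~1024 distinct values at a 1% false-positive target.
  const uint32_t num_bytes =
      parquet::BlockSplitBloomFilter::OptimalNumOfBytes(/*ndv=*/1024, /*fpp=*/0.01);
  filter.Init(num_bytes);

  const int32_t value = 42;
  filter.InsertHash(filter.Hash(value));
  // True for inserted values (or, rarely, a false positive);
  // false only when the value is definitely absent.
  return filter.FindHash(filter.Hash(value));
}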
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/crypto_factory.h ADDED
@@ -0,0 +1,152 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+
22
+ #include "parquet/encryption/encryption.h"
23
+ #include "parquet/encryption/file_key_wrapper.h"
24
+ #include "parquet/encryption/key_toolkit.h"
25
+ #include "parquet/encryption/kms_client_factory.h"
26
+ #include "parquet/platform.h"
27
+
28
+ namespace parquet::encryption {
29
+
30
+ static constexpr ParquetCipher::type kDefaultEncryptionAlgorithm =
31
+ ParquetCipher::AES_GCM_V1;
32
+ static constexpr bool kDefaultPlaintextFooter = false;
33
+ static constexpr bool kDefaultDoubleWrapping = true;
34
+ static constexpr double kDefaultCacheLifetimeSeconds = 600; // 10 minutes
35
+ static constexpr bool kDefaultInternalKeyMaterial = true;
36
+ static constexpr bool kDefaultUniformEncryption = false;
37
+ static constexpr int32_t kDefaultDataKeyLengthBits = 128;
38
+
39
+ struct PARQUET_EXPORT EncryptionConfiguration {
40
+ explicit EncryptionConfiguration(const std::string& footer_key)
41
+ : footer_key(footer_key) {}
42
+
43
+ /// ID of the master key for footer encryption/signing
44
+ std::string footer_key;
45
+
46
+ /// List of columns to encrypt, with master key IDs (see HIVE-21848).
47
+ /// Format: "masterKeyID:colName,colName;masterKeyID:colName..."
48
+ /// Either
49
+ /// (1) column_keys must be set
50
+ /// or
51
+ /// (2) uniform_encryption must be set to true
52
+ /// If none of (1) and (2) are true, or if both are true, an exception will be
53
+ /// thrown.
54
+ std::string column_keys;
55
+
56
+ /// Encrypt footer and all columns with the same encryption key.
57
+ bool uniform_encryption = kDefaultUniformEncryption;
58
+
59
+ /// Parquet encryption algorithm. Can be "AES_GCM_V1" (default), or "AES_GCM_CTR_V1".
60
+ ParquetCipher::type encryption_algorithm = kDefaultEncryptionAlgorithm;
61
+
62
+ /// Write files with plaintext footer.
63
+ /// The default is false - files are written with encrypted footer.
64
+ bool plaintext_footer = kDefaultPlaintextFooter;
65
+
66
+ /// Use double wrapping - where data encryption keys (DEKs) are encrypted with key
67
+ /// encryption keys (KEKs), which in turn are encrypted with master keys.
68
+ /// The default is true. If set to false, use single wrapping - where DEKs are
69
+ /// encrypted directly with master keys.
70
+ bool double_wrapping = kDefaultDoubleWrapping;
71
+
72
+ /// Lifetime of cached entities (key encryption keys, local wrapping keys, KMS client
73
+ /// objects).
74
+ /// The default is 600 (10 minutes).
75
+ double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds;
76
+
77
+ /// Store key material inside Parquet file footers; this mode doesn’t produce
78
+ /// additional files. By default, true. If set to false, key material is stored in
79
+ /// separate files in the same folder, which enables key rotation for immutable
80
+ /// Parquet files.
81
+ bool internal_key_material = kDefaultInternalKeyMaterial;
82
+
83
+ /// Length of data encryption keys (DEKs), randomly generated by parquet key
84
+ /// management tools. Can be 128, 192 or 256 bits.
85
+ /// The default is 128 bits.
86
+ int32_t data_key_length_bits = kDefaultDataKeyLengthBits;
87
+ };
88
+
89
+ struct PARQUET_EXPORT DecryptionConfiguration {
90
+ /// Lifetime of cached entities (key encryption keys, local wrapping keys, KMS client
91
+ /// objects).
92
+ /// The default is 600 (10 minutes).
93
+ double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds;
94
+ };
95
+
96
+ /// This is a core class, that translates the parameters of high level encryption (like
97
+ /// the names of encrypted columns, names of master keys, etc), into parameters of low
98
+ /// level encryption (like the key metadata, DEK, etc). A factory that produces the low
99
+ /// level FileEncryptionProperties and FileDecryptionProperties objects, from the high
100
+ /// level parameters.
101
+ class PARQUET_EXPORT CryptoFactory {
102
+ public:
103
+ /// a KmsClientFactory object must be registered via this method before calling any of
104
+ /// GetFileEncryptionProperties()/GetFileDecryptionProperties() methods.
105
+ void RegisterKmsClientFactory(std::shared_ptr<KmsClientFactory> kms_client_factory);
106
+
107
+ /// Get the encryption properties for a Parquet file.
108
+ /// If external key material is used then a file system and path to the
109
+ /// parquet file must be provided.
110
+ std::shared_ptr<FileEncryptionProperties> GetFileEncryptionProperties(
111
+ const KmsConnectionConfig& kms_connection_config,
112
+ const EncryptionConfiguration& encryption_config, const std::string& file_path = "",
113
+ const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);
114
+
115
+ /// Get decryption properties for a Parquet file.
116
+ /// If external key material is used then a file system and path to the
117
+ /// parquet file must be provided.
118
+ std::shared_ptr<FileDecryptionProperties> GetFileDecryptionProperties(
119
+ const KmsConnectionConfig& kms_connection_config,
120
+ const DecryptionConfiguration& decryption_config, const std::string& file_path = "",
121
+ const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);
122
+
123
+ void RemoveCacheEntriesForToken(const std::string& access_token) {
124
+ key_toolkit_->RemoveCacheEntriesForToken(access_token);
125
+ }
126
+
127
+ void RemoveCacheEntriesForAllTokens() {
128
+ key_toolkit_->RemoveCacheEntriesForAllTokens();
129
+ }
130
+
131
+ /// Rotates master encryption keys for a Parquet file that uses external key material.
132
+ /// In single wrapping mode, data encryption keys are decrypted with the old master keys
133
+ /// and then re-encrypted with new master keys.
134
+ /// In double wrapping mode, key encryption keys are decrypted with the old master keys
135
+ /// and then re-encrypted with new master keys.
136
+ /// This relies on the KMS supporting versioning, such that the old master key is
137
+ /// used when unwrapping a key, and the latest version is used when wrapping a key.
138
+ void RotateMasterKeys(const KmsConnectionConfig& kms_connection_config,
139
+ const std::string& parquet_file_path,
140
+ const std::shared_ptr<::arrow::fs::FileSystem>& file_system,
141
+ bool double_wrapping = kDefaultDoubleWrapping,
142
+ double cache_lifetime_seconds = kDefaultCacheLifetimeSeconds);
143
+
144
+ private:
145
+ ColumnPathToEncryptionPropertiesMap GetColumnEncryptionProperties(
146
+ int dek_length, const std::string& column_keys, FileKeyWrapper* key_wrapper);
147
+
148
+ /// Key utilities object for kms client initialization and cache control
149
+ std::shared_ptr<KeyToolkit> key_toolkit_ = std::make_shared<KeyToolkit>();
150
+ };
151
+
152
+ } // namespace parquet::encryption
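A hedged sketch of how the factory above is typically driven: register a KMS client factory, describe which columns to encrypt, and request file encryption properties. The kms_client_factory argument, the master-key IDs, and the column names are caller-supplied placeholders, not defined in this header; the KmsConnectionConfig fields are left at their defaults here.

#include <memory>

#include "parquet/encryption/crypto_factory.h"

std::shared_ptr<parquet::FileEncryptionProperties> MakeEncryptionProps(
    const std::shared_ptr<parquet::encryption::KmsClientFactory>& kms_client_factory) {
  using namespace parquet::encryption;

  CryptoFactory crypto_factory;
  crypto_factory.RegisterKmsClientFactory(kms_client_factory);

  // Connection settings for the KMS; defaults are used in this sketch.
  KmsConnectionConfig kms_connection_config;

  // Footer signed/encrypted with one master key, two columns with another.
  EncryptionConfiguration encryption_config("footer_master_key_id");
  encryption_config.column_keys = "column_master_key_id:secret_col_a,secret_col_b";

  return crypto_factory.GetFileEncryptionProperties(kms_connection_config,
                                                    encryption_config);
}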
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/encryption.h ADDED
@@ -0,0 +1,510 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <map>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <utility>
24
+
25
+ #include "parquet/exception.h"
26
+ #include "parquet/schema.h"
27
+ #include "parquet/types.h"
28
+
29
+ namespace parquet {
30
+
31
+ static constexpr ParquetCipher::type kDefaultEncryptionAlgorithm =
32
+ ParquetCipher::AES_GCM_V1;
33
+ static constexpr int32_t kMaximalAadMetadataLength = 256;
34
+ static constexpr bool kDefaultEncryptedFooter = true;
35
+ static constexpr bool kDefaultCheckSignature = true;
36
+ static constexpr bool kDefaultAllowPlaintextFiles = false;
37
+ static constexpr int32_t kAadFileUniqueLength = 8;
38
+
39
+ class ColumnDecryptionProperties;
40
+ using ColumnPathToDecryptionPropertiesMap =
41
+ std::map<std::string, std::shared_ptr<ColumnDecryptionProperties>>;
42
+
43
+ class ColumnEncryptionProperties;
44
+ using ColumnPathToEncryptionPropertiesMap =
45
+ std::map<std::string, std::shared_ptr<ColumnEncryptionProperties>>;
46
+
47
+ class PARQUET_EXPORT DecryptionKeyRetriever {
48
+ public:
49
+ virtual std::string GetKey(const std::string& key_metadata) = 0;
50
+ virtual ~DecryptionKeyRetriever() {}
51
+ };
52
+
53
+ /// Simple integer key retriever
54
+ class PARQUET_EXPORT IntegerKeyIdRetriever : public DecryptionKeyRetriever {
55
+ public:
56
+ void PutKey(uint32_t key_id, const std::string& key);
57
+ std::string GetKey(const std::string& key_metadata) override;
58
+
59
+ private:
60
+ std::map<uint32_t, std::string> key_map_;
61
+ };
62
+
63
+ // Simple string key retriever
64
+ class PARQUET_EXPORT StringKeyIdRetriever : public DecryptionKeyRetriever {
65
+ public:
66
+ void PutKey(const std::string& key_id, const std::string& key);
67
+ std::string GetKey(const std::string& key_metadata) override;
68
+
69
+ private:
70
+ std::map<std::string, std::string> key_map_;
71
+ };
72
+
73
+ class PARQUET_EXPORT HiddenColumnException : public ParquetException {
74
+ public:
75
+ explicit HiddenColumnException(const std::string& columnPath)
76
+ : ParquetException(columnPath.c_str()) {}
77
+ };
78
+
79
+ class PARQUET_EXPORT KeyAccessDeniedException : public ParquetException {
80
+ public:
81
+ explicit KeyAccessDeniedException(const std::string& columnPath)
82
+ : ParquetException(columnPath.c_str()) {}
83
+ };
84
+
85
+ inline const uint8_t* str2bytes(const std::string& str) {
86
+ if (str.empty()) return NULLPTR;
87
+
88
+ char* cbytes = const_cast<char*>(str.c_str());
89
+ return reinterpret_cast<const uint8_t*>(cbytes);
90
+ }
91
+
92
+ class PARQUET_EXPORT ColumnEncryptionProperties {
93
+ public:
94
+ class PARQUET_EXPORT Builder {
95
+ public:
96
+ /// Convenience builder for encrypted columns.
97
+ explicit Builder(const std::string& name) : Builder(name, true) {}
98
+
99
+ /// Convenience builder for encrypted columns.
100
+ explicit Builder(const std::shared_ptr<schema::ColumnPath>& path)
101
+ : Builder(path->ToDotString(), true) {}
102
+
103
+ /// Set a column-specific key.
104
+ /// If key is not set on an encrypted column, the column will
105
+ /// be encrypted with the footer key.
106
+ /// keyBytes Key length must be either 16, 24 or 32 bytes.
107
+ /// The key is cloned, and will be wiped out (array values set to 0) upon completion
108
+ /// of file writing.
109
+ /// Caller is responsible for wiping out the input key array.
110
+ Builder* key(std::string column_key);
111
+
112
+ /// Set a key retrieval metadata.
113
+ /// use either key_metadata() or key_id(), not both
114
+ Builder* key_metadata(const std::string& key_metadata);
115
+
116
+ /// A convenience function to set key metadata using a string id.
117
+ /// Set a key retrieval metadata (converted from String).
118
+ /// use either key_metadata() or key_id(), not both
119
+ /// key_id will be converted to metadata (UTF-8 array).
120
+ Builder* key_id(const std::string& key_id);
121
+
122
+ std::shared_ptr<ColumnEncryptionProperties> build() {
123
+ return std::shared_ptr<ColumnEncryptionProperties>(
124
+ new ColumnEncryptionProperties(encrypted_, column_path_, key_, key_metadata_));
125
+ }
126
+
127
+ private:
128
+ const std::string column_path_;
129
+ bool encrypted_;
130
+ std::string key_;
131
+ std::string key_metadata_;
132
+
133
+ Builder(const std::string path, bool encrypted)
134
+ : column_path_(path), encrypted_(encrypted) {}
135
+ };
136
+
137
+ std::string column_path() const { return column_path_; }
138
+ bool is_encrypted() const { return encrypted_; }
139
+ bool is_encrypted_with_footer_key() const { return encrypted_with_footer_key_; }
140
+ std::string key() const { return key_; }
141
+ std::string key_metadata() const { return key_metadata_; }
142
+
143
+ /// Upon completion of file writing, the encryption key
144
+ /// will be wiped out.
145
+ void WipeOutEncryptionKey() { key_.clear(); }
146
+
147
+ bool is_utilized() {
148
+ if (key_.empty())
149
+ return false; // can re-use column properties without encryption keys
150
+ return utilized_;
151
+ }
152
+
153
+ /// ColumnEncryptionProperties object can be used for writing one file only.
154
+ /// Mark ColumnEncryptionProperties as utilized once it is used in
155
+ /// FileEncryptionProperties as the encryption key will be wiped out upon
156
+ /// completion of file writing.
157
+ void set_utilized() { utilized_ = true; }
158
+
159
+ std::shared_ptr<ColumnEncryptionProperties> DeepClone() {
160
+ std::string key_copy = key_;
161
+ return std::shared_ptr<ColumnEncryptionProperties>(new ColumnEncryptionProperties(
162
+ encrypted_, column_path_, key_copy, key_metadata_));
163
+ }
164
+
165
+ ColumnEncryptionProperties() = default;
166
+ ColumnEncryptionProperties(const ColumnEncryptionProperties& other) = default;
167
+ ColumnEncryptionProperties(ColumnEncryptionProperties&& other) = default;
168
+
169
+ private:
170
+ const std::string column_path_;
171
+ bool encrypted_;
172
+ bool encrypted_with_footer_key_;
173
+ std::string key_;
174
+ std::string key_metadata_;
175
+ bool utilized_;
176
+ explicit ColumnEncryptionProperties(bool encrypted, const std::string& column_path,
177
+ const std::string& key,
178
+ const std::string& key_metadata);
179
+ };
180
+
181
+ class PARQUET_EXPORT ColumnDecryptionProperties {
182
+ public:
183
+ class PARQUET_EXPORT Builder {
184
+ public:
185
+ explicit Builder(const std::string& name) : column_path_(name) {}
186
+
187
+ explicit Builder(const std::shared_ptr<schema::ColumnPath>& path)
188
+ : Builder(path->ToDotString()) {}
189
+
190
+ /// Set an explicit column key. If applied on a file that contains
191
+ /// key metadata for this column the metadata will be ignored,
192
+ /// the column will be decrypted with this key.
193
+ /// key length must be either 16, 24 or 32 bytes.
194
+ Builder* key(const std::string& key);
195
+
196
+ std::shared_ptr<ColumnDecryptionProperties> build();
197
+
198
+ private:
199
+ const std::string column_path_;
200
+ std::string key_;
201
+ };
202
+
203
+ ColumnDecryptionProperties() = default;
204
+ ColumnDecryptionProperties(const ColumnDecryptionProperties& other) = default;
205
+ ColumnDecryptionProperties(ColumnDecryptionProperties&& other) = default;
206
+
207
+ std::string column_path() const { return column_path_; }
208
+ std::string key() const { return key_; }
209
+ bool is_utilized() { return utilized_; }
210
+
211
+ /// ColumnDecryptionProperties object can be used for reading one file only.
212
+ /// Mark ColumnDecryptionProperties as utilized once it is used in
213
+ /// FileDecryptionProperties as the encryption key will be wiped out upon
214
+ /// completion of file reading.
215
+ void set_utilized() { utilized_ = true; }
216
+
217
+ /// Upon completion of file reading, the encryption key
218
+ /// will be wiped out.
219
+ void WipeOutDecryptionKey();
220
+
221
+ std::shared_ptr<ColumnDecryptionProperties> DeepClone();
222
+
223
+ private:
224
+ const std::string column_path_;
225
+ std::string key_;
226
+ bool utilized_;
227
+
228
+ /// This class is only required for setting explicit column decryption keys -
229
+ /// to override key retriever (or to provide keys when key metadata and/or
230
+ /// key retriever are not available)
231
+ explicit ColumnDecryptionProperties(const std::string& column_path,
232
+ const std::string& key);
233
+ };
234
+
235
+ class PARQUET_EXPORT AADPrefixVerifier {
236
+ public:
237
+ /// Verifies identity (AAD Prefix) of individual file,
238
+ /// or of file collection in a data set.
239
+ /// Throws exception if an AAD prefix is wrong.
240
+ /// In a data set, AAD Prefixes should be collected,
241
+ /// and then checked for missing files.
242
+ virtual void Verify(const std::string& aad_prefix) = 0;
243
+ virtual ~AADPrefixVerifier() {}
244
+ };
245
+
246
+ class PARQUET_EXPORT FileDecryptionProperties {
247
+ public:
248
+ class PARQUET_EXPORT Builder {
249
+ public:
250
+ Builder() {
251
+ check_plaintext_footer_integrity_ = kDefaultCheckSignature;
252
+ plaintext_files_allowed_ = kDefaultAllowPlaintextFiles;
253
+ }
254
+
255
+ /// Set an explicit footer key. If applied on a file that contains
256
+ /// footer key metadata the metadata will be ignored, the footer
257
+ /// will be decrypted/verified with this key.
258
+ /// If explicit key is not set, footer key will be fetched from
259
+ /// key retriever.
260
+ /// With explicit keys or AAD prefix, new encryption properties object must be
261
+ /// created for each encrypted file.
262
+ /// Explicit encryption keys (footer and column) are cloned.
263
+ /// Upon completion of file reading, the cloned encryption keys in the properties
264
+ /// will be wiped out (array values set to 0).
265
+ /// Caller is responsible for wiping out the input key array.
266
+ /// param footerKey Key length must be either 16, 24 or 32 bytes.
267
+ Builder* footer_key(const std::string footer_key);
268
+
269
+ /// Set explicit column keys (decryption properties).
270
+ /// It's also possible to set a key retriever on this property object.
271
+ /// Upon file decryption, availability of explicit keys is checked before
272
+ /// invocation of the retriever callback.
273
+ /// If an explicit key is available for a footer or a column,
274
+ /// its key metadata will be ignored.
275
+ Builder* column_keys(
276
+ const ColumnPathToDecryptionPropertiesMap& column_decryption_properties);
277
+
278
+ /// Set a key retriever callback. It's also possible to
279
+ /// set explicit footer or column keys on this file property object.
280
+ /// Upon file decryption, availability of explicit keys is checked before
281
+ /// invocation of the retriever callback.
282
+ /// If an explicit key is available for a footer or a column,
283
+ /// its key metadata will be ignored.
284
+ Builder* key_retriever(const std::shared_ptr<DecryptionKeyRetriever>& key_retriever);
285
+
286
+ /// Skip integrity verification of plaintext footers.
287
+ /// If not called, integrity of plaintext footers will be checked in runtime,
288
+ /// and an exception will be thrown in the following situations:
289
+ /// - footer signing key is not available
290
+ /// (not passed, or not found by key retriever)
291
+ /// - footer content and signature don't match
292
+ Builder* disable_footer_signature_verification() {
293
+ check_plaintext_footer_integrity_ = false;
294
+ return this;
295
+ }
296
+
297
+ /// Explicitly supply the file AAD prefix.
298
+ /// This is required when a prefix is used for file encryption but not stored in the file.
299
+ /// If AAD prefix is stored in file, it will be compared to the explicitly
300
+ /// supplied value and an exception will be thrown if they differ.
301
+ Builder* aad_prefix(const std::string& aad_prefix);
302
+
303
+ /// Set callback for verification of AAD Prefixes stored in file.
304
+ Builder* aad_prefix_verifier(std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier);
305
+
306
+ /// By default, reading plaintext (unencrypted) files is not
307
+ /// allowed when using a decryptor
308
+ /// - in order to detect files that were mistakenly left unencrypted.
309
+ /// However, the default behavior can be overridden by calling this method.
310
+ /// The caller should then use a different method to ensure encryption
311
+ /// of files with sensitive data.
312
+ Builder* plaintext_files_allowed() {
313
+ plaintext_files_allowed_ = true;
314
+ return this;
315
+ }
316
+
317
+ std::shared_ptr<FileDecryptionProperties> build() {
318
+ return std::shared_ptr<FileDecryptionProperties>(new FileDecryptionProperties(
319
+ footer_key_, key_retriever_, check_plaintext_footer_integrity_, aad_prefix_,
320
+ aad_prefix_verifier_, column_decryption_properties_, plaintext_files_allowed_));
321
+ }
322
+
323
+ private:
324
+ std::string footer_key_;
325
+ std::string aad_prefix_;
326
+ std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier_;
327
+ ColumnPathToDecryptionPropertiesMap column_decryption_properties_;
328
+
329
+ std::shared_ptr<DecryptionKeyRetriever> key_retriever_;
330
+ bool check_plaintext_footer_integrity_;
331
+ bool plaintext_files_allowed_;
332
+ };
333
+
334
+ std::string column_key(const std::string& column_path) const;
335
+
336
+ std::string footer_key() const { return footer_key_; }
337
+
338
+ std::string aad_prefix() const { return aad_prefix_; }
339
+
340
+ const std::shared_ptr<DecryptionKeyRetriever>& key_retriever() const {
341
+ return key_retriever_;
342
+ }
343
+
344
+ bool check_plaintext_footer_integrity() const {
345
+ return check_plaintext_footer_integrity_;
346
+ }
347
+
348
+ bool plaintext_files_allowed() const { return plaintext_files_allowed_; }
349
+
350
+ const std::shared_ptr<AADPrefixVerifier>& aad_prefix_verifier() const {
351
+ return aad_prefix_verifier_;
352
+ }
353
+
354
+ /// Upon completion of file reading, the encryption keys in the properties
355
+ /// will be wiped out (array values set to 0).
356
+ void WipeOutDecryptionKeys();
357
+
358
+ bool is_utilized();
359
+
360
+ /// FileDecryptionProperties object can be used for reading one file only.
361
+ /// Mark FileDecryptionProperties as utilized once it is used to read a file as the
362
+ /// encryption keys will be wiped out upon completion of file reading.
363
+ void set_utilized() { utilized_ = true; }
364
+
365
+ /// FileDecryptionProperties object can be used for reading one file only.
366
+ /// (unless this object keeps the keyRetrieval callback only, and no explicit
367
+ /// keys or aadPrefix).
368
+ /// At the end, keys are wiped out in the memory.
369
+ /// This method allows cloning identical properties for another file,
370
+ /// with an option to update the aadPrefix (if newAadPrefix is null,
371
+ /// aadPrefix will be cloned too)
372
+ std::shared_ptr<FileDecryptionProperties> DeepClone(std::string new_aad_prefix = "");
373
+
374
+ private:
375
+ std::string footer_key_;
376
+ std::string aad_prefix_;
377
+ std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier_;
378
+
379
+ const std::string empty_string_ = "";
380
+ ColumnPathToDecryptionPropertiesMap column_decryption_properties_;
381
+
382
+ std::shared_ptr<DecryptionKeyRetriever> key_retriever_;
383
+ bool check_plaintext_footer_integrity_;
384
+ bool plaintext_files_allowed_;
385
+ bool utilized_;
386
+
387
+ FileDecryptionProperties(
388
+ const std::string& footer_key,
389
+ std::shared_ptr<DecryptionKeyRetriever> key_retriever,
390
+ bool check_plaintext_footer_integrity, const std::string& aad_prefix,
391
+ std::shared_ptr<AADPrefixVerifier> aad_prefix_verifier,
392
+ const ColumnPathToDecryptionPropertiesMap& column_decryption_properties,
393
+ bool plaintext_files_allowed);
394
+ };
395
+
396
+ class PARQUET_EXPORT FileEncryptionProperties {
397
+ public:
398
+ class PARQUET_EXPORT Builder {
399
+ public:
400
+ explicit Builder(const std::string& footer_key)
401
+ : parquet_cipher_(kDefaultEncryptionAlgorithm),
402
+ encrypted_footer_(kDefaultEncryptedFooter) {
403
+ footer_key_ = footer_key;
404
+ store_aad_prefix_in_file_ = false;
405
+ }
406
+
407
+ /// Create files with plaintext footer.
408
+ /// If not called, the files will be created with encrypted footer (default).
409
+ Builder* set_plaintext_footer() {
410
+ encrypted_footer_ = false;
411
+ return this;
412
+ }
413
+
414
+ /// Set encryption algorithm.
415
+ /// If not called, files will be encrypted with AES_GCM_V1 (default).
416
+ Builder* algorithm(ParquetCipher::type parquet_cipher) {
417
+ parquet_cipher_ = parquet_cipher;
418
+ return this;
419
+ }
420
+
421
+ /// Set a key retrieval metadata (converted from String).
422
+ /// use either footer_key_metadata or footer_key_id, not both.
423
+ Builder* footer_key_id(const std::string& key_id);
424
+
425
+ /// Set a key retrieval metadata.
426
+ /// use either footer_key_metadata or footer_key_id, not both.
427
+ Builder* footer_key_metadata(const std::string& footer_key_metadata);
428
+
429
+ /// Set the file AAD Prefix.
430
+ Builder* aad_prefix(const std::string& aad_prefix);
431
+
432
+ /// Skip storing AAD Prefix in file.
433
+ /// If not called, and if AAD Prefix is set, it will be stored.
434
+ Builder* disable_aad_prefix_storage();
435
+
436
+ /// Set the list of encrypted columns and their properties (keys etc).
437
+ /// If not called, all columns will be encrypted with the footer key.
438
+ /// If called, the file columns not in the list will be left unencrypted.
439
+ Builder* encrypted_columns(
440
+ const ColumnPathToEncryptionPropertiesMap& encrypted_columns);
441
+
442
+ std::shared_ptr<FileEncryptionProperties> build() {
443
+ return std::shared_ptr<FileEncryptionProperties>(new FileEncryptionProperties(
444
+ parquet_cipher_, footer_key_, footer_key_metadata_, encrypted_footer_,
445
+ aad_prefix_, store_aad_prefix_in_file_, encrypted_columns_));
446
+ }
447
+
448
+ private:
449
+ ParquetCipher::type parquet_cipher_;
450
+ bool encrypted_footer_;
451
+ std::string footer_key_;
452
+ std::string footer_key_metadata_;
453
+
454
+ std::string aad_prefix_;
455
+ bool store_aad_prefix_in_file_;
456
+ ColumnPathToEncryptionPropertiesMap encrypted_columns_;
457
+ };
458
+ bool encrypted_footer() const { return encrypted_footer_; }
459
+
460
+ EncryptionAlgorithm algorithm() const { return algorithm_; }
461
+
462
+ std::string footer_key() const { return footer_key_; }
463
+
464
+ std::string footer_key_metadata() const { return footer_key_metadata_; }
465
+
466
+ std::string file_aad() const { return file_aad_; }
467
+
468
+ std::shared_ptr<ColumnEncryptionProperties> column_encryption_properties(
469
+ const std::string& column_path);
470
+
471
+ bool is_utilized() const { return utilized_; }
472
+
473
+ /// FileEncryptionProperties object can be used for writing one file only.
474
+ /// Mark FileEncryptionProperties as utilized once it is used to write a file as the
475
+ /// encryption keys will be wiped out upon completion of file writing.
476
+ void set_utilized() { utilized_ = true; }
477
+
478
+ /// Upon completion of file writing, the encryption keys
479
+ /// will be wiped out (array values set to 0).
480
+ void WipeOutEncryptionKeys();
481
+
482
+ /// FileEncryptionProperties object can be used for writing one file only.
483
+ /// (at the end, keys are wiped out in the memory).
484
+ /// This method allows cloning identical properties for another file,
485
+ /// with an option to update the aadPrefix (if newAadPrefix is null,
486
+ /// aadPrefix will be cloned too)
487
+ std::shared_ptr<FileEncryptionProperties> DeepClone(std::string new_aad_prefix = "");
488
+
489
+ ColumnPathToEncryptionPropertiesMap encrypted_columns() const {
490
+ return encrypted_columns_;
491
+ }
492
+
493
+ private:
494
+ EncryptionAlgorithm algorithm_;
495
+ std::string footer_key_;
496
+ std::string footer_key_metadata_;
497
+ bool encrypted_footer_;
498
+ std::string file_aad_;
499
+ std::string aad_prefix_;
500
+ bool utilized_;
501
+ bool store_aad_prefix_in_file_;
502
+ ColumnPathToEncryptionPropertiesMap encrypted_columns_;
503
+
504
+ FileEncryptionProperties(ParquetCipher::type cipher, const std::string& footer_key,
505
+ const std::string& footer_key_metadata, bool encrypted_footer,
506
+ const std::string& aad_prefix, bool store_aad_prefix_in_file,
507
+ const ColumnPathToEncryptionPropertiesMap& encrypted_columns);
508
+ };
509
+
510
+ } // namespace parquet
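For contrast with the KMS-driven path, the low-level builders above can also be used directly with raw AES keys. A minimal sketch follows; the 16-byte keys, the key IDs, and the column name "secret" are placeholders.

#include <memory>
#include <string>

#include "parquet/encryption/encryption.h"

void BuildPropertiesSketch() {
  const std::string footer_key = "0123456789012345";  // 16-byte example key
  const std::string column_key = "1234567890123450";  // 16-byte example key

  // Encryption side: the column "secret" gets its own key; with an explicit
  // encrypted-column list, columns not listed are written in plaintext.
  parquet::ColumnPathToEncryptionPropertiesMap encrypted_columns;
  parquet::ColumnEncryptionProperties::Builder column_builder("secret");
  encrypted_columns["secret"] = column_builder.key(column_key)->key_id("kc1")->build();

  parquet::FileEncryptionProperties::Builder file_builder(footer_key);
  auto encryption_properties = file_builder.footer_key_id("kf")
                                   ->encrypted_columns(encrypted_columns)
                                   ->build();

  // Decryption side: resolve keys by id through the simple string retriever.
  auto key_retriever = std::make_shared<parquet::StringKeyIdRetriever>();
  key_retriever->PutKey("kf", footer_key);
  key_retriever->PutKey("kc1", column_key);

  parquet::FileDecryptionProperties::Builder decryption_builder;
  auto decryption_properties = decryption_builder.key_retriever(key_retriever)->build();
}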
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_material_store.h ADDED
@@ -0,0 +1,57 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <set>
21
+ #include <string>
22
+ #include <unordered_map>
23
+
24
+ #include "arrow/filesystem/filesystem.h"
25
+ #include "parquet/platform.h"
26
+
27
+ namespace parquet::encryption {
28
+
29
+ /// Stores encryption key material outside the Parquet file, for example in a separate
30
+ /// small file in the same folder. This is important for “key rotation”, when MEKs have to
31
+ /// be changed (if compromised; or periodically, just in case) - without modifying the
32
+ /// Parquet files (often immutable).
33
+ class PARQUET_EXPORT FileKeyMaterialStore {
34
+ public:
35
+ /// Add key material for one encryption key.
36
+ virtual void AddKeyMaterial(std::string key_id_in_file, std::string key_material) = 0;
37
+
38
+ /// Get key material
39
+ virtual std::string GetKeyMaterial(std::string key_id_in_file) = 0;
40
+
41
+ /// After key material was added for all keys in the given Parquet file,
42
+ /// save material in persistent store.
43
+ virtual void SaveMaterial() = 0;
44
+
45
+ /// Remove key material from persistent store. Used in key rotation.
46
+ virtual void RemoveMaterial() = 0;
47
+
48
+ /// Move key material to another store. Used in key rotation.
49
+ virtual void MoveMaterialTo(std::shared_ptr<FileKeyMaterialStore> target_key_store) = 0;
50
+
51
+ /// Returns the Set of all key IDs in this store (for the given Parquet file)
52
+ virtual std::vector<std::string> GetKeyIDSet() = 0;
53
+
54
+ virtual ~FileKeyMaterialStore() {}
55
+ };
56
+
57
+ } // namespace parquet::encryption
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_unwrapper.h ADDED
@@ -0,0 +1,94 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/concurrent_map.h"
21
+
22
+ #include "parquet/encryption/encryption.h"
23
+ #include "parquet/encryption/file_system_key_material_store.h"
24
+ #include "parquet/encryption/key_material.h"
25
+ #include "parquet/encryption/key_toolkit.h"
26
+ #include "parquet/encryption/key_toolkit_internal.h"
27
+ #include "parquet/encryption/kms_client.h"
28
+ #include "parquet/platform.h"
29
+
30
+ namespace parquet::encryption {
31
+
32
+ // This class will retrieve the key from "key metadata", following these steps:
33
+ // 1. Parse "key metadata" (see structure in KeyMetadata class).
34
+ // 2. Retrieve "key material" which can be stored inside or outside "key metadata".
35
+ // 3. Unwrap the "data encryption key" from "key material". There are 2 modes:
36
+ // 3.1. single wrapping: decrypt the wrapped "data encryption key" directly with "master
37
+ // encryption key" 3.2. double wrapping: 2 steps: 3.2.1. "key encryption key" is decrypted
38
+ // with "master encryption key" 3.2.2. "data encryption key" is decrypted with the above
39
+ // "key encryption key"
40
+ class PARQUET_EXPORT FileKeyUnwrapper : public DecryptionKeyRetriever {
41
+ public:
42
+ /// key_toolkit and kms_connection_config are used to get a KmsClient from the cache, or to
44
+ /// create one if it's not in the cache yet. cache_entry_lifetime_seconds is the lifetime
45
+ /// of the KmsClient in the cache.
45
+ /// If the file uses external key material then the Parquet file path and file
46
+ /// system must be specified.
47
+ FileKeyUnwrapper(std::shared_ptr<KeyToolkit> key_toolkit,
48
+ const KmsConnectionConfig& kms_connection_config,
49
+ double cache_lifetime_seconds, const std::string& file_path = "",
50
+ const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);
51
+
52
+ /// Constructor overload that takes a raw pointer to the KeyToolkit
53
+ FileKeyUnwrapper(KeyToolkit* key_toolkit,
54
+ const KmsConnectionConfig& kms_connection_config,
55
+ double cache_lifetime_seconds, const std::string& file_path = "",
56
+ const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);
57
+
58
+ /// Constructor overload that takes a raw pointer to the KeyToolkit and
59
+ /// accepts an existing key_material_store rather than using
60
+ /// the file path and file system to create one when needed.
61
+ FileKeyUnwrapper(KeyToolkit* key_toolkit,
62
+ const KmsConnectionConfig& kms_connection_config,
63
+ double cache_lifetime_seconds,
64
+ std::shared_ptr<FileKeyMaterialStore> key_material_store);
65
+
66
+ /// Get the data key from key metadata
67
+ std::string GetKey(const std::string& key_metadata) override;
68
+
69
+ /// Get the data key along with the master key id from key material
70
+ KeyWithMasterId GetDataEncryptionKey(const KeyMaterial& key_material);
71
+
72
+ private:
73
+ FileKeyUnwrapper(std::shared_ptr<KeyToolkit> key_toolkit_owner, KeyToolkit* key_toolkit,
74
+ const KmsConnectionConfig& kms_connection_config,
75
+ double cache_lifetime_seconds,
76
+ std::shared_ptr<FileKeyMaterialStore> key_material_store,
77
+ const std::string& file_path,
78
+ const std::shared_ptr<::arrow::fs::FileSystem>& file_system);
79
+
80
+ std::shared_ptr<KmsClient> GetKmsClientFromConfigOrKeyMaterial(
81
+ const KeyMaterial& key_material);
82
+
83
+ /// A map of Key Encryption Key (KEK) ID -> KEK bytes, for the current token
84
+ std::shared_ptr<::arrow::util::ConcurrentMap<std::string, std::string>> kek_per_kek_id_;
85
+ std::shared_ptr<KeyToolkit> key_toolkit_owner_;
86
+ KeyToolkit* key_toolkit_;
87
+ KmsConnectionConfig kms_connection_config_;
88
+ const double cache_entry_lifetime_seconds_;
89
+ std::shared_ptr<FileKeyMaterialStore> key_material_store_;
90
+ const std::string file_path_;
91
+ std::shared_ptr<::arrow::fs::FileSystem> file_system_;
92
+ };
93
+
94
+ } // namespace parquet::encryption
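Because FileKeyUnwrapper implements DecryptionKeyRetriever, it can be handed to FileDecryptionProperties::Builder::key_retriever(). Below is a hedged sketch of constructing it from the first overload above; the KeyToolkit and KMS connection configuration are assumed to be set up elsewhere, and the 600-second lifetime simply mirrors the default used in this module.

#include <memory>
#include <utility>

#include "parquet/encryption/file_key_unwrapper.h"

std::shared_ptr<parquet::DecryptionKeyRetriever> MakeKeyRetriever(
    std::shared_ptr<parquet::encryption::KeyToolkit> key_toolkit,
    const parquet::encryption::KmsConnectionConfig& kms_connection_config) {
  // Internal key material: no file path or file system is needed.
  return std::make_shared<parquet::encryption::FileKeyUnwrapper>(
      std::move(key_toolkit), kms_connection_config,
      /*cache_lifetime_seconds=*/600.0);
}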
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_wrapper.h ADDED
@@ -0,0 +1,84 @@
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+ #include <unordered_map>
23
+
24
+ #include "arrow/util/concurrent_map.h"
25
+
26
+ #include "parquet/encryption/file_key_material_store.h"
27
+ #include "parquet/encryption/key_encryption_key.h"
28
+ #include "parquet/encryption/key_toolkit.h"
29
+ #include "parquet/encryption/kms_client.h"
30
+ #include "parquet/platform.h"
31
+
32
+ namespace parquet::encryption {
33
+
34
+ // This class will generate "key metadata" from "data encryption key" and "master key",
35
+ // following these steps:
36
+ // 1. Wrap "data encryption key". There are 2 modes:
37
+ // 1.1. single wrapping: encrypt "data encryption key" directly with "master encryption
38
+ // key"
39
+ // 1.2. double wrapping: 2 steps:
40
+ // 1.2.1. "key encryption key" is randomized (see KeyEncryptionKey class)
41
+ // 1.2.2. "data encryption key" is encrypted with the above "key encryption key"
42
+ // 2. Create "key material" (see structure in KeyMaterial class)
43
+ // 3. Create "key metadata" with "key material" inside or a reference to outside "key
44
+ // material" (see structure in KeyMetadata class).
45
+ class PARQUET_EXPORT FileKeyWrapper {
46
+ public:
47
+ static constexpr int kKeyEncryptionKeyLength = 16;
48
+ static constexpr int kKeyEncryptionKeyIdLength = 16;
49
+
50
+ /// key_toolkit and kms_connection_config are used to get a KmsClient from the cache, or to
51
+ /// create one if it's not in the cache yet. cache_entry_lifetime_seconds is the lifetime
52
+ /// of the KmsClient in the cache. key_material_store stores "key material" outside the
53
+ /// parquet file; it is NULL if "key material" is stored inside the parquet file.
54
+ FileKeyWrapper(KeyToolkit* key_toolkit,
55
+ const KmsConnectionConfig& kms_connection_config,
56
+ std::shared_ptr<FileKeyMaterialStore> key_material_store,
57
+ double cache_entry_lifetime_seconds, bool double_wrapping);
58
+
59
+ /// Creates key_metadata field for a given data key, via wrapping the key with the
60
+ /// master key.
61
+ /// When external key material is used, an identifier is usually generated automatically
62
+ /// but may be specified explicitly to support key rotation,
63
+ /// which requires keeping the same identifiers.
64
+ std::string GetEncryptionKeyMetadata(const std::string& data_key,
65
+ const std::string& master_key_id,
66
+ bool is_footer_key,
67
+ std::string key_id_in_file = "");
68
+
69
+ private:
70
+ KeyEncryptionKey CreateKeyEncryptionKey(const std::string& master_key_id);
71
+
72
+ /// A map of Master Encryption Key ID -> KeyEncryptionKey, for the current token
73
+ std::shared_ptr<::arrow::util::ConcurrentMap<std::string, KeyEncryptionKey>>
74
+ kek_per_master_key_id_;
75
+
76
+ std::shared_ptr<KmsClient> kms_client_;
77
+ KmsConnectionConfig kms_connection_config_;
78
+ std::shared_ptr<FileKeyMaterialStore> key_material_store_;
79
+ const double cache_entry_lifetime_seconds_;
80
+ const bool double_wrapping_;
81
+ uint16_t key_counter_;
82
+ };
83
+
84
+ } // namespace parquet::encryption
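A hedged sketch of the wrapper above producing key metadata for a data encryption key. In practice this is driven by the file-encryption path rather than called directly; the master-key id is a placeholder, and the caller is assumed to provide an initialized KeyToolkit and KMS configuration.

#include <string>

#include "parquet/encryption/file_key_wrapper.h"

std::string WrapFooterKeySketch(
    parquet::encryption::KeyToolkit* key_toolkit,
    const parquet::encryption::KmsConnectionConfig& kms_connection_config,
    const std::string& data_encryption_key) {
  // Internal key material (no separate store), double wrapping enabled.
  parquet::encryption::FileKeyWrapper wrapper(key_toolkit, kms_connection_config,
                                              /*key_material_store=*/nullptr,
                                              /*cache_entry_lifetime_seconds=*/600.0,
                                              /*double_wrapping=*/true);
  // Wrap the DEK with the master key and mark the result as footer key metadata.
  return wrapper.GetEncryptionKeyMetadata(data_encryption_key, "footer_master_key_id",
                                          /*is_footer_key=*/true);
}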
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_system_key_material_store.h ADDED
@@ -0,0 +1,89 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <set>
21
+ #include <string>
22
+ #include <unordered_map>
23
+
24
+ #include "arrow/filesystem/filesystem.h"
25
+
26
+ #include "parquet/encryption/file_key_material_store.h"
27
+
28
+ namespace parquet::encryption {
29
+
30
+ /// A FileKeyMaterialStore that stores key material in a file system file in the same
31
+ /// folder as the Parquet file.
32
+ class PARQUET_EXPORT FileSystemKeyMaterialStore : public FileKeyMaterialStore {
33
+ public:
34
+ static constexpr const char kKeyMaterialFilePrefix[] = "_KEY_MATERIAL_FOR_";
35
+ static constexpr const char kTempFilePrefix[] = "_TMP";
36
+ static constexpr const char kKeyMaterialFileSuffix[] = ".json";
37
+
38
+ FileSystemKeyMaterialStore() {}
39
+ FileSystemKeyMaterialStore(const std::string& key_material_file_path,
40
+ const std::shared_ptr<::arrow::fs::FileSystem>& file_system);
41
+
42
+ /// Creates a new file system key material store for a parquet file.
43
+ /// When use_tmp_prefix is true, files are saved with an extra _TMP prefix so they don't
44
+ /// conflict with existing external material files. This is useful during key rotation
45
+ /// so that temporary key material files can be created while using the existing key
46
+ /// material, before moving the key material to the non-temporary location.
47
+ static std::shared_ptr<FileSystemKeyMaterialStore> Make(
48
+ const std::string& parquet_file_path,
49
+ const std::shared_ptr<::arrow::fs::FileSystem>& file_system, bool use_tmp_prefix);
50
+
51
+ /// Add key material for one encryption key.
52
+ void AddKeyMaterial(std::string key_id_in_file, std::string key_material) {
53
+ key_material_map_.insert({key_id_in_file, key_material});
54
+ }
55
+
56
+ /// Get key material
57
+ std::string GetKeyMaterial(std::string key_id_in_file) {
58
+ if (key_material_map_.empty()) {
59
+ LoadKeyMaterialMap();
60
+ }
61
+ auto found = key_material_map_.find(key_id_in_file);
62
+ return found->second;
63
+ }
64
+
65
+ /// After key material was added for all keys in the given Parquet file,
66
+ /// save material in persistent store.
67
+ void SaveMaterial();
68
+
69
+ /// Remove key material from persistent store. Used in key rotation.
70
+ void RemoveMaterial();
71
+
72
+ /// Move key material to another store. Used in key rotation.
73
+ void MoveMaterialTo(std::shared_ptr<FileKeyMaterialStore> target_key_store);
74
+
75
+ /// Returns the Set of all key IDs in this store (for the given Parquet file)
76
+ std::vector<std::string> GetKeyIDSet();
77
+
78
+ private:
79
+ std::string GetStorageFilePath() { return key_material_file_path_; }
80
+
81
+ std::string BuildKeyMaterialMapJson();
82
+ void LoadKeyMaterialMap();
83
+ std::string key_material_file_path_;
84
+ std::shared_ptr<::arrow::fs::FileSystem> file_system_;
85
+ /// Maps ID of a key in Parquet file and key material
86
+ std::unordered_map<std::string, std::string> key_material_map_;
87
+ };
88
+
89
+ } // namespace parquet::encryption
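
A minimal sketch of using FileSystemKeyMaterialStore for externally stored key material; the local filesystem, file path and JSON payload are placeholders, not part of this commit:

#include <memory>
#include <string>

#include "arrow/filesystem/localfs.h"
#include "parquet/encryption/file_system_key_material_store.h"

// Sketch: write external key material next to a (hypothetical) Parquet file.
void StoreKeyMaterialExample() {
  auto fs = std::make_shared<arrow::fs::LocalFileSystem>();
  auto store = parquet::encryption::FileSystemKeyMaterialStore::Make(
      "/data/table/part-0.parquet", fs, /*use_tmp_prefix=*/false);
  // "footerKey" follows the KeyMaterial::kFooterKeyIdInFile convention shown below.
  store->AddKeyMaterial("footerKey", "{...serialized key material json...}");
  // Persists the key material JSON in the same folder as the Parquet file,
  // named per the prefix/suffix constants above.
  store->SaveMaterial();
}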
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_encryption_key.h ADDED
@@ -0,0 +1,57 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+ #include <vector>
+
+ #include "arrow/util/base64.h"
+
+ namespace parquet::encryption {
+
+ // In the double wrapping mode, each "data encryption key" (DEK) is encrypted with a "key
+ // encryption key" (KEK), that in turn is encrypted with a "master encryption key" (MEK).
+ // In a writer process, a random KEK is generated for each MEK ID, and cached in a
+ // <MEK-ID : KEK> map. This makes it possible to interact with a KMS server only once for
+ // each MEK, in order to wrap its KEK. "Data encryption key" (DEK) wrapping is performed
+ // locally, and does not involve an interaction with a KMS server.
+ class KeyEncryptionKey {
+  public:
+   KeyEncryptionKey(std::string kek_bytes, std::string kek_id,
+                    std::string encoded_wrapped_kek)
+       : kek_bytes_(std::move(kek_bytes)),
+         kek_id_(std::move(kek_id)),
+         encoded_kek_id_(::arrow::util::base64_encode(kek_id_)),
+         encoded_wrapped_kek_(std::move(encoded_wrapped_kek)) {}
+
+   const std::string& kek_bytes() const { return kek_bytes_; }
+
+   const std::string& kek_id() const { return kek_id_; }
+
+   const std::string& encoded_kek_id() const { return encoded_kek_id_; }
+
+   const std::string& encoded_wrapped_kek() const { return encoded_wrapped_kek_; }
+
+  private:
+   std::string kek_bytes_;
+   std::string kek_id_;
+   std::string encoded_kek_id_;
+   std::string encoded_wrapped_kek_;
+ };
+
+ } // namespace parquet::encryption
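
A small sketch of the per-master-key KEK cache described in the comment above; all byte strings are dummy placeholders:

#include <string>
#include <unordered_map>

#include "parquet/encryption/key_encryption_key.h"

// Sketch: one randomly generated KEK per master key ID, wrapped once via the KMS.
void KekCacheExample() {
  std::unordered_map<std::string, parquet::encryption::KeyEncryptionKey>
      kek_per_master_key_id;
  kek_per_master_key_id.emplace(
      "master-key-1",
      parquet::encryption::KeyEncryptionKey(
          /*kek_bytes=*/"16-random-bytes!", /*kek_id=*/"random-kek-id",
          /*encoded_wrapped_kek=*/"base64-wrapped-kek-from-kms"));
  // Subsequent DEKs for "master-key-1" reuse the cached KEK; no further KMS calls needed.
}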
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_material.h ADDED
@@ -0,0 +1,129 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <string>
+
+ #include "parquet/platform.h"
+
+ namespace arrow {
+ namespace json {
+ namespace internal {
+ class ObjectParser;
+ } // namespace internal
+ } // namespace json
+ } // namespace arrow
+
+ namespace parquet::encryption {
+
+ // KeyMaterial class represents the "key material", keeping the information that allows
+ // readers to recover an encryption key (see description of the KeyMetadata class). The
+ // keytools package (PARQUET-1373) implements the "envelope encryption" pattern, in a
+ // "single wrapping" or "double wrapping" mode. In the single wrapping mode, the key
+ // material is generated by encrypting the "data encryption key" (DEK) by a "master key".
+ // In the double wrapping mode, the key material is generated by encrypting the DEK by a
+ // "key encryption key" (KEK), that in turn is encrypted by a "master key".
+ //
+ // Key material is kept in a flat json object, with the following fields:
+ // 1. "keyMaterialType" - a String, with the type of key material. In the current
+ //    version, only one value is allowed - "PKMT1" (stands for "parquet key management
+ //    tools, version 1"). For external key material storage, this field is written in
+ //    both "key metadata" and "key material" jsons. For internal key material storage,
+ //    this field is written only once in the common json.
+ // 2. "isFooterKey" - a boolean. If true, means that the material belongs to a file
+ //    footer key, and keeps additional information (such as KMS instance ID and URL).
+ //    If false, means that the material belongs to a column key.
+ // 3. "kmsInstanceID" - a String, with the KMS Instance ID. Written only in footer key
+ //    material.
+ // 4. "kmsInstanceURL" - a String, with the KMS Instance URL. Written only in footer key
+ //    material.
+ // 5. "masterKeyID" - a String, with the ID of the master key used to generate the
+ //    material.
+ // 6. "wrappedDEK" - a String, with the wrapped DEK (base64 encoding).
+ // 7. "doubleWrapping" - a boolean. If true, means that the material was generated in
+ //    double wrapping mode. If false - in single wrapping mode.
+ // 8. "keyEncryptionKeyID" - a String, with the ID of the KEK used to generate the
+ //    material. Written only in double wrapping mode.
+ // 9. "wrappedKEK" - a String, with the wrapped KEK (base64 encoding). Written only in
+ //    double wrapping mode.
+ class PARQUET_EXPORT KeyMaterial {
+  public:
+   // these fields are defined in a specification and should never be changed
+   static constexpr const char kKeyMaterialTypeField[] = "keyMaterialType";
+   static constexpr const char kKeyMaterialType1[] = "PKMT1";
+
+   static constexpr const char kFooterKeyIdInFile[] = "footerKey";
+   static constexpr const char kColumnKeyIdInFilePrefix[] = "columnKey";
+
+   static constexpr const char kIsFooterKeyField[] = "isFooterKey";
+   static constexpr const char kDoubleWrappingField[] = "doubleWrapping";
+   static constexpr const char kKmsInstanceIdField[] = "kmsInstanceID";
+   static constexpr const char kKmsInstanceUrlField[] = "kmsInstanceURL";
+   static constexpr const char kMasterKeyIdField[] = "masterKeyID";
+   static constexpr const char kWrappedDataEncryptionKeyField[] = "wrappedDEK";
+   static constexpr const char kKeyEncryptionKeyIdField[] = "keyEncryptionKeyID";
+   static constexpr const char kWrappedKeyEncryptionKeyField[] = "wrappedKEK";
+
+  public:
+   KeyMaterial() = default;
+
+   static KeyMaterial Parse(const std::string& key_material_string);
+
+   static KeyMaterial Parse(
+       const ::arrow::json::internal::ObjectParser* key_material_json);
+
+   /// This method returns a json string that will be stored either inside a parquet file
+   /// or in a key material store outside the parquet file.
+   static std::string SerializeToJson(bool is_footer_key,
+                                      const std::string& kms_instance_id,
+                                      const std::string& kms_instance_url,
+                                      const std::string& master_key_id,
+                                      bool is_double_wrapped, const std::string& kek_id,
+                                      const std::string& encoded_wrapped_kek,
+                                      const std::string& encoded_wrapped_dek,
+                                      bool is_internal_storage);
+
+   bool is_footer_key() const { return is_footer_key_; }
+   bool is_double_wrapped() const { return is_double_wrapped_; }
+   const std::string& master_key_id() const { return master_key_id_; }
+   const std::string& wrapped_dek() const { return encoded_wrapped_dek_; }
+   const std::string& kek_id() const { return kek_id_; }
+   const std::string& wrapped_kek() const { return encoded_wrapped_kek_; }
+   const std::string& kms_instance_id() const { return kms_instance_id_; }
+   const std::string& kms_instance_url() const { return kms_instance_url_; }
+
+  private:
+   KeyMaterial(bool is_footer_key, const std::string& kms_instance_id,
+               const std::string& kms_instance_url, const std::string& master_key_id,
+               bool is_double_wrapped, const std::string& kek_id,
+               const std::string& encoded_wrapped_kek,
+               const std::string& encoded_wrapped_dek);
+
+   bool is_footer_key_;
+   std::string kms_instance_id_;
+   std::string kms_instance_url_;
+   std::string master_key_id_;
+   bool is_double_wrapped_;
+   std::string kek_id_;
+   std::string encoded_wrapped_kek_;
+   std::string encoded_wrapped_dek_;
+ };
+
+ } // namespace parquet::encryption
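
A sketch of producing the key material JSON described above via SerializeToJson; all IDs and wrapped keys are dummy values used only to show the call shape:

#include <string>

#include "parquet/encryption/key_material.h"

// Sketch: build footer-key material for internal storage (embedded in "key metadata").
std::string BuildFooterKeyMaterialJson() {
  return parquet::encryption::KeyMaterial::SerializeToJson(
      /*is_footer_key=*/true,
      /*kms_instance_id=*/"DEFAULT",
      /*kms_instance_url=*/"DEFAULT",
      /*master_key_id=*/"master-key-1",
      /*is_double_wrapped=*/true,
      /*kek_id=*/"base64-kek-id",
      /*encoded_wrapped_kek=*/"base64-wrapped-kek",
      /*encoded_wrapped_dek=*/"base64-wrapped-dek",
      /*is_internal_storage=*/true);
}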
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_metadata.h ADDED
@@ -0,0 +1,91 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <string>
+ #include <variant>
+
+ #include "parquet/encryption/key_material.h"
+ #include "parquet/exception.h"
+ #include "parquet/platform.h"
+
+ namespace parquet::encryption {
+
+ // Parquet encryption specification defines "key metadata" as an arbitrary byte array,
+ // generated by file writers for each encryption key, and passed to the low level API for
+ // storage in the file footer. The "key metadata" field is made available to file readers
+ // to enable recovery of the key. This interface can be utilized for implementation
+ // of any key management scheme.
+ //
+ // The keytools package (PARQUET-1373) implements one approach, of many possible, to key
+ // management and to generation of the "key metadata" fields. This approach, based on the
+ // "envelope encryption" pattern, allows integration with KMS servers. It keeps the actual
+ // material, required to recover a key, in a "key material" object (see the KeyMaterial
+ // class for details). This class is implemented to support version 1 of the parquet key
+ // management tools specification.
+ //
+ // KeyMetadata writes (and reads) the "key metadata" field as a flat json object,
+ // with the following fields:
+ // 1. "keyMaterialType" - a String, with the type of key material.
+ // 2. "internalStorage" - a boolean. If true, means that "key material" is kept inside the
+ //    "key metadata" field. If false, "key material" is kept externally (outside Parquet
+ //    files) - in this case, "key metadata" keeps a reference to the external "key
+ //    material".
+ // 3. "keyReference" - a String, with the reference to the external "key material".
+ //    Written only if internalStorage is false.
+ //
+ // If internalStorage is true, "key material" is a part of "key metadata", and the json
+ // keeps additional fields, described in the KeyMaterial class.
+ class PARQUET_EXPORT KeyMetadata {
+  public:
+   static constexpr const char kKeyMaterialInternalStorageField[] = "internalStorage";
+   static constexpr const char kKeyReferenceField[] = "keyReference";
+
+   /// key_metadata_bytes is the key metadata field stored in the parquet file,
+   /// in the serialized json object format.
+   static KeyMetadata Parse(const std::string& key_metadata_bytes);
+
+   static std::string CreateSerializedForExternalMaterial(
+       const std::string& key_reference);
+
+   bool key_material_stored_internally() const { return is_internal_storage_; }
+
+   const KeyMaterial& key_material() const {
+     if (!is_internal_storage_) {
+       throw ParquetException("key material is stored externally.");
+     }
+     return ::std::get<KeyMaterial>(key_material_or_reference_);
+   }
+
+   const std::string& key_reference() const {
+     if (is_internal_storage_) {
+       throw ParquetException("key material is stored internally.");
+     }
+     return ::std::get<std::string>(key_material_or_reference_);
+   }
+
+  private:
+   explicit KeyMetadata(const KeyMaterial& key_material);
+   explicit KeyMetadata(const std::string& key_reference);
+
+   bool is_internal_storage_;
+   /// If is_internal_storage_ is true, a KeyMaterial is set;
+   /// otherwise a string referencing an outside "key material" is set.
+   ::std::variant<KeyMaterial, std::string> key_material_or_reference_;
+ };
+
+ } // namespace parquet::encryption
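
A reader-side sketch that distinguishes internal from external key material storage, following the JSON layout described above; the function name and return strings are illustrative:

#include <string>

#include "parquet/encryption/key_metadata.h"

// Sketch: inspect parsed key metadata and report where the key material lives.
std::string DescribeKeyMetadata(const std::string& key_metadata_bytes) {
  auto key_metadata = parquet::encryption::KeyMetadata::Parse(key_metadata_bytes);
  if (key_metadata.key_material_stored_internally()) {
    // Material is embedded in the footer; its master key ID is directly available.
    return "internal material, master key: " +
           key_metadata.key_material().master_key_id();
  }
  // Otherwise the footer only stores a reference into an external key material file.
  return "external material, reference: " + key_metadata.key_reference();
}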
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_toolkit.h ADDED
@@ -0,0 +1,106 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+
+ #include "parquet/encryption/key_encryption_key.h"
+ #include "parquet/encryption/kms_client.h"
+ #include "parquet/encryption/kms_client_factory.h"
+ #include "parquet/encryption/two_level_cache_with_expiration.h"
+ #include "parquet/platform.h"
+
+ namespace parquet::encryption {
+
+ static constexpr uint64_t kCacheCleanPeriodForKeyRotation = 60 * 60;  // 1 hour
+
+ // KeyToolkit is a utility that keeps various tools for key management (such as key
+ // rotation, kms client instantiation, cache control, etc), plus a number of auxiliary
+ // classes for internal use.
+ class PARQUET_EXPORT KeyToolkit {
+  public:
+   KeyToolkit() { last_cache_clean_for_key_rotation_time_ = {}; }
+
+   /// KMS client two level cache: token -> KMSInstanceId -> KmsClient
+   TwoLevelCacheWithExpiration<std::shared_ptr<KmsClient>>& kms_client_cache_per_token() {
+     return kms_client_cache_;
+   }
+   /// Key encryption key two level cache for wrapping: token -> MasterEncryptionKeyId ->
+   /// KeyEncryptionKey
+   TwoLevelCacheWithExpiration<KeyEncryptionKey>& kek_write_cache_per_token() {
+     return key_encryption_key_write_cache_;
+   }
+
+   /// Key encryption key two level cache for unwrapping: token -> KeyEncryptionKeyId ->
+   /// KeyEncryptionKeyBytes
+   TwoLevelCacheWithExpiration<std::string>& kek_read_cache_per_token() {
+     return key_encryption_key_read_cache_;
+   }
+
+   std::shared_ptr<KmsClient> GetKmsClient(
+       const KmsConnectionConfig& kms_connection_config, double cache_entry_lifetime_ms);
+
+   /// Flush any caches that are tied to the (compromised) access_token
+   void RemoveCacheEntriesForToken(const std::string& access_token);
+
+   void RemoveCacheEntriesForAllTokens();
+
+   void RegisterKmsClientFactory(std::shared_ptr<KmsClientFactory> kms_client_factory) {
+     if (kms_client_factory_ != NULLPTR) {
+       throw ParquetException("KMS client factory has already been registered.");
+     }
+     kms_client_factory_ = std::move(kms_client_factory);
+   }
+
+   /// Key rotation. In the single wrapping mode, decrypts data keys with old master keys,
+   /// then encrypts them with new master keys. In the double wrapping mode, decrypts KEKs
+   /// (key encryption keys) with old master keys, generates new KEKs and encrypts them
+   /// with new master keys. Works only if key material is not stored internally in file
+   /// footers. Not supported in local key wrapping mode. This method can be run by
+   /// multiple threads, but each thread must work on different files.
+   void RotateMasterKeys(const KmsConnectionConfig& kms_connection_config,
+                         const std::string& parquet_file_path,
+                         const std::shared_ptr<::arrow::fs::FileSystem>& file_system,
+                         bool double_wrapping, double cache_lifetime_seconds);
+
+  private:
+   TwoLevelCacheWithExpiration<std::shared_ptr<KmsClient>> kms_client_cache_;
+   TwoLevelCacheWithExpiration<KeyEncryptionKey> key_encryption_key_write_cache_;
+   TwoLevelCacheWithExpiration<std::string> key_encryption_key_read_cache_;
+   std::shared_ptr<KmsClientFactory> kms_client_factory_;
+   mutable ::arrow::util::Mutex last_cache_clean_for_key_rotation_time_mutex_;
+   internal::TimePoint last_cache_clean_for_key_rotation_time_;
+ };
+
+ // "data encryption key" and "master key identifier" are paired together as output when
+ // parsing from "key material"
+ class PARQUET_EXPORT KeyWithMasterId {
+  public:
+   KeyWithMasterId(std::string key_bytes, std::string master_id)
+       : key_bytes_(std::move(key_bytes)), master_id_(std::move(master_id)) {}
+
+   const std::string& data_key() const { return key_bytes_; }
+   const std::string& master_id() const { return master_id_; }
+
+  private:
+   const std::string key_bytes_;
+   const std::string master_id_;
+ };
+
+ } // namespace parquet::encryption
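
A sketch of wiring a KeyToolkit to a KMS: register a client factory once, then fetch cached clients per connection config. The toolkit, config and factory arguments are placeholders for objects set up elsewhere:

#include <memory>

#include "parquet/encryption/key_toolkit.h"

// Sketch: register a factory once, then obtain (cached) KMS clients from the toolkit.
std::shared_ptr<parquet::encryption::KmsClient> GetKmsClientExample(
    parquet::encryption::KeyToolkit* toolkit,
    const parquet::encryption::KmsConnectionConfig& config,
    std::shared_ptr<parquet::encryption::KmsClientFactory> factory) {
  // Throws if a factory has already been registered on this toolkit.
  toolkit->RegisterKmsClientFactory(std::move(factory));
  // Clients are cached per access token and KMS instance ID; note the lifetime
  // parameter is named in milliseconds in the declaration above.
  return toolkit->GetKmsClient(config, /*cache_entry_lifetime_ms=*/600000.0);
}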
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client.h ADDED
@@ -0,0 +1,93 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+ #include <unordered_map>
+
+ #include "arrow/util/mutex.h"
+
+ #include "parquet/exception.h"
+ #include "parquet/platform.h"
+
+ namespace parquet::encryption {
+
+ /// This class wraps the key access token of a KMS server. If your token changes over
+ /// time, you should keep the reference to the KeyAccessToken object and call the
+ /// Refresh() method every time you have a new token.
+ class PARQUET_EXPORT KeyAccessToken {
+  public:
+   KeyAccessToken() = default;
+
+   explicit KeyAccessToken(const std::string value) : value_(value) {}
+
+   void Refresh(const std::string& new_value) {
+     auto lock = mutex_.Lock();
+     value_ = new_value;
+   }
+
+   const std::string& value() const {
+     auto lock = mutex_.Lock();
+     return value_;
+   }
+
+  private:
+   std::string value_;
+   mutable ::arrow::util::Mutex mutex_;
+ };
+
+ struct PARQUET_EXPORT KmsConnectionConfig {
+   std::string kms_instance_id;
+   std::string kms_instance_url;
+   /// If the access token is changed in the future, you should keep a reference to
+   /// this object and call Refresh() on it whenever there is a new access token.
+   std::shared_ptr<KeyAccessToken> refreshable_key_access_token;
+   std::unordered_map<std::string, std::string> custom_kms_conf;
+
+   KmsConnectionConfig();
+
+   const std::string& key_access_token() const {
+     if (refreshable_key_access_token == NULLPTR ||
+         refreshable_key_access_token->value().empty()) {
+       throw ParquetException("key access token is not set!");
+     }
+     return refreshable_key_access_token->value();
+   }
+
+   void SetDefaultIfEmpty();
+ };
+
+ class PARQUET_EXPORT KmsClient {
+  public:
+   static constexpr const char kKmsInstanceIdDefault[] = "DEFAULT";
+   static constexpr const char kKmsInstanceUrlDefault[] = "DEFAULT";
+   static constexpr const char kKeyAccessTokenDefault[] = "DEFAULT";
+
+   /// Wraps a key - encrypts it with the master key, encodes the result
+   /// and potentially adds KMS-specific metadata.
+   virtual std::string WrapKey(const std::string& key_bytes,
+                               const std::string& master_key_identifier) = 0;
+
+   /// Decrypts (unwraps) a key with the master key.
+   virtual std::string UnwrapKey(const std::string& wrapped_key,
+                                 const std::string& master_key_identifier) = 0;
+   virtual ~KmsClient() {}
+ };
+
+ } // namespace parquet::encryption
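
A minimal sketch of a KmsClient subclass implementing the two pure virtual methods; the prefix-based "wrapping" is a placeholder for a real KMS call, not a secure scheme:

#include <string>

#include "parquet/encryption/kms_client.h"

// Sketch: a toy client that "wraps" by prefixing the master key ID. A real client would
// call the KMS wrap/unwrap endpoints for master_key_identifier instead.
class ToyKmsClient : public parquet::encryption::KmsClient {
 public:
  std::string WrapKey(const std::string& key_bytes,
                      const std::string& master_key_identifier) override {
    return master_key_identifier + ":" + key_bytes;
  }

  std::string UnwrapKey(const std::string& wrapped_key,
                        const std::string& master_key_identifier) override {
    // Strip the "<master key id>:" prefix added by WrapKey above.
    return wrapped_key.substr(master_key_identifier.size() + 1);
  }
};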
llmeval-env/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/kms_client_factory.h ADDED
@@ -0,0 +1,38 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include "parquet/encryption/kms_client.h"
+ #include "parquet/platform.h"
+
+ namespace parquet::encryption {
+
+ class PARQUET_EXPORT KmsClientFactory {
+  public:
+   explicit KmsClientFactory(bool wrap_locally = false) : wrap_locally_(wrap_locally) {}
+
+   virtual ~KmsClientFactory() = default;
+
+   virtual std::shared_ptr<KmsClient> CreateKmsClient(
+       const KmsConnectionConfig& kms_connection_config) = 0;
+
+  protected:
+   bool wrap_locally_;
+ };
+
+ } // namespace parquet::encryption
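
A matching factory sketch; ToyKmsClient refers to the placeholder client sketched after kms_client.h above, and would need to be visible here:

#include <memory>

#include "parquet/encryption/kms_client_factory.h"

// Sketch: a factory returning the toy client; a KeyToolkit calls CreateKmsClient lazily,
// once per access token and KMS instance, and caches the result.
class ToyKmsClientFactory : public parquet::encryption::KmsClientFactory {
 public:
  using KmsClientFactory::KmsClientFactory;

  std::shared_ptr<parquet::encryption::KmsClient> CreateKmsClient(
      const parquet::encryption::KmsConnectionConfig& kms_connection_config) override {
    return std::make_shared<ToyKmsClient>();
  }
};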