applied-ai-018 committed
Commit 6f37534 (verified) · 1 parent: 287adac

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/benchmark_util.h +47 -0
  2. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/column_reader.h +501 -0
  3. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/column_writer.h +307 -0
  4. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_unwrapper.h +80 -0
  5. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_material.h +129 -0
  6. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/two_level_cache_with_expiration.h +157 -0
  7. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/page_index.h +372 -0
  8. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/pch.h +28 -0
  9. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/platform.h +112 -0
  10. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/printer.h +46 -0
  11. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/stream_writer.h +243 -0
  12. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/test_util.h +834 -0
  13. env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/windows_compatibility.h +21 -0
  14. env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/__init__.py +20 -0
  15. env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/buffer.py +107 -0
  17. env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/column.py +529 -0
  18. env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/dataframe.py +217 -0
  19. env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/from_dataframe.py +614 -0
  20. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/CMakeLists.txt +18 -0
  21. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/api.h +30 -0
  22. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.cc +2578 -0
  23. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.h +146 -0
  24. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_python_internal.h +49 -0
  25. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/async.h +60 -0
  26. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.cc +38 -0
  27. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.h +36 -0
  28. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.cc +203 -0
  29. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.h +458 -0
  30. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/csv.h +42 -0
  31. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/datetime.cc +663 -0
  32. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/datetime.h +231 -0
  33. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/decimal.cc +246 -0
  34. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/deserialize.cc +495 -0
  35. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/deserialize.h +106 -0
  36. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/extension_type.cc +217 -0
  37. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/extension_type.h +85 -0
  38. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/filesystem.cc +206 -0
  39. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/flight.cc +388 -0
  40. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/flight.h +350 -0
  41. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/gdb.cc +530 -0
  42. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/helpers.cc +470 -0
  43. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/helpers.h +159 -0
  44. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/inference.cc +745 -0
  45. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/inference.h +64 -0
  46. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/init.cc +24 -0
  47. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/init.h +26 -0
  48. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/io.cc +384 -0
  49. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/io.h +121 -0
  50. env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/ipc.cc +67 -0
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/benchmark_util.h ADDED
@@ -0,0 +1,47 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <random>
+ #include <string>
+ #include <vector>
+
+ #include "parquet/types.h"
+
+ namespace parquet::benchmark {
+
+ template <typename T>
+ void GenerateBenchmarkData(uint32_t size, uint32_t seed, T* data,
+ std::vector<uint8_t>* heap, uint32_t data_string_length);
+
+ #define _GENERATE_BENCHMARK_DATA_DECL(KLASS) \
+ template <> \
+ void GenerateBenchmarkData(uint32_t size, uint32_t seed, KLASS* data, \
+ std::vector<uint8_t>* heap, uint32_t data_string_length);
+
+ _GENERATE_BENCHMARK_DATA_DECL(int32_t)
+ _GENERATE_BENCHMARK_DATA_DECL(int64_t)
+ _GENERATE_BENCHMARK_DATA_DECL(float)
+ _GENERATE_BENCHMARK_DATA_DECL(double)
+ _GENERATE_BENCHMARK_DATA_DECL(ByteArray)
+ _GENERATE_BENCHMARK_DATA_DECL(FLBA)
+ _GENERATE_BENCHMARK_DATA_DECL(Int96)
+
+ #undef _GENERATE_BENCHMARK_DATA_DECL
+
+ } // namespace parquet::benchmark
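
Usage sketch (not part of the diff): the header only declares explicit specializations of GenerateBenchmarkData; the definitions live in the library's benchmark_util.cc. A minimal caller might look like this, with buffer size, seed, and string length chosen arbitrarily:

#include <cstdint>
#include <vector>

#include "parquet/benchmark_util.h"

void FillRandomInt32() {
  // Buffer size, seed, and string length are arbitrary illustration values.
  std::vector<int32_t> values(1024);
  // `heap` provides backing storage for variable-length types
  // (ByteArray/FLBA); it is presumably untouched for fixed-width types.
  std::vector<uint8_t> heap;
  parquet::benchmark::GenerateBenchmarkData<int32_t>(
      /*size=*/1024, /*seed=*/42, values.data(), &heap,
      /*data_string_length=*/8);
}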
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/column_reader.h ADDED
@@ -0,0 +1,501 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+ #include <memory>
+ #include <utility>
+ #include <vector>
+
+ #include "parquet/exception.h"
+ #include "parquet/level_conversion.h"
+ #include "parquet/metadata.h"
+ #include "parquet/platform.h"
+ #include "parquet/properties.h"
+ #include "parquet/schema.h"
+ #include "parquet/types.h"
+
+ namespace arrow {
+
+ class Array;
+ class ChunkedArray;
+
+ namespace bit_util {
+ class BitReader;
+ } // namespace bit_util
+
+ namespace util {
+ class RleDecoder;
+ } // namespace util
+
+ } // namespace arrow
+
+ namespace parquet {
+
+ class Decryptor;
+ class Page;
+
+ // 16 MB is the default maximum page header size
+ static constexpr uint32_t kDefaultMaxPageHeaderSize = 16 * 1024 * 1024;
+
+ // 16 KB is the default expected page header size
+ static constexpr uint32_t kDefaultPageHeaderSize = 16 * 1024;
+
+ // \brief DataPageStats stores encoded statistics and number of values/rows for
+ // a page.
+ struct PARQUET_EXPORT DataPageStats {
+ DataPageStats(const EncodedStatistics* encoded_statistics, int32_t num_values,
+ std::optional<int32_t> num_rows)
+ : encoded_statistics(encoded_statistics),
+ num_values(num_values),
+ num_rows(num_rows) {}
+
+ // Encoded statistics extracted from the page header.
+ // Nullptr if there are no statistics in the page header.
+ const EncodedStatistics* encoded_statistics;
+ // Number of values stored in the page. Filled for both V1 and V2 data pages.
+ // For repeated fields, this can be greater than number of rows. For
+ // non-repeated fields, this will be the same as the number of rows.
+ int32_t num_values;
+ // Number of rows stored in the page. std::nullopt if not available.
+ std::optional<int32_t> num_rows;
+ };
+
+ class PARQUET_EXPORT LevelDecoder {
+ public:
+ LevelDecoder();
+ ~LevelDecoder();
+
+ // Initialize the LevelDecoder state with new data
+ // and return the number of bytes consumed
+ int SetData(Encoding::type encoding, int16_t max_level, int num_buffered_values,
+ const uint8_t* data, int32_t data_size);
+
+ void SetDataV2(int32_t num_bytes, int16_t max_level, int num_buffered_values,
+ const uint8_t* data);
+
+ // Decodes a batch of levels into an array and returns the number of levels decoded
+ int Decode(int batch_size, int16_t* levels);
+
+ private:
+ int bit_width_;
+ int num_values_remaining_;
+ Encoding::type encoding_;
+ std::unique_ptr<::arrow::util::RleDecoder> rle_decoder_;
+ std::unique_ptr<::arrow::bit_util::BitReader> bit_packed_decoder_;
+ int16_t max_level_;
+ };
+
+ struct CryptoContext {
+ CryptoContext(bool start_with_dictionary_page, int16_t rg_ordinal, int16_t col_ordinal,
+ std::shared_ptr<Decryptor> meta, std::shared_ptr<Decryptor> data)
+ : start_decrypt_with_dictionary_page(start_with_dictionary_page),
+ row_group_ordinal(rg_ordinal),
+ column_ordinal(col_ordinal),
+ meta_decryptor(std::move(meta)),
+ data_decryptor(std::move(data)) {}
+ CryptoContext() {}
+
+ bool start_decrypt_with_dictionary_page = false;
+ int16_t row_group_ordinal = -1;
+ int16_t column_ordinal = -1;
+ std::shared_ptr<Decryptor> meta_decryptor;
+ std::shared_ptr<Decryptor> data_decryptor;
+ };
+
+ // Abstract page iterator interface. This way, we can feed column pages to the
+ // ColumnReader through whatever mechanism we choose
+ class PARQUET_EXPORT PageReader {
+ using DataPageFilter = std::function<bool(const DataPageStats&)>;
+
+ public:
+ virtual ~PageReader() = default;
+
+ static std::unique_ptr<PageReader> Open(
+ std::shared_ptr<ArrowInputStream> stream, int64_t total_num_values,
+ Compression::type codec, bool always_compressed = false,
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
+ const CryptoContext* ctx = NULLPTR);
+ static std::unique_ptr<PageReader> Open(std::shared_ptr<ArrowInputStream> stream,
+ int64_t total_num_values,
+ Compression::type codec,
+ const ReaderProperties& properties,
+ bool always_compressed = false,
+ const CryptoContext* ctx = NULLPTR);
+
+ // If data_page_filter is present (not null), NextPage() will call the
+ // callback function exactly once per page in the order the pages appear in
+ // the column. If the callback function returns true the page will be
+ // skipped. The callback will be called only if the page type is DATA_PAGE or
+ // DATA_PAGE_V2. Dictionary pages will not be skipped.
+ // Caller is responsible for checking that statistics are correct using
+ // ApplicationVersion::HasCorrectStatistics().
+ // \note API EXPERIMENTAL
+ void set_data_page_filter(DataPageFilter data_page_filter) {
+ data_page_filter_ = std::move(data_page_filter);
+ }
+
+ // @returns: shared_ptr<Page>(nullptr) on EOS, std::shared_ptr<Page>
+ // containing new Page otherwise
+ //
+ // The returned Page may contain references that aren't guaranteed to live
+ // beyond the next call to NextPage().
+ virtual std::shared_ptr<Page> NextPage() = 0;
+
+ virtual void set_max_page_header_size(uint32_t size) = 0;
+
+ protected:
+ // Callback that decides if we should skip a page or not.
+ DataPageFilter data_page_filter_;
+ };
+
+ class PARQUET_EXPORT ColumnReader {
+ public:
+ virtual ~ColumnReader() = default;
+
+ static std::shared_ptr<ColumnReader> Make(
+ const ColumnDescriptor* descr, std::unique_ptr<PageReader> pager,
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
+
+ // Returns true if there are still values in this column.
+ virtual bool HasNext() = 0;
+
+ virtual Type::type type() const = 0;
+
+ virtual const ColumnDescriptor* descr() const = 0;
+
+ // Get the encoding that can be exposed by this reader. If it returns
+ // dictionary encoding, then ReadBatchWithDictionary can be used to read data.
+ //
+ // \note API EXPERIMENTAL
+ virtual ExposedEncoding GetExposedEncoding() = 0;
+
+ protected:
+ friend class RowGroupReader;
+ // Set the encoding that can be exposed by this reader.
+ //
+ // \note API EXPERIMENTAL
+ virtual void SetExposedEncoding(ExposedEncoding encoding) = 0;
+ };
+
+ // API to read values from a single column. This is the main client facing API.
+ template <typename DType>
+ class TypedColumnReader : public ColumnReader {
+ public:
+ typedef typename DType::c_type T;
+
+ // Read a batch of repetition levels, definition levels, and values from the
+ // column.
+ //
+ // Since null values are not stored in the values, the number of values read
+ // may be less than the number of repetition and definition levels. With
+ // nested data this is almost certainly true.
+ //
+ // Set def_levels or rep_levels to nullptr if you want to skip reading them.
+ // This is only safe if you know through some other source that there are no
+ // undefined values.
+ //
+ // To fully exhaust a row group, you must read batches until the number of
+ // values read reaches the number of stored values according to the metadata.
+ //
+ // This API is the same for both V1 and V2 of the DataPage
+ //
+ // @returns: actual number of levels read (see values_read for number of values read)
+ virtual int64_t ReadBatch(int64_t batch_size, int16_t* def_levels, int16_t* rep_levels,
+ T* values, int64_t* values_read) = 0;
+
+ /// Read a batch of repetition levels, definition levels, and values from the
+ /// column and leave spaces for null entries on the lowest level in the values
+ /// buffer.
+ ///
+ /// In comparison to ReadBatch the length of repetition and definition levels
+ /// is the same as of the number of values read for max_definition_level == 1.
+ /// In the case of max_definition_level > 1, the repetition and definition
+ /// levels are larger than the values but the values include the null entries
+ /// with definition_level == (max_definition_level - 1).
+ ///
+ /// To fully exhaust a row group, you must read batches until the number of
+ /// values read reaches the number of stored values according to the metadata.
+ ///
+ /// @param batch_size the number of levels to read
+ /// @param[out] def_levels The Parquet definition levels, output has
+ /// the length levels_read.
+ /// @param[out] rep_levels The Parquet repetition levels, output has
+ /// the length levels_read.
+ /// @param[out] values The values in the lowest nested level including
+ /// spacing for nulls on the lowest levels; output has the length
+ /// values_read.
+ /// @param[out] valid_bits Memory allocated for a bitmap that indicates if
+ /// the row is null or on the maximum definition level. For performance
+ /// reasons the underlying buffer should be able to store 1 bit more than
+ /// required. If this requires an additional byte, this byte is only read
+ /// but never written to.
+ /// @param valid_bits_offset The offset in bits of the valid_bits where the
+ /// first relevant bit resides.
+ /// @param[out] levels_read The number of repetition/definition levels that were read.
+ /// @param[out] values_read The number of values read, this includes all
+ /// non-null entries as well as all null-entries on the lowest level
+ /// (i.e. definition_level == max_definition_level - 1)
+ /// @param[out] null_count The number of nulls on the lowest levels.
+ /// (i.e. (values_read - null_count) is total number of non-null entries)
+ ///
+ /// \deprecated Since 4.0.0
+ ARROW_DEPRECATED("Doesn't handle nesting correctly and unused outside of unit tests.")
+ virtual int64_t ReadBatchSpaced(int64_t batch_size, int16_t* def_levels,
+ int16_t* rep_levels, T* values, uint8_t* valid_bits,
+ int64_t valid_bits_offset, int64_t* levels_read,
+ int64_t* values_read, int64_t* null_count) = 0;
+
+ // Skip reading values. This method will work for both repeated and
+ // non-repeated fields. Note that this method is skipping values and not
+ // records. This distinction is important for repeated fields, meaning that
+ // we are not skipping over the values to the next record. For example,
+ // consider the following two consecutive records containing one repeated field:
+ // {[1, 2, 3]}, {[4, 5]}. If we Skip(2), our next read value will be 3, which
+ // is inside the first record.
+ // Returns the number of values skipped.
+ virtual int64_t Skip(int64_t num_values_to_skip) = 0;
+
+ // Read a batch of repetition levels, definition levels, and indices from the
+ // column. And read the dictionary if a dictionary page is encountered during
+ // reading pages. This API is similar to ReadBatch(), with ability to read
+ // dictionary and indices. It is only valid to call this method when the reader can
+ // expose dictionary encoding. (i.e., the reader's GetExposedEncoding() returns
+ // DICTIONARY).
+ //
+ // The dictionary is read along with the data page. When there's no data page,
+ // the dictionary won't be returned.
+ //
+ // @param batch_size The batch size to read
+ // @param[out] def_levels The Parquet definition levels.
+ // @param[out] rep_levels The Parquet repetition levels.
+ // @param[out] indices The dictionary indices.
+ // @param[out] indices_read The number of indices read.
+ // @param[out] dict The pointer to dictionary values. It will return nullptr if
+ // there's no data page. Each column chunk only has one dictionary page. The dictionary
+ // is owned by the reader, so the caller is responsible for copying the dictionary
+ // values before the reader gets destroyed.
+ // @param[out] dict_len The dictionary length. It will return 0 if there's no data
+ // page.
+ // @returns: actual number of levels read (see indices_read for number of
+ // indices read)
+ //
+ // \note API EXPERIMENTAL
+ virtual int64_t ReadBatchWithDictionary(int64_t batch_size, int16_t* def_levels,
+ int16_t* rep_levels, int32_t* indices,
+ int64_t* indices_read, const T** dict,
+ int32_t* dict_len) = 0;
+ };
+
+ namespace internal {
+
+ /// \brief Stateful column reader that delimits semantic records for both flat
+ /// and nested columns
+ ///
+ /// \note API EXPERIMENTAL
+ /// \since 1.3.0
+ class PARQUET_EXPORT RecordReader {
+ public:
+ /// \brief Creates a record reader.
+ /// @param descr Column descriptor
+ /// @param leaf_info Level info, used to determine if a column is nullable or not
+ /// @param pool Memory pool to use for buffering values and rep/def levels
+ /// @param read_dictionary True if reading directly as Arrow dictionary-encoded
+ /// @param read_dense_for_nullable True if reading dense and not leaving space for null
+ /// values
+ static std::shared_ptr<RecordReader> Make(
+ const ColumnDescriptor* descr, LevelInfo leaf_info,
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
+ bool read_dictionary = false, bool read_dense_for_nullable = false);
+
+ virtual ~RecordReader() = default;
+
+ /// \brief Attempt to read indicated number of records from column chunk
+ /// Note that for repeated fields, a record may have more than one value
+ /// and all of them are read. If read_dense_for_nullable() it will
+ /// not leave any space for null values. Otherwise, it will read spaced.
+ /// \return number of records read
+ virtual int64_t ReadRecords(int64_t num_records) = 0;
+
+ /// \brief Attempt to skip indicated number of records from column chunk.
+ /// Note that for repeated fields, a record may have more than one value
+ /// and all of them are skipped.
+ /// \return number of records skipped
+ virtual int64_t SkipRecords(int64_t num_records) = 0;
+
+ /// \brief Pre-allocate space for data. Results in better flat read performance
+ virtual void Reserve(int64_t num_values) = 0;
+
+ /// \brief Clear consumed values and repetition/definition levels as the
+ /// result of calling ReadRecords
+ /// For FLBA and ByteArray types, call GetBuilderChunks() to reset them.
+ virtual void Reset() = 0;
+
+ /// \brief Transfer filled values buffer to caller. A new one will be
+ /// allocated in subsequent ReadRecords calls
+ virtual std::shared_ptr<ResizableBuffer> ReleaseValues() = 0;
+
+ /// \brief Transfer filled validity bitmap buffer to caller. A new one will
+ /// be allocated in subsequent ReadRecords calls
+ virtual std::shared_ptr<ResizableBuffer> ReleaseIsValid() = 0;
+
+ /// \brief Return true if the record reader has more internal data yet to
+ /// process
+ virtual bool HasMoreData() const = 0;
+
+ /// \brief Advance record reader to the next row group. Must be set before
+ /// any records could be read/skipped.
+ /// \param[in] reader obtained from RowGroupReader::GetColumnPageReader
+ virtual void SetPageReader(std::unique_ptr<PageReader> reader) = 0;
+
+ /// \brief Returns the underlying column reader's descriptor.
+ virtual const ColumnDescriptor* descr() const = 0;
+
+ virtual void DebugPrintState() = 0;
+
+ /// \brief Returns the dictionary owned by the current decoder. Throws an
+ /// exception if the current decoder is not for dictionary encoding. The caller is
+ /// responsible for casting the returned pointer to proper type depending on the
+ /// column's physical type. An example:
+ /// const ByteArray* dict = reinterpret_cast<const ByteArray*>(ReadDictionary(&len));
+ /// or:
+ /// const float* dict = reinterpret_cast<const float*>(ReadDictionary(&len));
+ /// \param[out] dictionary_length The number of dictionary entries.
+ virtual const void* ReadDictionary(int32_t* dictionary_length) = 0;
+
+ /// \brief Decoded definition levels
+ int16_t* def_levels() const {
+ return reinterpret_cast<int16_t*>(def_levels_->mutable_data());
+ }
+
+ /// \brief Decoded repetition levels
+ int16_t* rep_levels() const {
+ return reinterpret_cast<int16_t*>(rep_levels_->mutable_data());
+ }
+
+ /// \brief Decoded values, including nulls, if any
+ /// FLBA and ByteArray types do not use this array and read into their own
+ /// builders.
+ uint8_t* values() const { return values_->mutable_data(); }
+
+ /// \brief Number of values written, including space left for nulls if any.
+ /// If this Reader was constructed with read_dense_for_nullable(), there is no space for
+ /// nulls and null_count() will be 0. There is no read-ahead/buffering for values. For
+ /// FLBA and ByteArray types this value reflects the values written with the last
+ /// ReadRecords call since those readers will reset the values after each call.
+ int64_t values_written() const { return values_written_; }
+
+ /// \brief Number of definition / repetition levels (from those that have
+ /// been decoded) that have been consumed inside the reader.
+ int64_t levels_position() const { return levels_position_; }
+
+ /// \brief Number of definition / repetition levels that have been written
+ /// internally in the reader. This may be larger than values_written() because
+ /// for repeated fields we need to look at the levels in advance to figure out
+ /// the record boundaries.
+ int64_t levels_written() const { return levels_written_; }
+
+ /// \brief Number of nulls in the leaf that we have read so far into the
+ /// values vector. This is only valid when !read_dense_for_nullable(). When
+ /// read_dense_for_nullable() it will always be 0.
+ int64_t null_count() const { return null_count_; }
+
+ /// \brief True if the leaf values are nullable
+ bool nullable_values() const { return nullable_values_; }
+
+ /// \brief True if reading directly as Arrow dictionary-encoded
+ bool read_dictionary() const { return read_dictionary_; }
+
+ /// \brief True if reading dense for nullable columns.
+ bool read_dense_for_nullable() const { return read_dense_for_nullable_; }
+
+ protected:
+ /// \brief Indicates if we can have nullable values. Note that repeated fields
+ /// may or may not be nullable.
+ bool nullable_values_;
+
+ bool at_record_start_;
+ int64_t records_read_;
+
+ /// \brief Stores values. These values are populated based on each ReadRecords
+ /// call. No extra values are buffered for the next call. SkipRecords will not
+ /// add any value to this buffer.
+ std::shared_ptr<::arrow::ResizableBuffer> values_;
+ /// \brief False for BYTE_ARRAY, in which case we don't allocate the values
+ /// buffer and we directly read into builder classes.
+ bool uses_values_;
+
+ /// \brief Values that we have read into 'values_' + 'null_count_'.
+ int64_t values_written_;
+ int64_t values_capacity_;
+ int64_t null_count_;
+
+ /// \brief Each bit corresponds to one element in 'values_' and specifies if it
+ /// is null or not null. Not set if read_dense_for_nullable_ is true.
+ std::shared_ptr<::arrow::ResizableBuffer> valid_bits_;
+
+ /// \brief Buffer for definition levels. May contain more levels than
+ /// is actually read. This is because we read levels ahead to
+ /// figure out record boundaries for repeated fields.
+ /// For flat required fields, 'def_levels_' and 'rep_levels_' are not
+ /// populated. For non-repeated fields 'rep_levels_' is not populated.
+ /// 'def_levels_' and 'rep_levels_' must be of the same size if present.
+ std::shared_ptr<::arrow::ResizableBuffer> def_levels_;
+ /// \brief Buffer for repetition levels. Only populated for repeated
+ /// fields.
+ std::shared_ptr<::arrow::ResizableBuffer> rep_levels_;
+
+ /// \brief Number of definition / repetition levels that have been written
+ /// internally in the reader. This may be larger than values_written() since
+ /// for repeated fields we need to look at the levels in advance to figure out
+ /// the record boundaries.
+ int64_t levels_written_;
+ /// \brief Position of the next level that should be consumed.
+ int64_t levels_position_;
+ int64_t levels_capacity_;
+
+ bool read_dictionary_ = false;
+ // If true, we will not leave any space for the null values in the values_
+ // vector.
+ bool read_dense_for_nullable_ = false;
+ };
+
+ class BinaryRecordReader : virtual public RecordReader {
+ public:
+ virtual std::vector<std::shared_ptr<::arrow::Array>> GetBuilderChunks() = 0;
+ };
+
+ /// \brief Read records directly to dictionary-encoded Arrow form (int32
+ /// indices). Only valid for BYTE_ARRAY columns
+ class DictionaryRecordReader : virtual public RecordReader {
+ public:
+ virtual std::shared_ptr<::arrow::ChunkedArray> GetResult() = 0;
+ };
+
+ } // namespace internal
+
+ using BoolReader = TypedColumnReader<BooleanType>;
+ using Int32Reader = TypedColumnReader<Int32Type>;
+ using Int64Reader = TypedColumnReader<Int64Type>;
+ using Int96Reader = TypedColumnReader<Int96Type>;
+ using FloatReader = TypedColumnReader<FloatType>;
+ using DoubleReader = TypedColumnReader<DoubleType>;
+ using ByteArrayReader = TypedColumnReader<ByteArrayType>;
+ using FixedLenByteArrayReader = TypedColumnReader<FLBAType>;
+
+ } // namespace parquet
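
Usage sketch (not part of the diff): the ReadBatch contract above distinguishes levels read from values read. A minimal driver, assuming "example.parquet" exists and its column 0 is a non-repeated physical INT64 column:

#include <iostream>
#include <memory>
#include <vector>

#include "parquet/column_reader.h"
#include "parquet/file_reader.h"

void DumpColumn() {
  std::unique_ptr<parquet::ParquetFileReader> file =
      parquet::ParquetFileReader::OpenFile("example.parquet");
  std::shared_ptr<parquet::RowGroupReader> row_group = file->RowGroup(0);
  auto reader =
      std::static_pointer_cast<parquet::Int64Reader>(row_group->Column(0));

  std::vector<int64_t> values(1024);
  std::vector<int16_t> def_levels(1024);
  while (reader->HasNext()) {
    int64_t values_read = 0;
    // levels_read can exceed values_read when some entries are null;
    // rep_levels is skipped here, which is only safe for a non-repeated column.
    int64_t levels_read =
        reader->ReadBatch(1024, def_levels.data(), /*rep_levels=*/nullptr,
                          values.data(), &values_read);
    std::cout << "levels=" << levels_read << " values=" << values_read << "\n";
  }
}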
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/column_writer.h ADDED
@@ -0,0 +1,307 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+ #include <cstring>
+ #include <memory>
+
+ #include "arrow/util/compression.h"
+ #include "parquet/exception.h"
+ #include "parquet/platform.h"
+ #include "parquet/types.h"
+
+ namespace arrow {
+
+ class Array;
+
+ namespace bit_util {
+ class BitWriter;
+ } // namespace bit_util
+
+ namespace util {
+ class RleEncoder;
+ class CodecOptions;
+ } // namespace util
+
+ } // namespace arrow
+
+ namespace parquet {
+
+ struct ArrowWriteContext;
+ class ColumnChunkMetaDataBuilder;
+ class ColumnDescriptor;
+ class ColumnIndexBuilder;
+ class DataPage;
+ class DictionaryPage;
+ class Encryptor;
+ class OffsetIndexBuilder;
+ class WriterProperties;
+
+ class PARQUET_EXPORT LevelEncoder {
+ public:
+ LevelEncoder();
+ ~LevelEncoder();
+
+ static int MaxBufferSize(Encoding::type encoding, int16_t max_level,
+ int num_buffered_values);
+
+ // Initialize the LevelEncoder.
+ void Init(Encoding::type encoding, int16_t max_level, int num_buffered_values,
+ uint8_t* data, int data_size);
+
+ // Encodes a batch of levels from an array and returns the number of levels encoded
+ int Encode(int batch_size, const int16_t* levels);
+
+ int32_t len() {
+ if (encoding_ != Encoding::RLE) {
+ throw ParquetException("Only implemented for RLE encoding");
+ }
+ return rle_length_;
+ }
+
+ private:
+ int bit_width_;
+ int rle_length_;
+ Encoding::type encoding_;
+ std::unique_ptr<::arrow::util::RleEncoder> rle_encoder_;
+ std::unique_ptr<::arrow::bit_util::BitWriter> bit_packed_encoder_;
+ };
+
+ class PARQUET_EXPORT PageWriter {
+ public:
+ virtual ~PageWriter() {}
+
+ static std::unique_ptr<PageWriter> Open(
+ std::shared_ptr<ArrowOutputStream> sink, Compression::type codec,
+ ColumnChunkMetaDataBuilder* metadata, int16_t row_group_ordinal = -1,
+ int16_t column_chunk_ordinal = -1,
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
+ bool buffered_row_group = false,
+ std::shared_ptr<Encryptor> header_encryptor = NULLPTR,
+ std::shared_ptr<Encryptor> data_encryptor = NULLPTR,
+ bool page_write_checksum_enabled = false,
+ // column_index_builder MUST outlive the PageWriter
+ ColumnIndexBuilder* column_index_builder = NULLPTR,
+ // offset_index_builder MUST outlive the PageWriter
+ OffsetIndexBuilder* offset_index_builder = NULLPTR,
+ const CodecOptions& codec_options = CodecOptions{});
+
+ ARROW_DEPRECATED("Deprecated in 13.0.0. Use CodecOptions-taking overload instead.")
+ static std::unique_ptr<PageWriter> Open(
+ std::shared_ptr<ArrowOutputStream> sink, Compression::type codec,
+ int compression_level, ColumnChunkMetaDataBuilder* metadata,
+ int16_t row_group_ordinal = -1, int16_t column_chunk_ordinal = -1,
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(),
+ bool buffered_row_group = false,
+ std::shared_ptr<Encryptor> header_encryptor = NULLPTR,
+ std::shared_ptr<Encryptor> data_encryptor = NULLPTR,
+ bool page_write_checksum_enabled = false,
+ // column_index_builder MUST outlive the PageWriter
+ ColumnIndexBuilder* column_index_builder = NULLPTR,
+ // offset_index_builder MUST outlive the PageWriter
+ OffsetIndexBuilder* offset_index_builder = NULLPTR);
+
+ // The Column Writer decides if dictionary encoding is used if set and
+ // if the dictionary encoding has fallen back to default encoding on reaching dictionary
+ // page limit
+ virtual void Close(bool has_dictionary, bool fallback) = 0;
+
+ // Return the number of uncompressed bytes written (including header size)
+ virtual int64_t WriteDataPage(const DataPage& page) = 0;
+
+ // Return the number of uncompressed bytes written (including header size)
+ virtual int64_t WriteDictionaryPage(const DictionaryPage& page) = 0;
+
+ /// \brief The total number of bytes written as serialized data and
+ /// dictionary pages to the sink so far.
+ virtual int64_t total_compressed_bytes_written() const = 0;
+
+ virtual bool has_compressor() = 0;
+
+ virtual void Compress(const Buffer& src_buffer, ResizableBuffer* dest_buffer) = 0;
+ };
+
+ class PARQUET_EXPORT ColumnWriter {
+ public:
+ virtual ~ColumnWriter() = default;
+
+ static std::shared_ptr<ColumnWriter> Make(ColumnChunkMetaDataBuilder*,
+ std::unique_ptr<PageWriter>,
+ const WriterProperties* properties);
+
+ /// \brief Closes the ColumnWriter, commits any buffered values to pages.
+ /// \return Total size of the column in bytes
+ virtual int64_t Close() = 0;
+
+ /// \brief The physical Parquet type of the column
+ virtual Type::type type() const = 0;
+
+ /// \brief The schema for the column
+ virtual const ColumnDescriptor* descr() const = 0;
+
+ /// \brief The number of rows written so far
+ virtual int64_t rows_written() const = 0;
+
+ /// \brief The total size of the compressed pages + page headers. Values
+ /// are still buffered and not written to a pager yet
+ ///
+ /// So in un-buffered mode, it always returns 0
+ virtual int64_t total_compressed_bytes() const = 0;
+
+ /// \brief The total number of bytes written as serialized data and
+ /// dictionary pages to the ColumnChunk so far
+ /// These bytes are uncompressed bytes.
+ virtual int64_t total_bytes_written() const = 0;
+
+ /// \brief The total number of bytes written as serialized data and
+ /// dictionary pages to the ColumnChunk so far.
+ /// If the column is uncompressed, the value would be equal to
+ /// total_bytes_written().
+ virtual int64_t total_compressed_bytes_written() const = 0;
+
+ /// \brief Estimated size of the values that are not written to a page yet.
+ virtual int64_t estimated_buffered_value_bytes() const = 0;
+
+ /// \brief The file-level writer properties
+ virtual const WriterProperties* properties() = 0;
+
+ /// \brief Write Apache Arrow columnar data directly to ColumnWriter. Returns
+ /// error status if the array data type is not compatible with the concrete
+ /// writer type.
+ ///
+ /// leaf_array is always a primitive (possibly dictionary encoded type).
+ /// Leaf_field_nullable indicates whether the leaf array is considered nullable
+ /// according to its schema in a Table or its parent array.
+ virtual ::arrow::Status WriteArrow(const int16_t* def_levels, const int16_t* rep_levels,
+ int64_t num_levels, const ::arrow::Array& leaf_array,
+ ArrowWriteContext* ctx,
+ bool leaf_field_nullable) = 0;
+ };
+
+ // API to write values to a single column. This is the main client facing API.
+ template <typename DType>
+ class TypedColumnWriter : public ColumnWriter {
+ public:
+ using T = typename DType::c_type;
+
+ // Write a batch of repetition levels, definition levels, and values to the
+ // column.
+ // `num_values` is the number of logical leaf values.
+ // `def_levels` (resp. `rep_levels`) can be null if the column's max definition level
+ // (resp. max repetition level) is 0.
+ // If not null, each of `def_levels` and `rep_levels` must have at least
+ // `num_values`.
+ //
+ // The number of physical values written (taken from `values`) is returned.
+ // It can be smaller than `num_values` if there are some undefined values.
+ virtual int64_t WriteBatch(int64_t num_values, const int16_t* def_levels,
+ const int16_t* rep_levels, const T* values) = 0;
+
+ /// Write a batch of repetition levels, definition levels, and values to the
+ /// column.
+ ///
+ /// In comparison to WriteBatch the length of repetition and definition levels
+ /// is the same as of the number of values read for max_definition_level == 1.
+ /// In the case of max_definition_level > 1, the repetition and definition
+ /// levels are larger than the values but the values include the null entries
+ /// with definition_level == (max_definition_level - 1). Thus we have to differentiate
+ /// in the parameters of this function if the input has the length of num_values or the
+ /// _number of rows in the lowest nesting level_.
+ ///
+ /// In the case that the most inner node in the Parquet is required, the _number of rows
+ /// in the lowest nesting level_ is equal to the number of non-null values. If the
+ /// inner-most schema node is optional, the _number of rows in the lowest nesting level_
+ /// also includes all values with definition_level == (max_definition_level - 1).
+ ///
+ /// @param num_values number of levels to write.
+ /// @param def_levels The Parquet definition levels, length is num_values
+ /// @param rep_levels The Parquet repetition levels, length is num_values
+ /// @param valid_bits Bitmap that indicates if the row is null on the lowest nesting
+ /// level. The length is number of rows in the lowest nesting level.
+ /// @param valid_bits_offset The offset in bits of the valid_bits where the
+ /// first relevant bit resides.
+ /// @param values The values in the lowest nested level including
+ /// spacing for nulls on the lowest levels; input has the length
+ /// of the number of rows on the lowest nesting level.
+ virtual void WriteBatchSpaced(int64_t num_values, const int16_t* def_levels,
+ const int16_t* rep_levels, const uint8_t* valid_bits,
+ int64_t valid_bits_offset, const T* values) = 0;
+ };
+
+ using BoolWriter = TypedColumnWriter<BooleanType>;
+ using Int32Writer = TypedColumnWriter<Int32Type>;
+ using Int64Writer = TypedColumnWriter<Int64Type>;
+ using Int96Writer = TypedColumnWriter<Int96Type>;
+ using FloatWriter = TypedColumnWriter<FloatType>;
+ using DoubleWriter = TypedColumnWriter<DoubleType>;
+ using ByteArrayWriter = TypedColumnWriter<ByteArrayType>;
+ using FixedLenByteArrayWriter = TypedColumnWriter<FLBAType>;
+
+ namespace internal {
+
+ /**
+ * Timestamp conversion constants
+ */
+ constexpr int64_t kJulianEpochOffsetDays = INT64_C(2440588);
+
+ template <int64_t UnitPerDay, int64_t NanosecondsPerUnit>
+ inline void ArrowTimestampToImpalaTimestamp(const int64_t time, Int96* impala_timestamp) {
+ int64_t julian_days = (time / UnitPerDay) + kJulianEpochOffsetDays;
+ (*impala_timestamp).value[2] = (uint32_t)julian_days;
+
+ int64_t last_day_units = time % UnitPerDay;
+ auto last_day_nanos = last_day_units * NanosecondsPerUnit;
+ // impala_timestamp will be unaligned every other entry so do memcpy instead
+ // of assign and reinterpret cast to avoid undefined behavior.
+ std::memcpy(impala_timestamp, &last_day_nanos, sizeof(int64_t));
+ }
+
+ constexpr int64_t kSecondsInNanos = INT64_C(1000000000);
+
+ inline void SecondsToImpalaTimestamp(const int64_t seconds, Int96* impala_timestamp) {
+ ArrowTimestampToImpalaTimestamp<kSecondsPerDay, kSecondsInNanos>(seconds,
+ impala_timestamp);
+ }
+
+ constexpr int64_t kMillisecondsInNanos = kSecondsInNanos / INT64_C(1000);
+
+ inline void MillisecondsToImpalaTimestamp(const int64_t milliseconds,
+ Int96* impala_timestamp) {
+ ArrowTimestampToImpalaTimestamp<kMillisecondsPerDay, kMillisecondsInNanos>(
+ milliseconds, impala_timestamp);
+ }
+
+ constexpr int64_t kMicrosecondsInNanos = kMillisecondsInNanos / INT64_C(1000);
+
+ inline void MicrosecondsToImpalaTimestamp(const int64_t microseconds,
+ Int96* impala_timestamp) {
+ ArrowTimestampToImpalaTimestamp<kMicrosecondsPerDay, kMicrosecondsInNanos>(
+ microseconds, impala_timestamp);
+ }
+
+ constexpr int64_t kNanosecondsInNanos = INT64_C(1);
+
+ inline void NanosecondsToImpalaTimestamp(const int64_t nanoseconds,
+ Int96* impala_timestamp) {
+ ArrowTimestampToImpalaTimestamp<kNanosecondsPerDay, kNanosecondsInNanos>(
+ nanoseconds, impala_timestamp);
+ }
+
+ } // namespace internal
+ } // namespace parquet
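
Usage sketch (not part of the diff): WriteBatch consumes num_values definition levels but only as many physical values as are actually defined. A minimal sketch with a hypothetical single optional INT64 column written to "out.parquet":

#include <memory>

#include "arrow/io/file.h"
#include "parquet/column_writer.h"
#include "parquet/file_writer.h"
#include "parquet/schema.h"

void WriteOneColumn() {
  using parquet::schema::GroupNode;
  using parquet::schema::PrimitiveNode;

  // Hypothetical one-column schema: optional INT64 field "x".
  auto field = PrimitiveNode::Make("x", parquet::Repetition::OPTIONAL,
                                   parquet::Type::INT64);
  auto schema = std::static_pointer_cast<GroupNode>(
      GroupNode::Make("schema", parquet::Repetition::REQUIRED, {field}));

  auto sink = ::arrow::io::FileOutputStream::Open("out.parquet").ValueOrDie();
  auto file_writer = parquet::ParquetFileWriter::Open(sink, schema);
  auto* rg_writer = file_writer->AppendRowGroup();
  auto* writer = static_cast<parquet::Int64Writer*>(rg_writer->NextColumn());

  // Three levels but only two physical values: the middle entry has
  // def_level 0 (below the max definition level of 1), i.e. it is null,
  // so no value is consumed for it.
  int64_t values[] = {7, 9};
  int16_t def_levels[] = {1, 0, 1};
  writer->WriteBatch(/*num_values=*/3, def_levels, /*rep_levels=*/nullptr,
                     values);
  file_writer->Close();
}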
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/file_key_unwrapper.h ADDED
@@ -0,0 +1,80 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include "arrow/util/concurrent_map.h"
+
+ #include "parquet/encryption/encryption.h"
+ #include "parquet/encryption/file_system_key_material_store.h"
+ #include "parquet/encryption/key_material.h"
+ #include "parquet/encryption/key_toolkit.h"
+ #include "parquet/encryption/key_toolkit_internal.h"
+ #include "parquet/encryption/kms_client.h"
+ #include "parquet/platform.h"
+
+ namespace parquet::encryption {
+
+ // This class will retrieve the key from "key metadata", following these steps:
+ // 1. Parse "key metadata" (see structure in KeyMetadata class).
+ // 2. Retrieve "key material" which can be stored inside or outside "key metadata".
+ // 3. Unwrap the "data encryption key" from "key material". There are 2 modes:
+ // 3.1. single wrapping: the wrapped "data encryption key" is decrypted directly
+ // with the "master encryption key".
+ // 3.2. double wrapping, in 2 steps:
+ // 3.2.1. the "key encryption key" is decrypted with the "master encryption key";
+ // 3.2.2. the "data encryption key" is decrypted with the resulting "key encryption key".
+ class PARQUET_EXPORT FileKeyUnwrapper : public DecryptionKeyRetriever {
+ public:
+ /// key_toolkit and kms_connection_config are used to get a KmsClient from the
+ /// cache, or to create one if it is not in the cache yet.
+ /// cache_entry_lifetime_seconds is the lifetime of a KmsClient in the cache.
+ /// If the file uses external key material then the Parquet file path and file
+ /// system must be specified.
+ FileKeyUnwrapper(KeyToolkit* key_toolkit,
+ const KmsConnectionConfig& kms_connection_config,
+ double cache_lifetime_seconds, const std::string& file_path = "",
+ const std::shared_ptr<::arrow::fs::FileSystem>& file_system = NULLPTR);
+
+ /// Constructor overload that accepts an existing key_material_store rather than using
+ /// the file path and file system to create one when needed. This is useful for key
+ /// rotation to allow accessing the key material store after it is used.
+ FileKeyUnwrapper(KeyToolkit* key_toolkit,
+ const KmsConnectionConfig& kms_connection_config,
+ double cache_lifetime_seconds,
+ std::shared_ptr<FileKeyMaterialStore> key_material_store);
+
+ /// Get the data key from key metadata
+ std::string GetKey(const std::string& key_metadata) override;
+
+ /// Get the data key along with the master key id from key material
+ KeyWithMasterId GetDataEncryptionKey(const KeyMaterial& key_material);
+
+ private:
+ std::shared_ptr<KmsClient> GetKmsClientFromConfigOrKeyMaterial(
+ const KeyMaterial& key_material);
+
+ /// A map of Key Encryption Key (KEK) ID -> KEK bytes, for the current token
+ std::shared_ptr<::arrow::util::ConcurrentMap<std::string, std::string>> kek_per_kek_id_;
+ KeyToolkit* key_toolkit_;
+ KmsConnectionConfig kms_connection_config_;
+ const double cache_entry_lifetime_seconds_;
+ std::shared_ptr<FileKeyMaterialStore> key_material_store_;
+ const std::string file_path_;
+ std::shared_ptr<::arrow::fs::FileSystem> file_system_;
+ };
+
+ } // namespace parquet::encryption
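
Usage sketch (not part of the diff): FileKeyUnwrapper plugs into decryption as a DecryptionKeyRetriever. A sketch of wiring it up, with placeholder KMS settings and assuming a KmsClientFactory has been registered with the surrounding key toolkit elsewhere:

#include <memory>

#include "parquet/encryption/file_key_unwrapper.h"

std::shared_ptr<parquet::encryption::DecryptionKeyRetriever> MakeRetriever(
    parquet::encryption::KeyToolkit* key_toolkit) {
  // Placeholder KMS settings; not a working configuration on its own.
  parquet::encryption::KmsConnectionConfig kms_config;
  kms_config.kms_instance_id = "DEMO_INSTANCE";
  kms_config.kms_instance_url = "DEMO_URL";
  // 600 s KmsClient cache lifetime, chosen arbitrarily. The file_path and
  // file_system arguments are only needed for external key material.
  return std::make_shared<parquet::encryption::FileKeyUnwrapper>(
      key_toolkit, kms_config, /*cache_lifetime_seconds=*/600.0);
}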
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/key_material.h ADDED
@@ -0,0 +1,129 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <string>
+
+ #include "parquet/platform.h"
+
+ namespace arrow {
+ namespace json {
+ namespace internal {
+ class ObjectParser;
+ } // namespace internal
+ } // namespace json
+ } // namespace arrow
+
+ namespace parquet::encryption {
+
+ // KeyMaterial class represents the "key material", keeping the information that allows
+ // readers to recover an encryption key (see description of the KeyMetadata class). The
+ // keytools package (PARQUET-1373) implements the "envelope encryption" pattern, in a
+ // "single wrapping" or "double wrapping" mode. In the single wrapping mode, the key
+ // material is generated by encrypting the "data encryption key" (DEK) by a "master key".
+ // In the double wrapping mode, the key material is generated by encrypting the DEK by a
+ // "key encryption key" (KEK), that in turn is encrypted by a "master key".
+ //
+ // Key material is kept in a flat json object, with the following fields:
+ // 1. "keyMaterialType" - a String, with the type of key material. In the current
+ // version, only one value is allowed - "PKMT1" (stands
+ // for "parquet key management tools, version 1"). For external key material storage,
+ // this field is written in both "key metadata" and "key material" jsons. For internal
+ // key material storage, this field is written only once in the common json.
+ // 2. "isFooterKey" - a boolean. If true, means that the material belongs to a file footer
+ // key, and keeps additional information (such as
+ // KMS instance ID and URL). If false, means that the material belongs to a column
+ // key.
+ // 3. "kmsInstanceID" - a String, with the KMS Instance ID. Written only in footer key
+ // material.
+ // 4. "kmsInstanceURL" - a String, with the KMS Instance URL. Written only in footer key
+ // material.
+ // 5. "masterKeyID" - a String, with the ID of the master key used to generate the
+ // material.
+ // 6. "wrappedDEK" - a String, with the wrapped DEK (base64 encoding).
+ // 7. "doubleWrapping" - a boolean. If true, means that the material was generated in
+ // double wrapping mode.
+ // If false - in single wrapping mode.
+ // 8. "keyEncryptionKeyID" - a String, with the ID of the KEK used to generate the
+ // material. Written only in double wrapping mode.
+ // 9. "wrappedKEK" - a String, with the wrapped KEK (base64 encoding). Written only in
+ // double wrapping mode.
+ class PARQUET_EXPORT KeyMaterial {
+ public:
+ // these fields are defined in a specification and should never be changed
+ static constexpr const char kKeyMaterialTypeField[] = "keyMaterialType";
+ static constexpr const char kKeyMaterialType1[] = "PKMT1";
+
+ static constexpr const char kFooterKeyIdInFile[] = "footerKey";
+ static constexpr const char kColumnKeyIdInFilePrefix[] = "columnKey";
+
+ static constexpr const char kIsFooterKeyField[] = "isFooterKey";
+ static constexpr const char kDoubleWrappingField[] = "doubleWrapping";
+ static constexpr const char kKmsInstanceIdField[] = "kmsInstanceID";
+ static constexpr const char kKmsInstanceUrlField[] = "kmsInstanceURL";
+ static constexpr const char kMasterKeyIdField[] = "masterKeyID";
+ static constexpr const char kWrappedDataEncryptionKeyField[] = "wrappedDEK";
+ static constexpr const char kKeyEncryptionKeyIdField[] = "keyEncryptionKeyID";
+ static constexpr const char kWrappedKeyEncryptionKeyField[] = "wrappedKEK";
+
+ public:
+ KeyMaterial() = default;
+
+ static KeyMaterial Parse(const std::string& key_material_string);
+
+ static KeyMaterial Parse(
+ const ::arrow::json::internal::ObjectParser* key_material_json);
+
+ /// This method returns a json string that will be stored either inside a parquet file
+ /// or in a key material store outside the parquet file.
+ static std::string SerializeToJson(bool is_footer_key,
+ const std::string& kms_instance_id,
+ const std::string& kms_instance_url,
+ const std::string& master_key_id,
+ bool is_double_wrapped, const std::string& kek_id,
+ const std::string& encoded_wrapped_kek,
+ const std::string& encoded_wrapped_dek,
+ bool is_internal_storage);
+
+ bool is_footer_key() const { return is_footer_key_; }
+ bool is_double_wrapped() const { return is_double_wrapped_; }
+ const std::string& master_key_id() const { return master_key_id_; }
+ const std::string& wrapped_dek() const { return encoded_wrapped_dek_; }
+ const std::string& kek_id() const { return kek_id_; }
+ const std::string& wrapped_kek() const { return encoded_wrapped_kek_; }
+ const std::string& kms_instance_id() const { return kms_instance_id_; }
+ const std::string& kms_instance_url() const { return kms_instance_url_; }
+
+ private:
+ KeyMaterial(bool is_footer_key, const std::string& kms_instance_id,
+ const std::string& kms_instance_url, const std::string& master_key_id,
+ bool is_double_wrapped, const std::string& kek_id,
+ const std::string& encoded_wrapped_kek,
+ const std::string& encoded_wrapped_dek);
+
+ bool is_footer_key_;
+ std::string kms_instance_id_;
+ std::string kms_instance_url_;
+ std::string master_key_id_;
+ bool is_double_wrapped_;
+ std::string kek_id_;
+ std::string encoded_wrapped_kek_;
+ std::string encoded_wrapped_dek_;
+ };
+
+ } // namespace parquet::encryption
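
Usage sketch (not part of the diff): the JSON fields listed above are produced by SerializeToJson and consumed by Parse. A round-trip sketch with placeholder IDs and wrapped-key strings:

#include <iostream>
#include <string>

#include "parquet/encryption/key_material.h"

void RoundTripKeyMaterial() {
  using parquet::encryption::KeyMaterial;
  // All IDs and wrapped-key strings below are illustrative placeholders.
  std::string json = KeyMaterial::SerializeToJson(
      /*is_footer_key=*/true, /*kms_instance_id=*/"DEMO_INSTANCE",
      /*kms_instance_url=*/"DEMO_URL", /*master_key_id=*/"footer_master_key",
      /*is_double_wrapped=*/true, /*kek_id=*/"kek_0",
      /*encoded_wrapped_kek=*/"<base64 wrapped KEK>",
      /*encoded_wrapped_dek=*/"<base64 wrapped DEK>",
      /*is_internal_storage=*/true);
  // Expect a flat JSON object carrying the fields documented above, e.g.
  // "keyMaterialType":"PKMT1", "isFooterKey":true, "doubleWrapping":true, ...
  KeyMaterial material = KeyMaterial::Parse(json);
  std::cout << material.master_key_id() << "\n";
}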
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/encryption/two_level_cache_with_expiration.h ADDED
@@ -0,0 +1,157 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <chrono>
21
+ #include <unordered_map>
22
+
23
+ #include "arrow/util/concurrent_map.h"
24
+ #include "arrow/util/mutex.h"
25
+
26
+ namespace parquet::encryption {
27
+
28
+ using ::arrow::util::ConcurrentMap;
29
+
30
+ namespace internal {
31
+
32
+ using TimePoint =
33
+ std::chrono::time_point<std::chrono::system_clock, std::chrono::duration<double>>;
34
+
35
+ inline TimePoint CurrentTimePoint() { return std::chrono::system_clock::now(); }
36
+
37
+ template <typename E>
38
+ class ExpiringCacheEntry {
39
+ public:
40
+ ExpiringCacheEntry() = default;
41
+
42
+ ExpiringCacheEntry(E cached_item, double expiration_interval_seconds)
43
+ : expiration_timestamp_(CurrentTimePoint() +
44
+ std::chrono::duration<double>(expiration_interval_seconds)),
45
+ cached_item_(std::move(cached_item)) {}
46
+
47
+ bool IsExpired() const {
48
+ const auto now = CurrentTimePoint();
49
+ return (now > expiration_timestamp_);
50
+ }
51
+
52
+ E cached_item() { return cached_item_; }
53
+
54
+ private:
55
+ const TimePoint expiration_timestamp_;
56
+ E cached_item_;
57
+ };
58
+
59
+ // This class exists to avoid the following warning when compiling the KeyToolkit class with VS2015
60
+ // warning C4503: decorated name length exceeded, name was truncated
61
+ template <typename V>
62
+ class ExpiringCacheMapEntry {
63
+ public:
64
+ ExpiringCacheMapEntry() = default;
65
+
66
+ explicit ExpiringCacheMapEntry(
67
+ std::shared_ptr<ConcurrentMap<std::string, V>> cached_item,
68
+ double expiration_interval_seconds)
69
+ : map_cache_(cached_item, expiration_interval_seconds) {}
70
+
71
+ bool IsExpired() { return map_cache_.IsExpired(); }
72
+
73
+ std::shared_ptr<ConcurrentMap<std::string, V>> cached_item() {
74
+ return map_cache_.cached_item();
75
+ }
76
+
77
+ private:
78
+ // The ConcurrentMap object may be accessed and modified from multiple threads
79
+ // at the same time, or even removed from the cache.
80
+ ExpiringCacheEntry<std::shared_ptr<ConcurrentMap<std::string, V>>> map_cache_;
81
+ };
82
+
83
+ } // namespace internal
84
+
85
+ // Two-level cache with expiration of internal caches according to token lifetime.
86
+ // External cache is per token, internal is per string key.
87
+ // Wrapper class around:
88
+ // std::unordered_map<std::string, internal::ExpiringCacheMapEntry<V>>,
89
+ // where each entry wraps a shared ConcurrentMap<std::string, V>.
90
+ // This cache is safe to be shared between threads.
91
+ template <typename V>
92
+ class TwoLevelCacheWithExpiration {
93
+ public:
94
+ TwoLevelCacheWithExpiration() {
95
+ last_cache_cleanup_timestamp_ = internal::CurrentTimePoint();
96
+ }
97
+
98
+ std::shared_ptr<ConcurrentMap<std::string, V>> GetOrCreateInternalCache(
99
+ const std::string& access_token, double cache_entry_lifetime_seconds) {
100
+ auto lock = mutex_.Lock();
101
+
102
+ auto external_cache_entry = cache_.find(access_token);
103
+ if (external_cache_entry == cache_.end() ||
104
+ external_cache_entry->second.IsExpired()) {
105
+ cache_.insert({access_token, internal::ExpiringCacheMapEntry<V>(
106
+ std::shared_ptr<ConcurrentMap<std::string, V>>(
107
+ new ConcurrentMap<std::string, V>()),
108
+ cache_entry_lifetime_seconds)});
109
+ }
110
+
111
+ return cache_[access_token].cached_item();
112
+ }
113
+
114
+ void CheckCacheForExpiredTokens(double cache_cleanup_period_seconds) {
115
+ auto lock = mutex_.Lock();
116
+
117
+ const auto now = internal::CurrentTimePoint();
118
+ if (now > (last_cache_cleanup_timestamp_ +
119
+ std::chrono::duration<double>(cache_cleanup_period_seconds))) {
120
+ RemoveExpiredEntriesNoMutex();
121
+ last_cache_cleanup_timestamp_ =
122
+ now + std::chrono::duration<double>(cache_cleanup_period_seconds);
123
+ }
124
+ }
125
+
126
+ void RemoveExpiredEntriesFromCache() {
127
+ auto lock = mutex_.Lock();
128
+
129
+ RemoveExpiredEntriesNoMutex();
130
+ }
131
+
132
+ void Remove(const std::string& access_token) {
133
+ auto lock = mutex_.Lock();
134
+ cache_.erase(access_token);
135
+ }
136
+
137
+ void Clear() {
138
+ auto lock = mutex_.Lock();
139
+ cache_.clear();
140
+ }
141
+
142
+ private:
143
+ void RemoveExpiredEntriesNoMutex() {
144
+ for (auto it = cache_.begin(); it != cache_.end();) {
145
+ if (it->second.IsExpired()) {
146
+ it = cache_.erase(it);
147
+ } else {
148
+ ++it;
149
+ }
150
+ }
151
+ }
152
+ std::unordered_map<std::string, internal::ExpiringCacheMapEntry<V>> cache_;
153
+ internal::TimePoint last_cache_cleanup_timestamp_;
154
+ ::arrow::util::Mutex mutex_;
155
+ };
156
+
157
+ } // namespace parquet::encryption
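
A brief usage sketch of this cache, using only the methods declared above; the token string and lifetimes are arbitrary example values:

    #include <string>
    #include "parquet/encryption/two_level_cache_with_expiration.h"

    void CacheUsageSketch() {
      parquet::encryption::TwoLevelCacheWithExpiration<std::string> cache;
      // Get (or create) the per-token inner map; this entry expires
      // 600 seconds after creation.
      auto inner = cache.GetOrCreateInternalCache(
          "token-abc", /*cache_entry_lifetime_seconds=*/600.0);
      // Per-key values would be stored in `inner`, a shared
      // ConcurrentMap<std::string, std::string>.
      (void)inner;  // silence unused-variable warnings in this sketch
      // Sweep expired per-token entries, at most once per cleanup period.
      cache.CheckCacheForExpiredTokens(/*cache_cleanup_period_seconds=*/60.0);
      // Drop a token's entry eagerly, e.g. when the token is revoked.
      cache.Remove("token-abc");
    }
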
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/page_index.h ADDED
@@ -0,0 +1,372 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/io/interfaces.h"
21
+ #include "parquet/encryption/type_fwd.h"
22
+ #include "parquet/types.h"
23
+
24
+ #include <optional>
25
+ #include <vector>
26
+
27
+ namespace parquet {
28
+
29
+ class EncodedStatistics;
30
+ struct PageIndexLocation;
31
+
32
+ /// \brief ColumnIndex is a proxy around format::ColumnIndex.
33
+ class PARQUET_EXPORT ColumnIndex {
34
+ public:
35
+ /// \brief Create a ColumnIndex from a serialized thrift message.
36
+ static std::unique_ptr<ColumnIndex> Make(const ColumnDescriptor& descr,
37
+ const void* serialized_index,
38
+ uint32_t index_len,
39
+ const ReaderProperties& properties,
40
+ Decryptor* decryptor = NULLPTR);
41
+
42
+ virtual ~ColumnIndex() = default;
43
+
44
+ /// \brief A bitmap with a bit set for each data page that has only null values.
45
+ ///
46
+ /// The length of this vector is equal to the number of data pages in the column.
47
+ virtual const std::vector<bool>& null_pages() const = 0;
48
+
49
+ /// \brief A vector of encoded lower bounds for each data page in this column.
50
+ ///
51
+ /// `null_pages` should be inspected first, as only pages with non-null values
52
+ /// may have their lower bounds populated.
53
+ virtual const std::vector<std::string>& encoded_min_values() const = 0;
54
+
55
+ /// \brief A vector of encoded upper bounds for each data page in this column.
56
+ ///
57
+ /// `null_pages` should be inspected first, as only pages with non-null values
58
+ /// may have their upper bounds populated.
59
+ virtual const std::vector<std::string>& encoded_max_values() const = 0;
60
+
61
+ /// \brief The ordering of lower and upper bounds.
62
+ ///
63
+ /// The boundary order applies across all lower bounds, and all upper bounds,
64
+ /// respectively. However, the order between lower bounds and upper bounds
65
+ /// cannot be derived from this.
66
+ virtual BoundaryOrder::type boundary_order() const = 0;
67
+
68
+ /// \brief Whether per-page null count information is available.
69
+ virtual bool has_null_counts() const = 0;
70
+
71
+ /// \brief An optional vector with the number of null values in each data page.
72
+ ///
73
+ /// `has_null_counts` should be called first to determine if this information is
74
+ /// available.
75
+ virtual const std::vector<int64_t>& null_counts() const = 0;
76
+
77
+ /// \brief A vector of page indices for non-null pages.
78
+ virtual const std::vector<int32_t>& non_null_page_indices() const = 0;
79
+ };
80
+
81
+ /// \brief Typed implementation of ColumnIndex.
82
+ template <typename DType>
83
+ class PARQUET_EXPORT TypedColumnIndex : public ColumnIndex {
84
+ public:
85
+ using T = typename DType::c_type;
86
+
87
+ /// \brief A vector of lower bounds for each data page in this column.
88
+ ///
89
+ /// This is like `encoded_min_values`, but with the values decoded according to
90
+ /// the column's physical type.
91
+ /// `min_values` and `max_values` can be used together with `boundary_order`
92
+ /// in order to prune some data pages when searching for specific values.
93
+ virtual const std::vector<T>& min_values() const = 0;
94
+
95
+ /// \brief A vector of upper bounds for each data page in this column.
96
+ ///
97
+ /// Just like `min_values`, but for upper bounds instead of lower bounds.
98
+ virtual const std::vector<T>& max_values() const = 0;
99
+ };
100
+
101
+ using BoolColumnIndex = TypedColumnIndex<BooleanType>;
102
+ using Int32ColumnIndex = TypedColumnIndex<Int32Type>;
103
+ using Int64ColumnIndex = TypedColumnIndex<Int64Type>;
104
+ using FloatColumnIndex = TypedColumnIndex<FloatType>;
105
+ using DoubleColumnIndex = TypedColumnIndex<DoubleType>;
106
+ using ByteArrayColumnIndex = TypedColumnIndex<ByteArrayType>;
107
+ using FLBAColumnIndex = TypedColumnIndex<FLBAType>;
108
+
109
+ /// \brief PageLocation is a proxy around format::PageLocation.
110
+ struct PARQUET_EXPORT PageLocation {
111
+ /// File offset of the data page.
112
+ int64_t offset;
113
+ /// Total compressed size of the data page and header.
114
+ int32_t compressed_page_size;
115
+ /// Row id of the first row in the page within the row group.
116
+ int64_t first_row_index;
117
+ };
118
+
119
+ /// \brief OffsetIndex is a proxy around format::OffsetIndex.
120
+ class PARQUET_EXPORT OffsetIndex {
121
+ public:
122
+ /// \brief Create a OffsetIndex from a serialized thrift message.
123
+ static std::unique_ptr<OffsetIndex> Make(const void* serialized_index,
124
+ uint32_t index_len,
125
+ const ReaderProperties& properties,
126
+ Decryptor* decryptor = NULLPTR);
127
+
128
+ virtual ~OffsetIndex() = default;
129
+
130
+ /// \brief A vector of locations for each data page in this column.
131
+ virtual const std::vector<PageLocation>& page_locations() const = 0;
132
+ };
133
+
134
+ /// \brief Interface for reading the page index for a Parquet row group.
135
+ class PARQUET_EXPORT RowGroupPageIndexReader {
136
+ public:
137
+ virtual ~RowGroupPageIndexReader() = default;
138
+
139
+ /// \brief Read column index of a column chunk.
140
+ ///
141
+ /// \param[in] i column ordinal of the column chunk.
142
+ /// \returns column index of the column or nullptr if it does not exist.
143
+ /// \throws ParquetException if the index is out of bounds.
144
+ virtual std::shared_ptr<ColumnIndex> GetColumnIndex(int32_t i) = 0;
145
+
146
+ /// \brief Read offset index of a column chunk.
147
+ ///
148
+ /// \param[in] i column ordinal of the column chunk.
149
+ /// \returns offset index of the column or nullptr if it does not exist.
150
+ /// \throws ParquetException if the index is out of bounds.
151
+ virtual std::shared_ptr<OffsetIndex> GetOffsetIndex(int32_t i) = 0;
152
+ };
153
+
154
+ struct PageIndexSelection {
155
+ /// Specifies whether to read the column index.
156
+ bool column_index = false;
157
+ /// Specifies whether to read the offset index.
158
+ bool offset_index = false;
159
+ };
160
+
161
+ PARQUET_EXPORT
162
+ std::ostream& operator<<(std::ostream& out, const PageIndexSelection& params);
163
+
164
+ struct RowGroupIndexReadRange {
165
+ /// Base start and total size of column index of all column chunks in a row group.
166
+ /// If none of the column chunks have column index, it is set to std::nullopt.
167
+ std::optional<::arrow::io::ReadRange> column_index = std::nullopt;
168
+ /// Base start and total size of offset index of all column chunks in a row group.
169
+ /// If none of the column chunks have offset index, it is set to std::nullopt.
170
+ std::optional<::arrow::io::ReadRange> offset_index = std::nullopt;
171
+ };
172
+
173
+ /// \brief Interface for reading the page index for a Parquet file.
174
+ class PARQUET_EXPORT PageIndexReader {
175
+ public:
176
+ virtual ~PageIndexReader() = default;
177
+
178
+ /// \brief Create a PageIndexReader instance.
179
+ /// \returns a PageIndexReader instance.
180
+ /// WARNING: The returned PageIndexReader holds references to all of its input
182
+ /// parameters, so it must not outlive any of them. Usually these input parameters
183
+ /// come from the same ParquetFileReader object, in which case the PageIndexReader
184
+ /// must not outlive the reader that created it.
184
+ static std::shared_ptr<PageIndexReader> Make(
185
+ ::arrow::io::RandomAccessFile* input, std::shared_ptr<FileMetaData> file_metadata,
186
+ const ReaderProperties& properties,
187
+ InternalFileDecryptor* file_decryptor = NULLPTR);
188
+
189
+ /// \brief Get the page index reader of a specific row group.
190
+ /// \param[in] i row group ordinal to get page index reader.
191
+ /// \returns RowGroupPageIndexReader of the specified row group. A nullptr may or may
192
+ /// not be returned if the page index for the row group is unavailable. It is
193
+ /// the caller's responsibility to check the return value of follow-up calls
194
+ /// to the RowGroupPageIndexReader.
195
+ /// \throws ParquetException if the index is out of bounds.
196
+ virtual std::shared_ptr<RowGroupPageIndexReader> RowGroup(int i) = 0;
197
+
198
+ /// \brief Advise the reader which parts of the page index will be read later.
199
+ ///
200
+ /// The PageIndexReader can optionally prefetch and cache page index that
201
+ /// may be read later to get better performance.
202
+ ///
203
+ /// The contract of this function is as follows:
204
+ /// 1) If WillNeed() has not been called for a specific row group and the page index
205
+ /// exists, follow-up calls to get column index or offset index of all columns in
206
+ /// this row group SHOULD NOT FAIL, but the performance may not be optimal.
207
+ /// 2) If WillNeed() has been called for a specific row group, follow-up calls to get
208
+ /// page index are limited to the columns and index types requested by WillNeed(),
209
+ /// so they MAY FAIL if columns not requested by WillNeed() are accessed.
210
+ /// 3) Later calls to WillNeed() MAY OVERRIDE previous calls for the same row groups.
211
+ /// For example,
212
+ /// 1) If WillNeed() is not called for row group 0, then follow-up calls to read
213
+ /// column index and/or offset index of all columns of row group 0 should not
214
+ /// fail if its page index exists.
215
+ /// 2) If WillNeed() is called for columns 0 and 1 for row group 0, then follow-up
216
+ /// call to read page index of column 2 for row group 0 MAY FAIL even if its
217
+ /// page index exists.
218
+ /// 3) If WillNeed() is called for row group 0 with offset index only, then
219
+ /// follow-up call to read column index of row group 0 MAY FAIL even if
220
+ /// the column index of this column exists.
221
+ /// 4) If WillNeed() is called for columns 0 and 1 of row group 0, and is later
222
+ /// called again for columns 1 and 2 of row group 0, the later call overrides
223
+ /// the previous one and only columns 1 and 2 of row group 0 may be
224
+ /// accessed.
225
+ ///
226
+ /// \param[in] row_group_indices list of row group ordinals whose page index will be read later.
227
+ /// \param[in] column_indices list of column ordinals to read page index for. If it is
228
+ /// empty, it means all columns in the row group will be read.
229
+ /// \param[in] selection which kind of page index is required later.
230
+ virtual void WillNeed(const std::vector<int32_t>& row_group_indices,
231
+ const std::vector<int32_t>& column_indices,
232
+ const PageIndexSelection& selection) = 0;
233
+
234
+ /// \brief Advise the reader that the page index of these row groups will not be read anymore.
235
+ ///
236
+ /// The PageIndexReader implementation has the opportunity to cancel any prefetch or
237
+ /// release resources related to these row groups.
238
+ ///
239
+ /// \param[in] row_group_indices list of row group ordinals whose page index will
240
+ /// not be accessed anymore.
241
+ virtual void WillNotNeed(const std::vector<int32_t>& row_group_indices) = 0;
242
+
243
+ /// \brief Determine the column index and offset index ranges for the given row group.
244
+ ///
245
+ /// \param[in] row_group_metadata row group metadata to get column chunk metadata.
246
+ /// \param[in] columns list of column ordinals to get page index. If the list is empty,
247
+ /// it means all columns in the row group.
248
+ /// \returns RowGroupIndexReadRange of the specified row group. Throws ParquetException
249
+ /// if the selected column ordinal is out of bounds or the page index
250
+ /// metadata is corrupted.
251
+ static RowGroupIndexReadRange DeterminePageIndexRangesInRowGroup(
252
+ const RowGroupMetaData& row_group_metadata, const std::vector<int32_t>& columns);
253
+ };
254
+
255
+ /// \brief Interface for collecting column index of data pages in a column chunk.
256
+ class PARQUET_EXPORT ColumnIndexBuilder {
257
+ public:
258
+ /// \brief API convenience to create a ColumnIndexBuilder.
259
+ static std::unique_ptr<ColumnIndexBuilder> Make(const ColumnDescriptor* descr);
260
+
261
+ virtual ~ColumnIndexBuilder() = default;
262
+
263
+ /// \brief Add statistics of a data page.
264
+ ///
265
+ /// If the ColumnIndexBuilder has seen any corrupted statistics, it will
266
+ /// not update statistics anymore.
267
+ ///
268
+ /// \param stats Page statistics in the encoded form.
269
+ virtual void AddPage(const EncodedStatistics& stats) = 0;
270
+
271
+ /// \brief Complete the column index.
272
+ ///
273
+ /// Once called, AddPage() can no longer be called.
274
+ /// WriteTo() and Build() can only be called after Finish() has been called.
275
+ virtual void Finish() = 0;
276
+
277
+ /// \brief Serialize the column index thrift message.
278
+ ///
279
+ /// If the ColumnIndexBuilder has seen any corrupted statistics, it will
280
+ /// not write any data to the sink.
281
+ ///
282
+ /// \param[out] sink output stream to write the serialized message.
283
+ /// \param[in] encryptor encryptor to encrypt the serialized column index.
284
+ virtual void WriteTo(::arrow::io::OutputStream* sink,
285
+ Encryptor* encryptor = NULLPTR) const = 0;
286
+
287
+ /// \brief Create a ColumnIndex directly.
288
+ ///
289
+ /// \return If the ColumnIndexBuilder has seen any corrupted statistics, it simply
290
+ /// returns nullptr. Otherwise the column index is built and returned.
291
+ virtual std::unique_ptr<ColumnIndex> Build() const = 0;
292
+ };
293
+
294
+ /// \brief Interface for collecting offset index of data pages in a column chunk.
295
+ class PARQUET_EXPORT OffsetIndexBuilder {
296
+ public:
297
+ /// \brief API convenience to create an OffsetIndexBuilder.
298
+ static std::unique_ptr<OffsetIndexBuilder> Make();
299
+
300
+ virtual ~OffsetIndexBuilder() = default;
301
+
302
+ /// \brief Add page location of a data page.
303
+ virtual void AddPage(int64_t offset, int32_t compressed_page_size,
304
+ int64_t first_row_index) = 0;
305
+
306
+ /// \brief Add page location of a data page.
307
+ void AddPage(const PageLocation& page_location) {
308
+ AddPage(page_location.offset, page_location.compressed_page_size,
309
+ page_location.first_row_index);
310
+ }
311
+
312
+ /// \brief Complete the offset index.
313
+ ///
314
+ /// In the buffered row group mode, data pages are flushed into an in-memory
315
+ /// sink, so the OffsetIndexBuilder has only collected relative offsets,
316
+ /// which require adjustment once the pages are flushed to the file.
317
+ ///
318
+ /// \param final_position Final stream offset to add for page offset adjustment.
319
+ virtual void Finish(int64_t final_position) = 0;
320
+
321
+ /// \brief Serialize the offset index thrift message.
322
+ ///
323
+ /// \param[out] sink output stream to write the serialized message.
324
+ /// \param[in] encryptor encryptor to encrypt the serialized offset index.
325
+ virtual void WriteTo(::arrow::io::OutputStream* sink,
326
+ Encryptor* encryptor = NULLPTR) const = 0;
327
+
328
+ /// \brief Create an OffsetIndex directly.
329
+ virtual std::unique_ptr<OffsetIndex> Build() const = 0;
330
+ };
331
+
332
+ /// \brief Interface for collecting page index of a parquet file.
333
+ class PARQUET_EXPORT PageIndexBuilder {
334
+ public:
335
+ /// \brief API convenience to create a PageIndexBuilder.
336
+ static std::unique_ptr<PageIndexBuilder> Make(
337
+ const SchemaDescriptor* schema, InternalFileEncryptor* file_encryptor = NULLPTR);
338
+
339
+ virtual ~PageIndexBuilder() = default;
340
+
341
+ /// \brief Start a new row group.
342
+ virtual void AppendRowGroup() = 0;
343
+
344
+ /// \brief Get the ColumnIndexBuilder from column ordinal.
345
+ ///
346
+ /// \param i Column ordinal.
347
+ /// \return ColumnIndexBuilder for the column; its memory ownership belongs to
348
+ /// the PageIndexBuilder.
349
+ virtual ColumnIndexBuilder* GetColumnIndexBuilder(int32_t i) = 0;
350
+
351
+ /// \brief Get the OffsetIndexBuilder from column ordinal.
352
+ ///
353
+ /// \param i Column ordinal.
354
+ /// \return OffsetIndexBuilder for the column; its memory ownership belongs to
355
+ /// the PageIndexBuilder.
356
+ virtual OffsetIndexBuilder* GetOffsetIndexBuilder(int32_t i) = 0;
357
+
358
+ /// \brief Complete the page index builder; no more writes are allowed afterwards.
359
+ virtual void Finish() = 0;
360
+
361
+ /// \brief Serialize the page index thrift message.
362
+ ///
363
+ /// Only valid column indexes and offset indexes are serialized and their locations
364
+ /// are set.
365
+ ///
366
+ /// \param[out] sink The output stream to write the page index.
367
+ /// \param[out] location The locations of all page indexes, relative to the start of the sink.
368
+ virtual void WriteTo(::arrow::io::OutputStream* sink,
369
+ PageIndexLocation* location) const = 0;
370
+ };
371
+
372
+ } // namespace parquet
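
To make the intended call sequence concrete, a rough sketch; the `source` and `metadata` arguments are hypothetical stand-ins for objects obtained from an already-opened Parquet file:

    #include "parquet/metadata.h"
    #include "parquet/page_index.h"
    #include "parquet/properties.h"

    void ReadColumnIndexSketch(::arrow::io::RandomAccessFile* source,
                               std::shared_ptr<parquet::FileMetaData> metadata) {
      parquet::ReaderProperties properties;
      auto index_reader =
          parquet::PageIndexReader::Make(source, metadata, properties);
      // Hint that only the column index of columns 0 and 1 of row group 0
      // will be read, so the reader may prefetch exactly that range.
      index_reader->WillNeed(
          /*row_group_indices=*/{0}, /*column_indices=*/{0, 1},
          parquet::PageIndexSelection{/*column_index=*/true,
                                      /*offset_index=*/false});
      if (auto rg_reader = index_reader->RowGroup(0)) {
        std::shared_ptr<parquet::ColumnIndex> column_index =
            rg_reader->GetColumnIndex(0);
        // column_index->null_pages(), encoded_min_values(), etc. describe
        // the data pages of column 0.
      }
    }
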
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/pch.h ADDED
@@ -0,0 +1,28 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Often-used headers, for precompiling.
19
+ // If updating this header, please make sure you check compilation speed
20
+ // before checking in. Adding headers which are not used extremely often
21
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
22
+
23
+ #include "parquet/encoding.h"
24
+ #include "parquet/exception.h"
25
+ #include "parquet/metadata.h"
26
+ #include "parquet/properties.h"
27
+ #include "parquet/schema.h"
28
+ #include "parquet/types.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/platform.h ADDED
@@ -0,0 +1,112 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+
23
+ #include "arrow/buffer.h" // IWYU pragma: export
24
+ #include "arrow/io/interfaces.h" // IWYU pragma: export
25
+ #include "arrow/status.h" // IWYU pragma: export
26
+ #include "arrow/type_fwd.h" // IWYU pragma: export
27
+ #include "arrow/util/macros.h" // IWYU pragma: export
28
+
29
+ #if defined(_WIN32) || defined(__CYGWIN__)
30
+
31
+ #if defined(_MSC_VER)
32
+ #pragma warning(push)
33
+ // Disable warning for STL types usage in DLL interface
34
+ // https://web.archive.org/web/20130317015847/http://connect.microsoft.com/VisualStudio/feedback/details/696593/vc-10-vs-2010-basic-string-exports
35
+ #pragma warning(disable : 4275 4251)
36
+ // Disable diamond inheritance warnings
37
+ #pragma warning(disable : 4250)
38
+ // Disable macro redefinition warnings
39
+ #pragma warning(disable : 4005)
40
+ // Disable extern before exported template warnings
41
+ #pragma warning(disable : 4910)
42
+ #else
43
+ #pragma GCC diagnostic ignored "-Wattributes"
44
+ #endif
45
+
46
+ #ifdef PARQUET_STATIC
47
+ #define PARQUET_EXPORT
48
+ #elif defined(PARQUET_EXPORTING)
49
+ #define PARQUET_EXPORT __declspec(dllexport)
50
+ #else
51
+ #define PARQUET_EXPORT __declspec(dllimport)
52
+ #endif
53
+
54
+ #define PARQUET_NO_EXPORT
55
+
56
+ #else // Not Windows
57
+ #ifndef PARQUET_EXPORT
58
+ #define PARQUET_EXPORT __attribute__((visibility("default")))
59
+ #endif
60
+ #ifndef PARQUET_NO_EXPORT
61
+ #define PARQUET_NO_EXPORT __attribute__((visibility("hidden")))
62
+ #endif
63
+ #endif // Non-Windows
64
+
65
+ // This is a complicated topic; some reading on it:
66
+ // http://www.codesynthesis.com/~boris/blog/2010/01/18/dll-export-cxx-templates/
67
+ #if defined(_MSC_VER) || defined(__clang__)
68
+ #define PARQUET_TEMPLATE_CLASS_EXPORT
69
+ #define PARQUET_TEMPLATE_EXPORT PARQUET_EXPORT
70
+ #else
71
+ #define PARQUET_TEMPLATE_CLASS_EXPORT PARQUET_EXPORT
72
+ #define PARQUET_TEMPLATE_EXPORT
73
+ #endif
74
+
75
+ #define PARQUET_DISALLOW_COPY_AND_ASSIGN ARROW_DISALLOW_COPY_AND_ASSIGN
76
+
77
+ #define PARQUET_NORETURN ARROW_NORETURN
78
+ #define PARQUET_DEPRECATED ARROW_DEPRECATED
79
+
80
+ // If ARROW_VALGRIND set when compiling unit tests, also define
81
+ // PARQUET_VALGRIND
82
+ #ifdef ARROW_VALGRIND
83
+ #define PARQUET_VALGRIND
84
+ #endif
85
+
86
+ namespace parquet {
87
+
88
+ using Buffer = ::arrow::Buffer;
89
+ using Codec = ::arrow::util::Codec;
90
+ using CodecOptions = ::arrow::util::CodecOptions;
91
+ using Compression = ::arrow::Compression;
92
+ using MemoryPool = ::arrow::MemoryPool;
93
+ using MutableBuffer = ::arrow::MutableBuffer;
94
+ using ResizableBuffer = ::arrow::ResizableBuffer;
95
+ using ResizableBuffer = ::arrow::ResizableBuffer;
96
+ using ArrowInputFile = ::arrow::io::RandomAccessFile;
97
+ using ArrowInputStream = ::arrow::io::InputStream;
98
+ using ArrowOutputStream = ::arrow::io::OutputStream;
99
+
100
+ constexpr int64_t kDefaultOutputStreamSize = 1024;
101
+
102
+ constexpr int16_t kNonPageOrdinal = static_cast<int16_t>(-1);
103
+
104
+ PARQUET_EXPORT
105
+ std::shared_ptr<::arrow::io::BufferOutputStream> CreateOutputStream(
106
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool());
107
+
108
+ PARQUET_EXPORT
109
+ std::shared_ptr<ResizableBuffer> AllocateBuffer(
110
+ ::arrow::MemoryPool* pool = ::arrow::default_memory_pool(), int64_t size = 0);
111
+
112
+ } // namespace parquet
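
A trivial sketch of the two helpers declared at the end of this header (PARQUET_THROW_NOT_OK comes from parquet/exception.h):

    #include "parquet/exception.h"
    #include "parquet/platform.h"

    void BufferSketch() {
      // A resizable, pool-backed buffer of 128 bytes.
      std::shared_ptr<parquet::ResizableBuffer> buffer =
          parquet::AllocateBuffer(::arrow::default_memory_pool(), /*size=*/128);
      // An in-memory output stream, e.g. for serializing metadata.
      std::shared_ptr<::arrow::io::BufferOutputStream> sink =
          parquet::CreateOutputStream();
      PARQUET_THROW_NOT_OK(sink->Write("parquet", 7));
    }
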
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/printer.h ADDED
@@ -0,0 +1,46 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <iosfwd>
21
+ #include <list>
22
+
23
+ #include "parquet/platform.h"
24
+
25
+ namespace parquet {
26
+
27
+ class ParquetFileReader;
28
+
29
+ class PARQUET_EXPORT ParquetFilePrinter {
30
+ private:
31
+ ParquetFileReader* fileReader;
32
+
33
+ public:
34
+ explicit ParquetFilePrinter(ParquetFileReader* reader) : fileReader(reader) {}
35
+ ~ParquetFilePrinter() {}
36
+
37
+ void DebugPrint(std::ostream& stream, std::list<int> selected_columns,
38
+ bool print_values = false, bool format_dump = false,
39
+ bool print_key_value_metadata = false,
40
+ const char* filename = "No Name");
41
+
42
+ void JSONPrint(std::ostream& stream, std::list<int> selected_columns,
43
+ const char* filename = "No Name");
44
+ };
45
+
46
+ } // namespace parquet
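
A short usage sketch; the file name is a placeholder, and ParquetFileReader::OpenFile is assumed to come from parquet/file_reader.h:

    #include <iostream>
    #include "parquet/file_reader.h"
    #include "parquet/printer.h"

    void PrintMetadataSketch() {
      std::unique_ptr<parquet::ParquetFileReader> reader =
          parquet::ParquetFileReader::OpenFile("example.parquet");
      parquet::ParquetFilePrinter printer(reader.get());
      // Dump the file's metadata to stdout; pass column ordinals in
      // selected_columns to restrict which columns are shown.
      printer.DebugPrint(std::cout, /*selected_columns=*/{},
                         /*print_values=*/false);
    }
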
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/stream_writer.h ADDED
@@ -0,0 +1,243 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <array>
21
+ #include <chrono>
22
+ #include <cstdint>
23
+ #include <memory>
24
+ #include <optional>
25
+ #include <string>
26
+ #include <string_view>
27
+ #include <vector>
28
+
29
+ #include "parquet/column_writer.h"
30
+ #include "parquet/file_writer.h"
31
+
32
+ namespace parquet {
33
+
34
+ /// \brief A class for writing Parquet files using an output stream type API.
35
+ ///
36
+ /// The values given must be of the correct type, i.e. the type must
37
+ /// match the file schema exactly; otherwise a ParquetException will be
38
+ /// thrown.
39
+ ///
40
+ /// The user must explicitly indicate the end of the row using the
41
+ /// EndRow() function or EndRow output manipulator.
42
+ ///
43
+ /// A maximum row group size can be configured; the default size is
44
+ /// 512MB. Alternatively, the row group size can be set to zero and the
45
+ /// user can create new row groups by calling the EndRowGroup()
46
+ /// function or using the EndRowGroup output manipulator.
47
+ ///
48
+ /// Required and optional fields are supported:
49
+ /// - Required fields are written using operator<<(T)
50
+ /// - Optional fields are written using
51
+ /// operator<<(std::optional<T>).
52
+ ///
53
+ /// Note that operator<<(T) can be used to write optional fields.
54
+ ///
55
+ /// Similarly, operator<<(std::optional<T>) can be used to
56
+ /// write required fields. However, if the optional parameter does not
57
+ /// have a value (i.e. it is nullopt) then a ParquetException will be
58
+ /// raised.
59
+ ///
60
+ /// Currently there is no support for repeated fields.
61
+ ///
62
+ class PARQUET_EXPORT StreamWriter {
63
+ public:
64
+ template <typename T>
65
+ using optional = ::std::optional<T>;
66
+
67
+ // N.B. Default constructed objects are not usable. This
68
+ // constructor is provided so that the object may be move
69
+ // assigned afterwards.
70
+ StreamWriter() = default;
71
+
72
+ explicit StreamWriter(std::unique_ptr<ParquetFileWriter> writer);
73
+
74
+ ~StreamWriter() = default;
75
+
76
+ static void SetDefaultMaxRowGroupSize(int64_t max_size);
77
+
78
+ void SetMaxRowGroupSize(int64_t max_size);
79
+
80
+ int current_column() const { return column_index_; }
81
+
82
+ int64_t current_row() const { return current_row_; }
83
+
84
+ int num_columns() const;
85
+
86
+ // Moving is possible.
87
+ StreamWriter(StreamWriter&&) = default;
88
+ StreamWriter& operator=(StreamWriter&&) = default;
89
+
90
+ // Copying is not allowed.
91
+ StreamWriter(const StreamWriter&) = delete;
92
+ StreamWriter& operator=(const StreamWriter&) = delete;
93
+
94
+ /// \brief Output operators for required fields.
95
+ /// These can also be used for optional fields when a value must be set.
96
+ StreamWriter& operator<<(bool v);
97
+
98
+ StreamWriter& operator<<(int8_t v);
99
+
100
+ StreamWriter& operator<<(uint8_t v);
101
+
102
+ StreamWriter& operator<<(int16_t v);
103
+
104
+ StreamWriter& operator<<(uint16_t v);
105
+
106
+ StreamWriter& operator<<(int32_t v);
107
+
108
+ StreamWriter& operator<<(uint32_t v);
109
+
110
+ StreamWriter& operator<<(int64_t v);
111
+
112
+ StreamWriter& operator<<(uint64_t v);
113
+
114
+ StreamWriter& operator<<(const std::chrono::milliseconds& v);
115
+
116
+ StreamWriter& operator<<(const std::chrono::microseconds& v);
117
+
118
+ StreamWriter& operator<<(float v);
119
+
120
+ StreamWriter& operator<<(double v);
121
+
122
+ StreamWriter& operator<<(char v);
123
+
124
+ /// \brief Helper class to write fixed length strings.
125
+ /// This is useful because the standard string views (such as
126
+ /// std::string_view) are meant for variable-length data.
127
+ struct PARQUET_EXPORT FixedStringView {
128
+ FixedStringView() = default;
129
+
130
+ explicit FixedStringView(const char* data_ptr);
131
+
132
+ FixedStringView(const char* data_ptr, std::size_t data_len);
133
+
134
+ const char* data{NULLPTR};
135
+ std::size_t size{0};
136
+ };
137
+
138
+ /// \brief Output operators for fixed length strings.
139
+ template <int N>
140
+ StreamWriter& operator<<(const char (&v)[N]) {
141
+ return WriteFixedLength(v, N);
142
+ }
143
+ template <std::size_t N>
144
+ StreamWriter& operator<<(const std::array<char, N>& v) {
145
+ return WriteFixedLength(v.data(), N);
146
+ }
147
+ StreamWriter& operator<<(FixedStringView v);
148
+
149
+ /// \brief Output operators for variable length strings.
150
+ StreamWriter& operator<<(const char* v);
151
+ StreamWriter& operator<<(const std::string& v);
152
+ StreamWriter& operator<<(::std::string_view v);
153
+
154
+ /// \brief Output operator for optional fields.
155
+ template <typename T>
156
+ StreamWriter& operator<<(const optional<T>& v) {
157
+ if (v) {
158
+ return operator<<(*v);
159
+ }
160
+ SkipOptionalColumn();
161
+ return *this;
162
+ }
163
+
164
+ /// \brief Skip the next N columns of optional data. If there are
165
+ /// fewer than N columns remaining, then the excess columns are
166
+ /// ignored.
167
+ /// \throws ParquetException if there is an attempt to skip any
168
+ /// required column.
169
+ /// \return Number of columns actually skipped.
170
+ int64_t SkipColumns(int num_columns_to_skip);
171
+
172
+ /// \brief Terminate the current row and advance to next one.
173
+ /// \throws ParquetException if all columns in the row were not
174
+ /// written or skipped.
175
+ void EndRow();
176
+
177
+ /// \brief Terminate the current row group and create new one.
178
+ void EndRowGroup();
179
+
180
+ protected:
181
+ template <typename WriterType, typename T>
182
+ StreamWriter& Write(const T v) {
183
+ auto writer = static_cast<WriterType*>(row_group_writer_->column(column_index_++));
184
+
185
+ writer->WriteBatch(kBatchSizeOne, &kDefLevelOne, &kRepLevelZero, &v);
186
+
187
+ if (max_row_group_size_ > 0) {
188
+ row_group_size_ += writer->estimated_buffered_value_bytes();
189
+ }
190
+ return *this;
191
+ }
192
+
193
+ StreamWriter& WriteVariableLength(const char* data_ptr, std::size_t data_len);
194
+
195
+ StreamWriter& WriteFixedLength(const char* data_ptr, std::size_t data_len);
196
+
197
+ void CheckColumn(Type::type physical_type, ConvertedType::type converted_type,
198
+ int length = -1);
199
+
200
+ /// \brief Skip the next column which must be optional.
201
+ /// \throws ParquetException if the next column does not exist or is
202
+ /// not optional.
203
+ void SkipOptionalColumn();
204
+
205
+ void WriteNullValue(ColumnWriter* writer);
206
+
207
+ private:
208
+ using node_ptr_type = std::shared_ptr<schema::PrimitiveNode>;
209
+
210
+ struct null_deleter {
211
+ void operator()(void*) {}
212
+ };
213
+
214
+ int32_t column_index_{0};
215
+ int64_t current_row_{0};
216
+ int64_t row_group_size_{0};
217
+ int64_t max_row_group_size_{default_row_group_size_};
218
+
219
+ std::unique_ptr<ParquetFileWriter> file_writer_;
220
+ std::unique_ptr<RowGroupWriter, null_deleter> row_group_writer_;
221
+ std::vector<node_ptr_type> nodes_;
222
+
223
+ static constexpr int16_t kDefLevelZero = 0;
224
+ static constexpr int16_t kDefLevelOne = 1;
225
+ static constexpr int16_t kRepLevelZero = 0;
226
+ static constexpr int64_t kBatchSizeOne = 1;
227
+
228
+ static int64_t default_row_group_size_;
229
+ };
230
+
231
+ struct PARQUET_EXPORT EndRowType {};
232
+ constexpr EndRowType EndRow = {};
233
+
234
+ struct PARQUET_EXPORT EndRowGroupType {};
235
+ constexpr EndRowGroupType EndRowGroup = {};
236
+
237
+ PARQUET_EXPORT
238
+ StreamWriter& operator<<(StreamWriter&, EndRowType);
239
+
240
+ PARQUET_EXPORT
241
+ StreamWriter& operator<<(StreamWriter&, EndRowGroupType);
242
+
243
+ } // namespace parquet
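
To make the row-oriented flow concrete, a condensed sketch. The `outfile` stream and `schema` group node are assumed to be created elsewhere, and the column types shown are placeholders that must match that schema exactly:

    #include "parquet/stream_writer.h"

    void WriteRowsSketch(std::shared_ptr<::arrow::io::OutputStream> outfile,
                         std::shared_ptr<parquet::schema::GroupNode> schema) {
      parquet::StreamWriter os{parquet::ParquetFileWriter::Open(outfile, schema)};
      os << std::string("id-0001")    // variable-length BYTE_ARRAY column
         << std::int32_t{42}          // INT32 column
         << 1.5f                      // FLOAT column
         << std::optional<double>{}   // optional DOUBLE column, written as null
         << parquet::EndRow;          // terminate the row (mandatory)
      os << parquet::EndRowGroup;     // optionally force a new row group
    }
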
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/test_util.h ADDED
@@ -0,0 +1,834 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This module defines an abstract interface for iterating through pages in a
19
+ // Parquet column chunk within a row group. It could be extended in the future
20
+ // to iterate through all data pages in all chunks in a file.
21
+
22
+ #pragma once
23
+
24
+ #include <algorithm>
25
+ #include <limits>
26
+ #include <memory>
27
+ #include <random>
28
+ #include <string>
29
+ #include <utility>
30
+ #include <vector>
31
+
32
+ #include <gtest/gtest.h>
33
+
34
+ #include "arrow/io/memory.h"
35
+ #include "arrow/testing/util.h"
36
+ #include "arrow/util/float16.h"
37
+
38
+ #include "parquet/column_page.h"
39
+ #include "parquet/column_reader.h"
40
+ #include "parquet/column_writer.h"
41
+ #include "parquet/encoding.h"
42
+ #include "parquet/platform.h"
43
+
44
+ // https://github.com/google/googletest/pull/2904 might not be available
45
+ // in our version of gtest/gmock
46
+ #define EXPECT_THROW_THAT(callable, ex_type, property) \
47
+ EXPECT_THROW( \
48
+ try { (callable)(); } catch (const ex_type& err) { \
49
+ EXPECT_THAT(err, (property)); \
50
+ throw; \
51
+ }, \
52
+ ex_type)
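
A hypothetical use of this macro (assuming gmock is available for EXPECT_THAT), with a plain predicate matcher so it does not depend on any specific matcher API:

    TEST(ParquetExceptionTest, MessageMatches) {
      EXPECT_THROW_THAT(
          [] { throw parquet::ParquetException("index 3 out of bounds"); },
          parquet::ParquetException,
          ::testing::Truly([](const parquet::ParquetException& err) {
            return std::string(err.what()).find("out of bounds") !=
                   std::string::npos;
          }));
    }
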
53
+
54
+ namespace parquet {
55
+
56
+ static constexpr int FLBA_LENGTH = 12;
57
+
58
+ inline bool operator==(const FixedLenByteArray& a, const FixedLenByteArray& b) {
59
+ return 0 == memcmp(a.ptr, b.ptr, FLBA_LENGTH);
60
+ }
61
+
62
+ namespace test {
63
+
64
+ typedef ::testing::Types<BooleanType, Int32Type, Int64Type, Int96Type, FloatType,
65
+ DoubleType, ByteArrayType, FLBAType>
66
+ ParquetTypes;
67
+
68
+ class ParquetTestException : public parquet::ParquetException {
69
+ using ParquetException::ParquetException;
70
+ };
71
+
72
+ const char* get_data_dir();
73
+ std::string get_bad_data_dir();
74
+
75
+ std::string get_data_file(const std::string& filename, bool is_good = true);
76
+
77
+ template <typename T>
78
+ static inline void assert_vector_equal(const std::vector<T>& left,
79
+ const std::vector<T>& right) {
80
+ ASSERT_EQ(left.size(), right.size());
81
+
82
+ for (size_t i = 0; i < left.size(); ++i) {
83
+ ASSERT_EQ(left[i], right[i]) << i;
84
+ }
85
+ }
86
+
87
+ template <typename T>
88
+ static inline bool vector_equal(const std::vector<T>& left, const std::vector<T>& right) {
89
+ if (left.size() != right.size()) {
90
+ return false;
91
+ }
92
+
93
+ for (size_t i = 0; i < left.size(); ++i) {
94
+ if (left[i] != right[i]) {
95
+ std::cerr << "index " << i << " left was " << left[i] << " right was " << right[i]
96
+ << std::endl;
97
+ return false;
98
+ }
99
+ }
100
+
101
+ return true;
102
+ }
103
+
104
+ template <typename T>
105
+ static std::vector<T> slice(const std::vector<T>& values, int start, int end) {
106
+ if (end < start) {
107
+ return std::vector<T>(0);
108
+ }
109
+
110
+ std::vector<T> out(end - start);
111
+ for (int i = start; i < end; ++i) {
112
+ out[i - start] = values[i];
113
+ }
114
+ return out;
115
+ }
116
+
117
+ void random_bytes(int n, uint32_t seed, std::vector<uint8_t>* out);
118
+ void random_bools(int n, double p, uint32_t seed, bool* out);
119
+
120
+ template <typename T>
121
+ inline void random_numbers(int n, uint32_t seed, T min_value, T max_value, T* out) {
122
+ std::default_random_engine gen(seed);
123
+ std::uniform_int_distribution<T> d(min_value, max_value);
124
+ for (int i = 0; i < n; ++i) {
125
+ out[i] = d(gen);
126
+ }
127
+ }
128
+
129
+ template <>
130
+ inline void random_numbers(int n, uint32_t seed, float min_value, float max_value,
131
+ float* out) {
132
+ std::default_random_engine gen(seed);
133
+ std::uniform_real_distribution<float> d(min_value, max_value);
134
+ for (int i = 0; i < n; ++i) {
135
+ out[i] = d(gen);
136
+ }
137
+ }
138
+
139
+ template <>
140
+ inline void random_numbers(int n, uint32_t seed, double min_value, double max_value,
141
+ double* out) {
142
+ std::default_random_engine gen(seed);
143
+ std::uniform_real_distribution<double> d(min_value, max_value);
144
+ for (int i = 0; i < n; ++i) {
145
+ out[i] = d(gen);
146
+ }
147
+ }
148
+
149
+ void random_Int96_numbers(int n, uint32_t seed, int32_t min_value, int32_t max_value,
150
+ Int96* out);
151
+
152
+ void random_float16_numbers(int n, uint32_t seed, ::arrow::util::Float16 min_value,
153
+ ::arrow::util::Float16 max_value, uint16_t* out);
154
+
155
+ void random_fixed_byte_array(int n, uint32_t seed, uint8_t* buf, int len, FLBA* out);
156
+
157
+ void random_byte_array(int n, uint32_t seed, uint8_t* buf, ByteArray* out, int min_size,
158
+ int max_size);
159
+
160
+ void random_byte_array(int n, uint32_t seed, uint8_t* buf, ByteArray* out, int max_size);
161
+
162
+ void prefixed_random_byte_array(int n, uint32_t seed, uint8_t* buf, ByteArray* out,
163
+ int min_size, int max_size, double prefixed_probability);
164
+
165
+ void prefixed_random_byte_array(int n, uint32_t seed, uint8_t* buf, int len, FLBA* out,
166
+ double prefixed_probability);
167
+
168
+ template <typename Type, typename Sequence>
169
+ std::shared_ptr<Buffer> EncodeValues(Encoding::type encoding, bool use_dictionary,
170
+ const Sequence& values, int length,
171
+ const ColumnDescriptor* descr) {
172
+ auto encoder = MakeTypedEncoder<Type>(encoding, use_dictionary, descr);
173
+ encoder->Put(values, length);
174
+ return encoder->FlushValues();
175
+ }
176
+
177
+ template <typename T>
178
+ static void InitValues(int num_values, uint32_t seed, std::vector<T>& values,
179
+ std::vector<uint8_t>& buffer) {
180
+ random_numbers(num_values, seed, std::numeric_limits<T>::min(),
181
+ std::numeric_limits<T>::max(), values.data());
182
+ }
183
+
184
+ template <typename T>
185
+ static void InitValues(int num_values, std::vector<T>& values,
186
+ std::vector<uint8_t>& buffer) {
187
+ InitValues(num_values, 0, values, buffer);
188
+ }
189
+
190
+ template <typename T>
191
+ static void InitDictValues(int num_values, int num_dicts, std::vector<T>& values,
192
+ std::vector<uint8_t>& buffer) {
193
+ int repeat_factor = num_values / num_dicts;
194
+ InitValues<T>(num_dicts, values, buffer);
195
+ // add some repeated values
196
+ for (int j = 1; j < repeat_factor; ++j) {
197
+ for (int i = 0; i < num_dicts; ++i) {
198
+ std::memcpy(&values[num_dicts * j + i], &values[i], sizeof(T));
199
+ }
200
+ }
201
+ // so far only num_dicts * repeat_factor values (< num_values) have been filled in;
202
+ // fill in the remaining values
203
+ for (int i = num_dicts * repeat_factor; i < num_values; ++i) {
204
+ std::memcpy(&values[i], &values[i - num_dicts * repeat_factor], sizeof(T));
205
+ }
206
+ }
207
+
208
+ template <>
209
+ inline void InitDictValues<bool>(int num_values, int num_dicts, std::vector<bool>& values,
210
+ std::vector<uint8_t>& buffer) {
211
+ // No op for bool
212
+ }
213
+
214
+ class MockPageReader : public PageReader {
215
+ public:
216
+ explicit MockPageReader(const std::vector<std::shared_ptr<Page>>& pages)
217
+ : pages_(pages), page_index_(0) {}
218
+
219
+ std::shared_ptr<Page> NextPage() override {
220
+ if (page_index_ == static_cast<int>(pages_.size())) {
221
+ // EOS to consumer
222
+ return std::shared_ptr<Page>(nullptr);
223
+ }
224
+ return pages_[page_index_++];
225
+ }
226
+
227
+ // No-op
228
+ void set_max_page_header_size(uint32_t size) override {}
229
+
230
+ private:
231
+ std::vector<std::shared_ptr<Page>> pages_;
232
+ int page_index_;
233
+ };
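
A small sketch of how a test might drain this mock; the page vector would be produced by the page-builder helpers further below:

    void DrainPagesSketch(const std::vector<std::shared_ptr<parquet::Page>>& pages) {
      parquet::test::MockPageReader reader(pages);
      while (std::shared_ptr<parquet::Page> page = reader.NextPage()) {
        // Inspect page->type(), etc.; a null page signals end-of-stream.
      }
    }
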
234
+
235
+ // TODO(wesm): this is only used for testing for now. Refactor to form part of
236
+ // primary file write path
237
+ template <typename Type>
238
+ class DataPageBuilder {
239
+ public:
240
+ using c_type = typename Type::c_type;
241
+
242
+ // This class writes data and metadata to the passed inputs
243
+ explicit DataPageBuilder(ArrowOutputStream* sink)
244
+ : sink_(sink),
245
+ num_values_(0),
246
+ encoding_(Encoding::PLAIN),
247
+ definition_level_encoding_(Encoding::RLE),
248
+ repetition_level_encoding_(Encoding::RLE),
249
+ have_def_levels_(false),
250
+ have_rep_levels_(false),
251
+ have_values_(false) {}
252
+
253
+ void AppendDefLevels(const std::vector<int16_t>& levels, int16_t max_level,
254
+ Encoding::type encoding = Encoding::RLE) {
255
+ AppendLevels(levels, max_level, encoding);
256
+
257
+ num_values_ = std::max(static_cast<int32_t>(levels.size()), num_values_);
258
+ definition_level_encoding_ = encoding;
259
+ have_def_levels_ = true;
260
+ }
261
+
262
+ void AppendRepLevels(const std::vector<int16_t>& levels, int16_t max_level,
263
+ Encoding::type encoding = Encoding::RLE) {
264
+ AppendLevels(levels, max_level, encoding);
265
+
266
+ num_values_ = std::max(static_cast<int32_t>(levels.size()), num_values_);
267
+ repetition_level_encoding_ = encoding;
268
+ have_rep_levels_ = true;
269
+ }
270
+
271
+ void AppendValues(const ColumnDescriptor* d, const std::vector<c_type>& values,
272
+ Encoding::type encoding = Encoding::PLAIN) {
273
+ std::shared_ptr<Buffer> values_sink = EncodeValues<Type>(
274
+ encoding, false, values.data(), static_cast<int>(values.size()), d);
275
+ PARQUET_THROW_NOT_OK(sink_->Write(values_sink->data(), values_sink->size()));
276
+
277
+ num_values_ = std::max(static_cast<int32_t>(values.size()), num_values_);
278
+ encoding_ = encoding;
279
+ have_values_ = true;
280
+ }
281
+
282
+ int32_t num_values() const { return num_values_; }
283
+
284
+ Encoding::type encoding() const { return encoding_; }
285
+
286
+ Encoding::type rep_level_encoding() const { return repetition_level_encoding_; }
287
+
288
+ Encoding::type def_level_encoding() const { return definition_level_encoding_; }
289
+
290
+ private:
291
+ ArrowOutputStream* sink_;
292
+
293
+ int32_t num_values_;
294
+ Encoding::type encoding_;
295
+ Encoding::type definition_level_encoding_;
296
+ Encoding::type repetition_level_encoding_;
297
+
298
+ bool have_def_levels_;
299
+ bool have_rep_levels_;
300
+ bool have_values_;
301
+
302
+ // Used internally for both repetition and definition levels
303
+ void AppendLevels(const std::vector<int16_t>& levels, int16_t max_level,
304
+ Encoding::type encoding) {
305
+ if (encoding != Encoding::RLE) {
306
+ ParquetException::NYI("only rle encoding currently implemented");
307
+ }
308
+
309
+ std::vector<uint8_t> encode_buffer(LevelEncoder::MaxBufferSize(
310
+ Encoding::RLE, max_level, static_cast<int>(levels.size())));
311
+
312
+ // We encode into separate memory from the output stream because the
313
+ // RLE-encoded bytes have to be preceded in the stream by their absolute
314
+ // size.
315
+ LevelEncoder encoder;
316
+ encoder.Init(encoding, max_level, static_cast<int>(levels.size()),
317
+ encode_buffer.data(), static_cast<int>(encode_buffer.size()));
318
+
319
+ encoder.Encode(static_cast<int>(levels.size()), levels.data());
320
+
321
+ int32_t rle_bytes = encoder.len();
322
+ PARQUET_THROW_NOT_OK(
323
+ sink_->Write(reinterpret_cast<const uint8_t*>(&rle_bytes), sizeof(int32_t)));
324
+ PARQUET_THROW_NOT_OK(sink_->Write(encode_buffer.data(), rle_bytes));
325
+ }
326
+ };
327
+
328
+ template <>
329
+ inline void DataPageBuilder<BooleanType>::AppendValues(const ColumnDescriptor* d,
330
+ const std::vector<bool>& values,
331
+ Encoding::type encoding) {
332
+ if (encoding != Encoding::PLAIN) {
333
+ ParquetException::NYI("only plain encoding currently implemented");
334
+ }
335
+
336
+ auto encoder = MakeTypedEncoder<BooleanType>(Encoding::PLAIN, false, d);
337
+ dynamic_cast<BooleanEncoder*>(encoder.get())
338
+ ->Put(values, static_cast<int>(values.size()));
339
+ std::shared_ptr<Buffer> buffer = encoder->FlushValues();
340
+ PARQUET_THROW_NOT_OK(sink_->Write(buffer->data(), buffer->size()));
341
+
342
+ num_values_ = std::max(static_cast<int32_t>(values.size()), num_values_);
343
+ encoding_ = encoding;
344
+ have_values_ = true;
345
+ }
346
+
347
+ template <typename Type>
348
+ static std::shared_ptr<DataPageV1> MakeDataPage(
349
+ const ColumnDescriptor* d, const std::vector<typename Type::c_type>& values,
350
+ int num_vals, Encoding::type encoding, const uint8_t* indices, int indices_size,
351
+ const std::vector<int16_t>& def_levels, int16_t max_def_level,
352
+ const std::vector<int16_t>& rep_levels, int16_t max_rep_level) {
353
+ int num_values = 0;
354
+
355
+ auto page_stream = CreateOutputStream();
356
+ test::DataPageBuilder<Type> page_builder(page_stream.get());
357
+
358
+ if (!rep_levels.empty()) {
359
+ page_builder.AppendRepLevels(rep_levels, max_rep_level);
360
+ }
361
+ if (!def_levels.empty()) {
362
+ page_builder.AppendDefLevels(def_levels, max_def_level);
363
+ }
364
+
365
+ if (encoding == Encoding::PLAIN) {
366
+ page_builder.AppendValues(d, values, encoding);
367
+ num_values = std::max(page_builder.num_values(), num_vals);
368
+ } else { // DICTIONARY PAGES
369
+ PARQUET_THROW_NOT_OK(page_stream->Write(indices, indices_size));
370
+ num_values = std::max(page_builder.num_values(), num_vals);
371
+ }
372
+
373
+ PARQUET_ASSIGN_OR_THROW(auto buffer, page_stream->Finish());
374
+
375
+ return std::make_shared<DataPageV1>(buffer, num_values, encoding,
376
+ page_builder.def_level_encoding(),
377
+ page_builder.rep_level_encoding(), buffer->size());
378
+ }
379
+
380
+ template <typename TYPE>
381
+ class DictionaryPageBuilder {
382
+ public:
383
+ typedef typename TYPE::c_type TC;
384
+ static constexpr int TN = TYPE::type_num;
385
+ using SpecializedEncoder = typename EncodingTraits<TYPE>::Encoder;
386
+
387
+ // This class writes data and metadata to the passed inputs
388
+ explicit DictionaryPageBuilder(const ColumnDescriptor* d)
389
+ : num_dict_values_(0), have_values_(false) {
390
+ auto encoder = MakeTypedEncoder<TYPE>(Encoding::PLAIN, true, d);
391
+ dict_traits_ = dynamic_cast<DictEncoder<TYPE>*>(encoder.get());
392
+ encoder_.reset(dynamic_cast<SpecializedEncoder*>(encoder.release()));
393
+ }
394
+
395
+ ~DictionaryPageBuilder() {}
396
+
397
+ std::shared_ptr<Buffer> AppendValues(const std::vector<TC>& values) {
398
+ int num_values = static_cast<int>(values.size());
399
+ // Dictionary encoding
400
+ encoder_->Put(values.data(), num_values);
401
+ num_dict_values_ = dict_traits_->num_entries();
402
+ have_values_ = true;
403
+ return encoder_->FlushValues();
404
+ }
405
+
406
+ std::shared_ptr<Buffer> WriteDict() {
407
+ std::shared_ptr<Buffer> dict_buffer =
408
+ AllocateBuffer(::arrow::default_memory_pool(), dict_traits_->dict_encoded_size());
409
+ dict_traits_->WriteDict(dict_buffer->mutable_data());
410
+ return dict_buffer;
411
+ }
412
+
413
+ int32_t num_values() const { return num_dict_values_; }
414
+
415
+ private:
416
+ DictEncoder<TYPE>* dict_traits_;
417
+ std::unique_ptr<SpecializedEncoder> encoder_;
418
+ int32_t num_dict_values_;
419
+ bool have_values_;
420
+ };
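// Editorial note (hedged): AppendValues() returns the encoded dictionary
// *indices* for one page (via FlushValues()), while WriteDict() serializes
// the accumulated dictionary values themselves; MakeDictPage() and
// PaginateDict() below rely on this split to emit a single dictionary page
// followed by per-page index data.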
421
+
422
+ template <>
423
+ inline DictionaryPageBuilder<BooleanType>::DictionaryPageBuilder(
424
+ const ColumnDescriptor* d) {
425
+ ParquetException::NYI("only plain encoding currently implemented for boolean");
426
+ }
427
+
428
+ template <>
429
+ inline std::shared_ptr<Buffer> DictionaryPageBuilder<BooleanType>::WriteDict() {
430
+ ParquetException::NYI("only plain encoding currently implemented for boolean");
431
+ return nullptr;
432
+ }
433
+
434
+ template <>
435
+ inline std::shared_ptr<Buffer> DictionaryPageBuilder<BooleanType>::AppendValues(
436
+ const std::vector<TC>& values) {
437
+ ParquetException::NYI("only plain encoding currently implemented for boolean");
438
+ return nullptr;
439
+ }
440
+
441
+ template <typename Type>
442
+ inline static std::shared_ptr<DictionaryPage> MakeDictPage(
443
+ const ColumnDescriptor* d, const std::vector<typename Type::c_type>& values,
444
+ const std::vector<int>& values_per_page, Encoding::type encoding,
445
+ std::vector<std::shared_ptr<Buffer>>& rle_indices) {
446
+ test::DictionaryPageBuilder<Type> page_builder(d);
447
+ int num_pages = static_cast<int>(values_per_page.size());
448
+ int value_start = 0;
449
+
450
+ for (int i = 0; i < num_pages; i++) {
451
+ rle_indices.push_back(page_builder.AppendValues(
452
+ slice(values, value_start, value_start + values_per_page[i])));
453
+ value_start += values_per_page[i];
454
+ }
455
+
456
+ auto buffer = page_builder.WriteDict();
457
+
458
+ return std::make_shared<DictionaryPage>(buffer, page_builder.num_values(),
459
+ Encoding::PLAIN);
460
+ }
461
+
462
+ // Given def/rep levels and values create multiple dict pages
463
+ template <typename Type>
464
+ inline static void PaginateDict(const ColumnDescriptor* d,
465
+ const std::vector<typename Type::c_type>& values,
466
+ const std::vector<int16_t>& def_levels,
467
+ int16_t max_def_level,
468
+ const std::vector<int16_t>& rep_levels,
469
+ int16_t max_rep_level, int num_levels_per_page,
470
+ const std::vector<int>& values_per_page,
471
+ std::vector<std::shared_ptr<Page>>& pages,
472
+ Encoding::type encoding = Encoding::RLE_DICTIONARY) {
473
+ int num_pages = static_cast<int>(values_per_page.size());
474
+ std::vector<std::shared_ptr<Buffer>> rle_indices;
475
+ std::shared_ptr<DictionaryPage> dict_page =
476
+ MakeDictPage<Type>(d, values, values_per_page, encoding, rle_indices);
477
+ pages.push_back(dict_page);
478
+ int def_level_start = 0;
479
+ int def_level_end = 0;
480
+ int rep_level_start = 0;
481
+ int rep_level_end = 0;
482
+ for (int i = 0; i < num_pages; i++) {
483
+ if (max_def_level > 0) {
484
+ def_level_start = i * num_levels_per_page;
485
+ def_level_end = (i + 1) * num_levels_per_page;
486
+ }
487
+ if (max_rep_level > 0) {
488
+ rep_level_start = i * num_levels_per_page;
489
+ rep_level_end = (i + 1) * num_levels_per_page;
490
+ }
491
+ std::shared_ptr<DataPageV1> data_page = MakeDataPage<Int32Type>(
492
+ d, {}, values_per_page[i], encoding, rle_indices[i]->data(),
493
+ static_cast<int>(rle_indices[i]->size()),
494
+ slice(def_levels, def_level_start, def_level_end), max_def_level,
495
+ slice(rep_levels, rep_level_start, rep_level_end), max_rep_level);
496
+ pages.push_back(data_page);
497
+ }
498
+ }
499
+
500
+ // Given def/rep levels and values create multiple plain pages
501
+ template <typename Type>
502
+ static inline void PaginatePlain(const ColumnDescriptor* d,
503
+ const std::vector<typename Type::c_type>& values,
504
+ const std::vector<int16_t>& def_levels,
505
+ int16_t max_def_level,
506
+ const std::vector<int16_t>& rep_levels,
507
+ int16_t max_rep_level, int num_levels_per_page,
508
+ const std::vector<int>& values_per_page,
509
+ std::vector<std::shared_ptr<Page>>& pages,
510
+ Encoding::type encoding = Encoding::PLAIN) {
511
+ int num_pages = static_cast<int>(values_per_page.size());
512
+ int def_level_start = 0;
513
+ int def_level_end = 0;
514
+ int rep_level_start = 0;
515
+ int rep_level_end = 0;
516
+ int value_start = 0;
517
+ for (int i = 0; i < num_pages; i++) {
518
+ if (max_def_level > 0) {
519
+ def_level_start = i * num_levels_per_page;
520
+ def_level_end = (i + 1) * num_levels_per_page;
521
+ }
522
+ if (max_rep_level > 0) {
523
+ rep_level_start = i * num_levels_per_page;
524
+ rep_level_end = (i + 1) * num_levels_per_page;
525
+ }
526
+ std::shared_ptr<DataPage> page = MakeDataPage<Type>(
527
+ d, slice(values, value_start, value_start + values_per_page[i]),
528
+ values_per_page[i], encoding, nullptr, 0,
529
+ slice(def_levels, def_level_start, def_level_end), max_def_level,
530
+ slice(rep_levels, rep_level_start, rep_level_end), max_rep_level);
531
+ pages.push_back(page);
532
+ value_start += values_per_page[i];
533
+ }
534
+ }
535
+
536
+ // Generates pages from randomly generated data
537
+ template <typename Type>
538
+ static inline int MakePages(const ColumnDescriptor* d, int num_pages, int levels_per_page,
539
+ std::vector<int16_t>& def_levels,
540
+ std::vector<int16_t>& rep_levels,
541
+ std::vector<typename Type::c_type>& values,
542
+ std::vector<uint8_t>& buffer,
543
+ std::vector<std::shared_ptr<Page>>& pages,
544
+ Encoding::type encoding = Encoding::PLAIN,
545
+ uint32_t seed = 0) {
546
+ int num_levels = levels_per_page * num_pages;
547
+ int num_values = 0;
548
+ int16_t zero = 0;
549
+ int16_t max_def_level = d->max_definition_level();
550
+ int16_t max_rep_level = d->max_repetition_level();
551
+ std::vector<int> values_per_page(num_pages, levels_per_page);
552
+ // Create definition levels
553
+ if (max_def_level > 0 && num_levels != 0) {
554
+ def_levels.resize(num_levels);
555
+ random_numbers(num_levels, seed, zero, max_def_level, def_levels.data());
556
+ for (int p = 0; p < num_pages; p++) {
557
+ int num_values_per_page = 0;
558
+ for (int i = 0; i < levels_per_page; i++) {
559
+ if (def_levels[i + p * levels_per_page] == max_def_level) {
560
+ num_values_per_page++;
561
+ num_values++;
562
+ }
563
+ }
564
+ values_per_page[p] = num_values_per_page;
565
+ }
566
+ } else {
567
+ num_values = num_levels;
568
+ }
569
+ // Create repetition levels
570
+ if (max_rep_level > 0 && num_levels != 0) {
571
+ rep_levels.resize(num_levels);
572
+ // Using a different seed so that def_levels and rep_levels are different.
573
+ random_numbers(num_levels, seed + 789, zero, max_rep_level, rep_levels.data());
574
+ // The generated levels are random. Force the very first page to start with a new
575
+ // record.
576
+ rep_levels[0] = 0;
577
+ // For a null value, rep_levels and def_levels are both 0.
578
+ // If we have a repeated value right after this, it needs to start with
579
+ // rep_level = 0 to indicate a new record.
580
+ for (int i = 0; i < num_levels - 1; ++i) {
581
+ if (rep_levels[i] == 0 && def_levels[i] == 0) {
582
+ rep_levels[i + 1] = 0;
583
+ }
584
+ }
585
+ }
586
+ // Create values
587
+ values.resize(num_values);
588
+ if (encoding == Encoding::PLAIN) {
589
+ InitValues<typename Type::c_type>(num_values, values, buffer);
590
+ PaginatePlain<Type>(d, values, def_levels, max_def_level, rep_levels, max_rep_level,
591
+ levels_per_page, values_per_page, pages);
592
+ } else if (encoding == Encoding::RLE_DICTIONARY ||
593
+ encoding == Encoding::PLAIN_DICTIONARY) {
594
+ // Calls InitValues and repeats the data
595
+ InitDictValues<typename Type::c_type>(num_values, levels_per_page, values, buffer);
596
+ PaginateDict<Type>(d, values, def_levels, max_def_level, rep_levels, max_rep_level,
597
+ levels_per_page, values_per_page, pages);
598
+ }
599
+
600
+ return num_values;
601
+ }
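// Editorial usage sketch (hedged; `descr` is an assumed ColumnDescriptor*,
// everything else is declared in this header):
//   std::vector<int16_t> def_levels, rep_levels;
//   std::vector<int32_t> values;
//   std::vector<uint8_t> buffer;
//   std::vector<std::shared_ptr<Page>> pages;
//   int n = MakePages<Int32Type>(descr, /*num_pages=*/2,
//                                /*levels_per_page=*/100, def_levels,
//                                rep_levels, values, buffer, pages);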
602
+
603
+ // ----------------------------------------------------------------------
604
+ // Test data generation
605
+
606
+ template <>
607
+ void inline InitValues<bool>(int num_values, uint32_t seed, std::vector<bool>& values,
608
+ std::vector<uint8_t>& buffer) {
609
+ values = {};
610
+ if (seed == 0) {
611
+ seed = static_cast<uint32_t>(::arrow::random_seed());
612
+ }
613
+ ::arrow::random_is_valid(num_values, 0.5, &values, static_cast<int>(seed));
614
+ }
615
+
616
+ template <>
617
+ inline void InitValues<ByteArray>(int num_values, uint32_t seed,
618
+ std::vector<ByteArray>& values,
619
+ std::vector<uint8_t>& buffer) {
620
+ int max_byte_array_len = 12;
621
+ int num_bytes = static_cast<int>(max_byte_array_len + sizeof(uint32_t));
622
+ size_t nbytes = num_values * num_bytes;
623
+ buffer.resize(nbytes);
624
+ random_byte_array(num_values, seed, buffer.data(), values.data(), max_byte_array_len);
625
+ }
626
+
627
+ inline void InitWideByteArrayValues(int num_values, std::vector<ByteArray>& values,
628
+ std::vector<uint8_t>& buffer, int min_len,
629
+ int max_len) {
630
+ int num_bytes = static_cast<int>(max_len + sizeof(uint32_t));
631
+ size_t nbytes = num_values * num_bytes;
632
+ buffer.resize(nbytes);
633
+ random_byte_array(num_values, 0, buffer.data(), values.data(), min_len, max_len);
634
+ }
635
+
636
+ template <>
637
+ inline void InitValues<FLBA>(int num_values, uint32_t seed, std::vector<FLBA>& values,
638
+ std::vector<uint8_t>& buffer) {
639
+ size_t nbytes = num_values * FLBA_LENGTH;
640
+ buffer.resize(nbytes);
641
+ random_fixed_byte_array(num_values, seed, buffer.data(), FLBA_LENGTH, values.data());
642
+ }
643
+
644
+ template <>
645
+ inline void InitValues<Int96>(int num_values, uint32_t seed, std::vector<Int96>& values,
646
+ std::vector<uint8_t>& buffer) {
647
+ random_Int96_numbers(num_values, seed, std::numeric_limits<int32_t>::min(),
648
+ std::numeric_limits<int32_t>::max(), values.data());
649
+ }
650
+
651
+ inline std::string TestColumnName(int i) {
652
+ std::stringstream col_name;
653
+ col_name << "column_" << i;
654
+ return col_name.str();
655
+ }
656
+
657
+ // This class lives here because of its dependency on the InitValues specializations.
658
+ template <typename TestType>
659
+ class PrimitiveTypedTest : public ::testing::Test {
660
+ public:
661
+ using c_type = typename TestType::c_type;
662
+
663
+ void SetUpSchema(Repetition::type repetition, int num_columns = 1) {
664
+ std::vector<schema::NodePtr> fields;
665
+
666
+ for (int i = 0; i < num_columns; ++i) {
667
+ std::string name = TestColumnName(i);
668
+ fields.push_back(schema::PrimitiveNode::Make(name, repetition, TestType::type_num,
669
+ ConvertedType::NONE, FLBA_LENGTH));
670
+ }
671
+ node_ = schema::GroupNode::Make("schema", Repetition::REQUIRED, fields);
672
+ schema_.Init(node_);
673
+ }
674
+
675
+ void GenerateData(int64_t num_values, uint32_t seed = 0);
676
+ void SetupValuesOut(int64_t num_values);
677
+ void SyncValuesOut();
678
+
679
+ protected:
680
+ schema::NodePtr node_;
681
+ SchemaDescriptor schema_;
682
+
683
+ // Input buffers
684
+ std::vector<c_type> values_;
685
+
686
+ std::vector<int16_t> def_levels_;
687
+
688
+ std::vector<uint8_t> buffer_;
689
+ // Pointer to the values, needed as we cannot use std::vector<bool>::data()
690
+ c_type* values_ptr_;
691
+ std::vector<uint8_t> bool_buffer_;
692
+
693
+ // Output buffers
694
+ std::vector<c_type> values_out_;
695
+ std::vector<uint8_t> bool_buffer_out_;
696
+ c_type* values_out_ptr_;
697
+ };
698
+
699
+ template <typename TestType>
700
+ inline void PrimitiveTypedTest<TestType>::SyncValuesOut() {}
701
+
702
+ template <>
703
+ inline void PrimitiveTypedTest<BooleanType>::SyncValuesOut() {
704
+ std::vector<uint8_t>::const_iterator source_iterator = bool_buffer_out_.begin();
705
+ std::vector<c_type>::iterator destination_iterator = values_out_.begin();
706
+ while (source_iterator != bool_buffer_out_.end()) {
707
+ *destination_iterator++ = *source_iterator++ != 0;
708
+ }
709
+ }
710
+
711
+ template <typename TestType>
712
+ inline void PrimitiveTypedTest<TestType>::SetupValuesOut(int64_t num_values) {
713
+ values_out_.clear();
714
+ values_out_.resize(num_values);
715
+ values_out_ptr_ = values_out_.data();
716
+ }
717
+
718
+ template <>
719
+ inline void PrimitiveTypedTest<BooleanType>::SetupValuesOut(int64_t num_values) {
720
+ values_out_.clear();
721
+ values_out_.resize(num_values);
722
+
723
+ bool_buffer_out_.clear();
724
+ bool_buffer_out_.resize(num_values);
725
+ // Write once to all values so we can copy it without getting Valgrind errors
726
+ // about uninitialised values.
727
+ std::fill(bool_buffer_out_.begin(), bool_buffer_out_.end(), true);
728
+ values_out_ptr_ = reinterpret_cast<bool*>(bool_buffer_out_.data());
729
+ }
730
+
731
+ template <typename TestType>
732
+ inline void PrimitiveTypedTest<TestType>::GenerateData(int64_t num_values,
733
+ uint32_t seed) {
734
+ def_levels_.resize(num_values);
735
+ values_.resize(num_values);
736
+
737
+ InitValues<c_type>(static_cast<int>(num_values), seed, values_, buffer_);
738
+ values_ptr_ = values_.data();
739
+
740
+ std::fill(def_levels_.begin(), def_levels_.end(), 1);
741
+ }
742
+
743
+ template <>
744
+ inline void PrimitiveTypedTest<BooleanType>::GenerateData(int64_t num_values,
745
+ uint32_t seed) {
746
+ def_levels_.resize(num_values);
747
+ values_.resize(num_values);
748
+
749
+ InitValues<c_type>(static_cast<int>(num_values), seed, values_, buffer_);
750
+ bool_buffer_.resize(num_values);
751
+ std::copy(values_.begin(), values_.end(), bool_buffer_.begin());
752
+ values_ptr_ = reinterpret_cast<bool*>(bool_buffer_.data());
753
+
754
+ std::fill(def_levels_.begin(), def_levels_.end(), 1);
755
+ }
756
+
757
+ // ----------------------------------------------------------------------
758
+ // Test data generation
759
+
760
+ template <typename T>
761
+ inline void GenerateData(int num_values, T* out, std::vector<uint8_t>* heap) {
762
+ // seed the prng so failure is deterministic
763
+ random_numbers(num_values, 0, std::numeric_limits<T>::min(),
764
+ std::numeric_limits<T>::max(), out);
765
+ }
766
+
767
+ template <typename T>
768
+ inline void GenerateBoundData(int num_values, T* out, T min, T max,
769
+ std::vector<uint8_t>* heap) {
770
+ // seed the prng so failure is deterministic
771
+ random_numbers(num_values, 0, min, max, out);
772
+ }
773
+
774
+ template <>
775
+ inline void GenerateData<bool>(int num_values, bool* out, std::vector<uint8_t>* heap) {
776
+ // seed the prng so failure is deterministic
777
+ random_bools(num_values, 0.5, 0, out);
778
+ }
779
+
780
+ template <>
781
+ inline void GenerateData<Int96>(int num_values, Int96* out, std::vector<uint8_t>* heap) {
782
+ // seed the prng so failure is deterministic
783
+ random_Int96_numbers(num_values, 0, std::numeric_limits<int32_t>::min(),
784
+ std::numeric_limits<int32_t>::max(), out);
785
+ }
786
+
787
+ template <>
788
+ inline void GenerateData<ByteArray>(int num_values, ByteArray* out,
789
+ std::vector<uint8_t>* heap) {
790
+ int max_byte_array_len = 12;
791
+ heap->resize(num_values * max_byte_array_len);
792
+ // seed the prng so failure is deterministic
793
+ random_byte_array(num_values, 0, heap->data(), out, 2, max_byte_array_len);
794
+ }
795
+
796
+ // Generate ByteArray or FLBA data where there is a given probability
797
+ // for each value to share a common prefix with its predecessor.
798
+ // This is useful to exercise prefix-based encodings such as DELTA_BYTE_ARRAY.
799
+ template <typename T>
800
+ inline void GeneratePrefixedData(int num_values, T* out, std::vector<uint8_t>* heap,
801
+ double prefixed_probability);
802
+
803
+ template <>
804
+ inline void GeneratePrefixedData(int num_values, ByteArray* out,
805
+ std::vector<uint8_t>* heap,
806
+ double prefixed_probability) {
807
+ int max_byte_array_len = 12;
808
+ heap->resize(num_values * max_byte_array_len);
809
+ // seed the prng so failure is deterministic
810
+ prefixed_random_byte_array(num_values, /*seed=*/0, heap->data(), out, /*min_size=*/2,
811
+ /*max_size=*/max_byte_array_len, prefixed_probability);
812
+ }
813
+
814
+ static constexpr int kGenerateDataFLBALength = 8;
815
+
816
+ template <>
817
+ inline void GeneratePrefixedData<FLBA>(int num_values, FLBA* out,
818
+ std::vector<uint8_t>* heap,
819
+ double prefixed_probability) {
820
+ heap->resize(num_values * kGenerateDataFLBALength);
821
+ // seed the prng so failure is deterministic
822
+ prefixed_random_byte_array(num_values, /*seed=*/0, heap->data(),
823
+ kGenerateDataFLBALength, out, prefixed_probability);
824
+ }
825
+
826
+ template <>
827
+ inline void GenerateData<FLBA>(int num_values, FLBA* out, std::vector<uint8_t>* heap) {
828
+ heap->resize(num_values * kGenerateDataFLBALength);
829
+ // seed the prng so failure is deterministic
830
+ random_fixed_byte_array(num_values, 0, heap->data(), kGenerateDataFLBALength, out);
831
+ }
832
+
833
+ } // namespace test
834
+ } // namespace parquet
env-llmeval/lib/python3.10/site-packages/pyarrow/include/parquet/windows_compatibility.h ADDED
@@ -0,0 +1,21 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/windows_compatibility.h"
21
+ #include "parquet/windows_fixup.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ # flake8: noqa
19
+
20
+ from .from_dataframe import from_dataframe
env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc ADDED
Binary file (7.44 kB)
env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/buffer.py ADDED
@@ -0,0 +1,107 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from __future__ import annotations
19
+ import enum
20
+
21
+ import pyarrow as pa
22
+
23
+
24
+ class DlpackDeviceType(enum.IntEnum):
25
+ """Integer enum for device type codes matching DLPack."""
26
+
27
+ CPU = 1
28
+ CUDA = 2
29
+ CPU_PINNED = 3
30
+ OPENCL = 4
31
+ VULKAN = 7
32
+ METAL = 8
33
+ VPI = 9
34
+ ROCM = 10
35
+
36
+
37
+ class _PyArrowBuffer:
38
+ """
39
+ Data in the buffer is guaranteed to be contiguous in memory.
40
+
41
+ Note that there is no dtype attribute present, a buffer can be thought of
42
+ as simply a block of memory. However, if the column that the buffer is
43
+ attached to has a dtype that's supported by DLPack and ``__dlpack__`` is
44
+ implemented, then that dtype information will be contained in the return
45
+ value from ``__dlpack__``.
46
+
47
+ This distinction is useful to support both (a) data exchange via DLPack
48
+ on a buffer and (b) dtypes like variable-length strings, which do not have a
49
+ fixed number of bytes per element.
50
+ """
51
+
52
+ def __init__(self, x: pa.Buffer, allow_copy: bool = True) -> None:
53
+ """
54
+ Handle PyArrow Buffers.
55
+ """
56
+ self._x = x
57
+
58
+ @property
59
+ def bufsize(self) -> int:
60
+ """
61
+ Buffer size in bytes.
62
+ """
63
+ return self._x.size
64
+
65
+ @property
66
+ def ptr(self) -> int:
67
+ """
68
+ Pointer to start of the buffer as an integer.
69
+ """
70
+ return self._x.address
71
+
72
+ def __dlpack__(self):
73
+ """
74
+ Produce DLPack capsule (see array API standard).
75
+
76
+ Raises:
77
+ - TypeError : if the buffer contains unsupported dtypes.
78
+ - NotImplementedError : if DLPack support is not implemented
79
+
80
+ Useful for connecting to array libraries. Support is optional because
81
+ it's not completely trivial to implement for a Python-only library.
82
+ """
83
+ raise NotImplementedError("__dlpack__")
84
+
85
+ def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
86
+ """
87
+ Device type and device ID for where the data in the buffer resides.
88
+ Uses device type codes matching DLPack.
89
+ Note: must be implemented even if ``__dlpack__`` is not.
90
+ """
91
+ if self._x.is_cpu:
92
+ return (DlpackDeviceType.CPU, None)
93
+ else:
94
+ raise NotImplementedError("__dlpack_device__")
95
+
96
+ def __repr__(self) -> str:
97
+ return (
98
+ "PyArrowBuffer(" +
99
+ str(
100
+ {
101
+ "bufsize": self.bufsize,
102
+ "ptr": self.ptr,
103
+ "device": self.__dlpack_device__()[0].name,
104
+ }
105
+ ) +
106
+ ")"
107
+ )
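# --- Editorial sketch (hedged; not part of the original module) ---
# A consumer can re-wrap the (ptr, bufsize) pair with pyarrow's public
# foreign_buffer(), as long as the producer keeps the buffer alive (here
# via base=):
import pyarrow as pa
from pyarrow.interchange.buffer import _PyArrowBuffer

buf = _PyArrowBuffer(pa.py_buffer(b"hello"))
view = pa.foreign_buffer(buf.ptr, buf.bufsize, base=buf)
assert view.to_pybytes() == b"hello"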
env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/column.py ADDED
@@ -0,0 +1,529 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from __future__ import annotations
19
+
20
+ import enum
21
+ from typing import (
22
+ Any,
23
+ Dict,
24
+ Iterable,
25
+ Optional,
26
+ Tuple,
27
+ )
28
+
29
+ import sys
30
+ if sys.version_info >= (3, 8):
31
+ from typing import TypedDict
32
+ else:
33
+ from typing_extensions import TypedDict
34
+
35
+ import pyarrow as pa
36
+ import pyarrow.compute as pc
37
+ from pyarrow.interchange.buffer import _PyArrowBuffer
38
+
39
+
40
+ class DtypeKind(enum.IntEnum):
41
+ """
42
+ Integer enum for data types.
43
+
44
+ Attributes
45
+ ----------
46
+ INT : int
47
+ Matches to signed integer data type.
48
+ UINT : int
49
+ Matches to unsigned integer data type.
50
+ FLOAT : int
51
+ Matches to floating point data type.
52
+ BOOL : int
53
+ Matches to boolean data type.
54
+ STRING : int
55
+ Matches to string data type (UTF-8 encoded).
56
+ DATETIME : int
57
+ Matches to datetime data type.
58
+ CATEGORICAL : int
59
+ Matches to categorical data type.
60
+ """
61
+
62
+ INT = 0
63
+ UINT = 1
64
+ FLOAT = 2
65
+ BOOL = 20
66
+ STRING = 21 # UTF-8
67
+ DATETIME = 22
68
+ CATEGORICAL = 23
69
+
70
+
71
+ Dtype = Tuple[DtypeKind, int, str, str] # see Column.dtype
72
+
73
+
74
+ _PYARROW_KINDS = {
75
+ pa.int8(): (DtypeKind.INT, "c"),
76
+ pa.int16(): (DtypeKind.INT, "s"),
77
+ pa.int32(): (DtypeKind.INT, "i"),
78
+ pa.int64(): (DtypeKind.INT, "l"),
79
+ pa.uint8(): (DtypeKind.UINT, "C"),
80
+ pa.uint16(): (DtypeKind.UINT, "S"),
81
+ pa.uint32(): (DtypeKind.UINT, "I"),
82
+ pa.uint64(): (DtypeKind.UINT, "L"),
83
+ pa.float16(): (DtypeKind.FLOAT, "e"),
84
+ pa.float32(): (DtypeKind.FLOAT, "f"),
85
+ pa.float64(): (DtypeKind.FLOAT, "g"),
86
+ pa.bool_(): (DtypeKind.BOOL, "b"),
87
+ pa.string(): (DtypeKind.STRING, "u"),
88
+ pa.large_string(): (DtypeKind.STRING, "U"),
89
+ }
90
+
91
+
92
+ class ColumnNullType(enum.IntEnum):
93
+ """
94
+ Integer enum for null type representation.
95
+
96
+ Attributes
97
+ ----------
98
+ NON_NULLABLE : int
99
+ Non-nullable column.
100
+ USE_NAN : int
101
+ Use explicit float NaN value.
102
+ USE_SENTINEL : int
103
+ Sentinel value besides NaN.
104
+ USE_BITMASK : int
105
+ The bit is set/unset representing a null on a certain position.
106
+ USE_BYTEMASK : int
107
+ The byte is set/unset representing a null on a certain position.
108
+ """
109
+
110
+ NON_NULLABLE = 0
111
+ USE_NAN = 1
112
+ USE_SENTINEL = 2
113
+ USE_BITMASK = 3
114
+ USE_BYTEMASK = 4
115
+
116
+
117
+ class ColumnBuffers(TypedDict):
118
+ # first element is a buffer containing the column data;
119
+ # second element is the data buffer's associated dtype
120
+ data: Tuple[_PyArrowBuffer, Dtype]
121
+
122
+ # first element is a buffer containing mask values indicating missing data;
123
+ # second element is the mask value buffer's associated dtype.
124
+ # None if the null representation is not a bit or byte mask
125
+ validity: Optional[Tuple[_PyArrowBuffer, Dtype]]
126
+
127
+ # first element is a buffer containing the offset values for
128
+ # variable-size binary data (e.g., variable-length strings);
129
+ # second element is the offsets buffer's associated dtype.
130
+ # None if the data buffer does not have an associated offsets buffer
131
+ offsets: Optional[Tuple[_PyArrowBuffer, Dtype]]
132
+
133
+
134
+ class CategoricalDescription(TypedDict):
135
+ # whether the ordering of dictionary indices is semantically meaningful
136
+ is_ordered: bool
137
+ # whether a dictionary-style mapping of categorical values to other objects
138
+ # exists
139
+ is_dictionary: bool
140
+ # Python-level only (e.g. ``{int: str}``).
141
+ # None if not a dictionary-style categorical.
142
+ categories: Optional[_PyArrowColumn]
143
+
144
+
145
+ class Endianness:
146
+ """Enum indicating the byte-order of a data-type."""
147
+
148
+ LITTLE = "<"
149
+ BIG = ">"
150
+ NATIVE = "="
151
+ NA = "|"
152
+
153
+
154
+ class NoBufferPresent(Exception):
155
+ """Exception to signal that there is no requested buffer."""
156
+
157
+
158
+ class _PyArrowColumn:
159
+ """
160
+ A column object, with only the methods and properties required by the
161
+ interchange protocol defined.
162
+
163
+ A column can contain one or more chunks. Each chunk can contain up to three
164
+ buffers - a data buffer, a mask buffer (depending on null representation),
165
+ and an offsets buffer (if variable-size binary; e.g., variable-length
166
+ strings).
167
+
168
+ TBD: Arrow has a separate "null" dtype, and has no separate mask concept.
169
+ Instead, it seems to use "children" for both columns with a bit mask,
170
+ and for nested dtypes. Unclear whether this is elegant or confusing.
171
+ This design requires checking the null representation explicitly.
172
+
173
+ The Arrow design requires checking:
174
+ 1. the ARROW_FLAG_NULLABLE (for sentinel values)
175
+ 2. if a column has two children, combined with one of those children
176
+ having a null dtype.
177
+
178
+ Making the mask concept explicit seems useful. One null dtype would
179
+ not be enough to cover both bit and byte masks, so that would mean
180
+ even more checking if we did it the Arrow way.
181
+
182
+ TBD: there's also the "chunk" concept here, which is implicit in Arrow as
183
+ multiple buffers per array (= column here). Semantically it may make
184
+ sense to have both: chunks were meant for example for lazy evaluation
185
+ of data which doesn't fit in memory, while multiple buffers per column
186
+ could also come from doing a selection operation on a single
187
+ contiguous buffer.
188
+
189
+ Given these concepts, one would expect chunks to be all of the same
190
+ size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows),
191
+ while multiple buffers could have data-dependent lengths. Not an issue
192
+ in pandas if one column is backed by a single NumPy array, but in
193
+ Arrow it seems possible.
194
+ Are multiple chunks *and* multiple buffers per column necessary for
195
+ the purposes of this interchange protocol, or must producers either
196
+ reuse the chunk concept for this or copy the data?
197
+
198
+ Note: this Column object can only be produced by ``__dataframe__``, so
199
+ doesn't need its own version or ``__column__`` protocol.
200
+ """
201
+
202
+ def __init__(
203
+ self, column: pa.Array | pa.ChunkedArray, allow_copy: bool = True
204
+ ) -> None:
205
+ """
206
+ Handles PyArrow Arrays and ChunkedArrays.
207
+ """
208
+ # Store the column as a private attribute
209
+ if isinstance(column, pa.ChunkedArray):
210
+ if column.num_chunks == 1:
211
+ column = column.chunk(0)
212
+ else:
213
+ if not allow_copy:
214
+ raise RuntimeError(
215
+ "Chunks will be combined and a copy is required which "
216
+ "is forbidden by allow_copy=False"
217
+ )
218
+ column = column.combine_chunks()
219
+
220
+ self._allow_copy = allow_copy
221
+
222
+ if pa.types.is_boolean(column.type):
223
+ if not allow_copy:
224
+ raise RuntimeError(
225
+ "Boolean column will be casted to uint8 and a copy "
226
+ "is required which is forbidden by allow_copy=False"
227
+ )
228
+ self._dtype = self._dtype_from_arrowdtype(column.type, 8)
229
+ self._col = pc.cast(column, pa.uint8())
230
+ else:
231
+ self._col = column
232
+ dtype = self._col.type
233
+ try:
234
+ bit_width = dtype.bit_width
235
+ except ValueError:
236
+ # in case of variable-length strings, considered as an array
237
+ # of bytes (8 bits)
238
+ bit_width = 8
239
+ self._dtype = self._dtype_from_arrowdtype(dtype, bit_width)
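# --- Editorial sketch (hedged): the copy guard above in action ---
# Bit-packed booleans must be cast to uint8, so zero-copy construction
# of a boolean column is refused:
import pyarrow as pa
from pyarrow.interchange.column import _PyArrowColumn

try:
    _PyArrowColumn(pa.array([True, False]), allow_copy=False)
except RuntimeError as exc:
    print(exc)  # the boolean -> uint8 cast would require a copy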
240
+
241
+ def size(self) -> int:
242
+ """
243
+ Size of the column, in elements.
244
+
245
+ Corresponds to DataFrame.num_rows() if column is a single chunk;
246
+ equal to size of this current chunk otherwise.
247
+
248
+ Is a method rather than a property because it may cause a (potentially
249
+ expensive) computation for some dataframe implementations.
250
+ """
251
+ return len(self._col)
252
+
253
+ @property
254
+ def offset(self) -> int:
255
+ """
256
+ Offset of first element.
257
+
258
+ May be > 0 if using chunks; for example for a column with N chunks of
259
+ equal size M (only the last chunk may be shorter),
260
+ ``offset = n * M``, ``n = 0 .. N-1``.
261
+ """
262
+ return self._col.offset
263
+
264
+ @property
265
+ def dtype(self) -> Tuple[DtypeKind, int, str, str]:
266
+ """
267
+ Dtype description as a tuple ``(kind, bit-width, format string,
268
+ endianness)``.
269
+
270
+ Bit-width : the number of bits as an integer
271
+ Format string : data type description format string in Apache Arrow C
272
+ Data Interface format.
273
+ Endianness : currently only native endianness (``=``) is supported
274
+
275
+ Notes:
276
+ - Kind specifiers are aligned with DLPack where possible (hence the
277
+ jump to 20, leave enough room for future extension)
278
+ - Masks must be specified as boolean with either bit width 1 (for
279
+ bit masks) or 8 (for byte masks).
280
+ - Dtype width in bits was preferred over bytes
281
+ - Endianness isn't too useful, but included now in case in the
282
+ future we need to support non-native endianness
283
+ - Went with Apache Arrow format strings over NumPy format strings
284
+ because they're more complete from a dataframe perspective
285
+ - Format strings are mostly useful for datetime specification, and
286
+ for categoricals.
287
+ - For categoricals, the format string describes the type of the
288
+ categorical in the data buffer. In case of a separate encoding of
289
+ the categorical (e.g. an integer to string mapping), this can
290
+ be derived from ``self.describe_categorical``.
291
+ - Data types not included: complex, Arrow-style null, binary,
292
+ decimal, and nested (list, struct, map, union) dtypes.
293
+ """
294
+ return self._dtype
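# --- Editorial illustration (hedged): tuples yielded by .dtype, per the
# _PYARROW_KINDS mapping above ---
import pyarrow as pa
from pyarrow.interchange.column import DtypeKind, _PyArrowColumn

col = _PyArrowColumn(pa.array([1, 2, 3], type=pa.int64()))
assert col.dtype == (DtypeKind.INT, 64, "l", "=")  # native endianness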
295
+
296
+ def _dtype_from_arrowdtype(
297
+ self, dtype: pa.DataType, bit_width: int
298
+ ) -> Tuple[DtypeKind, int, str, str]:
299
+ """
300
+ See `self.dtype` for details.
301
+ """
302
+ # Note: 'c' (complex) not handled yet (not in array spec v1).
303
+ # 'b', 'B' (bytes), 'S', 'a' (old-style string), 'V' (void) are not
304
+ # handled; datetime and timedelta both map to datetime
305
+ # (is timedelta handled?)
306
+
307
+ if pa.types.is_timestamp(dtype):
308
+ kind = DtypeKind.DATETIME
309
+ ts = dtype.unit[0]
310
+ tz = dtype.tz if dtype.tz else ""
311
+ f_string = "ts{ts}:{tz}".format(ts=ts, tz=tz)
312
+ return kind, bit_width, f_string, Endianness.NATIVE
313
+ elif pa.types.is_dictionary(dtype):
314
+ kind = DtypeKind.CATEGORICAL
315
+ arr = self._col
316
+ indices_dtype = arr.indices.type
317
+ _, f_string = _PYARROW_KINDS.get(indices_dtype)
318
+ return kind, bit_width, f_string, Endianness.NATIVE
319
+ else:
320
+ kind, f_string = _PYARROW_KINDS.get(dtype, (None, None))
321
+ if kind is None:
322
+ raise ValueError(
323
+ f"Data type {dtype} not supported by interchange protocol")
324
+
325
+ return kind, bit_width, f_string, Endianness.NATIVE
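# --- Editorial example (hedged): the timestamp format strings produced
# above follow the Arrow C data interface ---
import pyarrow as pa
from pyarrow.interchange.column import _PyArrowColumn

col = _PyArrowColumn(pa.array([0], type=pa.timestamp("us", tz="UTC")))
assert col.dtype[2] == "tsu:UTC"  # unit initial + ":" + timezone (or empty)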
326
+
327
+ @property
328
+ def describe_categorical(self) -> CategoricalDescription:
329
+ """
330
+ If the dtype is categorical, there are two options:
331
+ - There are only values in the data buffer.
332
+ - There is a separate non-categorical Column encoding categorical
333
+ values.
334
+
335
+ Raises TypeError if the dtype is not categorical
336
+
337
+ Returns the dictionary with description on how to interpret the
338
+ data buffer:
339
+ - "is_ordered" : bool, whether the ordering of dictionary indices
340
+ is semantically meaningful.
341
+ - "is_dictionary" : bool, whether a mapping of
342
+ categorical values to other objects exists
343
+ - "categories" : Column representing the (implicit) mapping of
344
+ indices to category values (e.g. an array of
345
+ cat1, cat2, ...). None if not a dictionary-style
346
+ categorical.
347
+
348
+ TBD: are there any other in-memory representations that are needed?
349
+ """
350
+ arr = self._col
351
+ if not pa.types.is_dictionary(arr.type):
352
+ raise TypeError(
353
+ "describe_categorical only works on a column with "
354
+ "categorical dtype!"
355
+ )
356
+
357
+ return {
358
+ "is_ordered": self._col.type.ordered,
359
+ "is_dictionary": True,
360
+ "categories": _PyArrowColumn(arr.dictionary),
361
+ }
362
+
363
+ @property
364
+ def describe_null(self) -> Tuple[ColumnNullType, Any]:
365
+ """
366
+ Return the missing value (or "null") representation the column dtype
367
+ uses, as a tuple ``(kind, value)``.
368
+
369
+ Value : if kind is "sentinel value", the actual value. If kind is a bit
370
+ mask or a byte mask, the value (0 or 1) indicating a missing value.
371
+ None otherwise.
372
+ """
373
+ # In case of no missing values, we need to set ColumnNullType to
374
+ # non nullable as in the current __dataframe__ protocol bit/byte masks
375
+ # cannot be None
376
+ if self.null_count == 0:
377
+ return ColumnNullType.NON_NULLABLE, None
378
+ else:
379
+ return ColumnNullType.USE_BITMASK, 0
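# --- Editorial sketch (hedged): with at least one null, Arrow's validity
# bitmap is reported; 0 marks a missing slot (1 means valid) ---
import pyarrow as pa
from pyarrow.interchange.column import ColumnNullType, _PyArrowColumn

col = _PyArrowColumn(pa.array([1, None]))
assert col.describe_null == (ColumnNullType.USE_BITMASK, 0)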
380
+
381
+ @property
382
+ def null_count(self) -> int:
383
+ """
384
+ Number of null elements, if known.
385
+
386
+ Note: Arrow uses -1 to indicate "unknown", but None seems cleaner.
387
+ """
388
+ arrow_null_count = self._col.null_count
389
+ n = arrow_null_count if arrow_null_count != -1 else None
390
+ return n
391
+
392
+ @property
393
+ def metadata(self) -> Dict[str, Any]:
394
+ """
395
+ The metadata for the column. See `DataFrame.metadata` for more details.
396
+ """
397
+ pass
398
+
399
+ def num_chunks(self) -> int:
400
+ """
401
+ Return the number of chunks the column consists of.
402
+ """
403
+ return 1
404
+
405
+ def get_chunks(
406
+ self, n_chunks: Optional[int] = None
407
+ ) -> Iterable[_PyArrowColumn]:
408
+ """
409
+ Return an iterator yielding the chunks.
410
+
411
+ See `DataFrame.get_chunks` for details on ``n_chunks``.
412
+ """
413
+ if n_chunks and n_chunks > 1:
414
+ chunk_size = self.size() // n_chunks
415
+ if self.size() % n_chunks != 0:
416
+ chunk_size += 1
417
+
418
+ array = self._col
419
+ i = 0
420
+ for start in range(0, chunk_size * n_chunks, chunk_size):
421
+ yield _PyArrowColumn(
422
+ array.slice(start, chunk_size), self._allow_copy
423
+ )
424
+ i += 1
425
+ else:
426
+ yield self
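# --- Editorial sketch (hedged): chunk_size above is a ceiling division,
# so a 5-element column split with n_chunks=2 yields sizes 3 and 2
# (Array.slice clamps the final chunk to the array length) ---
import pyarrow as pa
from pyarrow.interchange.column import _PyArrowColumn

col = _PyArrowColumn(pa.array([1, 2, 3, 4, 5]))
assert [c.size() for c in col.get_chunks(n_chunks=2)] == [3, 2]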
427
+
428
+ def get_buffers(self) -> ColumnBuffers:
429
+ """
430
+ Return a dictionary containing the underlying buffers.
431
+
432
+ The returned dictionary has the following contents:
433
+
434
+ - "data": a two-element tuple whose first element is a buffer
435
+ containing the data and whose second element is the data
436
+ buffer's associated dtype.
437
+ - "validity": a two-element tuple whose first element is a buffer
438
+ containing mask values indicating missing data and
439
+ whose second element is the mask value buffer's
440
+ associated dtype. None if the null representation is
441
+ not a bit or byte mask.
442
+ - "offsets": a two-element tuple whose first element is a buffer
443
+ containing the offset values for variable-size binary
444
+ data (e.g., variable-length strings) and whose second
445
+ element is the offsets buffer's associated dtype. None
446
+ if the data buffer does not have an associated offsets
447
+ buffer.
448
+ """
449
+ buffers: ColumnBuffers = {
450
+ "data": self._get_data_buffer(),
451
+ "validity": None,
452
+ "offsets": None,
453
+ }
454
+
455
+ try:
456
+ buffers["validity"] = self._get_validity_buffer()
457
+ except NoBufferPresent:
458
+ pass
459
+
460
+ try:
461
+ buffers["offsets"] = self._get_offsets_buffer()
462
+ except NoBufferPresent:
463
+ pass
464
+
465
+ return buffers
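# --- Editorial sketch (hedged): the three-buffer layout for a nullable
# string column (validity bitmap, int32 offsets, UTF-8 data) ---
import pyarrow as pa
from pyarrow.interchange.column import _PyArrowColumn

bufs = _PyArrowColumn(pa.array(["a", None, "bc"])).get_buffers()
assert bufs["validity"] is not None and bufs["offsets"] is not None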
466
+
467
+ def _get_data_buffer(
468
+ self,
469
+ ) -> Tuple[_PyArrowBuffer, Any]: # Any is for self.dtype tuple
470
+ """
471
+ Return the buffer containing the data and the buffer's
472
+ associated dtype.
473
+ """
474
+ array = self._col
475
+ dtype = self.dtype
476
+
477
+ # In case of dictionary arrays, use indices
478
+ # to define a buffer, codes are transferred through
479
+ # describe_categorical()
480
+ if pa.types.is_dictionary(array.type):
481
+ array = array.indices
482
+ dtype = _PyArrowColumn(array).dtype
483
+
484
+ n = len(array.buffers())
485
+ if n == 2:
486
+ return _PyArrowBuffer(array.buffers()[1]), dtype
487
+ elif n == 3:
488
+ return _PyArrowBuffer(array.buffers()[2]), dtype
489
+
490
+ def _get_validity_buffer(self) -> Tuple[_PyArrowBuffer, Any]:
491
+ """
492
+ Return the buffer containing the mask values indicating missing data
493
+ and the buffer's associated dtype.
494
+ Raises NoBufferPresent if null representation is not a bit or byte
495
+ mask.
496
+ """
497
+ # Define the dtype of the returned buffer
498
+ dtype = (DtypeKind.BOOL, 1, "b", Endianness.NATIVE)
499
+ array = self._col
500
+ buff = array.buffers()[0]
501
+ if buff:
502
+ return _PyArrowBuffer(buff), dtype
503
+ else:
504
+ raise NoBufferPresent(
505
+ "There are no missing values so "
506
+ "does not have a separate mask")
507
+
508
+ def _get_offsets_buffer(self) -> Tuple[_PyArrowBuffer, Any]:
509
+ """
510
+ Return the buffer containing the offset values for variable-size binary
511
+ data (e.g., variable-length strings) and the buffer's associated dtype.
512
+ Raises NoBufferPresent if the data buffer does not have an associated
513
+ offsets buffer.
514
+ """
515
+ array = self._col
516
+ n = len(array.buffers())
517
+ if n == 2:
518
+ raise NoBufferPresent(
519
+ "This column has a fixed-length dtype so "
520
+ "it does not have an offsets buffer"
521
+ )
522
+ elif n == 3:
523
+ # Define the dtype of the returned buffer
524
+ dtype = self._col.type
525
+ if pa.types.is_large_string(dtype):
526
+ dtype = (DtypeKind.INT, 64, "l", Endianness.NATIVE)
527
+ else:
528
+ dtype = (DtypeKind.INT, 32, "i", Endianness.NATIVE)
529
+ return _PyArrowBuffer(array.buffers()[1]), dtype
env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/dataframe.py ADDED
@@ -0,0 +1,217 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from __future__ import annotations
19
+ from typing import (
20
+ Any,
21
+ Iterable,
22
+ Optional,
23
+ Sequence,
24
+ )
25
+
26
+ import pyarrow as pa
27
+
28
+ from pyarrow.interchange.column import _PyArrowColumn
29
+
30
+
31
+ class _PyArrowDataFrame:
32
+ """
33
+ A data frame class, with only the methods required by the interchange
34
+ protocol defined.
35
+
36
+ A "data frame" represents an ordered collection of named columns.
37
+ A column's "name" must be a unique string.
38
+ Columns may be accessed by name or by position.
39
+
40
+ This could be a public data frame class, or an object with the methods and
41
+ attributes defined on this DataFrame class could be returned from the
42
+ ``__dataframe__`` method of a public data frame class in a library adhering
43
+ to the dataframe interchange protocol specification.
44
+ """
45
+
46
+ def __init__(
47
+ self, df: pa.Table | pa.RecordBatch,
48
+ nan_as_null: bool = False,
49
+ allow_copy: bool = True
50
+ ) -> None:
51
+ """
52
+ Constructor - an instance of this (private) class is returned from
53
+ `pa.Table.__dataframe__` or `pa.RecordBatch.__dataframe__`.
54
+ """
55
+ self._df = df
56
+ # ``nan_as_null`` is a keyword intended for the consumer to tell the
57
+ # producer to overwrite null values in the data with ``NaN`` (or
58
+ # ``NaT``).
59
+ if nan_as_null is True:
60
+ raise RuntimeError(
61
+ "nan_as_null=True currently has no effect, "
62
+ "use the default nan_as_null=False"
63
+ )
64
+ self._nan_as_null = nan_as_null
65
+ self._allow_copy = allow_copy
66
+
67
+ def __dataframe__(
68
+ self, nan_as_null: bool = False, allow_copy: bool = True
69
+ ) -> _PyArrowDataFrame:
70
+ """
71
+ Construct a new exchange object, potentially changing the parameters.
72
+ ``nan_as_null`` is a keyword intended for the consumer to tell the
73
+ producer to overwrite null values in the data with ``NaN``.
74
+ It is intended for cases where the consumer does not support the bit
75
+ mask or byte mask that is the producer's native representation.
76
+ ``allow_copy`` is a keyword that defines whether or not the library is
77
+ allowed to make a copy of the data. For example, copying data would be
78
+ necessary if a library supports strided buffers, given that this
79
+ protocol specifies contiguous buffers.
80
+ """
81
+ return _PyArrowDataFrame(self._df, nan_as_null, allow_copy)
82
+
83
+ @property
84
+ def metadata(self) -> dict[str, Any]:
85
+ """
86
+ The metadata for the data frame, as a dictionary with string keys. The
87
+ contents of `metadata` may be anything, they are meant for a library
88
+ to store information that it needs to, e.g., roundtrip losslessly or
89
+ for two implementations to share data that is not (yet) part of the
90
+ interchange protocol specification. For avoiding collisions with other
91
+ entries, please add name the keys with the name of the library
92
+ followed by a period and the desired name, e.g, ``pandas.indexcol``.
93
+ """
94
+ # The metadata for the data frame, as a dictionary with string keys.
95
+ # Add schema metadata here (pandas metadata or custom metadata)
96
+ if self._df.schema.metadata:
97
+ schema_metadata = {"pyarrow." + k.decode('utf8'): v.decode('utf8')
98
+ for k, v in self._df.schema.metadata.items()}
99
+ return schema_metadata
100
+ else:
101
+ return {}
102
+
103
+ def num_columns(self) -> int:
104
+ """
105
+ Return the number of columns in the DataFrame.
106
+ """
107
+ return self._df.num_columns
108
+
109
+ def num_rows(self) -> int:
110
+ """
111
+ Return the number of rows in the DataFrame, if available.
112
+ """
113
+ return self._df.num_rows
114
+
115
+ def num_chunks(self) -> int:
116
+ """
117
+ Return the number of chunks the DataFrame consists of.
118
+ """
119
+ if isinstance(self._df, pa.RecordBatch):
120
+ return 1
121
+ else:
122
+ # pyarrow.Table can have columns with different numbers
123
+ # of chunks, so we use the number of batches that
124
+ # .to_batches() returns, since it splits at the chunk
125
+ # boundaries of all the columns (to_batches is zero-copy)
126
+ batches = self._df.to_batches()
127
+ return len(batches)
128
+
129
+ def column_names(self) -> Iterable[str]:
130
+ """
131
+ Return an iterator yielding the column names.
132
+ """
133
+ return self._df.schema.names
134
+
135
+ def get_column(self, i: int) -> _PyArrowColumn:
136
+ """
137
+ Return the column at the indicated position.
138
+ """
139
+ return _PyArrowColumn(self._df.column(i),
140
+ allow_copy=self._allow_copy)
141
+
142
+ def get_column_by_name(self, name: str) -> _PyArrowColumn:
143
+ """
144
+ Return the column whose name is the indicated name.
145
+ """
146
+ return _PyArrowColumn(self._df.column(name),
147
+ allow_copy=self._allow_copy)
148
+
149
+ def get_columns(self) -> Iterable[_PyArrowColumn]:
150
+ """
151
+ Return an iterator yielding the columns.
152
+ """
153
+ return [
154
+ _PyArrowColumn(col, allow_copy=self._allow_copy)
155
+ for col in self._df.columns
156
+ ]
157
+
158
+ def select_columns(self, indices: Sequence[int]) -> _PyArrowDataFrame:
159
+ """
160
+ Create a new DataFrame by selecting a subset of columns by index.
161
+ """
162
+ return _PyArrowDataFrame(
163
+ self._df.select(list(indices)), self._nan_as_null, self._allow_copy
164
+ )
165
+
166
+ def select_columns_by_name(
167
+ self, names: Sequence[str]
168
+ ) -> _PyArrowDataFrame:
169
+ """
170
+ Create a new DataFrame by selecting a subset of columns by name.
171
+ """
172
+ return _PyArrowDataFrame(
173
+ self._df.select(list(names)), self._nan_as_null, self._allow_copy
174
+ )
175
+
176
+ def get_chunks(
177
+ self, n_chunks: Optional[int] = None
178
+ ) -> Iterable[_PyArrowDataFrame]:
179
+ """
180
+ Return an iterator yielding the chunks.
181
+
182
+ By default (None), yields the chunks that the data is stored as by the
183
+ producer. If given, ``n_chunks`` must be a multiple of
184
+ ``self.num_chunks()``, meaning the producer must subdivide each chunk
185
+ before yielding it.
186
+
187
+ Note that the producer must ensure that all columns are chunked the
188
+ same way.
189
+ """
190
+ # Subdivide chunks
191
+ if n_chunks and n_chunks > 1:
192
+ chunk_size = self.num_rows() // n_chunks
193
+ if self.num_rows() % n_chunks != 0:
194
+ chunk_size += 1
195
+ if isinstance(self._df, pa.Table):
196
+ batches = self._df.to_batches(max_chunksize=chunk_size)
197
+ else:
198
+ batches = []
199
+ for start in range(0, chunk_size * n_chunks, chunk_size):
200
+ batches.append(self._df.slice(start, chunk_size))
201
+ # If the chunk size is such that the resulting list has one
202
+ # chunk fewer than n_chunks, append an empty chunk
203
+ if len(batches) == n_chunks - 1:
204
+ batches.append(pa.record_batch([[]], schema=self._df.schema))
205
+ # yields the chunks that the data is stored as
206
+ else:
207
+ if isinstance(self._df, pa.Table):
208
+ batches = self._df.to_batches()
209
+ else:
210
+ batches = [self._df]
211
+
212
+ # Create an iterator of RecordBatches
213
+ iterator = [_PyArrowDataFrame(batch,
214
+ self._nan_as_null,
215
+ self._allow_copy)
216
+ for batch in batches]
217
+ return iterator
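# --- Editorial usage sketch (hedged): pa.Table implements __dataframe__,
# so chunk subdivision can be exercised directly ---
import pyarrow as pa

df = pa.table({"a": [1, 2, 3, 4]}).__dataframe__()
chunks = list(df.get_chunks(n_chunks=2))
assert [c.num_rows() for c in chunks] == [2, 2]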
env-llmeval/lib/python3.10/site-packages/pyarrow/interchange/from_dataframe.py ADDED
@@ -0,0 +1,614 @@
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import (
21
+ Any,
22
+ Tuple,
23
+ )
24
+
25
+ from pyarrow.interchange.column import (
26
+ DtypeKind,
27
+ ColumnBuffers,
28
+ ColumnNullType,
29
+ )
30
+
31
+ import pyarrow as pa
32
+ import re
33
+
34
+ import pyarrow.compute as pc
35
+ from pyarrow.interchange.column import Dtype
36
+
37
+
38
+ # A typing protocol could be added later to let Mypy validate code using
39
+ # `from_dataframe` better.
40
+ DataFrameObject = Any
41
+ ColumnObject = Any
42
+ BufferObject = Any
43
+
44
+
45
+ _PYARROW_DTYPES: dict[DtypeKind, dict[int, Any]] = {
46
+ DtypeKind.INT: {8: pa.int8(),
47
+ 16: pa.int16(),
48
+ 32: pa.int32(),
49
+ 64: pa.int64()},
50
+ DtypeKind.UINT: {8: pa.uint8(),
51
+ 16: pa.uint16(),
52
+ 32: pa.uint32(),
53
+ 64: pa.uint64()},
54
+ DtypeKind.FLOAT: {16: pa.float16(),
55
+ 32: pa.float32(),
56
+ 64: pa.float64()},
57
+ DtypeKind.BOOL: {1: pa.bool_(),
58
+ 8: pa.uint8()},
59
+ DtypeKind.STRING: {8: pa.string()},
60
+ }
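# Editorial note (hedged): the two BOOL entries distinguish bit-packed
# booleans (bit width 1 -> pa.bool_()) from byte-packed masks
# (bit width 8 -> pa.uint8()), the two mask widths the protocol allows.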
61
+
62
+
63
+ def from_dataframe(df: DataFrameObject, allow_copy=True) -> pa.Table:
64
+ """
65
+ Build a ``pa.Table`` from any DataFrame supporting the interchange protocol.
66
+
67
+ Parameters
68
+ ----------
69
+ df : DataFrameObject
70
+ Object supporting the interchange protocol, i.e. `__dataframe__`
71
+ method.
72
+ allow_copy : bool, default: True
73
+ Whether to allow copying the memory to perform the conversion
74
+ (if false then zero-copy approach is requested).
75
+
76
+ Returns
77
+ -------
78
+ pa.Table
79
+
80
+ Examples
81
+ --------
82
+ >>> import pyarrow
83
+ >>> from pyarrow.interchange import from_dataframe
84
+
85
+ Convert a pandas dataframe to a pyarrow table:
86
+
87
+ >>> import pandas as pd
88
+ >>> df = pd.DataFrame({
89
+ ... "n_attendees": [100, 10, 1],
90
+ ... "country": ["Italy", "Spain", "Slovenia"],
91
+ ... })
92
+ >>> df
93
+ n_attendees country
94
+ 0 100 Italy
95
+ 1 10 Spain
96
+ 2 1 Slovenia
97
+ >>> from_dataframe(df)
98
+ pyarrow.Table
99
+ n_attendees: int64
100
+ country: large_string
101
+ ----
102
+ n_attendees: [[100,10,1]]
103
+ country: [["Italy","Spain","Slovenia"]]
104
+ """
105
+ if isinstance(df, pa.Table):
106
+ return df
107
+ elif isinstance(df, pa.RecordBatch):
108
+ return pa.Table.from_batches([df])
109
+
110
+ if not hasattr(df, "__dataframe__"):
111
+ raise ValueError("`df` does not support __dataframe__")
112
+
113
+ return _from_dataframe(df.__dataframe__(allow_copy=allow_copy),
114
+ allow_copy=allow_copy)
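# --- Editorial roundtrip sketch (hedged): pyarrow objects already satisfy
# the protocol, so from_dataframe() returns a pa.Table unchanged per the
# isinstance check above ---
import pyarrow as pa
from pyarrow.interchange import from_dataframe

t = pa.table({"x": [1, 2]})
assert from_dataframe(t) is t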
115
+
116
+
+ def _from_dataframe(df: DataFrameObject, allow_copy=True):
+     """
+     Build a ``pa.Table`` from the DataFrame interchange object.
+
+     Parameters
+     ----------
+     df : DataFrameObject
+         Object supporting the interchange protocol, i.e. it implements
+         the `__dataframe__` method.
+     allow_copy : bool, default: True
+         Whether to allow copying the memory to perform the conversion
+         (if false then a zero-copy approach is requested).
+
+     Returns
+     -------
+     pa.Table
+     """
+     batches = []
+     for chunk in df.get_chunks():
+         batch = protocol_df_chunk_to_pyarrow(chunk, allow_copy)
+         batches.append(batch)
+
+     if not batches:
+         batch = protocol_df_chunk_to_pyarrow(df, allow_copy)
+         batches.append(batch)
+
+     return pa.Table.from_batches(batches)
+
+
+ def protocol_df_chunk_to_pyarrow(
+     df: DataFrameObject,
+     allow_copy: bool = True
+ ) -> pa.RecordBatch:
+     """
+     Convert an interchange protocol chunk to a ``pa.RecordBatch``.
+
+     Parameters
+     ----------
+     df : DataFrameObject
+         Object supporting the interchange protocol, i.e. it implements
+         the `__dataframe__` method.
+     allow_copy : bool, default: True
+         Whether to allow copying the memory to perform the conversion
+         (if false then a zero-copy approach is requested).
+
+     Returns
+     -------
+     pa.RecordBatch
+     """
+     # We need a dict of columns here, with each column being a pa.Array
+     columns: dict[str, pa.Array] = {}
+     for name in df.column_names():
+         if not isinstance(name, str):
+             raise ValueError(f"Column {name} is not a string")
+         if name in columns:
+             raise ValueError(f"Column {name} is not unique")
+         col = df.get_column_by_name(name)
+         dtype = col.dtype[0]
+         if dtype in (
+             DtypeKind.INT,
+             DtypeKind.UINT,
+             DtypeKind.FLOAT,
+             DtypeKind.STRING,
+             DtypeKind.DATETIME,
+         ):
+             columns[name] = column_to_array(col, allow_copy)
+         elif dtype == DtypeKind.BOOL:
+             columns[name] = bool_column_to_array(col, allow_copy)
+         elif dtype == DtypeKind.CATEGORICAL:
+             columns[name] = categorical_column_to_dictionary(col, allow_copy)
+         else:
+             raise NotImplementedError(f"Data type {dtype} not handled yet")
+
+     return pa.RecordBatch.from_pydict(columns)
+
+
+ def column_to_array(
+     col: ColumnObject,
+     allow_copy: bool = True,
+ ) -> pa.Array:
+     """
+     Convert a column holding one of the primitive dtypes to a PyArrow array.
+     A primitive type is one of: int, uint, float, bool (1 bit).
+
+     Parameters
+     ----------
+     col : ColumnObject
+     allow_copy : bool, default: True
+         Whether to allow copying the memory to perform the conversion
+         (if false then a zero-copy approach is requested).
+
+     Returns
+     -------
+     pa.Array
+     """
+     buffers = col.get_buffers()
+     data_type = col.dtype
+     data = buffers_to_array(buffers, data_type,
+                             col.size(),
+                             col.describe_null,
+                             col.offset,
+                             allow_copy)
+     return data
+
+
+ def bool_column_to_array(
+     col: ColumnObject,
+     allow_copy: bool = True,
+ ) -> pa.Array:
+     """
+     Convert a column holding a boolean dtype to a PyArrow array.
+
+     Parameters
+     ----------
+     col : ColumnObject
+     allow_copy : bool, default: True
+         Whether to allow copying the memory to perform the conversion
+         (if false then a zero-copy approach is requested).
+
+     Returns
+     -------
+     pa.Array
+     """
+     buffers = col.get_buffers()
+     size = buffers["data"][1][1]
+
+     # If booleans are byte-packed, a copy to bit-packed will be made
+     if size == 8 and not allow_copy:
+         raise RuntimeError(
+             "Boolean column will be cast from uint8 and a copy "
+             "is required which is forbidden by allow_copy=False"
+         )
+
+     data_type = col.dtype
+     data = buffers_to_array(buffers, data_type,
+                             col.size(),
+                             col.describe_null,
+                             col.offset)
+     if size == 8:
+         data = pc.cast(data, pa.bool_())
+
+     return data
+
+
+ def categorical_column_to_dictionary(
+     col: ColumnObject,
+     allow_copy: bool = True,
+ ) -> pa.DictionaryArray:
+     """
+     Convert a column holding categorical data to a pa.DictionaryArray.
+
+     Parameters
+     ----------
+     col : ColumnObject
+     allow_copy : bool, default: True
+         Whether to allow copying the memory to perform the conversion
+         (if false then a zero-copy approach is requested).
+
+     Returns
+     -------
+     pa.DictionaryArray
+     """
+     if not allow_copy:
+         raise RuntimeError(
+             "Categorical column will be cast from uint8 and a copy "
+             "is required which is forbidden by allow_copy=False"
+         )
+
+     categorical = col.describe_categorical
+
+     if not categorical["is_dictionary"]:
+         raise NotImplementedError(
+             "Non-dictionary categoricals not supported yet")
+
+     # We need to first convert the dictionary column
+     cat_column = categorical["categories"]
+     dictionary = column_to_array(cat_column)
+     # Then we need to convert the indices
+     # Here we need to use the buffer data type!
+     buffers = col.get_buffers()
+     _, data_type = buffers["data"]
+     indices = buffers_to_array(buffers, data_type,
+                                col.size(),
+                                col.describe_null,
+                                col.offset)
+
+     # Constructing a pa.DictionaryArray
+     dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
+
+     return dict_array
+
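+ # For illustration (a sketch, not exercised by the module itself): an index
+ # buffer decoding to [0, 1, 0] combined with a converted dictionary
+ # ["a", "b"] yields a DictionaryArray whose logical values are
+ # ["a", "b", "a"].
+
+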
+ def parse_datetime_format_str(format_str):
+     """Parse datetime `format_str` to interpret the `data`."""
+
+     # timestamp 'ts{unit}:tz'
+     timestamp_meta = re.match(r"ts([smun]):(.*)", format_str)
+     if timestamp_meta:
+         unit, tz = timestamp_meta.group(1), timestamp_meta.group(2)
+         if unit != "s":
+             # the format string describes only the first letter of the unit,
+             # so add one extra letter to convert the unit to numpy-style:
+             # 'm' -> 'ms', 'u' -> 'us', 'n' -> 'ns'
+             unit += "s"
+
+         return unit, tz
+
+     raise NotImplementedError(f"DateTime kind is not supported: {format_str}")
+
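+ # For illustration, the protocol's 'ts{unit}:{tz}' format strings resolve as
+ # follows (doctest-style sketch):
+ #
+ #   >>> parse_datetime_format_str("tsu:UTC")
+ #   ('us', 'UTC')
+ #   >>> parse_datetime_format_str("tss:")
+ #   ('s', '')
+
+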
+ def map_date_type(data_type):
+     """Map a column data type to a pyarrow data type."""
+     kind, bit_width, f_string, _ = data_type
+
+     if kind == DtypeKind.DATETIME:
+         unit, tz = parse_datetime_format_str(f_string)
+         return pa.timestamp(unit, tz=tz)
+     else:
+         pa_dtype = _PYARROW_DTYPES.get(kind, {}).get(bit_width, None)
+
+         # Error if dtype is not supported
+         if pa_dtype:
+             return pa_dtype
+         else:
+             raise NotImplementedError(
+                 f"Conversion for {data_type} is not yet supported.")
+
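+ # For illustration (a sketch, assuming Arrow C format strings such as 'g'
+ # for float64 in the dtype tuple):
+ #
+ #   map_date_type((DtypeKind.FLOAT, 64, 'g', '='))
+ #   # -> pa.float64()
+ #   map_date_type((DtypeKind.DATETIME, 64, 'tsn:UTC', '='))
+ #   # -> pa.timestamp('ns', tz='UTC')
+
+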
+ def buffers_to_array(
+     buffers: ColumnBuffers,
+     data_type: Tuple[DtypeKind, int, str, str],
+     length: int,
+     describe_null: ColumnNullType,
+     offset: int = 0,
+     allow_copy: bool = True,
+ ) -> pa.Array:
+     """
+     Build a PyArrow array from the passed buffers.
+
+     Parameters
+     ----------
+     buffers : ColumnBuffers
+         Dictionary containing tuples of underlying buffers and
+         their associated dtype.
+     data_type : Tuple[DtypeKind, int, str, str]
+         Dtype description of the column as a tuple ``(kind, bit-width,
+         format string, endianness)``.
+     length : int
+         The number of values in the array.
+     describe_null : ColumnNullType
+         Null representation the column dtype uses,
+         as a tuple ``(kind, value)``.
+     offset : int, default: 0
+         Number of elements to offset from the start of the buffer.
+     allow_copy : bool, default: True
+         Whether to allow copying the memory to perform the conversion
+         (if false then a zero-copy approach is requested).
+
+     Returns
+     -------
+     pa.Array
+
+     Notes
+     -----
+     The returned array doesn't own the memory. The caller of this function
+     is responsible for keeping the memory owner object alive as long as
+     the returned PyArrow array is being used.
+     """
+     data_buff, _ = buffers["data"]
+     try:
+         validity_buff, validity_dtype = buffers["validity"]
+     except TypeError:
+         validity_buff = None
+     try:
+         offset_buff, offset_dtype = buffers["offsets"]
+     except TypeError:
+         offset_buff = None
+
+     # Construct a pyarrow Buffer
+     data_pa_buffer = pa.foreign_buffer(data_buff.ptr, data_buff.bufsize,
+                                        base=data_buff)
+
+     # Construct a validity pyarrow Buffer, if applicable
+     if validity_buff:
+         validity_pa_buff = validity_buffer_from_mask(validity_buff,
+                                                      validity_dtype,
+                                                      describe_null,
+                                                      length,
+                                                      offset,
+                                                      allow_copy)
+     else:
+         validity_pa_buff = validity_buffer_nan_sentinel(data_pa_buffer,
+                                                         data_type,
+                                                         describe_null,
+                                                         length,
+                                                         offset,
+                                                         allow_copy)
+
+     # Construct a pyarrow Array from buffers
+     data_dtype = map_date_type(data_type)
+
+     if offset_buff:
+         _, offset_bit_width, _, _ = offset_dtype
+         # If an offset buffer exists, construct an offset pyarrow Buffer
+         # and add it to the construction of an array
+         offset_pa_buffer = pa.foreign_buffer(offset_buff.ptr,
+                                              offset_buff.bufsize,
+                                              base=offset_buff)
+
+         if data_type[2] == 'U':
+             string_type = pa.large_string()
+         else:
+             if offset_bit_width == 64:
+                 string_type = pa.large_string()
+             else:
+                 string_type = pa.string()
+         array = pa.Array.from_buffers(
+             string_type,
+             length,
+             [validity_pa_buff, offset_pa_buffer, data_pa_buffer],
+             offset=offset,
+         )
+     else:
+         array = pa.Array.from_buffers(
+             data_dtype,
+             length,
+             [validity_pa_buff, data_pa_buffer],
+             offset=offset,
+         )
+
+     return array
+
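+ # For illustration, the buffer lists handed to pa.Array.from_buffers above
+ # follow Arrow's physical layout (a sketch):
+ #
+ #   primitive column:  [validity bitmap, data buffer]
+ #   string column:     [validity bitmap, offsets buffer, data buffer]
+
+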
+ def validity_buffer_from_mask(
+     validity_buff: BufferObject,
+     validity_dtype: Dtype,
+     describe_null: ColumnNullType,
+     length: int,
+     offset: int = 0,
+     allow_copy: bool = True,
+ ) -> pa.Buffer:
+     """
+     Build a PyArrow buffer from the passed mask buffer.
+
+     Parameters
+     ----------
+     validity_buff : BufferObject
+         Tuple of underlying validity buffer and associated dtype.
+     validity_dtype : Dtype
+         Dtype description as a tuple ``(kind, bit-width, format string,
+         endianness)``.
+     describe_null : ColumnNullType
+         Null representation the column dtype uses,
+         as a tuple ``(kind, value)``.
+     length : int
+         The number of values in the array.
+     offset : int, default: 0
+         Number of elements to offset from the start of the buffer.
+     allow_copy : bool, default: True
+         Whether to allow copying the memory to perform the conversion
+         (if false then a zero-copy approach is requested).
+
+     Returns
+     -------
+     pa.Buffer
+     """
+     null_kind, sentinel_val = describe_null
+     validity_kind, _, _, _ = validity_dtype
+     assert validity_kind == DtypeKind.BOOL
+
+     if null_kind == ColumnNullType.NON_NULLABLE:
+         # A sliced array can report a NON_NULLABLE ColumnNullType when its
+         # slice holds no missing values, even though a bitmask buffer
+         # exists; the validity buffer must be set to None in this case.
+         return None
+
+     elif null_kind == ColumnNullType.USE_BYTEMASK or (
+         null_kind == ColumnNullType.USE_BITMASK and sentinel_val == 1
+     ):
+         buff = pa.foreign_buffer(validity_buff.ptr,
+                                  validity_buff.bufsize,
+                                  base=validity_buff)
+
+         if null_kind == ColumnNullType.USE_BYTEMASK:
+             if not allow_copy:
+                 raise RuntimeError(
+                     "To create a bitmask a copy of the data is "
+                     "required which is forbidden by allow_copy=False"
+                 )
+             mask = pa.Array.from_buffers(pa.int8(), length,
+                                          [None, buff],
+                                          offset=offset)
+             mask_bool = pc.cast(mask, pa.bool_())
+         else:
+             mask_bool = pa.Array.from_buffers(pa.bool_(), length,
+                                               [None, buff],
+                                               offset=offset)
+
+         if sentinel_val == 1:
+             mask_bool = pc.invert(mask_bool)
+
+         return mask_bool.buffers()[1]
+
+     elif null_kind == ColumnNullType.USE_BITMASK and sentinel_val == 0:
+         return pa.foreign_buffer(validity_buff.ptr,
+                                  validity_buff.bufsize,
+                                  base=validity_buff)
+     else:
+         raise NotImplementedError(
+             f"{describe_null} null representation is not yet supported.")
+
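+ # For illustration (a sketch): a byte mask [1, 0, 1] described as
+ # (ColumnNullType.USE_BYTEMASK, 0) marks zero-valued positions as null, so
+ # casting it to boolean already gives the Arrow validity bitmap; with
+ # sentinel value 1 the mask is inverted first, since Arrow validity uses
+ # 1 (True) for valid entries.
+
+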
+ def validity_buffer_nan_sentinel(
+     data_pa_buffer: BufferObject,
+     data_type: Dtype,
+     describe_null: ColumnNullType,
+     length: int,
+     offset: int = 0,
+     allow_copy: bool = True,
+ ) -> pa.Buffer:
+     """
+     Build a PyArrow buffer from NaN or sentinel values.
+
+     Parameters
+     ----------
+     data_pa_buffer : pa.Buffer
+         PyArrow buffer for the column data.
+     data_type : Dtype
+         Dtype description as a tuple ``(kind, bit-width, format string,
+         endianness)``.
+     describe_null : ColumnNullType
+         Null representation the column dtype uses,
+         as a tuple ``(kind, value)``.
+     length : int
+         The number of values in the array.
+     offset : int, default: 0
+         Number of elements to offset from the start of the buffer.
+     allow_copy : bool, default: True
+         Whether to allow copying the memory to perform the conversion
+         (if false then a zero-copy approach is requested).
+
+     Returns
+     -------
+     pa.Buffer
+     """
+     kind, bit_width, _, _ = data_type
+     data_dtype = map_date_type(data_type)
+     null_kind, sentinel_val = describe_null
+
+     # Check for float NaN values
+     if null_kind == ColumnNullType.USE_NAN:
+         if not allow_copy:
+             raise RuntimeError(
+                 "To create a bitmask a copy of the data is "
+                 "required which is forbidden by allow_copy=False"
+             )
+
+         if kind == DtypeKind.FLOAT and bit_width == 16:
+             # 'pyarrow.compute.is_nan' kernel not yet implemented
+             # for float16
+             raise NotImplementedError(
+                 f"{data_type} with {null_kind} is not yet supported.")
+         else:
+             pyarrow_data = pa.Array.from_buffers(
+                 data_dtype,
+                 length,
+                 [None, data_pa_buffer],
+                 offset=offset,
+             )
+             mask = pc.is_nan(pyarrow_data)
+             mask = pc.invert(mask)
+             return mask.buffers()[1]
+
+     # Check for sentinel values
+     elif null_kind == ColumnNullType.USE_SENTINEL:
+         if not allow_copy:
+             raise RuntimeError(
+                 "To create a bitmask a copy of the data is "
+                 "required which is forbidden by allow_copy=False"
+             )
+
+         if kind == DtypeKind.DATETIME:
+             sentinel_dtype = pa.int64()
+         else:
+             sentinel_dtype = data_dtype
+         pyarrow_data = pa.Array.from_buffers(sentinel_dtype,
+                                              length,
+                                              [None, data_pa_buffer],
+                                              offset=offset)
+         sentinel_arr = pc.equal(pyarrow_data, sentinel_val)
+         mask_bool = pc.invert(sentinel_arr)
+         return mask_bool.buffers()[1]
+
+     elif null_kind == ColumnNullType.NON_NULLABLE:
+         pass
+     else:
+         raise NotImplementedError(
+             f"{describe_null} null representation is not yet supported.")
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/CMakeLists.txt ADDED
@@ -0,0 +1,18 @@
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements. See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership. The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License. You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing,
+ # software distributed under the License is distributed on an
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, either express or implied. See the License for the
+ # specific language governing permissions and limitations
+ # under the License.
+
+ arrow_install_all_headers("arrow/python")
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/api.h ADDED
@@ -0,0 +1,30 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include "arrow/python/arrow_to_pandas.h"
+ #include "arrow/python/common.h"
+ #include "arrow/python/datetime.h"
+ #include "arrow/python/deserialize.h"
+ #include "arrow/python/helpers.h"
+ #include "arrow/python/inference.h"
+ #include "arrow/python/io.h"
+ #include "arrow/python/numpy_convert.h"
+ #include "arrow/python/numpy_to_arrow.h"
+ #include "arrow/python/python_to_arrow.h"
+ #include "arrow/python/serialize.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.cc ADDED
@@ -0,0 +1,2578 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // Functions for pandas conversion via NumPy
+
+ #include "arrow/python/arrow_to_pandas.h"
+ #include "arrow/python/numpy_interop.h"  // IWYU pragma: expand
+
+ #include <cmath>
+ #include <cstdint>
+ #include <iostream>
+ #include <memory>
+ #include <mutex>
+ #include <string>
+ #include <string_view>
+ #include <unordered_map>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/array.h"
+ #include "arrow/buffer.h"
+ #include "arrow/datum.h"
+ #include "arrow/status.h"
+ #include "arrow/table.h"
+ #include "arrow/type.h"
+ #include "arrow/type_traits.h"
+ #include "arrow/util/checked_cast.h"
+ #include "arrow/util/hashing.h"
+ #include "arrow/util/int_util.h"
+ #include "arrow/util/logging.h"
+ #include "arrow/util/macros.h"
+ #include "arrow/util/parallel.h"
+ #include "arrow/visit_type_inline.h"
+
+ #include "arrow/compute/api.h"
+
+ #include "arrow/python/arrow_to_python_internal.h"
+ #include "arrow/python/common.h"
+ #include "arrow/python/datetime.h"
+ #include "arrow/python/decimal.h"
+ #include "arrow/python/helpers.h"
+ #include "arrow/python/numpy_convert.h"
+ #include "arrow/python/numpy_internal.h"
+ #include "arrow/python/pyarrow.h"
+ #include "arrow/python/python_to_arrow.h"
+ #include "arrow/python/type_traits.h"
+
+ namespace arrow {
+
+ class MemoryPool;
+
+ using internal::checked_cast;
+ using internal::CheckIndexBounds;
+ using internal::OptionalParallelFor;
+
+ namespace py {
+ namespace {
+
+ // Fix options for conversion of an inner (child) array.
+ PandasOptions MakeInnerOptions(PandasOptions options) {
+   // Make sure conversion of inner dictionary arrays always returns an array,
+   // not a dict {'indices': array, 'dictionary': array, 'ordered': bool}
+   options.decode_dictionaries = true;
+   options.categorical_columns.clear();
+   options.strings_to_categorical = false;
+
+   // In ARROW-7723, we found as a result of ARROW-3789 that second
+   // through microsecond resolution tz-aware timestamps were being promoted to
+   // use the DATETIME_NANO_TZ conversion path, yielding a datetime64[ns] NumPy
+   // array in this function. PyArray_GETITEM returns datetime.datetime for
+   // units second through microsecond but PyLong for nanosecond (because
+   // datetime.datetime does not support nanoseconds).
+   // We force the object conversion to preserve the value of the timezone.
+   // Nanoseconds are returned as integers.
+   options.coerce_temporal_nanoseconds = false;
+
+   return options;
+ }
+
+ // ----------------------------------------------------------------------
+ // PyCapsule code for setting ndarray base to reference C++ object
+
+ struct ArrayCapsule {
+   std::shared_ptr<Array> array;
+ };
+
+ struct BufferCapsule {
+   std::shared_ptr<Buffer> buffer;
+ };
+
+ void ArrayCapsule_Destructor(PyObject* capsule) {
+   delete reinterpret_cast<ArrayCapsule*>(PyCapsule_GetPointer(capsule, "arrow::Array"));
+ }
+
+ void BufferCapsule_Destructor(PyObject* capsule) {
+   delete reinterpret_cast<BufferCapsule*>(PyCapsule_GetPointer(capsule, "arrow::Buffer"));
+ }
+
+ // ----------------------------------------------------------------------
+ // pandas 0.x DataFrame conversion internals
+
+ using internal::arrow_traits;
+ using internal::npy_traits;
+
+ template <typename T>
+ struct WrapBytes {};
+
+ template <>
+ struct WrapBytes<StringType> {
+   static inline PyObject* Wrap(const char* data, int64_t length) {
+     return PyUnicode_FromStringAndSize(data, length);
+   }
+ };
+
+ template <>
+ struct WrapBytes<LargeStringType> {
+   static inline PyObject* Wrap(const char* data, int64_t length) {
+     return PyUnicode_FromStringAndSize(data, length);
+   }
+ };
+
+ template <>
+ struct WrapBytes<BinaryType> {
+   static inline PyObject* Wrap(const char* data, int64_t length) {
+     return PyBytes_FromStringAndSize(data, length);
+   }
+ };
+
+ template <>
+ struct WrapBytes<LargeBinaryType> {
+   static inline PyObject* Wrap(const char* data, int64_t length) {
+     return PyBytes_FromStringAndSize(data, length);
+   }
+ };
+
+ template <>
+ struct WrapBytes<FixedSizeBinaryType> {
+   static inline PyObject* Wrap(const char* data, int64_t length) {
+     return PyBytes_FromStringAndSize(data, length);
+   }
+ };
+
+ static inline bool ListTypeSupported(const DataType& type) {
+   switch (type.id()) {
+     case Type::BOOL:
+     case Type::UINT8:
+     case Type::INT8:
+     case Type::UINT16:
+     case Type::INT16:
+     case Type::UINT32:
+     case Type::INT32:
+     case Type::INT64:
+     case Type::UINT64:
+     case Type::HALF_FLOAT:
+     case Type::FLOAT:
+     case Type::DOUBLE:
+     case Type::DECIMAL128:
+     case Type::DECIMAL256:
+     case Type::BINARY:
+     case Type::LARGE_BINARY:
+     case Type::STRING:
+     case Type::LARGE_STRING:
+     case Type::DATE32:
+     case Type::DATE64:
+     case Type::STRUCT:
+     case Type::MAP:
+     case Type::TIME32:
+     case Type::TIME64:
+     case Type::TIMESTAMP:
+     case Type::DURATION:
+     case Type::DICTIONARY:
+     case Type::INTERVAL_MONTH_DAY_NANO:
+     case Type::NA:  // empty list
+       // The above types are all supported.
+       return true;
+     case Type::FIXED_SIZE_LIST:
+     case Type::LIST:
+     case Type::LARGE_LIST: {
+       const auto& list_type = checked_cast<const BaseListType&>(type);
+       return ListTypeSupported(*list_type.value_type());
+     }
+     case Type::EXTENSION: {
+       const auto& ext = checked_cast<const ExtensionType&>(*type.GetSharedPtr());
+       return ListTypeSupported(*(ext.storage_type()));
+     }
+     default:
+       break;
+   }
+   return false;
+ }
+
+ Status CapsulizeArray(const std::shared_ptr<Array>& arr, PyObject** out) {
+   auto capsule = new ArrayCapsule{{arr}};
+   *out = PyCapsule_New(reinterpret_cast<void*>(capsule), "arrow::Array",
+                        &ArrayCapsule_Destructor);
+   if (*out == nullptr) {
+     delete capsule;
+     RETURN_IF_PYERROR();
+   }
+   return Status::OK();
+ }
+
+ Status CapsulizeBuffer(const std::shared_ptr<Buffer>& buffer, PyObject** out) {
+   auto capsule = new BufferCapsule{{buffer}};
+   *out = PyCapsule_New(reinterpret_cast<void*>(capsule), "arrow::Buffer",
+                        &BufferCapsule_Destructor);
+   if (*out == nullptr) {
+     delete capsule;
+     RETURN_IF_PYERROR();
+   }
+   return Status::OK();
+ }
+
+ Status SetNdarrayBase(PyArrayObject* arr, PyObject* base) {
+   if (PyArray_SetBaseObject(arr, base) == -1) {
+     // Error occurred, trust that SetBaseObject sets the error state
+     Py_XDECREF(base);
+     RETURN_IF_PYERROR();
+   }
+   return Status::OK();
+ }
+
+ Status SetBufferBase(PyArrayObject* arr, const std::shared_ptr<Buffer>& buffer) {
+   PyObject* base;
+   RETURN_NOT_OK(CapsulizeBuffer(buffer, &base));
+   return SetNdarrayBase(arr, base);
+ }
+
+ inline void set_numpy_metadata(int type, const DataType* datatype, PyArray_Descr* out) {
+   auto metadata = reinterpret_cast<PyArray_DatetimeDTypeMetaData*>(out->c_metadata);
+   if (type == NPY_DATETIME) {
+     if (datatype->id() == Type::TIMESTAMP) {
+       const auto& timestamp_type = checked_cast<const TimestampType&>(*datatype);
+       metadata->meta.base = internal::NumPyFrequency(timestamp_type.unit());
+     } else {
+       DCHECK(false) << "NPY_DATETIME views only supported for Arrow TIMESTAMP types";
+     }
+   } else if (type == NPY_TIMEDELTA) {
+     DCHECK_EQ(datatype->id(), Type::DURATION);
+     const auto& duration_type = checked_cast<const DurationType&>(*datatype);
+     metadata->meta.base = internal::NumPyFrequency(duration_type.unit());
+   }
+ }
+
+ Status PyArray_NewFromPool(int nd, npy_intp* dims, PyArray_Descr* descr, MemoryPool* pool,
+                            PyObject** out) {
+   // ARROW-6570: Allocate memory from MemoryPool for a couple of reasons
+   //
+   // * Track allocations
+   // * Get better performance through custom allocators
+   int64_t total_size = descr->elsize;
+   for (int i = 0; i < nd; ++i) {
+     total_size *= dims[i];
+   }
+
+   ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateBuffer(total_size, pool));
+   *out = PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims,
+                               /*strides=*/nullptr,
+                               /*data=*/buffer->mutable_data(),
+                               /*flags=*/NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEABLE,
+                               /*obj=*/nullptr);
+   if (*out == nullptr) {
+     RETURN_IF_PYERROR();
+     // Trust that error set if NULL returned
+   }
+   return SetBufferBase(reinterpret_cast<PyArrayObject*>(*out), std::move(buffer));
+ }
+
+ template <typename T = void>
+ inline const T* GetPrimitiveValues(const Array& arr) {
+   if (arr.length() == 0) {
+     return nullptr;
+   }
+   const int elsize = arr.type()->byte_width();
+   const auto& prim_arr = checked_cast<const PrimitiveArray&>(arr);
+   return reinterpret_cast<const T*>(prim_arr.values()->data() + arr.offset() * elsize);
+ }
+
+ Status MakeNumPyView(std::shared_ptr<Array> arr, PyObject* py_ref, int npy_type, int ndim,
+                      npy_intp* dims, PyObject** out) {
+   PyAcquireGIL lock;
+
+   PyArray_Descr* descr = internal::GetSafeNumPyDtype(npy_type);
+   set_numpy_metadata(npy_type, arr->type().get(), descr);
+   PyObject* result = PyArray_NewFromDescr(
+       &PyArray_Type, descr, ndim, dims, /*strides=*/nullptr,
+       const_cast<void*>(GetPrimitiveValues(*arr)), /*flags=*/0, nullptr);
+   PyArrayObject* np_arr = reinterpret_cast<PyArrayObject*>(result);
+   if (np_arr == nullptr) {
+     // Error occurred, trust that error set
+     return Status::OK();
+   }
+
+   PyObject* base;
+   if (py_ref == nullptr) {
+     // Capsule will be owned by the ndarray, no incref necessary. See
+     // ARROW-1973
+     RETURN_NOT_OK(CapsulizeArray(arr, &base));
+   } else {
+     Py_INCREF(py_ref);
+     base = py_ref;
+   }
+   RETURN_NOT_OK(SetNdarrayBase(np_arr, base));
+
+   // Do not allow Arrow data to be mutated
+   PyArray_CLEARFLAGS(np_arr, NPY_ARRAY_WRITEABLE);
+   *out = result;
+   return Status::OK();
+ }
+
+ class PandasWriter {
+  public:
+   enum type {
+     OBJECT,
+     UINT8,
+     INT8,
+     UINT16,
+     INT16,
+     UINT32,
+     INT32,
+     UINT64,
+     INT64,
+     HALF_FLOAT,
+     FLOAT,
+     DOUBLE,
+     BOOL,
+     DATETIME_DAY,
+     DATETIME_SECOND,
+     DATETIME_MILLI,
+     DATETIME_MICRO,
+     DATETIME_NANO,
+     DATETIME_SECOND_TZ,
+     DATETIME_MILLI_TZ,
+     DATETIME_MICRO_TZ,
+     DATETIME_NANO_TZ,
+     TIMEDELTA_SECOND,
+     TIMEDELTA_MILLI,
+     TIMEDELTA_MICRO,
+     TIMEDELTA_NANO,
+     CATEGORICAL,
+     EXTENSION
+   };
+
+   PandasWriter(const PandasOptions& options, int64_t num_rows, int num_columns)
+       : options_(options), num_rows_(num_rows), num_columns_(num_columns) {
+     PyAcquireGIL lock;
+     internal::InitPandasStaticData();
+   }
+   virtual ~PandasWriter() {}
+
+   void SetBlockData(PyObject* arr) {
+     block_arr_.reset(arr);
+     block_data_ =
+         reinterpret_cast<uint8_t*>(PyArray_DATA(reinterpret_cast<PyArrayObject*>(arr)));
+   }
+
+   /// \brief Either copy or wrap single array to create pandas-compatible array
+   /// for Series or DataFrame. num_columns_ can only be 1. Will try to zero
+   /// copy if possible (or error if not possible and zero_copy_only=True)
+   virtual Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) = 0;
+
+   /// \brief Copy ChunkedArray into a multi-column block
+   virtual Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) = 0;
+
+   Status EnsurePlacementAllocated() {
+     std::lock_guard<std::mutex> guard(allocation_lock_);
+     if (placement_data_ != nullptr) {
+       return Status::OK();
+     }
+     PyAcquireGIL lock;
+     npy_intp placement_dims[1] = {num_columns_};
+     PyObject* placement_arr = PyArray_SimpleNew(1, placement_dims, NPY_INT64);
+     RETURN_IF_PYERROR();
+     placement_arr_.reset(placement_arr);
+     placement_data_ = reinterpret_cast<int64_t*>(
+         PyArray_DATA(reinterpret_cast<PyArrayObject*>(placement_arr)));
+     return Status::OK();
+   }
+
+   Status EnsureAllocated() {
+     std::lock_guard<std::mutex> guard(allocation_lock_);
+     if (block_data_ != nullptr) {
+       return Status::OK();
+     }
+     RETURN_NOT_OK(Allocate());
+     return Status::OK();
+   }
+
+   virtual bool CanZeroCopy(const ChunkedArray& data) const { return false; }
+
+   virtual Status Write(std::shared_ptr<ChunkedArray> data, int64_t abs_placement,
+                        int64_t rel_placement) {
+     RETURN_NOT_OK(EnsurePlacementAllocated());
+     if (num_columns_ == 1 && options_.allow_zero_copy_blocks) {
+       RETURN_NOT_OK(TransferSingle(data, /*py_ref=*/nullptr));
+     } else {
+       RETURN_NOT_OK(
+           CheckNoZeroCopy("Cannot do zero copy conversion into "
+                           "multi-column DataFrame block"));
+       RETURN_NOT_OK(EnsureAllocated());
+       RETURN_NOT_OK(CopyInto(data, rel_placement));
+     }
+     placement_data_[rel_placement] = abs_placement;
+     return Status::OK();
+   }
+
+   virtual Status GetDataFrameResult(PyObject** out) {
+     PyObject* result = PyDict_New();
+     RETURN_IF_PYERROR();
+
+     PyObject* block;
+     RETURN_NOT_OK(GetResultBlock(&block));
+
+     PyDict_SetItemString(result, "block", block);
+     PyDict_SetItemString(result, "placement", placement_arr_.obj());
+
+     RETURN_NOT_OK(AddResultMetadata(result));
+     *out = result;
+     return Status::OK();
+   }
+
+   // Caller steals the reference to this object
+   virtual Status GetSeriesResult(PyObject** out) {
+     RETURN_NOT_OK(MakeBlock1D());
+     // Caller owns the object now
+     *out = block_arr_.detach();
+     return Status::OK();
+   }
+
+  protected:
+   virtual Status AddResultMetadata(PyObject* result) { return Status::OK(); }
+
+   Status MakeBlock1D() {
+     // For Series or for certain DataFrame block types, we need to shape to a
+     // 1D array when there is only one column
+     PyAcquireGIL lock;
+
+     DCHECK_EQ(1, num_columns_);
+
+     npy_intp new_dims[1] = {static_cast<npy_intp>(num_rows_)};
+     PyArray_Dims dims;
+     dims.ptr = new_dims;
+     dims.len = 1;
+
+     PyObject* reshaped = PyArray_Newshape(
+         reinterpret_cast<PyArrayObject*>(block_arr_.obj()), &dims, NPY_ANYORDER);
+     RETURN_IF_PYERROR();
+
+     // ARROW-8801: Here a PyArrayObject is created that is not being managed by
+     // any OwnedRef object. This object is then put in the resulting object
+     // with PyDict_SetItemString, which increments the reference count, so a
+     // memory leak ensues. There are several ways to fix the memory leak but a
+     // simple one is to put the reshaped 1D block array in this OwnedRefNoGIL
+     // so it will be correctly decref'd when this class is destructed.
+     block_arr_.reset(reshaped);
+     return Status::OK();
+   }
+
+   virtual Status GetResultBlock(PyObject** out) {
+     *out = block_arr_.obj();
+     return Status::OK();
+   }
+
+   Status CheckNoZeroCopy(const std::string& message) {
+     if (options_.zero_copy_only) {
+       return Status::Invalid(message);
+     }
+     return Status::OK();
+   }
+
+   Status CheckNotZeroCopyOnly(const ChunkedArray& data) {
+     if (options_.zero_copy_only) {
+       return Status::Invalid("Needed to copy ", data.num_chunks(), " chunks with ",
+                              data.null_count(), " nulls, but zero_copy_only was True");
+     }
+     return Status::OK();
+   }
+
+   virtual Status Allocate() {
+     return Status::NotImplemented("Override Allocate in subclasses");
+   }
+
+   Status AllocateNDArray(int npy_type, int ndim = 2) {
+     PyAcquireGIL lock;
+
+     PyObject* block_arr = nullptr;
+     npy_intp block_dims[2] = {0, 0};
+
+     if (ndim == 2) {
+       block_dims[0] = num_columns_;
+       block_dims[1] = num_rows_;
+     } else {
+       block_dims[0] = num_rows_;
+     }
+     PyArray_Descr* descr = internal::GetSafeNumPyDtype(npy_type);
+     if (PyDataType_REFCHK(descr)) {
+       // ARROW-6876: if the array has refcounted items, let Numpy
+       // own the array memory so as to decref elements on array destruction
+       block_arr = PyArray_SimpleNewFromDescr(ndim, block_dims, descr);
+       RETURN_IF_PYERROR();
+     } else {
+       RETURN_NOT_OK(
+           PyArray_NewFromPool(ndim, block_dims, descr, options_.pool, &block_arr));
+     }
+
+     SetBlockData(block_arr);
+     return Status::OK();
+   }
+
+   void SetDatetimeUnit(NPY_DATETIMEUNIT unit) {
+     PyAcquireGIL lock;
+     auto date_dtype = reinterpret_cast<PyArray_DatetimeDTypeMetaData*>(
+         PyArray_DESCR(reinterpret_cast<PyArrayObject*>(block_arr_.obj()))->c_metadata);
+     date_dtype->meta.base = unit;
+   }
+
+   PandasOptions options_;
+
+   std::mutex allocation_lock_;
+
+   int64_t num_rows_;
+   int num_columns_;
+
+   OwnedRefNoGIL block_arr_;
+   uint8_t* block_data_ = nullptr;
+
+   // ndarray<int64>
+   OwnedRefNoGIL placement_arr_;
+   int64_t* placement_data_ = nullptr;
+
+  private:
+   ARROW_DISALLOW_COPY_AND_ASSIGN(PandasWriter);
+ };
+
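+ // For illustration (a sketch of the consumer-side contract): GetDataFrameResult()
+ // above hands pandas a plain dict of the form
+ // {"block": <2-D ndarray>, "placement": <int64 ndarray>}, where "placement"
+ // maps each row of the block back to its absolute column index in the
+ // resulting DataFrame.
+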
+ template <typename InType, typename OutType>
+ inline void ConvertIntegerWithNulls(const PandasOptions& options,
+                                     const ChunkedArray& data, OutType* out_values) {
+   for (int c = 0; c < data.num_chunks(); c++) {
+     const auto& arr = *data.chunk(c);
+     const InType* in_values = GetPrimitiveValues<InType>(arr);
+     // Upcast to double, set NaN as appropriate
+
+     for (int i = 0; i < arr.length(); ++i) {
+       *out_values++ =
+           arr.IsNull(i) ? static_cast<OutType>(NAN) : static_cast<OutType>(in_values[i]);
+     }
+   }
+ }
+
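+ // For illustration (a sketch): an int32 chunk [1, null, 3] converted through
+ // this path comes out as a float64 block [1.0, NaN, 3.0], since NumPy integer
+ // dtypes have no null representation and NaN requires a floating-point type.
+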
+ template <typename T>
+ inline void ConvertIntegerNoNullsSameType(const PandasOptions& options,
+                                           const ChunkedArray& data, T* out_values) {
+   for (int c = 0; c < data.num_chunks(); c++) {
+     const auto& arr = *data.chunk(c);
+     if (arr.length() > 0) {
+       const T* in_values = GetPrimitiveValues<T>(arr);
+       memcpy(out_values, in_values, sizeof(T) * arr.length());
+       out_values += arr.length();
+     }
+   }
+ }
+
+ template <typename InType, typename OutType>
+ inline void ConvertIntegerNoNullsCast(const PandasOptions& options,
+                                       const ChunkedArray& data, OutType* out_values) {
+   for (int c = 0; c < data.num_chunks(); c++) {
+     const auto& arr = *data.chunk(c);
+     const InType* in_values = GetPrimitiveValues<InType>(arr);
+     for (int64_t i = 0; i < arr.length(); ++i) {
+       // Advance the output cursor for every element; without the increment
+       // every value would be written into the same destination slot.
+       *out_values++ = in_values[i];
+     }
+   }
+ }
+
+ template <typename T, typename Enable = void>
+ struct MemoizationTraits {
+   using Scalar = typename T::c_type;
+ };
+
+ template <typename T>
+ struct MemoizationTraits<T, enable_if_has_string_view<T>> {
+   // For binary, we memoize string_view as a scalar value to avoid having to
+   // unnecessarily copy the memory into the memo table data structure
+   using Scalar = std::string_view;
+ };
+
+ // Generic Array -> PyObject** converter that handles object deduplication, if
+ // requested
+ template <typename Type, typename WrapFunction>
+ inline Status ConvertAsPyObjects(const PandasOptions& options, const ChunkedArray& data,
+                                  WrapFunction&& wrap_func, PyObject** out_values) {
+   using ArrayType = typename TypeTraits<Type>::ArrayType;
+   using Scalar = typename MemoizationTraits<Type>::Scalar;
+
+   ::arrow::internal::ScalarMemoTable<Scalar> memo_table(options.pool);
+   std::vector<PyObject*> unique_values;
+   int32_t memo_size = 0;
+
+   auto WrapMemoized = [&](const Scalar& value, PyObject** out_values) {
+     int32_t memo_index;
+     RETURN_NOT_OK(memo_table.GetOrInsert(value, &memo_index));
+     if (memo_index == memo_size) {
+       // New entry
+       RETURN_NOT_OK(wrap_func(value, out_values));
+       unique_values.push_back(*out_values);
+       ++memo_size;
+     } else {
+       // Duplicate entry
+       Py_INCREF(unique_values[memo_index]);
+       *out_values = unique_values[memo_index];
+     }
+     return Status::OK();
+   };
+
+   auto WrapUnmemoized = [&](const Scalar& value, PyObject** out_values) {
+     return wrap_func(value, out_values);
+   };
+
+   for (int c = 0; c < data.num_chunks(); c++) {
+     const auto& arr = arrow::internal::checked_cast<const ArrayType&>(*data.chunk(c));
+     if (options.deduplicate_objects) {
+       RETURN_NOT_OK(internal::WriteArrayObjects(arr, WrapMemoized, out_values));
+     } else {
+       RETURN_NOT_OK(internal::WriteArrayObjects(arr, WrapUnmemoized, out_values));
+     }
+     out_values += arr.length();
+   }
+   return Status::OK();
+ }
+
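+ // For illustration (a sketch): with options.deduplicate_objects enabled, a
+ // string column ["a", "b", "a"] produces only two distinct PyObjects; the
+ // repeated "a" is served from the memo table with an incremented refcount
+ // instead of allocating a new Python string.
+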
645
+ Status ConvertStruct(PandasOptions options, const ChunkedArray& data,
646
+ PyObject** out_values) {
647
+ if (data.num_chunks() == 0) {
648
+ return Status::OK();
649
+ }
650
+ // ChunkedArray has at least one chunk
651
+ auto arr = checked_cast<const StructArray*>(data.chunk(0).get());
652
+ // Use it to cache the struct type and number of fields for all chunks
653
+ int32_t num_fields = arr->num_fields();
654
+ auto array_type = arr->type();
655
+ std::vector<OwnedRef> fields_data(num_fields * data.num_chunks());
656
+ OwnedRef dict_item;
657
+
658
+ // See notes in MakeInnerOptions.
659
+ options = MakeInnerOptions(std::move(options));
660
+ // Don't blindly convert because timestamps in lists are handled differently.
661
+ options.timestamp_as_object = true;
662
+
663
+ for (int c = 0; c < data.num_chunks(); c++) {
664
+ auto fields_data_offset = c * num_fields;
665
+ auto arr = checked_cast<const StructArray*>(data.chunk(c).get());
666
+ // Convert the struct arrays first
667
+ for (int32_t i = 0; i < num_fields; i++) {
668
+ auto field = arr->field(static_cast<int>(i));
669
+ // In case the field is an extension array, use .storage() to convert to Pandas
670
+ if (field->type()->id() == Type::EXTENSION) {
671
+ const ExtensionArray& arr_ext = checked_cast<const ExtensionArray&>(*field);
672
+ field = arr_ext.storage();
673
+ }
674
+ RETURN_NOT_OK(ConvertArrayToPandas(options, field, nullptr,
675
+ fields_data[i + fields_data_offset].ref()));
676
+ DCHECK(PyArray_Check(fields_data[i + fields_data_offset].obj()));
677
+ }
678
+
679
+ // Construct a dictionary for each row
680
+ const bool has_nulls = data.null_count() > 0;
681
+ for (int64_t i = 0; i < arr->length(); ++i) {
682
+ if (has_nulls && arr->IsNull(i)) {
683
+ Py_INCREF(Py_None);
684
+ *out_values = Py_None;
685
+ } else {
686
+ // Build the new dict object for the row
687
+ dict_item.reset(PyDict_New());
688
+ RETURN_IF_PYERROR();
689
+ for (int32_t field_idx = 0; field_idx < num_fields; ++field_idx) {
690
+ OwnedRef field_value;
691
+ auto name = array_type->field(static_cast<int>(field_idx))->name();
692
+ if (!arr->field(static_cast<int>(field_idx))->IsNull(i)) {
693
+ // Value exists in child array, obtain it
694
+ auto array = reinterpret_cast<PyArrayObject*>(
695
+ fields_data[field_idx + fields_data_offset].obj());
696
+ auto ptr = reinterpret_cast<const char*>(PyArray_GETPTR1(array, i));
697
+ field_value.reset(PyArray_GETITEM(array, ptr));
698
+ RETURN_IF_PYERROR();
699
+ } else {
700
+ // Translate the Null to a None
701
+ Py_INCREF(Py_None);
702
+ field_value.reset(Py_None);
703
+ }
704
+ // PyDict_SetItemString increments reference count
705
+ auto setitem_result =
706
+ PyDict_SetItemString(dict_item.obj(), name.c_str(), field_value.obj());
707
+ RETURN_IF_PYERROR();
708
+ DCHECK_EQ(setitem_result, 0);
709
+ }
710
+ *out_values = dict_item.obj();
711
+ // Grant ownership to the resulting array
712
+ Py_INCREF(*out_values);
713
+ }
714
+ ++out_values;
715
+ }
716
+ }
717
+ return Status::OK();
718
+ }
719
+
720
+ Status DecodeDictionaries(MemoryPool* pool, const std::shared_ptr<DataType>& dense_type,
721
+ ArrayVector* arrays) {
722
+ compute::ExecContext ctx(pool);
723
+ compute::CastOptions options;
724
+ for (size_t i = 0; i < arrays->size(); ++i) {
725
+ ARROW_ASSIGN_OR_RAISE((*arrays)[i],
726
+ compute::Cast(*(*arrays)[i], dense_type, options, &ctx));
727
+ }
728
+ return Status::OK();
729
+ }
730
+
731
+ Status DecodeDictionaries(MemoryPool* pool, const std::shared_ptr<DataType>& dense_type,
732
+ std::shared_ptr<ChunkedArray>* array) {
733
+ auto chunks = (*array)->chunks();
734
+ RETURN_NOT_OK(DecodeDictionaries(pool, dense_type, &chunks));
735
+ *array = std::make_shared<ChunkedArray>(std::move(chunks), dense_type);
736
+ return Status::OK();
737
+ }
738
+
739
+ template <typename ListArrayT>
740
+ Status ConvertListsLike(PandasOptions options, const ChunkedArray& data,
741
+ PyObject** out_values) {
742
+ // Get column of underlying value arrays
743
+ ArrayVector value_arrays;
744
+ for (int c = 0; c < data.num_chunks(); c++) {
745
+ const auto& arr = checked_cast<const ListArrayT&>(*data.chunk(c));
746
+ // values() does not account for offsets, so we need to slice into it.
747
+ // We can't use Flatten(), because it removes the values behind a null list
748
+ // value, and that makes the offsets into original list values and our
749
+ // flattened_values array different.
750
+ std::shared_ptr<Array> flattened_values = arr.values()->Slice(
751
+ arr.value_offset(0), arr.value_offset(arr.length()) - arr.value_offset(0));
752
+ if (arr.value_type()->id() == Type::EXTENSION) {
753
+ const auto& arr_ext = checked_cast<const ExtensionArray&>(*flattened_values);
754
+ value_arrays.emplace_back(arr_ext.storage());
755
+ } else {
756
+ value_arrays.emplace_back(flattened_values);
757
+ }
758
+ }
759
+
760
+ using ListArrayType = typename ListArrayT::TypeClass;
761
+ const auto& list_type = checked_cast<const ListArrayType&>(*data.type());
762
+ auto value_type = list_type.value_type();
763
+ if (value_type->id() == Type::EXTENSION) {
764
+ value_type = checked_cast<const ExtensionType&>(*value_type).storage_type();
765
+ }
766
+
767
+ auto flat_column = std::make_shared<ChunkedArray>(value_arrays, value_type);
768
+
769
+ options = MakeInnerOptions(std::move(options));
770
+
771
+ OwnedRefNoGIL owned_numpy_array;
772
+ RETURN_NOT_OK(ConvertChunkedArrayToPandas(options, flat_column, nullptr,
773
+ owned_numpy_array.ref()));
774
+ PyObject* numpy_array = owned_numpy_array.obj();
775
+ DCHECK(PyArray_Check(numpy_array));
776
+
777
+ int64_t chunk_offset = 0;
778
+ for (int c = 0; c < data.num_chunks(); c++) {
779
+ const auto& arr = checked_cast<const ListArrayT&>(*data.chunk(c));
780
+ const bool has_nulls = data.null_count() > 0;
781
+ for (int64_t i = 0; i < arr.length(); ++i) {
782
+ if (has_nulls && arr.IsNull(i)) {
783
+ Py_INCREF(Py_None);
784
+ *out_values = Py_None;
785
+ } else {
786
+ // Need to subtract value_offset(0) since the original chunk might be a slice
787
+ // into another array.
788
+ OwnedRef start(PyLong_FromLongLong(arr.value_offset(i) + chunk_offset -
789
+ arr.value_offset(0)));
790
+ OwnedRef end(PyLong_FromLongLong(arr.value_offset(i + 1) + chunk_offset -
791
+ arr.value_offset(0)));
792
+ OwnedRef slice(PySlice_New(start.obj(), end.obj(), nullptr));
793
+
794
+ if (ARROW_PREDICT_FALSE(slice.obj() == nullptr)) {
795
+ // Fall out of loop, will return from RETURN_IF_PYERROR
796
+ break;
797
+ }
798
+ *out_values = PyObject_GetItem(numpy_array, slice.obj());
799
+
800
+ if (*out_values == nullptr) {
801
+ // Fall out of loop, will return from RETURN_IF_PYERROR
802
+ break;
803
+ }
804
+ }
805
+ ++out_values;
806
+ }
807
+ RETURN_IF_PYERROR();
808
+
809
+ chunk_offset += arr.value_offset(arr.length()) - arr.value_offset(0);
810
+ }
811
+
812
+ return Status::OK();
813
+ }
814
+
815
+ template <typename F1, typename F2, typename F3>
816
+ Status ConvertMapHelper(F1 resetRow, F2 addPairToRow, F3 stealRow,
817
+ const ChunkedArray& data, PyArrayObject* py_keys,
818
+ PyArrayObject* py_items,
819
+ // needed for null checks in items
820
+ const std::vector<std::shared_ptr<Array>> item_arrays,
821
+ PyObject** out_values) {
822
+ OwnedRef key_value;
823
+ OwnedRef item_value;
824
+
825
+ int64_t chunk_offset = 0;
826
+ for (int c = 0; c < data.num_chunks(); ++c) {
827
+ const auto& arr = checked_cast<const MapArray&>(*data.chunk(c));
828
+ const bool has_nulls = data.null_count() > 0;
829
+
830
+ // Make a list of key/item pairs for each row in array
831
+ for (int64_t i = 0; i < arr.length(); ++i) {
832
+ if (has_nulls && arr.IsNull(i)) {
833
+ Py_INCREF(Py_None);
834
+ *out_values = Py_None;
835
+ } else {
836
+ int64_t entry_offset = arr.value_offset(i);
837
+ int64_t num_pairs = arr.value_offset(i + 1) - entry_offset;
838
+
839
+ // Build the new list object for the row of Python pairs
840
+ RETURN_NOT_OK(resetRow(num_pairs));
841
+
842
+ // Add each key/item pair in the row
843
+ for (int64_t j = 0; j < num_pairs; ++j) {
844
+ // Get key value, key is non-nullable for a valid row
845
+ auto ptr_key = reinterpret_cast<const char*>(
846
+ PyArray_GETPTR1(py_keys, chunk_offset + entry_offset + j));
847
+ key_value.reset(PyArray_GETITEM(py_keys, ptr_key));
848
+ RETURN_IF_PYERROR();
849
+
850
+ if (item_arrays[c]->IsNull(entry_offset + j)) {
851
+ // Translate the Null to a None
852
+ Py_INCREF(Py_None);
853
+ item_value.reset(Py_None);
854
+ } else {
855
+ // Get valid value from item array
856
+ auto ptr_item = reinterpret_cast<const char*>(
857
+ PyArray_GETPTR1(py_items, chunk_offset + entry_offset + j));
858
+ item_value.reset(PyArray_GETITEM(py_items, ptr_item));
859
+ RETURN_IF_PYERROR();
860
+ }
861
+
862
+ // Add the key/item pair to the row
863
+ RETURN_NOT_OK(addPairToRow(j, key_value, item_value));
864
+ }
865
+
866
+ // Pass ownership to the resulting array
867
+ *out_values = stealRow();
868
+ }
869
+ ++out_values;
870
+ }
871
+ RETURN_IF_PYERROR();
872
+
873
+ chunk_offset += arr.values()->length();
874
+ }
875
+
876
+ return Status::OK();
877
+ }
878
+
879
+ // A more helpful error message around TypeErrors that may stem from unhashable keys
880
+ Status CheckMapAsPydictsTypeError() {
881
+ if (ARROW_PREDICT_TRUE(!PyErr_Occurred())) {
882
+ return Status::OK();
883
+ }
884
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) {
885
+ // Modify the error string directly, so it is re-raised
886
+ // with our additional info.
887
+ //
888
+ // There are not many interesting things happening when this
889
+ // is hit. This is intended to only be called directly after
890
+ // PyDict_SetItem, where a finite set of errors could occur.
891
+ PyObject *type, *value, *traceback;
892
+ PyErr_Fetch(&type, &value, &traceback);
893
+ std::string message;
894
+ RETURN_NOT_OK(internal::PyObject_StdStringStr(value, &message));
895
+ message +=
896
+ ". If keys are not hashable, then you must use the option "
897
+ "[maps_as_pydicts=None (default)]";
898
+
899
+ // resets the error
900
+ PyErr_SetString(PyExc_TypeError, message.c_str());
901
+ }
902
+ return ConvertPyError();
903
+ }
904
+
905
+ Status CheckForDuplicateKeys(bool error_on_duplicate_keys, Py_ssize_t total_dict_len,
906
+ Py_ssize_t total_raw_len) {
907
+ if (total_dict_len < total_raw_len) {
908
+ const char* message =
909
+ "[maps_as_pydicts] "
910
+ "After conversion of Arrow maps to pydicts, "
911
+ "detected data loss due to duplicate keys. "
912
+ "Original input length is [%lld], total converted pydict length is [%lld].";
913
+ std::array<char, 256> buf;
914
+ std::snprintf(buf.data(), buf.size(), message, total_raw_len, total_dict_len);
915
+
916
+ if (error_on_duplicate_keys) {
917
+ return Status::UnknownError(buf.data());
918
+ } else {
919
+ ARROW_LOG(WARNING) << buf.data();
920
+ }
921
+ }
922
+ return Status::OK();
923
+ }
+
+ Status ConvertMap(PandasOptions options, const ChunkedArray& data,
+ PyObject** out_values) {
+ // Get columns of underlying key/item arrays
+ std::vector<std::shared_ptr<Array>> key_arrays;
+ std::vector<std::shared_ptr<Array>> item_arrays;
+ for (int c = 0; c < data.num_chunks(); ++c) {
+ const auto& map_arr = checked_cast<const MapArray&>(*data.chunk(c));
+ key_arrays.emplace_back(map_arr.keys());
+ item_arrays.emplace_back(map_arr.items());
+ }
+
+ const auto& map_type = checked_cast<const MapType&>(*data.type());
+ auto key_type = map_type.key_type();
+ auto item_type = map_type.item_type();
+
+ // ARROW-6899: Convert dictionary-encoded children to dense instead of
+ // failing below. A more efficient conversion than this could be done later
+ if (key_type->id() == Type::DICTIONARY) {
+ auto dense_type = checked_cast<const DictionaryType&>(*key_type).value_type();
+ RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &key_arrays));
+ key_type = dense_type;
+ }
+ if (item_type->id() == Type::DICTIONARY) {
+ auto dense_type = checked_cast<const DictionaryType&>(*item_type).value_type();
+ RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &item_arrays));
+ item_type = dense_type;
+ }
+
+ // See notes in MakeInnerOptions.
+ options = MakeInnerOptions(std::move(options));
+ // Don't blindly convert because timestamps in lists are handled differently.
+ options.timestamp_as_object = true;
+
+ auto flat_keys = std::make_shared<ChunkedArray>(key_arrays, key_type);
+ auto flat_items = std::make_shared<ChunkedArray>(item_arrays, item_type);
+ OwnedRefNoGIL owned_numpy_keys;
+ RETURN_NOT_OK(
+ ConvertChunkedArrayToPandas(options, flat_keys, nullptr, owned_numpy_keys.ref()));
+ OwnedRefNoGIL owned_numpy_items;
+ RETURN_NOT_OK(
+ ConvertChunkedArrayToPandas(options, flat_items, nullptr, owned_numpy_items.ref()));
+ PyArrayObject* py_keys = reinterpret_cast<PyArrayObject*>(owned_numpy_keys.obj());
+ PyArrayObject* py_items = reinterpret_cast<PyArrayObject*>(owned_numpy_items.obj());
+
+ if (options.maps_as_pydicts == MapConversionType::DEFAULT) {
+ // The default behavior is to express an Arrow MAP as a list of [(key, value), ...] pairs
+ OwnedRef list_item;
+ return ConvertMapHelper(
+ [&list_item](int64_t num_pairs) {
+ list_item.reset(PyList_New(num_pairs));
+ return CheckPyError();
+ },
+ [&list_item](int64_t idx, OwnedRef& key_value, OwnedRef& item_value) {
+ PyList_SET_ITEM(list_item.obj(), idx,
+ PyTuple_Pack(2, key_value.obj(), item_value.obj()));
+ return CheckPyError();
+ },
+ [&list_item] { return list_item.detach(); }, data, py_keys, py_items, item_arrays,
+ out_values);
+ } else {
+ // Use a native pydict
+ OwnedRef dict_item;
+ Py_ssize_t total_dict_len{0};
+ Py_ssize_t total_raw_len{0};
+
+ bool error_on_duplicate_keys;
+ if (options.maps_as_pydicts == MapConversionType::LOSSY) {
+ error_on_duplicate_keys = false;
+ } else if (options.maps_as_pydicts == MapConversionType::STRICT_) {
+ error_on_duplicate_keys = true;
+ } else {
+ auto val = std::underlying_type_t<MapConversionType>(options.maps_as_pydicts);
+ return Status::UnknownError("Received unknown option for maps_as_pydicts: " +
+ std::to_string(val));
+ }
+
+ auto status = ConvertMapHelper(
+ [&dict_item, &total_raw_len](int64_t num_pairs) {
+ total_raw_len += num_pairs;
+ dict_item.reset(PyDict_New());
+ return CheckPyError();
+ },
+ [&dict_item]([[maybe_unused]] int64_t idx, OwnedRef& key_value,
+ OwnedRef& item_value) {
+ auto setitem_result =
+ PyDict_SetItem(dict_item.obj(), key_value.obj(), item_value.obj());
+ ARROW_RETURN_NOT_OK(CheckMapAsPydictsTypeError());
+ // returns -1 if there are internal errors around hashing/resizing
+ return setitem_result == 0 ? Status::OK()
+ : Status::UnknownError(
+ "[maps_as_pydicts] "
+ "Unexpected failure inserting Arrow (key, "
+ "value) pair into Python dict");
+ },
+ [&dict_item, &total_dict_len] {
+ total_dict_len += PyDict_Size(dict_item.obj());
+ return dict_item.detach();
+ },
+ data, py_keys, py_items, item_arrays, out_values);
+
+ ARROW_RETURN_NOT_OK(status);
+ // If there were no errors generating the pydicts,
+ // then check if we detected any data loss from duplicate keys.
+ return CheckForDuplicateKeys(error_on_duplicate_keys, total_dict_len, total_raw_len);
+ }
+ }
+
+ template <typename InType, typename OutType>
+ inline void ConvertNumericNullable(const ChunkedArray& data, InType na_value,
+ OutType* out_values) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = *data.chunk(c);
+ const InType* in_values = GetPrimitiveValues<InType>(arr);
+
+ if (arr.null_count() > 0) {
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ *out_values++ = arr.IsNull(i) ? na_value : in_values[i];
+ }
+ } else {
+ memcpy(out_values, in_values, sizeof(InType) * arr.length());
+ out_values += arr.length();
+ }
+ }
+ }
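+
+ // Usage note (editor's note): below, na_value is a type-appropriate sentinel,
+ // e.g. kPandasTimestampNull (INT64_MIN) for datetime64/timedelta64 blocks, so
+ // null slots become pandas NaT; chunks without nulls take the memcpy fast path.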
+
+ template <typename InType, typename OutType>
+ inline void ConvertNumericNullableCast(const ChunkedArray& data, InType na_value,
+ OutType* out_values) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = *data.chunk(c);
+ const InType* in_values = GetPrimitiveValues<InType>(arr);
+
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ *out_values++ = arr.IsNull(i) ? static_cast<OutType>(na_value)
+ : static_cast<OutType>(in_values[i]);
+ }
+ }
+ }
+
+ template <int NPY_TYPE>
+ class TypedPandasWriter : public PandasWriter {
+ public:
+ using T = typename npy_traits<NPY_TYPE>::value_type;
+
+ using PandasWriter::PandasWriter;
+
+ Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
+ if (CanZeroCopy(*data)) {
+ PyObject* wrapped;
+ npy_intp dims[2] = {static_cast<npy_intp>(num_columns_),
+ static_cast<npy_intp>(num_rows_)};
+ RETURN_NOT_OK(
+ MakeNumPyView(data->chunk(0), py_ref, NPY_TYPE, /*ndim=*/2, dims, &wrapped));
+ SetBlockData(wrapped);
+ return Status::OK();
+ } else {
+ RETURN_NOT_OK(CheckNotZeroCopyOnly(*data));
+ RETURN_NOT_OK(EnsureAllocated());
+ return CopyInto(data, /*rel_placement=*/0);
+ }
+ }
+
+ Status CheckTypeExact(const DataType& type, Type::type expected) {
+ if (type.id() != expected) {
+ // TODO(wesm): stringify NumPy / pandas type
+ return Status::NotImplemented("Cannot write Arrow data of type ", type.ToString());
+ }
+ return Status::OK();
+ }
+
+ T* GetBlockColumnStart(int64_t rel_placement) {
+ return reinterpret_cast<T*>(block_data_) + rel_placement * num_rows_;
+ }
+
+ protected:
+ Status Allocate() override { return AllocateNDArray(NPY_TYPE); }
+ };
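+
+ // Layout note (editor's note): a consolidated block is a (num_columns x num_rows)
+ // row-major 2D NumPy array, so each pandas column occupies one contiguous row of
+ // the block; GetBlockColumnStart(rel_placement) returns the start of that row.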
+
+ struct ObjectWriterVisitor {
+ const PandasOptions& options;
+ const ChunkedArray& data;
+ PyObject** out_values;
+
+ Status Visit(const NullType& type) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ std::shared_ptr<Array> arr = data.chunk(c);
+
+ for (int64_t i = 0; i < arr->length(); ++i) {
+ // All values are null
+ Py_INCREF(Py_None);
+ *out_values = Py_None;
+ ++out_values;
+ }
+ }
+ return Status::OK();
+ }
+
+ Status Visit(const BooleanType& type) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = checked_cast<const BooleanArray&>(*data.chunk(c));
+
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ if (arr.IsNull(i)) {
+ Py_INCREF(Py_None);
+ *out_values++ = Py_None;
+ } else if (arr.Value(i)) {
+ // True
+ Py_INCREF(Py_True);
+ *out_values++ = Py_True;
+ } else {
+ // False
+ Py_INCREF(Py_False);
+ *out_values++ = Py_False;
+ }
+ }
+ }
+ return Status::OK();
+ }
+
+ template <typename Type>
+ enable_if_integer<Type, Status> Visit(const Type& type) {
+ using T = typename Type::c_type;
+ auto WrapValue = [](T value, PyObject** out) {
+ *out = std::is_signed<T>::value ? PyLong_FromLongLong(value)
+ : PyLong_FromUnsignedLongLong(value);
+ RETURN_IF_PYERROR();
+ return Status::OK();
+ };
+ return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
+ }
+
+ template <typename Type>
+ enable_if_t<is_base_binary_type<Type>::value || is_fixed_size_binary_type<Type>::value,
+ Status>
+ Visit(const Type& type) {
+ auto WrapValue = [](const std::string_view& view, PyObject** out) {
+ *out = WrapBytes<Type>::Wrap(view.data(), view.length());
+ if (*out == nullptr) {
+ PyErr_Clear();
+ return Status::UnknownError("Wrapping ", view, " failed");
+ }
+ return Status::OK();
+ };
+ return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
+ }
+
+ template <typename Type>
+ enable_if_date<Type, Status> Visit(const Type& type) {
+ auto WrapValue = [](typename Type::c_type value, PyObject** out) {
+ RETURN_NOT_OK(internal::PyDate_from_int(value, Type::UNIT, out));
+ RETURN_IF_PYERROR();
+ return Status::OK();
+ };
+ return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
+ }
+
+ template <typename Type>
+ enable_if_time<Type, Status> Visit(const Type& type) {
+ const TimeUnit::type unit = type.unit();
+ auto WrapValue = [unit](typename Type::c_type value, PyObject** out) {
+ RETURN_NOT_OK(internal::PyTime_from_int(value, unit, out));
+ RETURN_IF_PYERROR();
+ return Status::OK();
+ };
+ return ConvertAsPyObjects<Type>(options, data, WrapValue, out_values);
+ }
+
+ template <typename Type>
+ enable_if_timestamp<Type, Status> Visit(const Type& type) {
+ const TimeUnit::type unit = type.unit();
+ OwnedRef tzinfo;
+
+ auto ConvertTimezoneNaive = [&](typename Type::c_type value, PyObject** out) {
+ RETURN_NOT_OK(internal::PyDateTime_from_int(value, unit, out));
+ RETURN_IF_PYERROR();
+ return Status::OK();
+ };
+ auto ConvertTimezoneAware = [&](typename Type::c_type value, PyObject** out) {
+ PyObject* naive_datetime;
+ RETURN_NOT_OK(ConvertTimezoneNaive(value, &naive_datetime));
+
+ // convert the timezone naive datetime object to timezone aware
+ // two step conversion of the datetime mimics Python's code:
+ // dt.replace(tzinfo=datetime.timezone.utc).astimezone(tzinfo)
+ // first step: replacing timezone with timezone.utc (replace method)
+ OwnedRef args(PyTuple_New(0));
+ OwnedRef keywords(PyDict_New());
+ PyDict_SetItemString(keywords.obj(), "tzinfo", PyDateTime_TimeZone_UTC);
+ OwnedRef naive_datetime_replace(PyObject_GetAttrString(naive_datetime, "replace"));
+ OwnedRef datetime_utc(
+ PyObject_Call(naive_datetime_replace.obj(), args.obj(), keywords.obj()));
+ // second step: adjust the datetime to tzinfo timezone (astimezone method)
+ *out = PyObject_CallMethod(datetime_utc.obj(), "astimezone", "O", tzinfo.obj());
+
+ // the timezone naive object is no longer required
+ Py_DECREF(naive_datetime);
+ RETURN_IF_PYERROR();
+
+ return Status::OK();
+ };
+
+ if (!type.timezone().empty() && !options.ignore_timezone) {
+ // convert timezone aware
+ PyObject* tzobj;
+ ARROW_ASSIGN_OR_RAISE(tzobj, internal::StringToTzinfo(type.timezone()));
+ tzinfo.reset(tzobj);
+ RETURN_IF_PYERROR();
+ RETURN_NOT_OK(
+ ConvertAsPyObjects<Type>(options, data, ConvertTimezoneAware, out_values));
+ } else {
+ // convert timezone naive
+ RETURN_NOT_OK(
+ ConvertAsPyObjects<Type>(options, data, ConvertTimezoneNaive, out_values));
+ }
+
+ return Status::OK();
+ }
+
+ template <typename Type>
+ enable_if_t<std::is_same<Type, MonthDayNanoIntervalType>::value, Status> Visit(
+ const Type& type) {
+ OwnedRef args(PyTuple_New(0));
+ OwnedRef kwargs(PyDict_New());
+ RETURN_IF_PYERROR();
+ auto to_date_offset = [&](const MonthDayNanoIntervalType::MonthDayNanos& interval,
+ PyObject** out) {
+ DCHECK(internal::BorrowPandasDataOffsetType() != nullptr);
+ // DateOffset objects do not add a nanoseconds component to pd.Timestamp,
+ // as of Pandas 1.3.3
+ // (https://github.com/pandas-dev/pandas/issues/43892).
+ // So convert microseconds and remainder to preserve data
+ // but give users more expected results.
+ int64_t microseconds = interval.nanoseconds / 1000;
+ int64_t nanoseconds;
+ if (interval.nanoseconds >= 0) {
+ nanoseconds = interval.nanoseconds % 1000;
+ } else {
+ nanoseconds = -((-interval.nanoseconds) % 1000);
+ }
+
+ PyDict_SetItemString(kwargs.obj(), "months", PyLong_FromLong(interval.months));
+ PyDict_SetItemString(kwargs.obj(), "days", PyLong_FromLong(interval.days));
+ PyDict_SetItemString(kwargs.obj(), "microseconds",
+ PyLong_FromLongLong(microseconds));
+ PyDict_SetItemString(kwargs.obj(), "nanoseconds", PyLong_FromLongLong(nanoseconds));
+ *out =
+ PyObject_Call(internal::BorrowPandasDataOffsetType(), args.obj(), kwargs.obj());
+ RETURN_IF_PYERROR();
+ return Status::OK();
+ };
+ return ConvertAsPyObjects<MonthDayNanoIntervalType>(options, data, to_date_offset,
+ out_values);
+ }
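+
+ // Worked example for the nanosecond split above (editor's note): with
+ // interval.nanoseconds == -1500, microseconds = -1500 / 1000 = -1 (C++
+ // integer division truncates toward zero) and nanoseconds = -((1500) % 1000)
+ // = -500, so the DateOffset receives -1us and -500ns, together -1500ns.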
+
+ Status Visit(const Decimal128Type& type) {
+ OwnedRef decimal;
+ OwnedRef Decimal;
+ RETURN_NOT_OK(internal::ImportModule("decimal", &decimal));
+ RETURN_NOT_OK(internal::ImportFromModule(decimal.obj(), "Decimal", &Decimal));
+ PyObject* decimal_constructor = Decimal.obj();
+
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = checked_cast<const arrow::Decimal128Array&>(*data.chunk(c));
+
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ if (arr.IsNull(i)) {
+ Py_INCREF(Py_None);
+ *out_values++ = Py_None;
+ } else {
+ *out_values++ =
+ internal::DecimalFromString(decimal_constructor, arr.FormatValue(i));
+ RETURN_IF_PYERROR();
+ }
+ }
+ }
+
+ return Status::OK();
+ }
+
+ Status Visit(const Decimal256Type& type) {
+ OwnedRef decimal;
+ OwnedRef Decimal;
+ RETURN_NOT_OK(internal::ImportModule("decimal", &decimal));
+ RETURN_NOT_OK(internal::ImportFromModule(decimal.obj(), "Decimal", &Decimal));
+ PyObject* decimal_constructor = Decimal.obj();
+
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = checked_cast<const arrow::Decimal256Array&>(*data.chunk(c));
+
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ if (arr.IsNull(i)) {
+ Py_INCREF(Py_None);
+ *out_values++ = Py_None;
+ } else {
+ *out_values++ =
+ internal::DecimalFromString(decimal_constructor, arr.FormatValue(i));
+ RETURN_IF_PYERROR();
+ }
+ }
+ }
+
+ return Status::OK();
+ }
+
+ template <typename T>
+ enable_if_t<is_fixed_size_list_type<T>::value || is_var_length_list_type<T>::value,
+ Status>
+ Visit(const T& type) {
+ using ArrayType = typename TypeTraits<T>::ArrayType;
+ if (!ListTypeSupported(*type.value_type())) {
+ return Status::NotImplemented(
+ "Not implemented type for conversion from List to Pandas: ",
+ type.value_type()->ToString());
+ }
+ return ConvertListsLike<ArrayType>(options, data, out_values);
+ }
+
+ Status Visit(const MapType& type) { return ConvertMap(options, data, out_values); }
+
+ Status Visit(const StructType& type) {
+ return ConvertStruct(options, data, out_values);
+ }
+
+ template <typename Type>
+ enable_if_t<is_floating_type<Type>::value ||
+ std::is_same<DictionaryType, Type>::value ||
+ std::is_same<DurationType, Type>::value ||
+ std::is_same<RunEndEncodedType, Type>::value ||
+ std::is_same<ListViewType, Type>::value ||
+ std::is_same<LargeListViewType, Type>::value ||
+ std::is_same<ExtensionType, Type>::value ||
+ (std::is_base_of<IntervalType, Type>::value &&
+ !std::is_same<MonthDayNanoIntervalType, Type>::value) ||
+ std::is_base_of<UnionType, Type>::value ||
+ std::is_base_of<BinaryViewType, Type>::value,
+ Status>
+ Visit(const Type& type) {
+ return Status::NotImplemented("No implemented conversion to object dtype: ",
+ type.ToString());
+ }
+ };
+
+ class ObjectWriter : public TypedPandasWriter<NPY_OBJECT> {
+ public:
+ using TypedPandasWriter<NPY_OBJECT>::TypedPandasWriter;
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ PyAcquireGIL lock;
+ ObjectWriterVisitor visitor{this->options_, *data,
+ this->GetBlockColumnStart(rel_placement)};
+ return VisitTypeInline(*data->type(), &visitor);
+ }
+ };
+
+ static inline bool IsNonNullContiguous(const ChunkedArray& data) {
+ return data.num_chunks() == 1 && data.null_count() == 0;
+ }
+
+ template <int NPY_TYPE>
+ class IntWriter : public TypedPandasWriter<NPY_TYPE> {
+ public:
+ using ArrowType = typename npy_traits<NPY_TYPE>::TypeClass;
+ using TypedPandasWriter<NPY_TYPE>::TypedPandasWriter;
+
+ bool CanZeroCopy(const ChunkedArray& data) const override {
+ return IsNonNullContiguous(data);
+ }
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ RETURN_NOT_OK(this->CheckTypeExact(*data->type(), ArrowType::type_id));
+ ConvertIntegerNoNullsSameType<typename ArrowType::c_type>(
+ this->options_, *data, this->GetBlockColumnStart(rel_placement));
+ return Status::OK();
+ }
+ };
+
+ template <int NPY_TYPE>
+ class FloatWriter : public TypedPandasWriter<NPY_TYPE> {
+ public:
+ using ArrowType = typename npy_traits<NPY_TYPE>::TypeClass;
+ using TypedPandasWriter<NPY_TYPE>::TypedPandasWriter;
+ using T = typename ArrowType::c_type;
+
+ bool CanZeroCopy(const ChunkedArray& data) const override {
+ return IsNonNullContiguous(data) && data.type()->id() == ArrowType::type_id;
+ }
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ Type::type in_type = data->type()->id();
+ auto out_values = this->GetBlockColumnStart(rel_placement);
+
+ #define INTEGER_CASE(IN_TYPE) \
+ ConvertIntegerWithNulls<IN_TYPE, T>(this->options_, *data, out_values); \
+ break;
+
+ switch (in_type) {
+ case Type::UINT8:
+ INTEGER_CASE(uint8_t);
+ case Type::INT8:
+ INTEGER_CASE(int8_t);
+ case Type::UINT16:
+ INTEGER_CASE(uint16_t);
+ case Type::INT16:
+ INTEGER_CASE(int16_t);
+ case Type::UINT32:
+ INTEGER_CASE(uint32_t);
+ case Type::INT32:
+ INTEGER_CASE(int32_t);
+ case Type::UINT64:
+ INTEGER_CASE(uint64_t);
+ case Type::INT64:
+ INTEGER_CASE(int64_t);
+ case Type::HALF_FLOAT:
+ ConvertNumericNullableCast(*data, npy_traits<NPY_TYPE>::na_sentinel, out_values);
+ break;
+ case Type::FLOAT:
+ ConvertNumericNullableCast(*data, npy_traits<NPY_TYPE>::na_sentinel, out_values);
+ break;
+ case Type::DOUBLE:
+ ConvertNumericNullableCast(*data, npy_traits<NPY_TYPE>::na_sentinel, out_values);
+ break;
+ default:
+ return Status::NotImplemented("Cannot write Arrow data of type ",
+ data->type()->ToString(),
+ " to a Pandas floating point block");
+ }
+
+ #undef INTEGER_CASE
+
+ return Status::OK();
+ }
+ };
+
+ using UInt8Writer = IntWriter<NPY_UINT8>;
+ using Int8Writer = IntWriter<NPY_INT8>;
+ using UInt16Writer = IntWriter<NPY_UINT16>;
+ using Int16Writer = IntWriter<NPY_INT16>;
+ using UInt32Writer = IntWriter<NPY_UINT32>;
+ using Int32Writer = IntWriter<NPY_INT32>;
+ using UInt64Writer = IntWriter<NPY_UINT64>;
+ using Int64Writer = IntWriter<NPY_INT64>;
+ using Float16Writer = FloatWriter<NPY_FLOAT16>;
+ using Float32Writer = FloatWriter<NPY_FLOAT32>;
+ using Float64Writer = FloatWriter<NPY_FLOAT64>;
+
+ class BoolWriter : public TypedPandasWriter<NPY_BOOL> {
+ public:
+ using TypedPandasWriter<NPY_BOOL>::TypedPandasWriter;
+
+ Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
+ RETURN_NOT_OK(
+ CheckNoZeroCopy("Zero copy conversions not possible with "
+ "boolean types"));
+ RETURN_NOT_OK(EnsureAllocated());
+ return CopyInto(data, /*rel_placement=*/0);
+ }
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ RETURN_NOT_OK(this->CheckTypeExact(*data->type(), Type::BOOL));
+ auto out_values = this->GetBlockColumnStart(rel_placement);
+ for (int c = 0; c < data->num_chunks(); c++) {
+ const auto& arr = checked_cast<const BooleanArray&>(*data->chunk(c));
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ *out_values++ = static_cast<uint8_t>(arr.Value(i));
+ }
+ }
+ return Status::OK();
+ }
+ };
+
+ // ----------------------------------------------------------------------
+ // Date / timestamp types
+
+ template <typename T, int64_t SHIFT>
+ inline void ConvertDatetime(const ChunkedArray& data, int64_t* out_values) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = *data.chunk(c);
+ const T* in_values = GetPrimitiveValues<T>(arr);
+
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ *out_values++ = arr.IsNull(i) ? kPandasTimestampNull
+ : (static_cast<int64_t>(in_values[i]) * SHIFT);
+ }
+ }
+ }
+
+ template <typename T, int SHIFT>
+ void ConvertDatesShift(const ChunkedArray& data, int64_t* out_values) {
+ for (int c = 0; c < data.num_chunks(); c++) {
+ const auto& arr = *data.chunk(c);
+ const T* in_values = GetPrimitiveValues<T>(arr);
+ for (int64_t i = 0; i < arr.length(); ++i) {
+ *out_values++ = arr.IsNull(i) ? kPandasTimestampNull
+ : static_cast<int64_t>(in_values[i]) / SHIFT;
+ }
+ }
+ }
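+
+ // Example (editor's note): ConvertDatetime multiplies to a finer unit (e.g.
+ // seconds -> nanoseconds with SHIFT = 1000000000), while ConvertDatesShift
+ // divides to a coarser one: a date64 value of 86400000 ms becomes day 1
+ // (1970-01-02) with SHIFT = 86400000; nulls map to kPandasTimestampNull
+ // either way.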
+
+ class DatetimeDayWriter : public TypedPandasWriter<NPY_DATETIME> {
+ public:
+ using TypedPandasWriter<NPY_DATETIME>::TypedPandasWriter;
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ int64_t* out_values = this->GetBlockColumnStart(rel_placement);
+ const auto& type = checked_cast<const DateType&>(*data->type());
+ switch (type.unit()) {
+ case DateUnit::DAY:
+ ConvertDatesShift<int32_t, 1LL>(*data, out_values);
+ break;
+ case DateUnit::MILLI:
+ ConvertDatesShift<int64_t, 86400000LL>(*data, out_values);
+ break;
+ }
+ return Status::OK();
+ }
+
+ protected:
+ Status Allocate() override {
+ RETURN_NOT_OK(this->AllocateNDArray(NPY_DATETIME));
+ SetDatetimeUnit(NPY_FR_D);
+ return Status::OK();
+ }
+ };
+
+ template <TimeUnit::type UNIT>
+ class DatetimeWriter : public TypedPandasWriter<NPY_DATETIME> {
+ public:
+ using TypedPandasWriter<NPY_DATETIME>::TypedPandasWriter;
+
+ bool CanZeroCopy(const ChunkedArray& data) const override {
+ if (data.type()->id() == Type::TIMESTAMP) {
+ const auto& type = checked_cast<const TimestampType&>(*data.type());
+ return IsNonNullContiguous(data) && type.unit() == UNIT;
+ } else {
+ return false;
+ }
+ }
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ const auto& ts_type = checked_cast<const TimestampType&>(*data->type());
+ DCHECK_EQ(UNIT, ts_type.unit()) << "Should only call instances of this writer "
+ << "with arrays of the correct unit";
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull,
+ this->GetBlockColumnStart(rel_placement));
+ return Status::OK();
+ }
+
+ protected:
+ Status Allocate() override {
+ RETURN_NOT_OK(this->AllocateNDArray(NPY_DATETIME));
+ SetDatetimeUnit(internal::NumPyFrequency(UNIT));
+ return Status::OK();
+ }
+ };
+
+ using DatetimeSecondWriter = DatetimeWriter<TimeUnit::SECOND>;
+
+ class DatetimeMilliWriter : public DatetimeWriter<TimeUnit::MILLI> {
+ public:
+ using DatetimeWriter<TimeUnit::MILLI>::DatetimeWriter;
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ Type::type type = data->type()->id();
+ int64_t* out_values = this->GetBlockColumnStart(rel_placement);
+ if (type == Type::DATE32) {
+ // Convert from days since epoch to datetime64[ms]
+ ConvertDatetime<int32_t, 86400000L>(*data, out_values);
+ } else if (type == Type::DATE64) {
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull, out_values);
+ } else {
+ const auto& ts_type = checked_cast<const TimestampType&>(*data->type());
+ DCHECK_EQ(TimeUnit::MILLI, ts_type.unit())
+ << "Should only call instances of this writer "
+ << "with arrays of the correct unit";
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull, out_values);
+ }
+ return Status::OK();
+ }
+ };
+
+ using DatetimeMicroWriter = DatetimeWriter<TimeUnit::MICRO>;
+
+ class DatetimeNanoWriter : public DatetimeWriter<TimeUnit::NANO> {
+ public:
+ using DatetimeWriter<TimeUnit::NANO>::DatetimeWriter;
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ Type::type type = data->type()->id();
+ int64_t* out_values = this->GetBlockColumnStart(rel_placement);
+ compute::ExecContext ctx(options_.pool);
+ compute::CastOptions options;
+ if (options_.safe_cast) {
+ options = compute::CastOptions::Safe();
+ } else {
+ options = compute::CastOptions::Unsafe();
+ }
+ Datum out;
+ auto target_type = timestamp(TimeUnit::NANO);
+
+ if (type == Type::DATE32) {
+ // Convert from days since epoch to datetime64[ns]
+ ConvertDatetime<int32_t, kNanosecondsInDay>(*data, out_values);
+ } else if (type == Type::DATE64) {
+ // Date64Type is millisecond timestamp stored as int64_t
+ // TODO(wesm): Do we want to make sure to zero out the milliseconds?
+ ConvertDatetime<int64_t, 1000000L>(*data, out_values);
+ } else if (type == Type::TIMESTAMP) {
+ const auto& ts_type = checked_cast<const TimestampType&>(*data->type());
+
+ if (ts_type.unit() == TimeUnit::NANO) {
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull, out_values);
+ } else if (ts_type.unit() == TimeUnit::MICRO || ts_type.unit() == TimeUnit::MILLI ||
+ ts_type.unit() == TimeUnit::SECOND) {
+ ARROW_ASSIGN_OR_RAISE(out, compute::Cast(data, target_type, options, &ctx));
+ ConvertNumericNullable<int64_t>(*out.chunked_array(), kPandasTimestampNull,
+ out_values);
+ } else {
+ return Status::NotImplemented("Unsupported time unit");
+ }
+ } else {
+ return Status::NotImplemented("Cannot write Arrow data of type ",
+ data->type()->ToString(),
+ " to a Pandas datetime block.");
+ }
+ return Status::OK();
+ }
+ };
+
+ template <typename BASE>
+ class DatetimeTZWriter : public BASE {
+ public:
+ DatetimeTZWriter(const PandasOptions& options, const std::string& timezone,
+ int64_t num_rows)
+ : BASE(options, num_rows, 1), timezone_(timezone) {}
+
+ protected:
+ Status GetResultBlock(PyObject** out) override {
+ RETURN_NOT_OK(this->MakeBlock1D());
+ *out = this->block_arr_.obj();
+ return Status::OK();
+ }
+
+ Status AddResultMetadata(PyObject* result) override {
+ PyObject* py_tz = PyUnicode_FromStringAndSize(
+ timezone_.c_str(), static_cast<Py_ssize_t>(timezone_.size()));
+ RETURN_IF_PYERROR();
+ PyDict_SetItemString(result, "timezone", py_tz);
+ Py_DECREF(py_tz);
+ return Status::OK();
+ }
+
+ private:
+ std::string timezone_;
+ };
+
+ using DatetimeSecondTZWriter = DatetimeTZWriter<DatetimeSecondWriter>;
+ using DatetimeMilliTZWriter = DatetimeTZWriter<DatetimeMilliWriter>;
+ using DatetimeMicroTZWriter = DatetimeTZWriter<DatetimeMicroWriter>;
+ using DatetimeNanoTZWriter = DatetimeTZWriter<DatetimeNanoWriter>;
+
+ template <TimeUnit::type UNIT>
+ class TimedeltaWriter : public TypedPandasWriter<NPY_TIMEDELTA> {
+ public:
+ using TypedPandasWriter<NPY_TIMEDELTA>::TypedPandasWriter;
+
+ Status AllocateTimedelta(int ndim) {
+ RETURN_NOT_OK(this->AllocateNDArray(NPY_TIMEDELTA, ndim));
+ SetDatetimeUnit(internal::NumPyFrequency(UNIT));
+ return Status::OK();
+ }
+
+ bool CanZeroCopy(const ChunkedArray& data) const override {
+ const auto& type = checked_cast<const DurationType&>(*data.type());
+ return IsNonNullContiguous(data) && type.unit() == UNIT;
+ }
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ const auto& type = checked_cast<const DurationType&>(*data->type());
+ DCHECK_EQ(UNIT, type.unit()) << "Should only call instances of this writer "
+ << "with arrays of the correct unit";
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull,
+ this->GetBlockColumnStart(rel_placement));
+ return Status::OK();
+ }
+
+ protected:
+ Status Allocate() override { return AllocateTimedelta(2); }
+ };
+
+ using TimedeltaSecondWriter = TimedeltaWriter<TimeUnit::SECOND>;
+ using TimedeltaMilliWriter = TimedeltaWriter<TimeUnit::MILLI>;
+ using TimedeltaMicroWriter = TimedeltaWriter<TimeUnit::MICRO>;
+
+ class TimedeltaNanoWriter : public TimedeltaWriter<TimeUnit::NANO> {
+ public:
+ using TimedeltaWriter<TimeUnit::NANO>::TimedeltaWriter;
+
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
+ Type::type type = data->type()->id();
+ int64_t* out_values = this->GetBlockColumnStart(rel_placement);
+ if (type == Type::DURATION) {
+ const auto& ts_type = checked_cast<const DurationType&>(*data->type());
+ if (ts_type.unit() == TimeUnit::NANO) {
+ ConvertNumericNullable<int64_t>(*data, kPandasTimestampNull, out_values);
+ } else if (ts_type.unit() == TimeUnit::MICRO) {
+ ConvertDatetime<int64_t, 1000L>(*data, out_values);
+ } else if (ts_type.unit() == TimeUnit::MILLI) {
+ ConvertDatetime<int64_t, 1000000L>(*data, out_values);
+ } else if (ts_type.unit() == TimeUnit::SECOND) {
+ ConvertDatetime<int64_t, 1000000000L>(*data, out_values);
+ } else {
+ return Status::NotImplemented("Unsupported time unit");
+ }
+ } else {
+ return Status::NotImplemented("Cannot write Arrow data of type ",
+ data->type()->ToString(),
+ " to a Pandas timedelta block.");
+ }
+ return Status::OK();
+ }
+ };
+
+ Status MakeZeroLengthArray(const std::shared_ptr<DataType>& type,
+ std::shared_ptr<Array>* out) {
+ std::unique_ptr<ArrayBuilder> builder;
+ RETURN_NOT_OK(MakeBuilder(default_memory_pool(), type, &builder));
+ RETURN_NOT_OK(builder->Resize(0));
+ return builder->Finish(out);
+ }
+
+ bool NeedDictionaryUnification(const ChunkedArray& data) {
+ if (data.num_chunks() < 2) {
+ return false;
+ }
+ const auto& arr_first = checked_cast<const DictionaryArray&>(*data.chunk(0));
+ for (int c = 1; c < data.num_chunks(); c++) {
+ const auto& arr = checked_cast<const DictionaryArray&>(*data.chunk(c));
+ if (!(arr_first.dictionary()->Equals(arr.dictionary()))) {
+ return true;
+ }
+ }
+ return false;
+ }
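+
+ // Example (editor's note): two chunks whose dictionaries are ["a", "b"] and
+ // ["b", "c"] need unification into one dictionary (with index remapping via a
+ // transpose buffer, see WriteIndicesVarying below) before a single pandas
+ // Categorical can represent them; identical dictionaries skip that work.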
1766
+
1767
+ template <typename IndexType>
1768
+ class CategoricalWriter
1769
+ : public TypedPandasWriter<arrow_traits<IndexType::type_id>::npy_type> {
1770
+ public:
1771
+ using TRAITS = arrow_traits<IndexType::type_id>;
1772
+ using ArrayType = typename TypeTraits<IndexType>::ArrayType;
1773
+ using T = typename TRAITS::T;
1774
+
1775
+ explicit CategoricalWriter(const PandasOptions& options, int64_t num_rows)
1776
+ : TypedPandasWriter<TRAITS::npy_type>(options, num_rows, 1),
1777
+ ordered_(false),
1778
+ needs_copy_(false) {}
1779
+
1780
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1781
+ return Status::NotImplemented("categorical type");
1782
+ }
1783
+
1784
+ Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
1785
+ const auto& dict_type = checked_cast<const DictionaryType&>(*data->type());
1786
+ std::shared_ptr<Array> dict;
1787
+ if (data->num_chunks() == 0) {
1788
+ // no dictionary values => create empty array
1789
+ RETURN_NOT_OK(this->AllocateNDArray(TRAITS::npy_type, 1));
1790
+ RETURN_NOT_OK(MakeZeroLengthArray(dict_type.value_type(), &dict));
1791
+ } else {
1792
+ DCHECK_EQ(IndexType::type_id, dict_type.index_type()->id());
1793
+ RETURN_NOT_OK(WriteIndices(*data, &dict));
1794
+ }
1795
+
1796
+ PyObject* pydict;
1797
+ RETURN_NOT_OK(ConvertArrayToPandas(this->options_, dict, nullptr, &pydict));
1798
+ dictionary_.reset(pydict);
1799
+ ordered_ = dict_type.ordered();
1800
+ return Status::OK();
1801
+ }
1802
+
1803
+ Status Write(std::shared_ptr<ChunkedArray> data, int64_t abs_placement,
1804
+ int64_t rel_placement) override {
1805
+ RETURN_NOT_OK(this->EnsurePlacementAllocated());
1806
+ RETURN_NOT_OK(TransferSingle(data, /*py_ref=*/nullptr));
1807
+ this->placement_data_[rel_placement] = abs_placement;
1808
+ return Status::OK();
1809
+ }
1810
+
1811
+ Status GetSeriesResult(PyObject** out) override {
1812
+ PyAcquireGIL lock;
1813
+
1814
+ PyObject* result = PyDict_New();
1815
+ RETURN_IF_PYERROR();
1816
+
1817
+ // Expected single array dictionary layout
1818
+ PyDict_SetItemString(result, "indices", this->block_arr_.obj());
1819
+ RETURN_IF_PYERROR();
1820
+ RETURN_NOT_OK(AddResultMetadata(result));
1821
+
1822
+ *out = result;
1823
+ return Status::OK();
1824
+ }
1825
+
1826
+ protected:
1827
+ Status AddResultMetadata(PyObject* result) override {
1828
+ PyDict_SetItemString(result, "dictionary", dictionary_.obj());
1829
+ PyObject* py_ordered = ordered_ ? Py_True : Py_False;
1830
+ Py_INCREF(py_ordered);
1831
+ PyDict_SetItemString(result, "ordered", py_ordered);
1832
+ return Status::OK();
1833
+ }
1834
+
1835
+ Status WriteIndicesUniform(const ChunkedArray& data) {
1836
+ RETURN_NOT_OK(this->AllocateNDArray(TRAITS::npy_type, 1));
1837
+ T* out_values = reinterpret_cast<T*>(this->block_data_);
1838
+
1839
+ for (int c = 0; c < data.num_chunks(); c++) {
1840
+ const auto& arr = checked_cast<const DictionaryArray&>(*data.chunk(c));
1841
+ const auto& indices = checked_cast<const ArrayType&>(*arr.indices());
1842
+ auto values = reinterpret_cast<const T*>(indices.raw_values());
1843
+
1844
+ RETURN_NOT_OK(CheckIndexBounds(*indices.data(), arr.dictionary()->length()));
1845
+ // Null is -1 in CategoricalBlock
1846
+ for (int i = 0; i < arr.length(); ++i) {
1847
+ if (indices.IsValid(i)) {
1848
+ *out_values++ = values[i];
1849
+ } else {
1850
+ *out_values++ = -1;
1851
+ }
1852
+ }
1853
+ }
1854
+ return Status::OK();
1855
+ }
1856
+
1857
+ Status WriteIndicesVarying(const ChunkedArray& data, std::shared_ptr<Array>* out_dict) {
1858
+ // Yield int32 indices to allow for dictionary outgrowing the current index
1859
+ // type
1860
+ RETURN_NOT_OK(this->AllocateNDArray(NPY_INT32, 1));
1861
+ auto out_values = reinterpret_cast<int32_t*>(this->block_data_);
1862
+
1863
+ const auto& dict_type = checked_cast<const DictionaryType&>(*data.type());
1864
+
1865
+ ARROW_ASSIGN_OR_RAISE(auto unifier, DictionaryUnifier::Make(dict_type.value_type(),
1866
+ this->options_.pool));
1867
+ for (int c = 0; c < data.num_chunks(); c++) {
1868
+ const auto& arr = checked_cast<const DictionaryArray&>(*data.chunk(c));
1869
+ const auto& indices = checked_cast<const ArrayType&>(*arr.indices());
1870
+ auto values = reinterpret_cast<const T*>(indices.raw_values());
1871
+
1872
+ std::shared_ptr<Buffer> transpose_buffer;
1873
+ RETURN_NOT_OK(unifier->Unify(*arr.dictionary(), &transpose_buffer));
1874
+
1875
+ auto transpose = reinterpret_cast<const int32_t*>(transpose_buffer->data());
1876
+ int64_t dict_length = arr.dictionary()->length();
1877
+
1878
+ RETURN_NOT_OK(CheckIndexBounds(*indices.data(), dict_length));
1879
+
1880
+ // Null is -1 in CategoricalBlock
1881
+ for (int i = 0; i < arr.length(); ++i) {
1882
+ if (indices.IsValid(i)) {
1883
+ *out_values++ = transpose[values[i]];
1884
+ } else {
1885
+ *out_values++ = -1;
1886
+ }
1887
+ }
1888
+ }
1889
+
1890
+ std::shared_ptr<DataType> unused_type;
1891
+ return unifier->GetResult(&unused_type, out_dict);
1892
+ }
1893
+
1894
+ Status WriteIndices(const ChunkedArray& data, std::shared_ptr<Array>* out_dict) {
1895
+ DCHECK_GT(data.num_chunks(), 0);
1896
+
1897
+ // Sniff the first chunk
1898
+ const auto& arr_first = checked_cast<const DictionaryArray&>(*data.chunk(0));
1899
+ const auto indices_first = std::static_pointer_cast<ArrayType>(arr_first.indices());
1900
+
1901
+ if (data.num_chunks() == 1 && indices_first->null_count() == 0) {
1902
+ RETURN_NOT_OK(
1903
+ CheckIndexBounds(*indices_first->data(), arr_first.dictionary()->length()));
1904
+
1905
+ PyObject* wrapped;
1906
+ npy_intp dims[1] = {static_cast<npy_intp>(this->num_rows_)};
1907
+ RETURN_NOT_OK(MakeNumPyView(indices_first, /*py_ref=*/nullptr, TRAITS::npy_type,
1908
+ /*ndim=*/1, dims, &wrapped));
1909
+ this->SetBlockData(wrapped);
1910
+ *out_dict = arr_first.dictionary();
1911
+ } else {
1912
+ RETURN_NOT_OK(this->CheckNotZeroCopyOnly(data));
1913
+ if (NeedDictionaryUnification(data)) {
1914
+ RETURN_NOT_OK(WriteIndicesVarying(data, out_dict));
1915
+ } else {
1916
+ RETURN_NOT_OK(WriteIndicesUniform(data));
1917
+ *out_dict = arr_first.dictionary();
1918
+ }
1919
+ }
1920
+ return Status::OK();
1921
+ }
1922
+
1923
+ OwnedRefNoGIL dictionary_;
1924
+ bool ordered_;
1925
+ bool needs_copy_;
1926
+ };
1927
+
1928
+ class ExtensionWriter : public PandasWriter {
1929
+ public:
1930
+ using PandasWriter::PandasWriter;
1931
+
1932
+ Status Allocate() override {
1933
+ // no-op
1934
+ return Status::OK();
1935
+ }
1936
+
1937
+ Status TransferSingle(std::shared_ptr<ChunkedArray> data, PyObject* py_ref) override {
1938
+ PyAcquireGIL lock;
1939
+ PyObject* py_array;
1940
+ py_array = wrap_chunked_array(data);
1941
+ py_array_.reset(py_array);
1942
+
1943
+ return Status::OK();
1944
+ }
1945
+
1946
+ Status CopyInto(std::shared_ptr<ChunkedArray> data, int64_t rel_placement) override {
1947
+ return TransferSingle(data, nullptr);
1948
+ }
1949
+
1950
+ Status GetDataFrameResult(PyObject** out) override {
1951
+ PyAcquireGIL lock;
1952
+ PyObject* result = PyDict_New();
1953
+ RETURN_IF_PYERROR();
1954
+
1955
+ PyDict_SetItemString(result, "py_array", py_array_.obj());
1956
+ PyDict_SetItemString(result, "placement", placement_arr_.obj());
1957
+ *out = result;
1958
+ return Status::OK();
1959
+ }
1960
+
1961
+ Status GetSeriesResult(PyObject** out) override {
1962
+ *out = py_array_.detach();
1963
+ return Status::OK();
1964
+ }
1965
+
1966
+ protected:
1967
+ OwnedRefNoGIL py_array_;
1968
+ };
1969
+
1970
+ Status MakeWriter(const PandasOptions& options, PandasWriter::type writer_type,
1971
+ const DataType& type, int64_t num_rows, int num_columns,
1972
+ std::shared_ptr<PandasWriter>* writer) {
1973
+ #define BLOCK_CASE(NAME, TYPE) \
1974
+ case PandasWriter::NAME: \
1975
+ *writer = std::make_shared<TYPE>(options, num_rows, num_columns); \
1976
+ break;
1977
+
1978
+ #define CATEGORICAL_CASE(TYPE) \
1979
+ case TYPE::type_id: \
1980
+ *writer = std::make_shared<CategoricalWriter<TYPE>>(options, num_rows); \
1981
+ break;
1982
+
1983
+ #define TZ_CASE(NAME, TYPE) \
1984
+ case PandasWriter::NAME: { \
1985
+ const auto& ts_type = checked_cast<const TimestampType&>(type); \
1986
+ *writer = std::make_shared<TYPE>(options, ts_type.timezone(), num_rows); \
1987
+ } break;
1988
+
1989
+ switch (writer_type) {
1990
+ case PandasWriter::CATEGORICAL: {
1991
+ const auto& index_type = *checked_cast<const DictionaryType&>(type).index_type();
1992
+ switch (index_type.id()) {
1993
+ CATEGORICAL_CASE(Int8Type);
1994
+ CATEGORICAL_CASE(Int16Type);
1995
+ CATEGORICAL_CASE(Int32Type);
1996
+ CATEGORICAL_CASE(Int64Type);
1997
+ case Type::UINT8:
1998
+ case Type::UINT16:
1999
+ case Type::UINT32:
2000
+ case Type::UINT64:
2001
+ return Status::TypeError(
2002
+ "Converting unsigned dictionary indices to pandas",
2003
+ " not yet supported, index type: ", index_type.ToString());
2004
+ default:
2005
+ // Unreachable
2006
+ DCHECK(false);
2007
+ break;
2008
+ }
2009
+ } break;
2010
+ case PandasWriter::EXTENSION:
2011
+ *writer = std::make_shared<ExtensionWriter>(options, num_rows, num_columns);
2012
+ break;
2013
+ BLOCK_CASE(OBJECT, ObjectWriter);
2014
+ BLOCK_CASE(UINT8, UInt8Writer);
2015
+ BLOCK_CASE(INT8, Int8Writer);
2016
+ BLOCK_CASE(UINT16, UInt16Writer);
2017
+ BLOCK_CASE(INT16, Int16Writer);
2018
+ BLOCK_CASE(UINT32, UInt32Writer);
2019
+ BLOCK_CASE(INT32, Int32Writer);
2020
+ BLOCK_CASE(UINT64, UInt64Writer);
2021
+ BLOCK_CASE(INT64, Int64Writer);
2022
+ BLOCK_CASE(HALF_FLOAT, Float16Writer);
2023
+ BLOCK_CASE(FLOAT, Float32Writer);
2024
+ BLOCK_CASE(DOUBLE, Float64Writer);
2025
+ BLOCK_CASE(BOOL, BoolWriter);
2026
+ BLOCK_CASE(DATETIME_DAY, DatetimeDayWriter);
2027
+ BLOCK_CASE(DATETIME_SECOND, DatetimeSecondWriter);
2028
+ BLOCK_CASE(DATETIME_MILLI, DatetimeMilliWriter);
2029
+ BLOCK_CASE(DATETIME_MICRO, DatetimeMicroWriter);
2030
+ BLOCK_CASE(DATETIME_NANO, DatetimeNanoWriter);
2031
+ BLOCK_CASE(TIMEDELTA_SECOND, TimedeltaSecondWriter);
2032
+ BLOCK_CASE(TIMEDELTA_MILLI, TimedeltaMilliWriter);
2033
+ BLOCK_CASE(TIMEDELTA_MICRO, TimedeltaMicroWriter);
2034
+ BLOCK_CASE(TIMEDELTA_NANO, TimedeltaNanoWriter);
2035
+ TZ_CASE(DATETIME_SECOND_TZ, DatetimeSecondTZWriter);
2036
+ TZ_CASE(DATETIME_MILLI_TZ, DatetimeMilliTZWriter);
2037
+ TZ_CASE(DATETIME_MICRO_TZ, DatetimeMicroTZWriter);
2038
+ TZ_CASE(DATETIME_NANO_TZ, DatetimeNanoTZWriter);
2039
+ default:
2040
+ return Status::NotImplemented("Unsupported block type");
2041
+ }
2042
+
2043
+ #undef BLOCK_CASE
2044
+ #undef CATEGORICAL_CASE
2045
+
2046
+ return Status::OK();
2047
+ }
2048
+
2049
+ static Status GetPandasWriterType(const ChunkedArray& data, const PandasOptions& options,
2050
+ PandasWriter::type* output_type) {
2051
+ #define INTEGER_CASE(NAME) \
2052
+ *output_type = \
2053
+ data.null_count() > 0 \
2054
+ ? options.integer_object_nulls ? PandasWriter::OBJECT : PandasWriter::DOUBLE \
2055
+ : PandasWriter::NAME; \
2056
+ break;
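+
+ // (editor's note) Effect of the macro above: an integer column containing
+ // nulls cannot be represented by a NumPy integer dtype, so it is emitted as a
+ // float64 block (nulls become NaN) unless integer_object_nulls requests
+ // Python objects instead.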
+
+ switch (data.type()->id()) {
+ case Type::BOOL:
+ *output_type = data.null_count() > 0 ? PandasWriter::OBJECT : PandasWriter::BOOL;
+ break;
+ case Type::UINT8:
+ INTEGER_CASE(UINT8);
+ case Type::INT8:
+ INTEGER_CASE(INT8);
+ case Type::UINT16:
+ INTEGER_CASE(UINT16);
+ case Type::INT16:
+ INTEGER_CASE(INT16);
+ case Type::UINT32:
+ INTEGER_CASE(UINT32);
+ case Type::INT32:
+ INTEGER_CASE(INT32);
+ case Type::UINT64:
+ INTEGER_CASE(UINT64);
+ case Type::INT64:
+ INTEGER_CASE(INT64);
+ case Type::HALF_FLOAT:
+ *output_type = PandasWriter::HALF_FLOAT;
+ break;
+ case Type::FLOAT:
+ *output_type = PandasWriter::FLOAT;
+ break;
+ case Type::DOUBLE:
+ *output_type = PandasWriter::DOUBLE;
+ break;
+ case Type::STRING: // fall through
+ case Type::LARGE_STRING: // fall through
+ case Type::BINARY: // fall through
+ case Type::LARGE_BINARY: // fall through
+ case Type::NA: // fall through
+ case Type::FIXED_SIZE_BINARY: // fall through
+ case Type::STRUCT: // fall through
+ case Type::TIME32: // fall through
+ case Type::TIME64: // fall through
+ case Type::DECIMAL128: // fall through
+ case Type::DECIMAL256: // fall through
+ case Type::INTERVAL_MONTH_DAY_NANO: // fall through
+ *output_type = PandasWriter::OBJECT;
+ break;
+ case Type::DATE32:
+ if (options.date_as_object) {
+ *output_type = PandasWriter::OBJECT;
+ } else if (options.coerce_temporal_nanoseconds) {
+ *output_type = PandasWriter::DATETIME_NANO;
+ } else if (options.to_numpy) {
+ // Numpy supports Day, but Pandas does not
+ *output_type = PandasWriter::DATETIME_DAY;
+ } else {
+ *output_type = PandasWriter::DATETIME_MILLI;
+ }
+ break;
+ case Type::DATE64:
+ if (options.date_as_object) {
+ *output_type = PandasWriter::OBJECT;
+ } else if (options.coerce_temporal_nanoseconds) {
+ *output_type = PandasWriter::DATETIME_NANO;
+ } else {
+ *output_type = PandasWriter::DATETIME_MILLI;
+ }
+ break;
+ case Type::TIMESTAMP: {
+ const auto& ts_type = checked_cast<const TimestampType&>(*data.type());
+ if (options.timestamp_as_object && ts_type.unit() != TimeUnit::NANO) {
+ // Nanoseconds are never out of bounds for pandas, so in that case
+ // we don't convert to object
+ *output_type = PandasWriter::OBJECT;
+ } else if (options.coerce_temporal_nanoseconds) {
+ if (!ts_type.timezone().empty()) {
+ *output_type = PandasWriter::DATETIME_NANO_TZ;
+ } else {
+ *output_type = PandasWriter::DATETIME_NANO;
+ }
+ } else {
+ if (!ts_type.timezone().empty()) {
+ switch (ts_type.unit()) {
+ case TimeUnit::SECOND:
+ *output_type = PandasWriter::DATETIME_SECOND_TZ;
+ break;
+ case TimeUnit::MILLI:
+ *output_type = PandasWriter::DATETIME_MILLI_TZ;
+ break;
+ case TimeUnit::MICRO:
+ *output_type = PandasWriter::DATETIME_MICRO_TZ;
+ break;
+ case TimeUnit::NANO:
+ *output_type = PandasWriter::DATETIME_NANO_TZ;
+ break;
+ }
+ } else {
+ switch (ts_type.unit()) {
+ case TimeUnit::SECOND:
+ *output_type = PandasWriter::DATETIME_SECOND;
+ break;
+ case TimeUnit::MILLI:
+ *output_type = PandasWriter::DATETIME_MILLI;
+ break;
+ case TimeUnit::MICRO:
+ *output_type = PandasWriter::DATETIME_MICRO;
+ break;
+ case TimeUnit::NANO:
+ *output_type = PandasWriter::DATETIME_NANO;
+ break;
+ }
+ }
+ }
+ } break;
+ case Type::DURATION: {
+ const auto& dur_type = checked_cast<const DurationType&>(*data.type());
+ if (options.coerce_temporal_nanoseconds) {
+ *output_type = PandasWriter::TIMEDELTA_NANO;
+ } else {
+ switch (dur_type.unit()) {
+ case TimeUnit::SECOND:
+ *output_type = PandasWriter::TIMEDELTA_SECOND;
+ break;
+ case TimeUnit::MILLI:
+ *output_type = PandasWriter::TIMEDELTA_MILLI;
+ break;
+ case TimeUnit::MICRO:
+ *output_type = PandasWriter::TIMEDELTA_MICRO;
+ break;
+ case TimeUnit::NANO:
+ *output_type = PandasWriter::TIMEDELTA_NANO;
+ break;
+ }
+ }
+ } break;
+ case Type::FIXED_SIZE_LIST:
+ case Type::LIST:
+ case Type::LARGE_LIST:
+ case Type::MAP: {
+ auto list_type = std::static_pointer_cast<BaseListType>(data.type());
+ if (!ListTypeSupported(*list_type->value_type())) {
+ return Status::NotImplemented("Not implemented type for Arrow list to pandas: ",
+ list_type->value_type()->ToString());
+ }
+ *output_type = PandasWriter::OBJECT;
+ } break;
+ case Type::DICTIONARY:
+ *output_type = PandasWriter::CATEGORICAL;
+ break;
+ case Type::EXTENSION:
+ *output_type = PandasWriter::EXTENSION;
+ break;
+ default:
+ return Status::NotImplemented(
+ "No known equivalent Pandas block for Arrow data of type ",
+ data.type()->ToString(), ".");
+ }
+ return Status::OK();
+ }
+
+ // Construct the exact pandas "BlockManager" memory layout
+ //
+ // * For each column determine the correct output pandas type
+ // * Allocate 2D blocks (ncols x nrows) for each distinct data type in output
+ // * Allocate block placement arrays
+ // * Write Arrow columns out into each slice of memory; populate block
+ // placement arrays as we go
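+ //
+ // Example (editor's note): a table with columns (a: int64, b: float64,
+ // c: float64), all without nulls, consolidates into one 1 x nrows int64 block
+ // for {a} and one 2 x nrows float64 block for {b, c}; the placement arrays
+ // map each block row back to its original column position.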
+ class PandasBlockCreator {
+ public:
+ using WriterMap = std::unordered_map<int, std::shared_ptr<PandasWriter>>;
+
+ explicit PandasBlockCreator(const PandasOptions& options, FieldVector fields,
+ ChunkedArrayVector arrays)
+ : options_(options), fields_(std::move(fields)), arrays_(std::move(arrays)) {
+ num_columns_ = static_cast<int>(arrays_.size());
+ if (num_columns_ > 0) {
+ num_rows_ = arrays_[0]->length();
+ }
+ column_block_placement_.resize(num_columns_);
+ }
+ virtual ~PandasBlockCreator() = default;
+
+ virtual Status Convert(PyObject** out) = 0;
+
+ Status AppendBlocks(const WriterMap& blocks, PyObject* list) {
+ for (const auto& it : blocks) {
+ PyObject* item;
+ RETURN_NOT_OK(it.second->GetDataFrameResult(&item));
+ if (PyList_Append(list, item) < 0) {
+ RETURN_IF_PYERROR();
+ }
+
+ // ARROW-1017; PyList_Append increments object refcount
+ Py_DECREF(item);
+ }
+ return Status::OK();
+ }
+
+ protected:
+ PandasOptions options_;
+
+ FieldVector fields_;
+ ChunkedArrayVector arrays_;
+ int num_columns_;
+ int64_t num_rows_;
+
+ // column num -> relative placement within internal block
+ std::vector<int> column_block_placement_;
+ };
+
+ // Helper function for extension chunked arrays:
+ // constructs a storage chunked array from an extension chunked array
+ std::shared_ptr<ChunkedArray> GetStorageChunkedArray(std::shared_ptr<ChunkedArray> arr) {
+ auto value_type = checked_cast<const ExtensionType&>(*arr->type()).storage_type();
+ ArrayVector storage_arrays;
+ for (int c = 0; c < arr->num_chunks(); c++) {
+ const auto& arr_ext = checked_cast<const ExtensionArray&>(*arr->chunk(c));
+ storage_arrays.emplace_back(arr_ext.storage());
+ }
+ return std::make_shared<ChunkedArray>(std::move(storage_arrays), value_type);
+ }
+
+ class ConsolidatedBlockCreator : public PandasBlockCreator {
+ public:
+ using PandasBlockCreator::PandasBlockCreator;
+
+ Status Convert(PyObject** out) override {
+ column_types_.resize(num_columns_);
+ RETURN_NOT_OK(CreateBlocks());
+ RETURN_NOT_OK(WriteTableToBlocks());
+ PyAcquireGIL lock;
+
+ PyObject* result = PyList_New(0);
+ RETURN_IF_PYERROR();
+
+ RETURN_NOT_OK(AppendBlocks(blocks_, result));
+ RETURN_NOT_OK(AppendBlocks(singleton_blocks_, result));
+
+ *out = result;
+ return Status::OK();
+ }
+
+ Status GetBlockType(int column_index, PandasWriter::type* out) {
+ if (options_.extension_columns.count(fields_[column_index]->name())) {
+ *out = PandasWriter::EXTENSION;
+ return Status::OK();
+ } else {
+ // In case of an extension array default to the storage type
+ if (arrays_[column_index]->type()->id() == Type::EXTENSION) {
+ arrays_[column_index] = GetStorageChunkedArray(arrays_[column_index]);
+ }
+ return GetPandasWriterType(*arrays_[column_index], options_, out);
+ }
+ }
+
+ Status CreateBlocks() {
+ for (int i = 0; i < num_columns_; ++i) {
+ const DataType& type = *arrays_[i]->type();
+ PandasWriter::type output_type;
+ RETURN_NOT_OK(GetBlockType(i, &output_type));
+
+ int block_placement = 0;
+ std::shared_ptr<PandasWriter> writer;
+ if (output_type == PandasWriter::CATEGORICAL ||
+ output_type == PandasWriter::DATETIME_SECOND_TZ ||
+ output_type == PandasWriter::DATETIME_MILLI_TZ ||
+ output_type == PandasWriter::DATETIME_MICRO_TZ ||
+ output_type == PandasWriter::DATETIME_NANO_TZ ||
+ output_type == PandasWriter::EXTENSION) {
+ RETURN_NOT_OK(MakeWriter(options_, output_type, type, num_rows_,
+ /*num_columns=*/1, &writer));
+ singleton_blocks_[i] = writer;
+ } else {
+ auto it = block_sizes_.find(output_type);
+ if (it != block_sizes_.end()) {
+ block_placement = it->second;
+ // Increment count
+ ++it->second;
+ } else {
+ // Add key to map
+ block_sizes_[output_type] = 1;
+ }
+ }
+ column_types_[i] = output_type;
+ column_block_placement_[i] = block_placement;
+ }
+
+ // Create normal non-categorical blocks
+ for (const auto& it : this->block_sizes_) {
+ PandasWriter::type output_type = static_cast<PandasWriter::type>(it.first);
+ std::shared_ptr<PandasWriter> block;
+ RETURN_NOT_OK(MakeWriter(this->options_, output_type, /*unused*/ *null(), num_rows_,
+ it.second, &block));
+ this->blocks_[output_type] = block;
+ }
+ return Status::OK();
+ }
+
+ Status GetWriter(int i, std::shared_ptr<PandasWriter>* block) {
+ PandasWriter::type output_type = this->column_types_[i];
+ switch (output_type) {
+ case PandasWriter::CATEGORICAL:
+ case PandasWriter::DATETIME_SECOND_TZ:
+ case PandasWriter::DATETIME_MILLI_TZ:
+ case PandasWriter::DATETIME_MICRO_TZ:
+ case PandasWriter::DATETIME_NANO_TZ:
+ case PandasWriter::EXTENSION: {
+ auto it = this->singleton_blocks_.find(i);
+ if (it == this->singleton_blocks_.end()) {
+ return Status::KeyError("No block allocated");
+ }
+ *block = it->second;
+ } break;
+ default:
+ auto it = this->blocks_.find(output_type);
+ if (it == this->blocks_.end()) {
+ return Status::KeyError("No block allocated");
+ }
+ *block = it->second;
+ break;
+ }
+ return Status::OK();
+ }
+
+ Status WriteTableToBlocks() {
+ auto WriteColumn = [this](int i) {
+ std::shared_ptr<PandasWriter> block;
+ RETURN_NOT_OK(this->GetWriter(i, &block));
+ // ARROW-3789 Use std::move on the array to permit self-destructing
+ return block->Write(std::move(arrays_[i]), i, this->column_block_placement_[i]);
+ };
+
+ return OptionalParallelFor(options_.use_threads, num_columns_, WriteColumn);
+ }
+
+ private:
+ // column num -> block type id
+ std::vector<PandasWriter::type> column_types_;
+
+ // block type -> type count
+ std::unordered_map<int, int> block_sizes_;
+ std::unordered_map<int, const DataType*> block_types_;
+
+ // block type -> block
+ WriterMap blocks_;
+
+ WriterMap singleton_blocks_;
+ };
+
+ /// \brief Create blocks for pandas.DataFrame block manager using one block per
+ /// column strategy. This permits some zero-copy optimizations as well as the
+ /// ability for the table to "self-destruct" if selected by the user.
+ class SplitBlockCreator : public PandasBlockCreator {
+ public:
+ using PandasBlockCreator::PandasBlockCreator;
+
+ Status GetWriter(int i, std::shared_ptr<PandasWriter>* writer) {
+ PandasWriter::type output_type = PandasWriter::OBJECT;
+ const DataType& type = *arrays_[i]->type();
+ if (options_.extension_columns.count(fields_[i]->name())) {
+ output_type = PandasWriter::EXTENSION;
+ } else {
+ // Null count needed to determine output type
+ RETURN_NOT_OK(GetPandasWriterType(*arrays_[i], options_, &output_type));
+ }
+ return MakeWriter(this->options_, output_type, type, num_rows_, 1, writer);
+ }
+
+ Status Convert(PyObject** out) override {
+ PyAcquireGIL lock;
+
+ PyObject* result = PyList_New(0);
+ RETURN_IF_PYERROR();
+
+ for (int i = 0; i < num_columns_; ++i) {
+ std::shared_ptr<PandasWriter> writer;
+ RETURN_NOT_OK(GetWriter(i, &writer));
+ // ARROW-3789 Use std::move on the array to permit self-destructing
+ RETURN_NOT_OK(writer->Write(std::move(arrays_[i]), i, /*rel_placement=*/0));
+
+ PyObject* item;
+ RETURN_NOT_OK(writer->GetDataFrameResult(&item));
+ if (PyList_Append(result, item) < 0) {
+ RETURN_IF_PYERROR();
+ }
+ // PyList_Append increments object refcount
+ Py_DECREF(item);
+ }
+
+ *out = result;
+ return Status::OK();
+ }
+
+ private:
+ std::vector<std::shared_ptr<PandasWriter>> writers_;
+ };
+
+ Status ConvertCategoricals(const PandasOptions& options, ChunkedArrayVector* arrays,
+ FieldVector* fields) {
+ std::vector<int> columns_to_encode;
+
+ // For Categorical conversions
+ auto EncodeColumn = [&](int j) {
+ int i = columns_to_encode[j];
+ if (options.zero_copy_only) {
+ return Status::Invalid("Need to dictionary encode a column, but ",
+ "only zero-copy conversions allowed");
+ }
+ compute::ExecContext ctx(options.pool);
+ ARROW_ASSIGN_OR_RAISE(
+ Datum out, DictionaryEncode((*arrays)[i],
+ compute::DictionaryEncodeOptions::Defaults(), &ctx));
+ (*arrays)[i] = out.chunked_array();
+ (*fields)[i] = (*fields)[i]->WithType((*arrays)[i]->type());
+ return Status::OK();
+ };
+
+ if (!options.categorical_columns.empty()) {
+ for (int i = 0; i < static_cast<int>(arrays->size()); i++) {
+ if ((*arrays)[i]->type()->id() != Type::DICTIONARY &&
+ options.categorical_columns.count((*fields)[i]->name())) {
+ columns_to_encode.push_back(i);
+ }
+ }
+ }
+ if (options.strings_to_categorical) {
+ for (int i = 0; i < static_cast<int>(arrays->size()); i++) {
+ if (is_base_binary_like((*arrays)[i]->type()->id())) {
+ columns_to_encode.push_back(i);
+ }
+ }
+ }
+ return OptionalParallelFor(options.use_threads,
+ static_cast<int>(columns_to_encode.size()), EncodeColumn);
+ }
+
+ } // namespace
+
+ Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr<Array> arr,
+ PyObject* py_ref, PyObject** out) {
+ return ConvertChunkedArrayToPandas(
+ options, std::make_shared<ChunkedArray>(std::move(arr)), py_ref, out);
+ }
+
+ Status ConvertChunkedArrayToPandas(const PandasOptions& options,
+ std::shared_ptr<ChunkedArray> arr, PyObject* py_ref,
+ PyObject** out) {
+ if (options.decode_dictionaries && arr->type()->id() == Type::DICTIONARY) {
+ const auto& dense_type =
+ checked_cast<const DictionaryType&>(*arr->type()).value_type();
+ RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &arr));
+ DCHECK_NE(arr->type()->id(), Type::DICTIONARY);
+
+ // The original Python DictionaryArray won't own the memory anymore,
+ // as we actually built a new array when we decoded the DictionaryArray;
+ // thus let the final resulting numpy array own the memory through a Capsule
+ py_ref = nullptr;
+ }
+
+ if (options.strings_to_categorical && is_base_binary_like(arr->type()->id())) {
+ if (options.zero_copy_only) {
+ return Status::Invalid("Need to dictionary encode a column, but ",
+ "only zero-copy conversions allowed");
+ }
+ compute::ExecContext ctx(options.pool);
+ ARROW_ASSIGN_OR_RAISE(
+ Datum out,
+ DictionaryEncode(arr, compute::DictionaryEncodeOptions::Defaults(), &ctx));
+ arr = out.chunked_array();
+ }
+
+ PandasOptions modified_options = options;
+ modified_options.strings_to_categorical = false;
+
+ // ARROW-7596: We permit the hybrid Series/DataFrame code path to do zero copy
+ // optimizations that we do not allow in the default case when converting
2530
+ // Table->DataFrame
2531
+ modified_options.allow_zero_copy_blocks = true;
2532
+
2533
+ // In case of an extension array default to the storage type
2534
+ if (arr->type()->id() == Type::EXTENSION) {
2535
+ arr = GetStorageChunkedArray(arr);
2536
+ }
2537
+
2538
+ PandasWriter::type output_type;
2539
+ RETURN_NOT_OK(GetPandasWriterType(*arr, modified_options, &output_type));
2540
+ if (options.decode_dictionaries) {
2541
+ DCHECK_NE(output_type, PandasWriter::CATEGORICAL);
2542
+ }
2543
+
2544
+ std::shared_ptr<PandasWriter> writer;
2545
+ RETURN_NOT_OK(MakeWriter(modified_options, output_type, *arr->type(), arr->length(),
2546
+ /*num_columns=*/1, &writer));
2547
+ RETURN_NOT_OK(writer->TransferSingle(std::move(arr), py_ref));
2548
+ return writer->GetSeriesResult(out);
2549
+ }
2550
+
2551
+ Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr<Table> table,
2552
+ PyObject** out) {
2553
+ ChunkedArrayVector arrays = table->columns();
2554
+ FieldVector fields = table->fields();
2555
+
2556
+ // ARROW-3789: allow "self-destructing" by releasing references to columns as
2557
+ // we convert them to pandas
2558
+ table = nullptr;
2559
+
2560
+ RETURN_NOT_OK(ConvertCategoricals(options, &arrays, &fields));
2561
+
2562
+ PandasOptions modified_options = options;
2563
+ modified_options.strings_to_categorical = false;
2564
+ modified_options.categorical_columns.clear();
2565
+
2566
+ if (options.split_blocks) {
2567
+ modified_options.allow_zero_copy_blocks = true;
2568
+ SplitBlockCreator helper(modified_options, std::move(fields), std::move(arrays));
2569
+ return helper.Convert(out);
2570
+ } else {
2571
+ ConsolidatedBlockCreator helper(modified_options, std::move(fields),
2572
+ std::move(arrays));
2573
+ return helper.Convert(out);
2574
+ }
2575
+ }
2576
+
2577
+ } // namespace py
2578
+ } // namespace arrow
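// Editorial sketch (not part of this commit): one hedged way to drive the
// ConvertTableToPandas entry point defined above; the helper name
// TableToBlocks and the option choices here are illustrative assumptions.
#include "arrow/python/arrow_to_pandas.h"

arrow::Status TableToBlocks(std::shared_ptr<arrow::Table> table, PyObject** out) {
  arrow::py::PandasOptions options;
  options.use_threads = true;    // parallelize per-column conversion
  options.split_blocks = false;  // consolidated blocks: one per dtype
  // Moving the shared_ptr lets the ARROW-3789 "self-destruct" path apply.
  return arrow::py::ConvertTableToPandas(options, std::move(table), out);
}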
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.h ADDED
@@ -0,0 +1,146 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between pandas's NumPy-based data representation
19
+ // and Arrow data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <memory>
26
+ #include <string>
27
+ #include <unordered_set>
28
+
29
+ #include "arrow/memory_pool.h"
30
+ #include "arrow/python/visibility.h"
31
+
32
+ namespace arrow {
33
+
34
+ class Array;
35
+ class ChunkedArray;
36
+ class Column;
37
+ class DataType;
38
+ class MemoryPool;
39
+ class Status;
40
+ class Table;
41
+
42
+ namespace py {
43
+
44
+ enum class MapConversionType {
45
+ DEFAULT, // convert arrow maps to assoc lists (list of key-value tuples) in Pandas
46
+ LOSSY, // report warnings when lossiness is encountered due to duplicate keys
47
+ STRICT_, // raise a Python exception when lossiness is encountered due to duplicate
48
+ // keys
49
+ };
50
+
51
+ struct PandasOptions {
52
+ /// arrow::MemoryPool to use for memory allocations
53
+ MemoryPool* pool = default_memory_pool();
54
+
55
+ /// If true, we will convert all string columns to categoricals
56
+ bool strings_to_categorical = false;
57
+ bool zero_copy_only = false;
58
+ bool integer_object_nulls = false;
59
+ bool date_as_object = false;
60
+ bool timestamp_as_object = false;
61
+ bool use_threads = false;
62
+
63
+ /// Coerce all date and timestamp to datetime64[ns]
64
+ bool coerce_temporal_nanoseconds = false;
65
+
66
+ /// Used to maintain backwards compatibility for
67
+ /// timezone bugs (see ARROW-9528). Should be removed
68
+ /// after Arrow 2.0 release.
69
+ bool ignore_timezone = false;
70
+
71
+ /// \brief If true, do not create duplicate PyObject versions of equal
72
+ /// objects. This only applies to immutable objects like strings or datetime
73
+ /// objects
74
+ bool deduplicate_objects = false;
75
+
76
+ /// \brief For certain data types, a cast is needed in order to store the
77
+ /// data in a pandas DataFrame or Series (e.g. timestamps are always stored
78
+ /// as nanoseconds in pandas). This option controls whether it is a safe
79
+ /// cast or not.
80
+ bool safe_cast = true;
81
+
82
+ /// \brief If true, create one block per column rather than consolidated
83
+ /// blocks (1 per data type). Do zero-copy wrapping when there are no
84
+ /// nulls. pandas currently will consolidate the blocks on its own, causing
85
+ /// increased memory use, so keep this in mind if you are working on a
86
+ /// memory-constrained situation.
87
+ bool split_blocks = false;
88
+
89
+ /// \brief If true, allow non-writable zero-copy views to be created for
90
+ /// single column blocks. This option is also used to provide zero copy for
91
+ /// Series data
92
+ bool allow_zero_copy_blocks = false;
93
+
94
+ /// \brief If true, attempt to deallocate buffers in passed Arrow object if
95
+ /// it is the only remaining shared_ptr copy of it. See ARROW-3789 for
96
+ /// original context for this feature. Only currently implemented for Table
97
+ /// conversions
98
+ bool self_destruct = false;
99
+
100
+ /// \brief The default behavior (DEFAULT), is to convert Arrow Map arrays to
101
+ /// Python association lists (list-of-tuples) in the same order as the Arrow
102
+ /// Map, as in [(key1, value1), (key2, value2), ...]
103
+ /// If LOSSY or STRICT, convert Arrow Map arrays to native Python dicts.
104
+ /// This can change the ordering of (key, value) pairs, and will deduplicate
105
+ /// multiple keys, resulting in a possible loss of data.
106
+ /// If 'lossy', this key deduplication results in a warning printed
107
+ /// when detected. If 'strict', this instead results in an exception
108
+ /// being raised when detected.
109
+ MapConversionType maps_as_pydicts = MapConversionType::DEFAULT;
110
+
111
+ // Used internally for nested arrays.
112
+ bool decode_dictionaries = false;
113
+
114
+ // Columns that should be casted to categorical
115
+ std::unordered_set<std::string> categorical_columns;
116
+
117
+ // Columns that should be passed through to be converted to
118
+ // ExtensionArray/Block
119
+ std::unordered_set<std::string> extension_columns;
120
+
121
+ // Used internally to distinguish between to_numpy() and to_pandas() when
122
+ // the expected output differs
123
+ bool to_numpy = false;
124
+ };
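+ // Editorial sketch (not part of this commit): populating PandasOptions for
+ // a categorical conversion; the column names below are hypothetical.
+ //   arrow::py::PandasOptions opts;
+ //   opts.strings_to_categorical = true;
+ //   opts.maps_as_pydicts = arrow::py::MapConversionType::LOSSY;  // warn on duplicate keys
+ //   opts.categorical_columns = {"city", "state"};  // hypothetical column names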
125
+
126
+ ARROW_PYTHON_EXPORT
127
+ Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr<Array> arr,
128
+ PyObject* py_ref, PyObject** out);
129
+
130
+ ARROW_PYTHON_EXPORT
131
+ Status ConvertChunkedArrayToPandas(const PandasOptions& options,
132
+ std::shared_ptr<ChunkedArray> col, PyObject* py_ref,
133
+ PyObject** out);
134
+
135
+ // Convert a whole table as efficiently as possible to a pandas.DataFrame.
136
+ //
137
+ // The returned Python object is a list of tuples consisting of the exact 2D
138
+ // BlockManager structure of the pandas.DataFrame used as of pandas 0.19.x.
139
+ //
140
+ // tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2])
141
+ ARROW_PYTHON_EXPORT
142
+ Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr<Table> table,
143
+ PyObject** out);
144
+
145
+ } // namespace py
146
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_python_internal.h ADDED
@@ -0,0 +1,49 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/array.h"
21
+ #include "arrow/python/platform.h"
22
+
23
+ namespace arrow {
24
+ namespace py {
25
+ namespace internal {
26
+ // TODO(ARROW-12976): See if we can refactor Pandas ObjectWriter logic
27
+ // to the .cc file and move this there as well if we can.
28
+
29
+ // Converts an array to a sequence of Python objects.
30
+ template <typename ArrayType, typename WriteValue, typename Assigner>
31
+ inline Status WriteArrayObjects(const ArrayType& arr, WriteValue&& write_func,
32
+ Assigner out_values) {
33
+ // TODO(ARROW-12976): Use visitor here?
34
+ const bool has_nulls = arr.null_count() > 0;
35
+ for (int64_t i = 0; i < arr.length(); ++i) {
36
+ if (has_nulls && arr.IsNull(i)) {
37
+ Py_INCREF(Py_None);
38
+ *out_values = Py_None;
39
+ } else {
40
+ RETURN_NOT_OK(write_func(arr.GetView(i), out_values));
41
+ }
42
+ ++out_values;
43
+ }
44
+ return Status::OK();
45
+ }
46
+
47
+ } // namespace internal
48
+ } // namespace py
49
+ } // namespace arrow
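// Editorial sketch (not part of this commit): one hedged use of
// WriteArrayObjects, filling a preallocated PyObject* buffer from an
// arrow::Int64Array; Int64ToPyObjects is an illustrative name.
arrow::Status Int64ToPyObjects(const arrow::Int64Array& arr, PyObject** out) {
  // GetView(i) yields int64_t for Int64Array; nulls become Py_None above.
  auto write_int = [](int64_t value, PyObject** slot) {
    *slot = PyLong_FromLongLong(value);
    return *slot != nullptr ? arrow::Status::OK() : arrow::py::ConvertPyError();
  };
  return arrow::py::internal::WriteArrayObjects(arr, write_int, out);
}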
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/async.h ADDED
@@ -0,0 +1,60 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <utility>
21
+
22
+ #include "arrow/python/common.h"
23
+ #include "arrow/status.h"
24
+ #include "arrow/util/future.h"
25
+
26
+ namespace arrow::py {
27
+
28
+ /// \brief Bind a Python callback to an arrow::Future.
29
+ ///
30
+ /// If the Future finishes successfully, py_wrapper is called with its
31
+ /// result value and should return a PyObject*. If py_wrapper is successful,
32
+ /// py_cb is called with its return value.
33
+ ///
34
+ /// If either the Future or py_wrapper fails, py_cb is called with the
35
+ /// associated Python exception.
36
+ ///
37
+ /// \param future The future to bind to.
38
+ /// \param py_cb The Python callback function. Will be passed the result of
39
+ /// py_wrapper, or a Python exception if the future failed or one was
40
+ /// raised by py_wrapper.
41
+ /// \param py_wrapper A function (likely defined in Cython) to convert the C++
42
+ /// result of the future to a Python object.
43
+ template <typename T, typename PyWrapper = PyObject* (*)(T)>
44
+ void BindFuture(Future<T> future, PyObject* py_cb, PyWrapper py_wrapper) {
45
+ Py_INCREF(py_cb);
46
+ OwnedRefNoGIL cb_ref(py_cb);
47
+
48
+ auto future_cb = [cb_ref = std::move(cb_ref),
49
+ py_wrapper = std::move(py_wrapper)](Result<T> result) {
50
+ SafeCallIntoPythonVoid([&]() {
51
+ OwnedRef py_value_or_exc{WrapResult(std::move(result), std::move(py_wrapper))};
52
+ Py_XDECREF(
53
+ PyObject_CallFunctionObjArgs(cb_ref.obj(), py_value_or_exc.obj(), NULLPTR));
54
+ ARROW_WARN_NOT_OK(CheckPyError(), "Internal error in async call");
55
+ });
56
+ };
57
+ future.AddCallback(std::move(future_cb));
58
+ }
59
+
60
+ } // namespace arrow::py
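// Editorial sketch (not part of this commit): binding a Python callback to a
// Future<int64_t> via BindFuture; WrapInt and AttachCallback are
// illustrative names.
static PyObject* WrapInt(int64_t v) { return PyLong_FromLongLong(v); }

void AttachCallback(arrow::Future<int64_t> fut, PyObject* py_cb) {
  // py_cb receives the wrapped value, or the Python exception if the
  // future (or WrapInt) fails.
  arrow::py::BindFuture(std::move(fut), py_cb, &WrapInt);
}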
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.cc ADDED
@@ -0,0 +1,38 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include "arrow/python/benchmark.h"
19
+ #include "arrow/python/helpers.h"
20
+
21
+ namespace arrow {
22
+ namespace py {
23
+ namespace benchmark {
24
+
25
+ void Benchmark_PandasObjectIsNull(PyObject* list) {
26
+ if (!PyList_CheckExact(list)) {
27
+ PyErr_SetString(PyExc_TypeError, "expected a list");
28
+ return;
29
+ }
30
+ Py_ssize_t i, n = PyList_GET_SIZE(list);
31
+ for (i = 0; i < n; i++) {
32
+ internal::PandasObjectIsNull(PyList_GET_ITEM(list, i));
33
+ }
34
+ }
35
+
36
+ } // namespace benchmark
37
+ } // namespace py
38
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.h ADDED
@@ -0,0 +1,36 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h"
21
+
22
+ #include "arrow/python/visibility.h"
23
+
24
+ namespace arrow {
25
+ namespace py {
26
+ namespace benchmark {
27
+
28
+ // Micro-benchmark routines for use from ASV
29
+
30
+ // Run PandasObjectIsNull() once over every object in *list*
31
+ ARROW_PYTHON_EXPORT
32
+ void Benchmark_PandasObjectIsNull(PyObject* list);
33
+
34
+ } // namespace benchmark
35
+ } // namespace py
36
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.cc ADDED
@@ -0,0 +1,203 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include "arrow/python/common.h"
19
+
20
+ #include <cstdlib>
21
+ #include <mutex>
22
+ #include <string>
23
+
24
+ #include "arrow/memory_pool.h"
25
+ #include "arrow/status.h"
26
+ #include "arrow/util/checked_cast.h"
27
+ #include "arrow/util/logging.h"
28
+
29
+ #include "arrow/python/helpers.h"
30
+
31
+ namespace arrow {
32
+
33
+ using internal::checked_cast;
34
+
35
+ namespace py {
36
+
37
+ static std::mutex memory_pool_mutex;
38
+ static MemoryPool* default_python_pool = nullptr;
39
+
40
+ void set_default_memory_pool(MemoryPool* pool) {
41
+ std::lock_guard<std::mutex> guard(memory_pool_mutex);
42
+ default_python_pool = pool;
43
+ }
44
+
45
+ MemoryPool* get_memory_pool() {
46
+ std::lock_guard<std::mutex> guard(memory_pool_mutex);
47
+ if (default_python_pool) {
48
+ return default_python_pool;
49
+ } else {
50
+ return default_memory_pool();
51
+ }
52
+ }
53
+
54
+ // ----------------------------------------------------------------------
55
+ // PythonErrorDetail
56
+
57
+ namespace {
58
+
59
+ const char kErrorDetailTypeId[] = "arrow::py::PythonErrorDetail";
60
+
61
+ // Try to match the Python exception type with an appropriate Status code
62
+ StatusCode MapPyError(PyObject* exc_type) {
63
+ StatusCode code;
64
+
65
+ if (PyErr_GivenExceptionMatches(exc_type, PyExc_MemoryError)) {
66
+ code = StatusCode::OutOfMemory;
67
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_IndexError)) {
68
+ code = StatusCode::IndexError;
69
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_KeyError)) {
70
+ code = StatusCode::KeyError;
71
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_TypeError)) {
72
+ code = StatusCode::TypeError;
73
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_ValueError) ||
74
+ PyErr_GivenExceptionMatches(exc_type, PyExc_OverflowError)) {
75
+ code = StatusCode::Invalid;
76
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_EnvironmentError)) {
77
+ code = StatusCode::IOError;
78
+ } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_NotImplementedError)) {
79
+ code = StatusCode::NotImplemented;
80
+ } else {
81
+ code = StatusCode::UnknownError;
82
+ }
83
+ return code;
84
+ }
85
+
86
+ // PythonErrorDetail indicates a Python exception was raised.
87
+ class PythonErrorDetail : public StatusDetail {
88
+ public:
89
+ const char* type_id() const override { return kErrorDetailTypeId; }
90
+
91
+ std::string ToString() const override {
92
+ // This is simple enough not to need the GIL
93
+ const auto ty = reinterpret_cast<const PyTypeObject*>(exc_type_.obj());
94
+ // XXX Should we also print traceback?
95
+ return std::string("Python exception: ") + ty->tp_name;
96
+ }
97
+
98
+ void RestorePyError() const {
99
+ Py_INCREF(exc_type_.obj());
100
+ Py_INCREF(exc_value_.obj());
101
+ Py_INCREF(exc_traceback_.obj());
102
+ PyErr_Restore(exc_type_.obj(), exc_value_.obj(), exc_traceback_.obj());
103
+ }
104
+
105
+ PyObject* exc_type() const { return exc_type_.obj(); }
106
+
107
+ PyObject* exc_value() const { return exc_value_.obj(); }
108
+
109
+ static std::shared_ptr<PythonErrorDetail> FromPyError() {
110
+ PyObject* exc_type = nullptr;
111
+ PyObject* exc_value = nullptr;
112
+ PyObject* exc_traceback = nullptr;
113
+
114
+ PyErr_Fetch(&exc_type, &exc_value, &exc_traceback);
115
+ PyErr_NormalizeException(&exc_type, &exc_value, &exc_traceback);
116
+ ARROW_CHECK(exc_type)
117
+ << "PythonErrorDetail::FromPyError called without a Python error set";
118
+ DCHECK(PyType_Check(exc_type));
119
+ DCHECK(exc_value); // Ensured by PyErr_NormalizeException, double-check
120
+ if (exc_traceback == nullptr) {
121
+ // Needed by PyErr_Restore()
122
+ Py_INCREF(Py_None);
123
+ exc_traceback = Py_None;
124
+ }
125
+
126
+ std::shared_ptr<PythonErrorDetail> detail(new PythonErrorDetail);
127
+ detail->exc_type_.reset(exc_type);
128
+ detail->exc_value_.reset(exc_value);
129
+ detail->exc_traceback_.reset(exc_traceback);
130
+ return detail;
131
+ }
132
+
133
+ protected:
134
+ PythonErrorDetail() = default;
135
+
136
+ OwnedRefNoGIL exc_type_, exc_value_, exc_traceback_;
137
+ };
138
+
139
+ } // namespace
140
+
141
+ // ----------------------------------------------------------------------
142
+ // Python exception <-> Status
143
+
144
+ Status ConvertPyError(StatusCode code) {
145
+ auto detail = PythonErrorDetail::FromPyError();
146
+ if (code == StatusCode::UnknownError) {
147
+ code = MapPyError(detail->exc_type());
148
+ }
149
+
150
+ std::string message;
151
+ RETURN_NOT_OK(internal::PyObject_StdStringStr(detail->exc_value(), &message));
152
+ return Status(code, message, detail);
153
+ }
154
+
155
+ bool IsPyError(const Status& status) {
156
+ if (status.ok()) {
157
+ return false;
158
+ }
159
+ auto detail = status.detail();
160
+ bool result = detail != nullptr && detail->type_id() == kErrorDetailTypeId;
161
+ return result;
162
+ }
163
+
164
+ void RestorePyError(const Status& status) {
165
+ ARROW_CHECK(IsPyError(status));
166
+ const auto& detail = checked_cast<const PythonErrorDetail&>(*status.detail());
167
+ detail.RestorePyError();
168
+ }
169
+
170
+ // ----------------------------------------------------------------------
171
+ // PyBuffer
172
+
173
+ PyBuffer::PyBuffer() : Buffer(nullptr, 0) {}
174
+
175
+ Status PyBuffer::Init(PyObject* obj) {
176
+ if (!PyObject_GetBuffer(obj, &py_buf_, PyBUF_ANY_CONTIGUOUS)) {
177
+ data_ = reinterpret_cast<const uint8_t*>(py_buf_.buf);
178
+ ARROW_CHECK_NE(data_, nullptr) << "Null pointer in Py_buffer";
179
+ size_ = py_buf_.len;
180
+ capacity_ = py_buf_.len;
181
+ is_mutable_ = !py_buf_.readonly;
182
+ return Status::OK();
183
+ } else {
184
+ return ConvertPyError(StatusCode::Invalid);
185
+ }
186
+ }
187
+
188
+ Result<std::shared_ptr<Buffer>> PyBuffer::FromPyObject(PyObject* obj) {
189
+ PyBuffer* buf = new PyBuffer();
190
+ std::shared_ptr<Buffer> res(buf);
191
+ RETURN_NOT_OK(buf->Init(obj));
192
+ return res;
193
+ }
194
+
195
+ PyBuffer::~PyBuffer() {
196
+ if (data_ != nullptr) {
197
+ PyAcquireGIL lock;
198
+ PyBuffer_Release(&py_buf_);
199
+ }
200
+ }
201
+
202
+ } // namespace py
203
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.h ADDED
@@ -0,0 +1,458 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <optional>
23
+ #include <utility>
24
+
25
+ #include "arrow/buffer.h"
26
+ #include "arrow/python/pyarrow.h"
27
+ #include "arrow/python/visibility.h"
28
+ #include "arrow/result.h"
29
+ #include "arrow/util/macros.h"
30
+
31
+ namespace arrow {
32
+
33
+ class MemoryPool;
34
+ template <class T>
35
+ class Result;
36
+
37
+ namespace py {
38
+
39
+ // Convert current Python error to a Status. The Python error state is cleared
40
+ // and can be restored with RestorePyError().
41
+ ARROW_PYTHON_EXPORT Status ConvertPyError(StatusCode code = StatusCode::UnknownError);
42
+ // Query whether the given Status is a Python error (as wrapped by ConvertPyError()).
43
+ ARROW_PYTHON_EXPORT bool IsPyError(const Status& status);
44
+ // Restore a Python error wrapped in a Status.
45
+ ARROW_PYTHON_EXPORT void RestorePyError(const Status& status);
46
+
47
+ // Catch a pending Python exception and return the corresponding Status.
48
+ // If no exception is pending, Status::OK() is returned.
49
+ inline Status CheckPyError(StatusCode code = StatusCode::UnknownError) {
50
+ if (ARROW_PREDICT_TRUE(!PyErr_Occurred())) {
51
+ return Status::OK();
52
+ } else {
53
+ return ConvertPyError(code);
54
+ }
55
+ }
56
+
57
+ #define RETURN_IF_PYERROR() ARROW_RETURN_NOT_OK(CheckPyError())
58
+
59
+ #define PY_RETURN_IF_ERROR(CODE) ARROW_RETURN_NOT_OK(CheckPyError(CODE))
60
+
61
+ // Helper for Cython: template C++ functions can't be defined in Cython, only used.
62
+ // This function can set a Python exception. It assumes that T has a (cheap)
63
+ // default constructor.
64
+ template <class T>
65
+ T GetResultValue(Result<T> result) {
66
+ if (ARROW_PREDICT_TRUE(result.ok())) {
67
+ return *std::move(result);
68
+ } else {
69
+ int r = internal::check_status(result.status()); // takes the GIL
70
+ assert(r == -1); // should have errored out
71
+ ARROW_UNUSED(r);
72
+ return {};
73
+ }
74
+ }
75
+
76
+ /// \brief Wrap a Result and return the corresponding Python object.
77
+ ///
78
+ /// If the Result is successful, py_wrapper is called with its result value
79
+ /// and should return a PyObject*. If py_wrapper is successful (returns
80
+ /// a non-NULL value), its return value is returned.
81
+ ///
82
+ /// If either the Result or py_wrapper fails, the associated Python exception
83
+ /// is raised and NULL is returned.
84
+ ///
85
+ /// \param result The Result whose value to wrap in a Python object.
86
+ /// \param py_wrapper A function (likely defined in Cython) to convert the C++
87
+ /// value of the Result to a Python object.
88
+ /// \return A new Python reference, or NULL if an exception occurred
89
+ template <typename T, typename PyWrapper = PyObject* (*)(T)>
90
+ PyObject* WrapResult(Result<T> result, PyWrapper&& py_wrapper) {
91
+ static_assert(std::is_same_v<PyObject*, decltype(py_wrapper(std::declval<T>()))>,
92
+ "PyWrapper argument to WrapResult should return a PyObject* "
93
+ "when called with a T*");
94
+ Status st = result.status();
95
+ if (st.ok()) {
96
+ PyObject* py_value = py_wrapper(result.MoveValueUnsafe());
97
+ st = CheckPyError();
98
+ if (st.ok()) {
99
+ return py_value;
100
+ }
101
+ Py_XDECREF(py_value); // should be null, but who knows
102
+ }
103
+ // Status is an error, convert it to an exception.
104
+ return internal::convert_status(st);
105
+ }
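// Editorial sketch (not part of this commit): wrapping a Result<int64_t> for
// return to Python via WrapResult; WrapComputedValue is an illustrative name.
inline PyObject* WrapComputedValue(Result<int64_t> result) {
  return WrapResult(std::move(result),
                    [](int64_t v) { return PyLong_FromLongLong(v); });
}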
106
+
107
+ // A RAII-style helper that ensures the GIL is acquired inside a lexical block.
108
+ class ARROW_PYTHON_EXPORT PyAcquireGIL {
109
+ public:
110
+ PyAcquireGIL() : acquired_gil_(false) { acquire(); }
111
+
112
+ ~PyAcquireGIL() { release(); }
113
+
114
+ void acquire() {
115
+ if (!acquired_gil_) {
116
+ state_ = PyGILState_Ensure();
117
+ acquired_gil_ = true;
118
+ }
119
+ }
120
+
121
+ // idempotent
122
+ void release() {
123
+ if (acquired_gil_) {
124
+ PyGILState_Release(state_);
125
+ acquired_gil_ = false;
126
+ }
127
+ }
128
+
129
+ private:
130
+ bool acquired_gil_;
131
+ PyGILState_STATE state_;
132
+ ARROW_DISALLOW_COPY_AND_ASSIGN(PyAcquireGIL);
133
+ };
134
+
135
+ // A RAII-style helper that releases the GIL until the end of a lexical block
136
+ class ARROW_PYTHON_EXPORT PyReleaseGIL {
137
+ public:
138
+ PyReleaseGIL() : ptr_(PyEval_SaveThread(), &unique_ptr_deleter) {}
139
+
140
+ private:
141
+ static void unique_ptr_deleter(PyThreadState* state) {
142
+ if (state) {
143
+ PyEval_RestoreThread(state);
144
+ }
145
+ }
146
+ std::unique_ptr<PyThreadState, decltype(&unique_ptr_deleter)> ptr_;
147
+ };
148
+
149
+ // A helper to call safely into the Python interpreter from arbitrary C++ code.
150
+ // The GIL is acquired, and the current thread's error status is preserved.
151
+ template <typename Function>
152
+ auto SafeCallIntoPython(Function&& func) -> decltype(func()) {
153
+ PyAcquireGIL lock;
154
+ PyObject* exc_type;
155
+ PyObject* exc_value;
156
+ PyObject* exc_traceback;
157
+ PyErr_Fetch(&exc_type, &exc_value, &exc_traceback);
158
+ auto maybe_status = std::forward<Function>(func)();
159
+ // If the return Status is a "Python error", the current Python error status
160
+ // describes the error and shouldn't be clobbered.
161
+ if (!IsPyError(::arrow::internal::GenericToStatus(maybe_status)) &&
162
+ exc_type != NULLPTR) {
163
+ PyErr_Restore(exc_type, exc_value, exc_traceback);
164
+ }
165
+ return maybe_status;
166
+ }
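// Editorial sketch (not part of this commit): invoking a Python callable from
// arbitrary C++ code via SafeCallIntoPython; CallPyCallback is an
// illustrative name.
inline Status CallPyCallback(PyObject* py_cb) {
  return SafeCallIntoPython([&]() -> Status {
    Py_XDECREF(PyObject_CallObject(py_cb, nullptr));  // discard the return value
    return CheckPyError();
  });
}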
167
+
168
+ template <typename Function>
169
+ auto SafeCallIntoPythonVoid(Function&& func) -> decltype(func()) {
170
+ PyAcquireGIL lock;
171
+ PyObject* exc_type;
172
+ PyObject* exc_value;
173
+ PyObject* exc_traceback;
174
+ PyErr_Fetch(&exc_type, &exc_value, &exc_traceback);
175
+ func();
176
+ if (exc_type != NULLPTR) {
177
+ PyErr_Restore(exc_type, exc_value, exc_traceback);
178
+ }
179
+ }
180
+
181
+ // A RAII primitive that DECREFs the underlying PyObject* when it
182
+ // goes out of scope.
183
+ class ARROW_PYTHON_EXPORT OwnedRef {
184
+ public:
185
+ OwnedRef() : obj_(NULLPTR) {}
186
+ OwnedRef(OwnedRef&& other) : OwnedRef(other.detach()) {}
187
+ explicit OwnedRef(PyObject* obj) : obj_(obj) {}
188
+
189
+ OwnedRef& operator=(OwnedRef&& other) {
190
+ obj_ = other.detach();
191
+ return *this;
192
+ }
193
+
194
+ ~OwnedRef() {
195
+ // GH-38626: destructor may be called after the Python interpreter is finalized.
196
+ if (Py_IsInitialized()) {
197
+ reset();
198
+ }
199
+ }
200
+
201
+ void reset(PyObject* obj) {
202
+ Py_XDECREF(obj_);
203
+ obj_ = obj;
204
+ }
205
+
206
+ void reset() { reset(NULLPTR); }
207
+
208
+ PyObject* detach() {
209
+ PyObject* result = obj_;
210
+ obj_ = NULLPTR;
211
+ return result;
212
+ }
213
+
214
+ PyObject* obj() const { return obj_; }
215
+
216
+ PyObject** ref() { return &obj_; }
217
+
218
+ operator bool() const { return obj_ != NULLPTR; }
219
+
220
+ private:
221
+ ARROW_DISALLOW_COPY_AND_ASSIGN(OwnedRef);
222
+
223
+ PyObject* obj_;
224
+ };
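// Editorial sketch (not part of this commit): RAII reference handling with
// OwnedRef; MakeSingletonTuple is an illustrative name. Assumes the GIL is
// held.
inline PyObject* MakeSingletonTuple() {
  OwnedRef item(PyLong_FromLong(42));  // owns the new reference
  if (!item) return nullptr;
  // PyTuple_Pack takes its own reference; item is DECREF'd at scope exit.
  return PyTuple_Pack(1, item.obj());
}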
225
+
226
+ // Same as OwnedRef, but ensures the GIL is taken when it goes out of scope.
227
+ // This is for situations where the GIL is not always known to be held
228
+ // (e.g. if it is released in the middle of a function for performance reasons)
229
+ class ARROW_PYTHON_EXPORT OwnedRefNoGIL : public OwnedRef {
230
+ public:
231
+ OwnedRefNoGIL() : OwnedRef() {}
232
+ OwnedRefNoGIL(OwnedRefNoGIL&& other) : OwnedRef(other.detach()) {}
233
+ explicit OwnedRefNoGIL(PyObject* obj) : OwnedRef(obj) {}
234
+
235
+ ~OwnedRefNoGIL() {
236
+ // GH-38626: destructor may be called after the Python interpreter is finalized.
237
+ if (Py_IsInitialized() && obj() != NULLPTR) {
238
+ PyAcquireGIL lock;
239
+ reset();
240
+ }
241
+ }
242
+ };
243
+
244
+ template <template <typename...> typename SmartPtr, typename... Ts>
245
+ class SmartPtrNoGIL : public SmartPtr<Ts...> {
246
+ using Base = SmartPtr<Ts...>;
247
+
248
+ public:
249
+ template <typename... Args>
250
+ SmartPtrNoGIL(Args&&... args) : Base(std::forward<Args>(args)...) {}
251
+
252
+ ~SmartPtrNoGIL() { reset(); }
253
+
254
+ template <typename... Args>
255
+ void reset(Args&&... args) {
256
+ auto release_guard = optional_gil_release();
257
+ Base::reset(std::forward<Args>(args)...);
258
+ }
259
+
260
+ template <typename V>
261
+ SmartPtrNoGIL& operator=(V&& v) {
262
+ auto release_guard = optional_gil_release();
263
+ Base::operator=(std::forward<V>(v));
264
+ return *this;
265
+ }
266
+
267
+ private:
268
+ // Only release the GIL if we own an object *and* the Python runtime is
269
+ // valid *and* the GIL is held.
270
+ std::optional<PyReleaseGIL> optional_gil_release() const {
271
+ if (this->get() != nullptr && Py_IsInitialized() && PyGILState_Check()) {
272
+ return PyReleaseGIL();
273
+ }
274
+ return {};
275
+ }
276
+ };
277
+
278
+ /// \brief A std::shared_ptr<T, ...> subclass that releases the GIL when destroying T
279
+ template <typename... Ts>
280
+ using SharedPtrNoGIL = SmartPtrNoGIL<std::shared_ptr, Ts...>;
281
+
282
+ /// \brief A std::unique_ptr<T, ...> subclass that releases the GIL when destroying T
283
+ template <typename... Ts>
284
+ using UniquePtrNoGIL = SmartPtrNoGIL<std::unique_ptr, Ts...>;
285
+
286
+ template <typename Fn>
287
+ struct BoundFunction;
288
+
289
+ template <typename... Args>
290
+ struct BoundFunction<void(PyObject*, Args...)> {
291
+ // We bind `cdef void fn(object, ...)` to get a `Status(...)`
292
+ // where the Status contains any Python error raised by `fn`
293
+ using Unbound = void(PyObject*, Args...);
294
+ using Bound = Status(Args...);
295
+
296
+ BoundFunction(Unbound* unbound, PyObject* bound_arg)
297
+ : unbound_(unbound), bound_arg_(bound_arg) {}
298
+
299
+ Status Invoke(Args... args) const {
300
+ PyAcquireGIL lock;
301
+ unbound_(bound_arg_.obj(), std::forward<Args>(args)...);
302
+ RETURN_IF_PYERROR();
303
+ return Status::OK();
304
+ }
305
+
306
+ Unbound* unbound_;
307
+ OwnedRefNoGIL bound_arg_;
308
+ };
309
+
310
+ template <typename Return, typename... Args>
311
+ struct BoundFunction<Return(PyObject*, Args...)> {
312
+ // We bind `cdef Return fn(object, ...)` to get a `Result<Return>(...)`
313
+ // where the Result contains any Python error raised by `fn` or the
314
+ // return value from `fn`.
315
+ using Unbound = Return(PyObject*, Args...);
316
+ using Bound = Result<Return>(Args...);
317
+
318
+ BoundFunction(Unbound* unbound, PyObject* bound_arg)
319
+ : unbound_(unbound), bound_arg_(bound_arg) {}
320
+
321
+ Result<Return> Invoke(Args... args) const {
322
+ PyAcquireGIL lock;
323
+ Return ret = unbound_(bound_arg_.obj(), std::forward<Args>(args)...);
324
+ RETURN_IF_PYERROR();
325
+ return ret;
326
+ }
327
+
328
+ Unbound* unbound_;
329
+ OwnedRefNoGIL bound_arg_;
330
+ };
331
+
332
+ template <typename OutFn, typename Return, typename... Args>
333
+ std::function<OutFn> BindFunction(Return (*unbound)(PyObject*, Args...),
334
+ PyObject* bound_arg) {
335
+ using Fn = BoundFunction<Return(PyObject*, Args...)>;
336
+
337
+ static_assert(std::is_same<typename Fn::Bound, OutFn>::value,
338
+ "requested bound function of unsupported type");
339
+
340
+ Py_XINCREF(bound_arg);
341
+ auto bound_fn = std::make_shared<Fn>(unbound, bound_arg);
342
+ return
343
+ [bound_fn](Args... args) { return bound_fn->Invoke(std::forward<Args>(args)...); };
344
+ }
345
+
346
+ // A temporary conversion of a Python object to a bytes area.
347
+ struct PyBytesView {
348
+ const char* bytes;
349
+ Py_ssize_t size;
350
+ bool is_utf8;
351
+
352
+ static Result<PyBytesView> FromString(PyObject* obj, bool check_utf8 = false) {
353
+ PyBytesView self;
354
+ ARROW_RETURN_NOT_OK(self.ParseString(obj, check_utf8));
355
+ return std::move(self);
356
+ }
357
+
358
+ static Result<PyBytesView> FromUnicode(PyObject* obj) {
359
+ PyBytesView self;
360
+ ARROW_RETURN_NOT_OK(self.ParseUnicode(obj));
361
+ return std::move(self);
362
+ }
363
+
364
+ static Result<PyBytesView> FromBinary(PyObject* obj) {
365
+ PyBytesView self;
366
+ ARROW_RETURN_NOT_OK(self.ParseBinary(obj));
367
+ return std::move(self);
368
+ }
369
+
370
+ // View the given Python object as string-like, i.e. str or (utf8) bytes
371
+ Status ParseString(PyObject* obj, bool check_utf8 = false) {
372
+ if (PyUnicode_Check(obj)) {
373
+ return ParseUnicode(obj);
374
+ } else {
375
+ ARROW_RETURN_NOT_OK(ParseBinary(obj));
376
+ if (check_utf8) {
377
+ // Check that the bytes are valid utf-8
378
+ OwnedRef decoded(PyUnicode_FromStringAndSize(bytes, size));
379
+ if (ARROW_PREDICT_TRUE(!PyErr_Occurred())) {
380
+ is_utf8 = true;
381
+ } else {
382
+ PyErr_Clear();
383
+ is_utf8 = false;
384
+ }
385
+ }
386
+ return Status::OK();
387
+ }
388
+ }
389
+
390
+ // View the given Python object as unicode string
391
+ Status ParseUnicode(PyObject* obj) {
392
+ // The utf-8 representation is cached on the unicode object
393
+ bytes = PyUnicode_AsUTF8AndSize(obj, &size);
394
+ RETURN_IF_PYERROR();
395
+ is_utf8 = true;
396
+ return Status::OK();
397
+ }
398
+
399
+ // View the given Python object as binary-like, i.e. bytes
400
+ Status ParseBinary(PyObject* obj) {
401
+ if (PyBytes_Check(obj)) {
402
+ bytes = PyBytes_AS_STRING(obj);
403
+ size = PyBytes_GET_SIZE(obj);
404
+ is_utf8 = false;
405
+ } else if (PyByteArray_Check(obj)) {
406
+ bytes = PyByteArray_AS_STRING(obj);
407
+ size = PyByteArray_GET_SIZE(obj);
408
+ is_utf8 = false;
409
+ } else if (PyMemoryView_Check(obj)) {
410
+ PyObject* ref = PyMemoryView_GetContiguous(obj, PyBUF_READ, 'C');
411
+ RETURN_IF_PYERROR();
412
+ Py_buffer* buffer = PyMemoryView_GET_BUFFER(ref);
413
+ bytes = reinterpret_cast<const char*>(buffer->buf);
414
+ size = buffer->len;
415
+ is_utf8 = false;
416
+ } else {
417
+ return Status::TypeError("Expected bytes, got a '", Py_TYPE(obj)->tp_name,
418
+ "' object");
419
+ }
420
+ return Status::OK();
421
+ }
422
+
423
+ protected:
424
+ OwnedRef ref;
425
+ };
426
+
427
+ class ARROW_PYTHON_EXPORT PyBuffer : public Buffer {
428
+ public:
429
+ /// While memoryview objects support multi-dimensional buffers, PyBuffer only supports
430
+ /// one-dimensional byte buffers.
431
+ ~PyBuffer();
432
+
433
+ static Result<std::shared_ptr<Buffer>> FromPyObject(PyObject* obj);
434
+
435
+ private:
436
+ PyBuffer();
437
+ Status Init(PyObject*);
438
+
439
+ Py_buffer py_buf_;
440
+ };
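// Editorial sketch (not part of this commit): zero-copy view of a Python
// buffer-protocol object (e.g. bytes) as an arrow::Buffer; ViewPyBytes is an
// illustrative name.
inline Result<std::shared_ptr<Buffer>> ViewPyBytes(PyObject* py_bytes) {
  return PyBuffer::FromPyObject(py_bytes);
}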
441
+
442
+ // Set and return the common PyArrow memory pool
443
+ ARROW_PYTHON_EXPORT void set_default_memory_pool(MemoryPool* pool);
444
+ ARROW_PYTHON_EXPORT MemoryPool* get_memory_pool();
445
+
446
+ // This is annoying: because C++11 does not allow implicit conversion of string
447
+ // literals to non-const char*, we need to go through some gymnastics to use
448
+ // PyObject_CallMethod without a lot of pain (its arguments are non-const
449
+ // char*)
450
+ template <typename... ArgTypes>
451
+ static inline PyObject* cpp_PyObject_CallMethod(PyObject* obj, const char* method_name,
452
+ const char* argspec, ArgTypes... args) {
453
+ return PyObject_CallMethod(obj, const_cast<char*>(method_name),
454
+ const_cast<char*>(argspec), args...);
455
+ }
456
+
457
+ } // namespace py
458
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/csv.h ADDED
@@ -0,0 +1,42 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "arrow/csv/options.h"
26
+ #include "arrow/python/common.h"
27
+ #include "arrow/util/macros.h"
28
+
29
+ namespace arrow {
30
+ namespace py {
31
+ namespace csv {
32
+
33
+ using PyInvalidRowCallback = std::function<::arrow::csv::InvalidRowResult(
34
+ PyObject*, const ::arrow::csv::InvalidRow&)>;
35
+
36
+ ARROW_PYTHON_EXPORT
37
+ ::arrow::csv::InvalidRowHandler MakeInvalidRowHandler(PyInvalidRowCallback,
38
+ PyObject* handler);
39
+
40
+ } // namespace csv
41
+ } // namespace py
42
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/datetime.cc ADDED
@@ -0,0 +1,663 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+ #include "datetime.h"
18
+
19
+ #include <algorithm>
20
+ #include <chrono>
21
+ #include <iomanip>
22
+ #include <regex>
23
+ #include <string_view>
24
+
25
+ #include "arrow/array.h"
26
+ #include "arrow/python/arrow_to_python_internal.h"
27
+ #include "arrow/python/common.h"
28
+ #include "arrow/python/helpers.h"
29
+ #include "arrow/python/platform.h"
30
+ #include "arrow/scalar.h"
31
+ #include "arrow/status.h"
32
+ #include "arrow/type.h"
33
+ #include "arrow/util/logging.h"
34
+ #include "arrow/util/regex.h"
35
+ #include "arrow/util/value_parsing.h"
36
+
37
+ namespace arrow {
38
+
39
+ using internal::RegexMatch;
40
+
41
+ namespace py {
42
+ namespace internal {
43
+
44
+ namespace {
45
+
46
+ bool MatchFixedOffset(const std::string& tz, std::string_view* sign,
47
+ std::string_view* hour, std::string_view* minute) {
48
+ static const std::regex regex("^([+-])(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9])$");
49
+ if (tz.size() < 5) {
50
+ return false;
51
+ }
52
+ return RegexMatch(regex, tz, {sign, hour, minute});
53
+ }
54
+
55
+ constexpr char* NonConst(const char* st) {
56
+ // Hack for python versions < 3.7 where members of PyStruct members
57
+ // where non-const (C++ doesn't like assigning string literals to these types)
58
+ return const_cast<char*>(st);
59
+ }
60
+
61
+ static PyTypeObject MonthDayNanoTupleType = {};
62
+
63
+ static PyStructSequence_Field MonthDayNanoField[] = {
64
+ {NonConst("months"), NonConst("The number of months in the interval")},
65
+ {NonConst("days"), NonConst("The number days in the interval")},
66
+ {NonConst("nanoseconds"), NonConst("The number of nanoseconds in the interval")},
67
+ {nullptr, nullptr}};
68
+
69
+ static PyStructSequence_Desc MonthDayNanoTupleDesc = {
70
+ NonConst("MonthDayNano"),
71
+ NonConst("A calendar interval consisting of months, days and nanoseconds."),
72
+ MonthDayNanoField,
73
+ /*n_in_sequence=*/3};
74
+
75
+ } // namespace
76
+
77
+ #ifndef PYPY_VERSION
78
+ PyDateTime_CAPI* datetime_api = nullptr;
79
+
80
+ void InitDatetime() {
81
+ PyAcquireGIL lock;
82
+ datetime_api =
83
+ reinterpret_cast<PyDateTime_CAPI*>(PyCapsule_Import(PyDateTime_CAPSULE_NAME, 0));
84
+ if (datetime_api == nullptr) {
85
+ Py_FatalError("Could not import datetime C API");
86
+ }
87
+ }
88
+ #endif
89
+
90
+ // The following code is adapted from
91
+ // https://github.com/numpy/numpy/blob/main/numpy/core/src/multiarray/datetime.c
92
+
93
+ // Days per month, regular year and leap year
94
+ static int64_t _days_per_month_table[2][12] = {
95
+ {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
96
+ {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}};
97
+
98
+ static bool is_leapyear(int64_t year) {
99
+ return (year & 0x3) == 0 && // year % 4 == 0
100
+ ((year % 100) != 0 || (year % 400) == 0);
101
+ }
102
+
103
+ // Calculates the days offset from the 1970 epoch.
104
+ static int64_t get_days_from_date(int64_t date_year, int64_t date_month,
105
+ int64_t date_day) {
106
+ int64_t i, month;
107
+ int64_t year, days = 0;
108
+ int64_t* month_lengths;
109
+
110
+ year = date_year - 1970;
111
+ days = year * 365;
112
+
113
+ // Adjust for leap years
114
+ if (days >= 0) {
115
+ // 1968 is the closest leap year before 1970.
116
+ // Exclude the current year, so add 1.
117
+ year += 1;
118
+ // Add one day for each 4 years
119
+ days += year / 4;
120
+ // 1900 is the closest previous year divisible by 100
121
+ year += 68;
122
+ // Subtract one day for each 100 years
123
+ days -= year / 100;
124
+ // 1600 is the closest previous year divisible by 400
125
+ year += 300;
126
+ // Add one day for each 400 years
127
+ days += year / 400;
128
+ } else {
129
+ // 1972 is the closest later year after 1970.
130
+ // Include the current year, so subtract 2.
131
+ year -= 2;
132
+ // Subtract one day for each 4 years
133
+ days += year / 4;
134
+ // 2000 is the closest later year divisible by 100
135
+ year -= 28;
136
+ // Add one day for each 100 years
137
+ days -= year / 100;
138
+ // 2000 is also the closest later year divisible by 400
139
+ // Subtract one day for each 400 years
140
+ days += year / 400;
141
+ }
142
+
143
+ month_lengths = _days_per_month_table[is_leapyear(date_year)];
144
+ month = date_month - 1;
145
+
146
+ // Add the months
147
+ for (i = 0; i < month; ++i) {
148
+ days += month_lengths[i];
149
+ }
150
+
151
+ // Add the days
152
+ days += date_day - 1;
153
+
154
+ return days;
155
+ }
156
+
157
+ // Modifies '*days_' to be the day offset within the year,
158
+ // and returns the year.
159
+ static int64_t days_to_yearsdays(int64_t* days_) {
160
+ const int64_t days_per_400years = (400 * 365 + 100 - 4 + 1);
161
+ // Adjust so it's relative to the year 2000 (divisible by 400)
162
+ int64_t days = (*days_) - (365 * 30 + 7);
163
+ int64_t year;
164
+
165
+ // Break down the 400 year cycle to get the year and day within the year
166
+ if (days >= 0) {
167
+ year = 400 * (days / days_per_400years);
168
+ days = days % days_per_400years;
169
+ } else {
170
+ year = 400 * ((days - (days_per_400years - 1)) / days_per_400years);
171
+ days = days % days_per_400years;
172
+ if (days < 0) {
173
+ days += days_per_400years;
174
+ }
175
+ }
176
+
177
+ // Work out the year/day within the 400 year cycle
178
+ if (days >= 366) {
179
+ year += 100 * ((days - 1) / (100 * 365 + 25 - 1));
180
+ days = (days - 1) % (100 * 365 + 25 - 1);
181
+ if (days >= 365) {
182
+ year += 4 * ((days + 1) / (4 * 365 + 1));
183
+ days = (days + 1) % (4 * 365 + 1);
184
+ if (days >= 366) {
185
+ year += (days - 1) / 365;
186
+ days = (days - 1) % 365;
187
+ }
188
+ }
189
+ }
190
+
191
+ *days_ = days;
192
+ return year + 2000;
193
+ }
194
+
195
+ // Extracts the month and year and day number from a number of days
196
+ static void get_date_from_days(int64_t days, int64_t* date_year, int64_t* date_month,
197
+ int64_t* date_day) {
198
+ int64_t *month_lengths, i;
199
+
200
+ *date_year = days_to_yearsdays(&days);
201
+ month_lengths = _days_per_month_table[is_leapyear(*date_year)];
202
+
203
+ for (i = 0; i < 12; ++i) {
204
+ if (days < month_lengths[i]) {
205
+ *date_month = i + 1;
206
+ *date_day = days + 1;
207
+ return;
208
+ } else {
209
+ days -= month_lengths[i];
210
+ }
211
+ }
212
+
213
+ // Should never get here
214
+ return;
215
+ }
216
+
217
+ // Splitting time quantities, for example splitting total seconds into
218
+ // minutes and remaining seconds. After we run
219
+ // int64_t remaining = split_time(total, quotient, &next)
220
+ // we have
221
+ // total = next * quotient + remaining. Handles negative values by propagating
222
+ // them: If total is negative, next will be negative and remaining will
223
+ // always be non-negative.
224
+ static inline int64_t split_time(int64_t total, int64_t quotient, int64_t* next) {
225
+ int64_t r = total % quotient;
226
+ if (r < 0) {
227
+ *next = total / quotient - 1;
228
+ return r + quotient;
229
+ } else {
230
+ *next = total / quotient;
231
+ return r;
232
+ }
233
+ }
234
+
235
+ static inline Status PyTime_convert_int(int64_t val, const TimeUnit::type unit,
236
+ int64_t* hour, int64_t* minute, int64_t* second,
237
+ int64_t* microsecond) {
238
+ switch (unit) {
239
+ case TimeUnit::NANO:
240
+ if (val % 1000 != 0) {
241
+ return Status::Invalid("Value ", val, " has non-zero nanoseconds");
242
+ }
243
+ val /= 1000;
244
+ // fall through
245
+ case TimeUnit::MICRO:
246
+ *microsecond = split_time(val, 1000000LL, &val);
247
+ *second = split_time(val, 60, &val);
248
+ *minute = split_time(val, 60, hour);
249
+ break;
250
+ case TimeUnit::MILLI:
251
+ *microsecond = split_time(val, 1000, &val) * 1000;
252
+ // fall through
253
+ case TimeUnit::SECOND:
254
+ *second = split_time(val, 60, &val);
255
+ *minute = split_time(val, 60, hour);
256
+ break;
257
+ default:
258
+ break;
259
+ }
260
+ return Status::OK();
261
+ }
262
+
263
+ static inline Status PyDate_convert_int(int64_t val, const DateUnit unit, int64_t* year,
264
+ int64_t* month, int64_t* day) {
265
+ switch (unit) {
266
+ case DateUnit::MILLI:
267
+ val /= 86400000LL; // fall through
268
+ case DateUnit::DAY:
269
+ get_date_from_days(val, year, month, day);
270
+ default:
271
+ break;
272
+ }
273
+ return Status::OK();
274
+ }
275
+
276
+ PyObject* NewMonthDayNanoTupleType() {
277
+ if (MonthDayNanoTupleType.tp_name == nullptr) {
278
+ if (PyStructSequence_InitType2(&MonthDayNanoTupleType, &MonthDayNanoTupleDesc) != 0) {
279
+ Py_FatalError("Could not initialize MonthDayNanoTuple");
280
+ }
281
+ }
282
+ Py_INCREF(&MonthDayNanoTupleType);
283
+ return (PyObject*)&MonthDayNanoTupleType;
284
+ }
285
+
286
+ Status PyTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out) {
287
+ int64_t hour = 0, minute = 0, second = 0, microsecond = 0;
288
+ RETURN_NOT_OK(PyTime_convert_int(val, unit, &hour, &minute, &second, &microsecond));
289
+ *out = PyTime_FromTime(static_cast<int32_t>(hour), static_cast<int32_t>(minute),
290
+ static_cast<int32_t>(second), static_cast<int32_t>(microsecond));
291
+ return Status::OK();
292
+ }
293
+
294
+ Status PyDate_from_int(int64_t val, const DateUnit unit, PyObject** out) {
295
+ int64_t year = 0, month = 0, day = 0;
296
+ RETURN_NOT_OK(PyDate_convert_int(val, unit, &year, &month, &day));
297
+ *out = PyDate_FromDate(static_cast<int32_t>(year), static_cast<int32_t>(month),
298
+ static_cast<int32_t>(day));
299
+ return Status::OK();
300
+ }
301
+
302
+ Status PyDateTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out) {
303
+ int64_t hour = 0, minute = 0, second = 0, microsecond = 0;
304
+ RETURN_NOT_OK(PyTime_convert_int(val, unit, &hour, &minute, &second, &microsecond));
305
+ int64_t total_days = 0;
306
+ hour = split_time(hour, 24, &total_days);
307
+ int64_t year = 0, month = 0, day = 0;
308
+ get_date_from_days(total_days, &year, &month, &day);
309
+ *out = PyDateTime_FromDateAndTime(
310
+ static_cast<int32_t>(year), static_cast<int32_t>(month), static_cast<int32_t>(day),
311
+ static_cast<int32_t>(hour), static_cast<int32_t>(minute),
312
+ static_cast<int32_t>(second), static_cast<int32_t>(microsecond));
313
+ return Status::OK();
314
+ }
315
+
316
+ int64_t PyDate_to_days(PyDateTime_Date* pydate) {
317
+ return get_days_from_date(PyDateTime_GET_YEAR(pydate), PyDateTime_GET_MONTH(pydate),
318
+ PyDateTime_GET_DAY(pydate));
319
+ }
320
+
321
+ Result<int64_t> PyDateTime_utcoffset_s(PyObject* obj) {
322
+ // calculate offset from UTC timezone in seconds
323
+ // supports only PyDateTime_DateTime and PyDateTime_Time objects
324
+ OwnedRef pyoffset(PyObject_CallMethod(obj, "utcoffset", NULL));
325
+ RETURN_IF_PYERROR();
326
+ if (pyoffset.obj() != nullptr && pyoffset.obj() != Py_None) {
327
+ auto delta = reinterpret_cast<PyDateTime_Delta*>(pyoffset.obj());
328
+ return internal::PyDelta_to_s(delta);
329
+ } else {
330
+ return 0;
331
+ }
332
+ }
333
+
334
+ Result<std::string> PyTZInfo_utcoffset_hhmm(PyObject* pytzinfo) {
335
+ // attempt to convert timezone offset objects to "+/-{hh}:{mm}" format
336
+ OwnedRef pydelta_object(PyObject_CallMethod(pytzinfo, "utcoffset", "O", Py_None));
337
+ RETURN_IF_PYERROR();
338
+
339
+ if (!PyDelta_Check(pydelta_object.obj())) {
340
+ return Status::Invalid(
341
+ "Object returned by tzinfo.utcoffset(None) is not an instance of "
342
+ "datetime.timedelta");
343
+ }
344
+ auto pydelta = reinterpret_cast<PyDateTime_Delta*>(pydelta_object.obj());
345
+
346
+ // retrieve the offset as seconds
347
+ auto total_seconds = internal::PyDelta_to_s(pydelta);
348
+
349
+ // determine whether the offset is positive or negative
350
+ auto sign = (total_seconds < 0) ? "-" : "+";
351
+ total_seconds = abs(total_seconds);
352
+
353
+ // calculate offset components
354
+ int64_t hours, minutes, seconds;
355
+ seconds = split_time(total_seconds, 60, &minutes);
356
+ minutes = split_time(minutes, 60, &hours);
357
+ if (seconds > 0) {
358
+ // check there are no remaining seconds
359
+ return Status::Invalid("Offset must represent whole number of minutes");
360
+ }
361
+
362
+ // construct the timezone string
363
+ std::stringstream stream;
364
+ stream << sign << std::setfill('0') << std::setw(2) << hours << ":" << std::setfill('0')
365
+ << std::setw(2) << minutes;
366
+ return stream.str();
367
+ }
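+
+ // For example, a tzinfo whose utcoffset(None) is timedelta(hours=5, minutes=30)
+ // is rendered as "+05:30", while timedelta(minutes=-45) yields "-00:45".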
368
+
369
+ // Converted from python. See https://github.com/apache/arrow/pull/7604
370
+ // for details.
371
+ Result<PyObject*> StringToTzinfo(const std::string& tz) {
372
+ std::string_view sign_str, hour_str, minute_str;
373
+ OwnedRef pytz;
374
+ OwnedRef zoneinfo;
375
+ OwnedRef datetime;
376
+
377
+ if (internal::ImportModule("pytz", &pytz).ok()) {
378
+ if (MatchFixedOffset(tz, &sign_str, &hour_str, &minute_str)) {
379
+ int sign = -1;
380
+ if (sign_str == "+") {
381
+ sign = 1;
382
+ }
383
+ OwnedRef fixed_offset;
384
+ RETURN_NOT_OK(internal::ImportFromModule(pytz.obj(), "FixedOffset", &fixed_offset));
385
+ uint32_t minutes, hours;
386
+ if (!::arrow::internal::ParseUnsigned(hour_str.data(), hour_str.size(), &hours) ||
387
+ !::arrow::internal::ParseUnsigned(minute_str.data(), minute_str.size(),
388
+ &minutes)) {
389
+ return Status::Invalid("Invalid timezone: ", tz);
390
+ }
391
+ OwnedRef total_minutes(PyLong_FromLong(
392
+ sign * ((static_cast<int>(hours) * 60) + static_cast<int>(minutes))));
393
+ RETURN_IF_PYERROR();
394
+ auto tzinfo =
395
+ PyObject_CallFunctionObjArgs(fixed_offset.obj(), total_minutes.obj(), NULL);
396
+ RETURN_IF_PYERROR();
397
+ return tzinfo;
398
+ }
399
+
400
+ OwnedRef timezone;
401
+ RETURN_NOT_OK(internal::ImportFromModule(pytz.obj(), "timezone", &timezone));
402
+ OwnedRef py_tz_string(
403
+ PyUnicode_FromStringAndSize(tz.c_str(), static_cast<Py_ssize_t>(tz.size())));
404
+ auto tzinfo = PyObject_CallFunctionObjArgs(timezone.obj(), py_tz_string.obj(), NULL);
405
+ RETURN_IF_PYERROR();
406
+ return tzinfo;
407
+ }
408
+
409
+ // catch fixed offset if pytz is not present
410
+ if (MatchFixedOffset(tz, &sign_str, &hour_str, &minute_str)) {
411
+ RETURN_NOT_OK(internal::ImportModule("datetime", &datetime));
412
+ int sign = -1;
413
+ if (sign_str == "+") {
414
+ sign = 1;
415
+ }
416
+
417
+ // import timezone and timedelta module to create a tzinfo object
418
+ OwnedRef class_timezone;
419
+ OwnedRef class_timedelta;
420
+ RETURN_NOT_OK(
421
+ internal::ImportFromModule(datetime.obj(), "timezone", &class_timezone));
422
+ RETURN_NOT_OK(
423
+ internal::ImportFromModule(datetime.obj(), "timedelta", &class_timedelta));
424
+
425
+ // check input
426
+ uint32_t minutes, hours;
427
+ if (!::arrow::internal::ParseUnsigned(hour_str.data(), hour_str.size(), &hours) ||
428
+ !::arrow::internal::ParseUnsigned(minute_str.data(), minute_str.size(),
429
+ &minutes)) {
430
+ return Status::Invalid("Invalid timezone: ", tz);
431
+ }
432
+
433
+ // save offset as a signed integer
434
+ OwnedRef total_minutes(PyLong_FromLong(
435
+ sign * ((static_cast<int>(hours) * 60) + static_cast<int>(minutes))));
436
+ // create zero integers for empty arguments in datetime.timedelta
437
+ OwnedRef zero(PyLong_FromLong(static_cast<int>(0)));
438
+
439
+ // call datetime.timedelta to get correct offset object for datetime.timezone
440
+ auto offset =
441
+ PyObject_CallFunctionObjArgs(class_timedelta.obj(), zero.obj(), zero.obj(),
442
+ zero.obj(), zero.obj(), total_minutes.obj(), NULL);
443
+ RETURN_IF_PYERROR();
444
+ // call datetime.timezone
445
+ auto tzinfo = PyObject_CallFunctionObjArgs(class_timezone.obj(), offset, NULL);
446
+ RETURN_IF_PYERROR();
447
+ return tzinfo;
448
+ }
449
+
450
+ // fallback on zoneinfo if tz is string and pytz is not present
451
+ if (internal::ImportModule("zoneinfo", &zoneinfo).ok()) {
452
+ OwnedRef class_zoneinfo;
453
+ RETURN_NOT_OK(
454
+ internal::ImportFromModule(zoneinfo.obj(), "ZoneInfo", &class_zoneinfo));
455
+ OwnedRef py_tz_string(
456
+ PyUnicode_FromStringAndSize(tz.c_str(), static_cast<Py_ssize_t>(tz.size())));
457
+ auto tzinfo =
458
+ PyObject_CallFunctionObjArgs(class_zoneinfo.obj(), py_tz_string.obj(), NULL);
459
+ RETURN_IF_PYERROR();
460
+ return tzinfo;
461
+ }
462
+
463
+ return Status::Invalid(
464
+ "Pytz package or Python>=3.8 for zoneinfo module must be installed.");
465
+ }
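+
+ // In summary: fixed offsets like "+07:30" resolve through pytz.FixedOffset or,
+ // failing that, datetime.timezone(datetime.timedelta(...)); named zones like
+ // "America/New_York" resolve through pytz.timezone or zoneinfo.ZoneInfo.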
466
+
467
+ Result<std::string> TzinfoToString(PyObject* tzinfo) {
468
+ OwnedRef module_pytz; // import pytz
469
+ OwnedRef module_datetime; // import datetime
470
+ OwnedRef module_zoneinfo; // import zoneinfo
471
+ OwnedRef module_dateutil; // import dateutil
472
+ OwnedRef class_timezone; // from datetime import timezone
473
+ OwnedRef class_fixedoffset; // from pytz import _FixedOffset
474
+ OwnedRef class_basetzinfo; // from pytz import BaseTzInfo
475
+ OwnedRef class_zoneinfo; // from zoneinfo import ZoneInfo
476
+ OwnedRef class_tzfile; // from zoneinfo import tzfile
477
+
478
+ // import necessary modules
479
+ RETURN_NOT_OK(internal::ImportModule("datetime", &module_datetime));
480
+ // import necessary classes
481
+ RETURN_NOT_OK(
482
+ internal::ImportFromModule(module_datetime.obj(), "timezone", &class_timezone));
483
+
484
+ // check that it's a valid tzinfo object
485
+ if (!PyTZInfo_Check(tzinfo)) {
486
+ return Status::TypeError("Not an instance of datetime.tzinfo");
487
+ }
488
+
489
+ // if tzinfo is an instance of datetime.timezone return the
490
+ // HH:MM offset string representation
491
+ if (PyObject_IsInstance(tzinfo, class_timezone.obj())) {
492
+ // still recognize datetime.timezone.utc as UTC (instead of +00:00)
493
+ OwnedRef tzname_object(PyObject_CallMethod(tzinfo, "tzname", "O", Py_None));
494
+ RETURN_IF_PYERROR();
495
+ if (PyUnicode_Check(tzname_object.obj())) {
496
+ std::string result;
497
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(tzname_object.obj(), &result));
498
+ if (result == "UTC") {
499
+ return result;
500
+ }
501
+ }
502
+ return PyTZInfo_utcoffset_hhmm(tzinfo);
503
+ }
504
+
505
+ // Try to import pytz if it is available
506
+ if (internal::ImportModule("pytz", &module_pytz).ok()) {
507
+ RETURN_NOT_OK(internal::ImportFromModule(module_pytz.obj(), "_FixedOffset",
508
+ &class_fixedoffset));
509
+ RETURN_NOT_OK(
510
+ internal::ImportFromModule(module_pytz.obj(), "BaseTzInfo", &class_basetzinfo));
511
+ }
512
+
513
+ // if tzinfo is an instance of pytz._FixedOffset return the
514
+ // HH:MM offset string representation
515
+ if (module_pytz.obj() != nullptr &&
516
+ PyObject_IsInstance(tzinfo, class_fixedoffset.obj())) {
517
+ OwnedRef tzname_object(PyObject_CallMethod(tzinfo, "tzname", "O", Py_None));
518
+ RETURN_IF_PYERROR();
519
+ return PyTZInfo_utcoffset_hhmm(tzinfo);
520
+ }
521
+
522
+ // if pytz is installed and tzinfo is an instance of pytz.BaseTzInfo
523
+ if (module_pytz.obj() != nullptr &&
524
+ PyObject_IsInstance(tzinfo, class_basetzinfo.obj())) {
525
+ OwnedRef zone(PyObject_GetAttrString(tzinfo, "zone"));
526
+ RETURN_IF_PYERROR();
527
+ std::string result;
528
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(zone.obj(), &result));
529
+ return result;
530
+ }
531
+
532
+ // Try to import zoneinfo if it is available
533
+ if (internal::ImportModule("zoneinfo", &module_zoneinfo).ok()) {
534
+ RETURN_NOT_OK(
535
+ internal::ImportFromModule(module_zoneinfo.obj(), "ZoneInfo", &class_zoneinfo));
536
+ }
537
+
538
+ // if zoneinfo is installed and tzinfo is an instance of zoneinfo.ZoneInfo
539
+ if (module_zoneinfo.obj() != nullptr &&
540
+ PyObject_IsInstance(tzinfo, class_zoneinfo.obj())) {
541
+ OwnedRef key(PyObject_GetAttrString(tzinfo, "key"));
542
+ RETURN_IF_PYERROR();
543
+ std::string result;
544
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(key.obj(), &result));
545
+ return result;
546
+ }
547
+
548
+ // Try to import dateutil if it is available
549
+ if (internal::ImportModule("dateutil.tz", &module_dateutil).ok()) {
550
+ RETURN_NOT_OK(
551
+ internal::ImportFromModule(module_dateutil.obj(), "tzfile", &class_tzfile));
552
+ }
553
+
554
+ // if dateutil is installed and tzinfo is an instance of dateutil.tz.tzfile
555
+ if (module_dateutil.obj() != nullptr &&
556
+ PyObject_IsInstance(tzinfo, class_tzfile.obj())) {
557
+ OwnedRef _filename(PyObject_GetAttrString(tzinfo, "_filename"));
558
+ RETURN_IF_PYERROR();
559
+ std::string result;
560
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(_filename.obj(), &result));
561
+ // _filename returns a full path in general ('/usr/share/zoneinfo/Europe/Paris')
562
+ // or a POSIX name on Windows ('Europe/Paris') - we need a substring in the first case
563
+ std::size_t pos = result.find("zoneinfo/");
564
+ if (pos != std::string::npos) {
565
+ return result.substr(pos + 9);
566
+ }
567
+ return result;
568
+ }
569
+
570
+ // attempt to call tzinfo.tzname(None)
571
+ OwnedRef tzname_object(PyObject_CallMethod(tzinfo, "tzname", "O", Py_None));
572
+ RETURN_IF_PYERROR();
573
+ if (PyUnicode_Check(tzname_object.obj())) {
574
+ std::string result;
575
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(tzname_object.obj(), &result));
576
+ return result;
577
+ }
578
+
579
+ // fall back to HH:MM offset string representation based on tzinfo.utcoffset(None)
580
+ return PyTZInfo_utcoffset_hhmm(tzinfo);
581
+ }
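+
+ // For example, datetime.timezone(timedelta(hours=-3)) maps to "-03:00",
+ // datetime.timezone.utc maps to "UTC", and zoneinfo.ZoneInfo("Europe/Paris")
+ // maps to "Europe/Paris".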
582
+
583
+ PyObject* MonthDayNanoIntervalToNamedTuple(
584
+ const MonthDayNanoIntervalType::MonthDayNanos& interval) {
585
+ OwnedRef tuple(PyStructSequence_New(&MonthDayNanoTupleType));
586
+ if (ARROW_PREDICT_FALSE(tuple.obj() == nullptr)) {
587
+ return nullptr;
588
+ }
589
+ PyStructSequence_SetItem(tuple.obj(), /*pos=*/0, PyLong_FromLong(interval.months));
590
+ PyStructSequence_SetItem(tuple.obj(), /*pos=*/1, PyLong_FromLong(interval.days));
591
+ PyStructSequence_SetItem(tuple.obj(), /*pos=*/2,
592
+ PyLong_FromLongLong(interval.nanoseconds));
593
+ return tuple.detach();
594
+ }
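+
+ // For example, an interval with months == 1, days == 15, nanoseconds == 30
+ // becomes the namedtuple pyarrow.MonthDayNano(months=1, days=15, nanoseconds=30).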
595
+
596
+ namespace {
597
+
598
+ // Wrapper around a Python list object that mimics dereference and assignment
599
+ // operations.
600
+ struct PyListAssigner {
601
+ public:
602
+ explicit PyListAssigner(PyObject* list) : list_(list) { DCHECK(PyList_Check(list_)); }
603
+
604
+ PyListAssigner& operator*() { return *this; }
605
+
606
+ void operator=(PyObject* obj) {
607
+ if (ARROW_PREDICT_FALSE(PyList_SetItem(list_, current_index_, obj) == -1)) {
608
+ Py_FatalError("list did not have the correct preallocated size.");
609
+ }
610
+ }
611
+
612
+ PyListAssigner& operator++() {
613
+ current_index_++;
614
+ return *this;
615
+ }
616
+
617
+ PyListAssigner& operator+=(int64_t offset) {
618
+ current_index_ += offset;
619
+ return *this;
620
+ }
621
+
622
+ private:
623
+ PyObject* list_;
624
+ int64_t current_index_ = 0;
625
+ };
626
+
627
+ } // namespace
628
+
629
+ Result<PyObject*> MonthDayNanoIntervalArrayToPyList(
630
+ const MonthDayNanoIntervalArray& array) {
631
+ OwnedRef out_list(PyList_New(array.length()));
632
+ RETURN_IF_PYERROR();
633
+ PyListAssigner out_objects(out_list.obj());
634
+ auto& interval_array =
635
+ arrow::internal::checked_cast<const MonthDayNanoIntervalArray&>(array);
636
+ RETURN_NOT_OK(internal::WriteArrayObjects(
637
+ interval_array,
638
+ [&](const MonthDayNanoIntervalType::MonthDayNanos& interval, PyListAssigner& out) {
639
+ PyObject* tuple = internal::MonthDayNanoIntervalToNamedTuple(interval);
640
+ if (ARROW_PREDICT_FALSE(tuple == nullptr)) {
641
+ RETURN_IF_PYERROR();
642
+ }
643
+
644
+ *out = tuple;
645
+ return Status::OK();
646
+ },
647
+ out_objects));
648
+ return out_list.detach();
649
+ }
650
+
651
+ Result<PyObject*> MonthDayNanoIntervalScalarToPyObject(
652
+ const MonthDayNanoIntervalScalar& scalar) {
653
+ if (scalar.is_valid) {
654
+ return internal::MonthDayNanoIntervalToNamedTuple(scalar.value);
655
+ } else {
656
+ Py_INCREF(Py_None);
657
+ return Py_None;
658
+ }
659
+ }
660
+
661
+ } // namespace internal
662
+ } // namespace py
663
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/datetime.h ADDED
@@ -0,0 +1,231 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <chrono>
22
+
23
+ #include "arrow/python/platform.h"
24
+ #include "arrow/python/visibility.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/status.h"
27
+ #include "arrow/type.h"
28
+ #include "arrow/type_fwd.h"
29
+ #include "arrow/util/int_util_overflow.h"
30
+ #include "arrow/util/logging.h"
31
+
32
+ // By default, PyDateTimeAPI is a *static* variable. This forces
33
+ // PyDateTime_IMPORT to be called in every C/C++ module using the
34
+ // C datetime API. This is error-prone and potentially costly.
35
+ // Instead, we redefine PyDateTimeAPI to point to a global variable,
36
+ // which is initialized once by calling InitDatetime().
37
+ #ifdef PYPY_VERSION
38
+ #include "datetime.h"
39
+ #else
40
+ #define PyDateTimeAPI ::arrow::py::internal::datetime_api
41
+ #endif
42
+
43
+ namespace arrow {
44
+ using internal::AddWithOverflow;
45
+ using internal::MultiplyWithOverflow;
46
+ namespace py {
47
+ namespace internal {
48
+
49
+ #ifndef PYPY_VERSION
50
+ extern PyDateTime_CAPI* datetime_api;
51
+
52
+ ARROW_PYTHON_EXPORT
53
+ void InitDatetime();
54
+ #endif
55
+
56
+ // Returns the MonthDayNano namedtuple type (increments the reference count).
57
+ ARROW_PYTHON_EXPORT
58
+ PyObject* NewMonthDayNanoTupleType();
59
+
60
+ ARROW_PYTHON_EXPORT
61
+ inline int64_t PyTime_to_us(PyObject* pytime) {
62
+ return (PyDateTime_TIME_GET_HOUR(pytime) * 3600000000LL +
63
+ PyDateTime_TIME_GET_MINUTE(pytime) * 60000000LL +
64
+ PyDateTime_TIME_GET_SECOND(pytime) * 1000000LL +
65
+ PyDateTime_TIME_GET_MICROSECOND(pytime));
66
+ }
67
+
68
+ ARROW_PYTHON_EXPORT
69
+ inline int64_t PyTime_to_s(PyObject* pytime) { return PyTime_to_us(pytime) / 1000000; }
70
+
71
+ ARROW_PYTHON_EXPORT
72
+ inline int64_t PyTime_to_ms(PyObject* pytime) { return PyTime_to_us(pytime) / 1000; }
73
+
74
+ ARROW_PYTHON_EXPORT
75
+ inline int64_t PyTime_to_ns(PyObject* pytime) { return PyTime_to_us(pytime) * 1000; }
76
+
77
+ ARROW_PYTHON_EXPORT
78
+ Status PyTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out);
79
+
80
+ ARROW_PYTHON_EXPORT
81
+ Status PyDate_from_int(int64_t val, const DateUnit unit, PyObject** out);
82
+
83
+ // WARNING: This function returns a naive datetime.
84
+ ARROW_PYTHON_EXPORT
85
+ Status PyDateTime_from_int(int64_t val, const TimeUnit::type unit, PyObject** out);
86
+
87
+ // This declaration must be the same as in filesystem/filesystem.h
88
+ using TimePoint =
89
+ std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds>;
90
+
91
+ ARROW_PYTHON_EXPORT
92
+ int64_t PyDate_to_days(PyDateTime_Date* pydate);
93
+
94
+ ARROW_PYTHON_EXPORT
95
+ inline int64_t PyDate_to_s(PyDateTime_Date* pydate) {
96
+ return PyDate_to_days(pydate) * 86400LL;
97
+ }
98
+
99
+ ARROW_PYTHON_EXPORT
100
+ inline int64_t PyDate_to_ms(PyDateTime_Date* pydate) {
101
+ return PyDate_to_days(pydate) * 86400000LL;
102
+ }
103
+
104
+ ARROW_PYTHON_EXPORT
105
+ inline int64_t PyDateTime_to_s(PyDateTime_DateTime* pydatetime) {
106
+ return (PyDate_to_s(reinterpret_cast<PyDateTime_Date*>(pydatetime)) +
107
+ PyDateTime_DATE_GET_HOUR(pydatetime) * 3600LL +
108
+ PyDateTime_DATE_GET_MINUTE(pydatetime) * 60LL +
109
+ PyDateTime_DATE_GET_SECOND(pydatetime));
110
+ }
111
+
112
+ ARROW_PYTHON_EXPORT
113
+ inline int64_t PyDateTime_to_ms(PyDateTime_DateTime* pydatetime) {
114
+ return (PyDateTime_to_s(pydatetime) * 1000LL +
115
+ PyDateTime_DATE_GET_MICROSECOND(pydatetime) / 1000);
116
+ }
117
+
118
+ ARROW_PYTHON_EXPORT
119
+ inline int64_t PyDateTime_to_us(PyDateTime_DateTime* pydatetime) {
120
+ return (PyDateTime_to_s(pydatetime) * 1000000LL +
121
+ PyDateTime_DATE_GET_MICROSECOND(pydatetime));
122
+ }
123
+
124
+ ARROW_PYTHON_EXPORT
125
+ inline int64_t PyDateTime_to_ns(PyDateTime_DateTime* pydatetime) {
126
+ return PyDateTime_to_us(pydatetime) * 1000LL;
127
+ }
128
+
129
+ ARROW_PYTHON_EXPORT
130
+ inline TimePoint PyDateTime_to_TimePoint(PyDateTime_DateTime* pydatetime) {
131
+ return TimePoint(TimePoint::duration(PyDateTime_to_ns(pydatetime)));
132
+ }
133
+
134
+ ARROW_PYTHON_EXPORT
135
+ inline int64_t TimePoint_to_ns(TimePoint val) { return val.time_since_epoch().count(); }
136
+
137
+ ARROW_PYTHON_EXPORT
138
+ inline TimePoint TimePoint_from_s(double val) {
139
+ return TimePoint(TimePoint::duration(static_cast<int64_t>(1e9 * val)));
140
+ }
141
+
142
+ ARROW_PYTHON_EXPORT
143
+ inline TimePoint TimePoint_from_ns(int64_t val) {
144
+ return TimePoint(TimePoint::duration(val));
145
+ }
146
+
147
+ ARROW_PYTHON_EXPORT
148
+ inline int64_t PyDelta_to_s(PyDateTime_Delta* pytimedelta) {
149
+ return (PyDateTime_DELTA_GET_DAYS(pytimedelta) * 86400LL +
150
+ PyDateTime_DELTA_GET_SECONDS(pytimedelta));
151
+ }
152
+
153
+ ARROW_PYTHON_EXPORT
154
+ inline int64_t PyDelta_to_ms(PyDateTime_Delta* pytimedelta) {
155
+ return (PyDelta_to_s(pytimedelta) * 1000LL +
156
+ PyDateTime_DELTA_GET_MICROSECONDS(pytimedelta) / 1000);
157
+ }
158
+
159
+ ARROW_PYTHON_EXPORT
160
+ inline Result<int64_t> PyDelta_to_us(PyDateTime_Delta* pytimedelta) {
161
+ int64_t result = PyDelta_to_s(pytimedelta);
162
+ if (MultiplyWithOverflow(result, 1000000LL, &result)) {
163
+ return Status::Invalid("Timedelta too large to fit in 64-bit integer");
164
+ }
165
+ if (AddWithOverflow(result, PyDateTime_DELTA_GET_MICROSECONDS(pytimedelta), &result)) {
166
+ return Status::Invalid("Timedelta too large to fit in 64-bit integer");
167
+ }
168
+ return result;
169
+ }
170
+
171
+ ARROW_PYTHON_EXPORT
172
+ inline Result<int64_t> PyDelta_to_ns(PyDateTime_Delta* pytimedelta) {
173
+ ARROW_ASSIGN_OR_RAISE(int64_t result, PyDelta_to_us(pytimedelta));
174
+ if (MultiplyWithOverflow(result, 1000LL, &result)) {
175
+ return Status::Invalid("Timedelta too large to fit in 64-bit integer");
176
+ }
177
+ return result;
178
+ }
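+
+ // For example, datetime.timedelta(days=150000) is roughly 1.3e19 nanoseconds,
+ // which exceeds INT64_MAX (about 9.2e18), so PyDelta_to_ns returns Invalid
+ // instead of silently wrapping.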
179
+
180
+ ARROW_PYTHON_EXPORT
181
+ Result<int64_t> PyDateTime_utcoffset_s(PyObject* pydatetime);
182
+
183
+ /// \brief Convert a time zone name into a time zone object.
184
+ ///
185
+ /// Supported input strings are:
186
+ /// * As used in the Olson time zone database (the "tz database" or
187
+ /// "tzdata"), such as "America/New_York"
188
+ /// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30
189
+ ///
+ /// GIL must be held when calling this method.
190
+ ARROW_PYTHON_EXPORT
191
+ Result<PyObject*> StringToTzinfo(const std::string& tz);
192
+
193
+ /// \brief Convert a time zone object to a string representation.
194
+ ///
195
+ /// The output strings are:
196
+ /// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30
197
+ /// if the input object is either an instance of pytz._FixedOffset or
198
+ /// datetime.timedelta
199
+ /// * The timezone's name if the input object's tzname() method returns a
200
+ /// non-empty timezone name such as "UTC" or "America/New_York"
201
+ ///
202
+ /// GIL must be held when calling this method.
203
+ ARROW_PYTHON_EXPORT
204
+ Result<std::string> TzinfoToString(PyObject* pytzinfo);
205
+
206
+ /// \brief Convert MonthDayNano to a python namedtuple.
207
+ ///
208
+ /// Return a named tuple (pyarrow.MonthDayNano) containing attributes
209
+ /// "months", "days", "nanoseconds" in the given order
210
+ /// with values extracted from the fields on interval.
211
+ ///
212
+ /// GIL must be held when calling this method.
213
+ ARROW_PYTHON_EXPORT
214
+ PyObject* MonthDayNanoIntervalToNamedTuple(
215
+ const MonthDayNanoIntervalType::MonthDayNanos& interval);
216
+
217
+ /// \brief Convert the given Array to a PyList object containing
218
+ /// pyarrow.MonthDayNano objects.
219
+ ARROW_PYTHON_EXPORT
220
+ Result<PyObject*> MonthDayNanoIntervalArrayToPyList(
221
+ const MonthDayNanoIntervalArray& array);
222
+
223
+ /// \brief Convert the Scalar object to a pyarrow.MonthDayNano (or None if
224
+ /// it isn't valid).
225
+ ARROW_PYTHON_EXPORT
226
+ Result<PyObject*> MonthDayNanoIntervalScalarToPyObject(
227
+ const MonthDayNanoIntervalScalar& scalar);
228
+
229
+ } // namespace internal
230
+ } // namespace py
231
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/decimal.cc ADDED
@@ -0,0 +1,246 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include <algorithm>
19
+ #include <limits>
20
+
21
+ #include "arrow/python/common.h"
22
+ #include "arrow/python/decimal.h"
23
+ #include "arrow/python/helpers.h"
24
+ #include "arrow/type_fwd.h"
25
+ #include "arrow/util/decimal.h"
26
+ #include "arrow/util/logging.h"
27
+
28
+ namespace arrow {
29
+ namespace py {
30
+ namespace internal {
31
+
32
+ Status ImportDecimalType(OwnedRef* decimal_type) {
33
+ OwnedRef decimal_module;
34
+ RETURN_NOT_OK(ImportModule("decimal", &decimal_module));
35
+ RETURN_NOT_OK(ImportFromModule(decimal_module.obj(), "Decimal", decimal_type));
36
+ return Status::OK();
37
+ }
38
+
39
+ Status PythonDecimalToString(PyObject* python_decimal, std::string* out) {
40
+ // Call Python's str(decimal_object)
41
+ return PyObject_StdStringStr(python_decimal, out);
42
+ }
43
+
44
+ // \brief Infer the precision and scale of a Python decimal.Decimal instance
45
+ // \param python_decimal[in] An instance of decimal.Decimal
46
+ // \param precision[out] The value of the inferred precision
47
+ // \param scale[out] The value of the inferred scale
48
+ // \return The status of the operation
49
+ static Status InferDecimalPrecisionAndScale(PyObject* python_decimal, int32_t* precision,
50
+ int32_t* scale) {
51
+ DCHECK_NE(python_decimal, NULLPTR);
52
+ DCHECK_NE(precision, NULLPTR);
53
+ DCHECK_NE(scale, NULLPTR);
54
+
55
+ // TODO(phillipc): Make sure we perform PyDecimal_Check(python_decimal) as a DCHECK
56
+ OwnedRef as_tuple(PyObject_CallMethod(python_decimal, const_cast<char*>("as_tuple"),
57
+ const_cast<char*>("")));
58
+ RETURN_IF_PYERROR();
59
+ DCHECK(PyTuple_Check(as_tuple.obj()));
60
+
61
+ OwnedRef digits(PyObject_GetAttrString(as_tuple.obj(), "digits"));
62
+ RETURN_IF_PYERROR();
63
+ DCHECK(PyTuple_Check(digits.obj()));
64
+
65
+ const auto num_digits = static_cast<int32_t>(PyTuple_Size(digits.obj()));
66
+ RETURN_IF_PYERROR();
67
+
68
+ OwnedRef py_exponent(PyObject_GetAttrString(as_tuple.obj(), "exponent"));
69
+ RETURN_IF_PYERROR();
70
+ DCHECK(IsPyInteger(py_exponent.obj()));
71
+
72
+ const auto exponent = static_cast<int32_t>(PyLong_AsLong(py_exponent.obj()));
73
+ RETURN_IF_PYERROR();
74
+
75
+ if (exponent < 0) {
76
+ // If exponent > num_digits, we have a number with leading zeros
77
+ // such as 0.01234. Ensure we have enough precision for leading zeros
78
+ // (which are not included in num_digits).
79
+ *precision = std::max(num_digits, -exponent);
80
+ *scale = -exponent;
81
+ } else {
82
+ // Trailing zeros are not included in num_digits, need to add to precision.
83
+ // Note we don't generate negative scales as they are poorly supported
84
+ // in non-Arrow systems.
85
+ *precision = num_digits + exponent;
86
+ *scale = 0;
87
+ }
88
+ return Status::OK();
89
+ }
90
+
91
+ PyObject* DecimalFromString(PyObject* decimal_constructor,
92
+ const std::string& decimal_string) {
93
+ DCHECK_NE(decimal_constructor, nullptr);
94
+
95
+ auto string_size = decimal_string.size();
96
+ DCHECK_GT(string_size, 0);
97
+
98
+ auto string_bytes = decimal_string.c_str();
99
+ DCHECK_NE(string_bytes, nullptr);
100
+
101
+ return PyObject_CallFunction(decimal_constructor, const_cast<char*>("s#"), string_bytes,
102
+ static_cast<Py_ssize_t>(string_size));
103
+ }
104
+
105
+ namespace {
106
+
107
+ template <typename ArrowDecimal>
108
+ Status DecimalFromStdString(const std::string& decimal_string,
109
+ const DecimalType& arrow_type, ArrowDecimal* out) {
110
+ int32_t inferred_precision;
111
+ int32_t inferred_scale;
112
+
113
+ RETURN_NOT_OK(ArrowDecimal::FromString(decimal_string, out, &inferred_precision,
114
+ &inferred_scale));
115
+
116
+ const int32_t precision = arrow_type.precision();
117
+ const int32_t scale = arrow_type.scale();
118
+
119
+ if (scale != inferred_scale) {
120
+ DCHECK_NE(out, NULLPTR);
121
+ ARROW_ASSIGN_OR_RAISE(*out, out->Rescale(inferred_scale, scale));
122
+ }
123
+
124
+ auto inferred_scale_delta = inferred_scale - scale;
125
+ if (ARROW_PREDICT_FALSE((inferred_precision - inferred_scale_delta) > precision)) {
126
+ return Status::Invalid(
127
+ "Decimal type with precision ", inferred_precision,
128
+ " does not fit into precision inferred from first array element: ", precision);
129
+ }
130
+
131
+ return Status::OK();
132
+ }
133
+
134
+ template <typename ArrowDecimal>
135
+ Status InternalDecimalFromPythonDecimal(PyObject* python_decimal,
136
+ const DecimalType& arrow_type,
137
+ ArrowDecimal* out) {
138
+ DCHECK_NE(python_decimal, NULLPTR);
139
+ DCHECK_NE(out, NULLPTR);
140
+
141
+ std::string string;
142
+ RETURN_NOT_OK(PythonDecimalToString(python_decimal, &string));
143
+ return DecimalFromStdString(string, arrow_type, out);
144
+ }
145
+
146
+ template <typename ArrowDecimal>
147
+ Status InternalDecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type,
148
+ ArrowDecimal* out) {
149
+ DCHECK_NE(obj, NULLPTR);
150
+ DCHECK_NE(out, NULLPTR);
151
+
152
+ if (IsPyInteger(obj)) {
153
+ // TODO: add a fast path for small-ish ints
154
+ std::string string;
155
+ RETURN_NOT_OK(PyObject_StdStringStr(obj, &string));
156
+ return DecimalFromStdString(string, arrow_type, out);
157
+ } else if (PyDecimal_Check(obj)) {
158
+ return InternalDecimalFromPythonDecimal<ArrowDecimal>(obj, arrow_type, out);
159
+ } else {
160
+ return Status::TypeError("int or Decimal object expected, got ",
161
+ Py_TYPE(obj)->tp_name);
162
+ }
163
+ }
164
+
165
+ } // namespace
166
+
167
+ Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
168
+ Decimal128* out) {
169
+ return InternalDecimalFromPythonDecimal(python_decimal, arrow_type, out);
170
+ }
171
+
172
+ Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type,
173
+ Decimal128* out) {
174
+ return InternalDecimalFromPyObject(obj, arrow_type, out);
175
+ }
176
+
177
+ Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
178
+ Decimal256* out) {
179
+ return InternalDecimalFromPythonDecimal(python_decimal, arrow_type, out);
180
+ }
181
+
182
+ Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type,
183
+ Decimal256* out) {
184
+ return InternalDecimalFromPyObject(obj, arrow_type, out);
185
+ }
186
+
187
+ bool PyDecimal_Check(PyObject* obj) {
188
+ static OwnedRef decimal_type;
189
+ if (!decimal_type.obj()) {
190
+ ARROW_CHECK_OK(ImportDecimalType(&decimal_type));
191
+ DCHECK(PyType_Check(decimal_type.obj()));
192
+ }
193
+ // PyObject_IsInstance() is slower as it has to check for virtual subclasses
194
+ const int result =
195
+ PyType_IsSubtype(Py_TYPE(obj), reinterpret_cast<PyTypeObject*>(decimal_type.obj()));
196
+ ARROW_CHECK_NE(result, -1) << " error during PyType_IsSubtype check";
197
+ return result == 1;
198
+ }
199
+
200
+ bool PyDecimal_ISNAN(PyObject* obj) {
201
+ DCHECK(PyDecimal_Check(obj)) << "obj is not an instance of decimal.Decimal";
202
+ OwnedRef is_nan(
203
+ PyObject_CallMethod(obj, const_cast<char*>("is_nan"), const_cast<char*>("")));
204
+ return PyObject_IsTrue(is_nan.obj()) == 1;
205
+ }
206
+
207
+ DecimalMetadata::DecimalMetadata()
208
+ : DecimalMetadata(std::numeric_limits<int32_t>::min(),
209
+ std::numeric_limits<int32_t>::min()) {}
210
+
211
+ DecimalMetadata::DecimalMetadata(int32_t precision, int32_t scale)
212
+ : precision_(precision), scale_(scale) {}
213
+
214
+ Status DecimalMetadata::Update(int32_t suggested_precision, int32_t suggested_scale) {
215
+ const int32_t current_scale = scale_;
216
+ scale_ = std::max(current_scale, suggested_scale);
217
+
218
+ const int32_t current_precision = precision_;
219
+
220
+ if (current_precision == std::numeric_limits<int32_t>::min()) {
221
+ precision_ = suggested_precision;
222
+ } else {
223
+ auto num_digits = std::max(current_precision - current_scale,
224
+ suggested_precision - suggested_scale);
225
+ precision_ = std::max(num_digits + scale_, current_precision);
226
+ }
227
+
228
+ return Status::OK();
229
+ }
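+
+ // For example, Update(3, 2) on fresh metadata yields precision 3, scale 2;
+ // a subsequent Update(5, 0) widens this to precision 7, scale 2, which is
+ // enough to hold both 9.99 and 99999 exactly.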
230
+
231
+ Status DecimalMetadata::Update(PyObject* object) {
232
+ bool is_decimal = PyDecimal_Check(object);
233
+
234
+ if (ARROW_PREDICT_FALSE(!is_decimal || PyDecimal_ISNAN(object))) {
235
+ return Status::OK();
236
+ }
237
+
238
+ int32_t precision = 0;
239
+ int32_t scale = 0;
240
+ RETURN_NOT_OK(InferDecimalPrecisionAndScale(object, &precision, &scale));
241
+ return Update(precision, scale);
242
+ }
243
+
244
+ } // namespace internal
245
+ } // namespace py
246
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/deserialize.cc ADDED
@@ -0,0 +1,495 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include "arrow/python/deserialize.h"
19
+
20
+ #include "arrow/python/numpy_interop.h"
21
+
22
+ #include <cstdint>
23
+ #include <memory>
24
+ #include <string>
25
+ #include <utility>
26
+ #include <vector>
27
+
28
+ #include <numpy/arrayobject.h>
29
+ #include <numpy/arrayscalars.h>
30
+
31
+ #include "arrow/array.h"
32
+ #include "arrow/io/interfaces.h"
33
+ #include "arrow/io/memory.h"
34
+ #include "arrow/ipc/options.h"
35
+ #include "arrow/ipc/reader.h"
36
+ #include "arrow/ipc/util.h"
37
+ #include "arrow/ipc/writer.h"
38
+ #include "arrow/table.h"
39
+ #include "arrow/util/checked_cast.h"
40
+ #include "arrow/util/logging.h"
41
+ #include "arrow/util/value_parsing.h"
42
+
43
+ #include "arrow/python/common.h"
44
+ #include "arrow/python/datetime.h"
45
+ #include "arrow/python/helpers.h"
46
+ #include "arrow/python/numpy_convert.h"
47
+ #include "arrow/python/pyarrow.h"
48
+ #include "arrow/python/serialize.h"
49
+
50
+ namespace arrow {
51
+
52
+ using internal::checked_cast;
53
+ using internal::ParseValue;
54
+
55
+ namespace py {
56
+
57
+ Status CallDeserializeCallback(PyObject* context, PyObject* value,
58
+ PyObject** deserialized_object);
59
+
60
+ Status DeserializeTuple(PyObject* context, const Array& array, int64_t start_idx,
61
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
62
+ PyObject** out);
63
+
64
+ Status DeserializeList(PyObject* context, const Array& array, int64_t start_idx,
65
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
66
+ PyObject** out);
67
+
68
+ Status DeserializeSet(PyObject* context, const Array& array, int64_t start_idx,
69
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
70
+ PyObject** out);
71
+
72
+ Status DeserializeDict(PyObject* context, const Array& array, int64_t start_idx,
73
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
74
+ PyObject** out) {
75
+ const auto& data = checked_cast<const StructArray&>(array);
76
+ OwnedRef keys, vals;
77
+ OwnedRef result(PyDict_New());
78
+ RETURN_IF_PYERROR();
79
+
80
+ DCHECK_EQ(2, data.num_fields());
81
+
82
+ RETURN_NOT_OK(DeserializeList(context, *data.field(0), start_idx, stop_idx, base, blobs,
83
+ keys.ref()));
84
+ RETURN_NOT_OK(DeserializeList(context, *data.field(1), start_idx, stop_idx, base, blobs,
85
+ vals.ref()));
86
+ for (int64_t i = start_idx; i < stop_idx; ++i) {
87
+ // PyDict_SetItem behaves differently from PyList_SetItem and PyTuple_SetItem.
88
+ // The latter two steal references whereas PyDict_SetItem does not. So we need
89
+ // to make sure the reference count is decremented by letting the OwnedRef
90
+ // go out of scope at the end.
91
+ int ret = PyDict_SetItem(result.obj(), PyList_GET_ITEM(keys.obj(), i - start_idx),
92
+ PyList_GET_ITEM(vals.obj(), i - start_idx));
93
+ if (ret != 0) {
94
+ return ConvertPyError();
95
+ }
96
+ }
97
+ static PyObject* py_type = PyUnicode_FromString("_pytype_");
98
+ if (PyDict_Contains(result.obj(), py_type)) {
99
+ RETURN_NOT_OK(CallDeserializeCallback(context, result.obj(), out));
100
+ } else {
101
+ *out = result.detach();
102
+ }
103
+ return Status::OK();
104
+ }
105
+
106
+ Status DeserializeArray(int32_t index, PyObject* base, const SerializedPyObject& blobs,
107
+ PyObject** out) {
108
+ RETURN_NOT_OK(py::TensorToNdarray(blobs.ndarrays[index], base, out));
109
+ // Mark the array as immutable
110
+ OwnedRef flags(PyObject_GetAttrString(*out, "flags"));
111
+ if (flags.obj() == NULL) {
112
+ return ConvertPyError();
113
+ }
114
+ if (PyObject_SetAttrString(flags.obj(), "writeable", Py_False) < 0) {
115
+ return ConvertPyError();
116
+ }
117
+ return Status::OK();
118
+ }
119
+
120
+ Status GetValue(PyObject* context, const Array& arr, int64_t index, int8_t type,
121
+ PyObject* base, const SerializedPyObject& blobs, PyObject** result) {
122
+ switch (type) {
123
+ case PythonType::NONE:
124
+ Py_INCREF(Py_None);
125
+ *result = Py_None;
126
+ return Status::OK();
127
+ case PythonType::BOOL:
128
+ *result = PyBool_FromLong(checked_cast<const BooleanArray&>(arr).Value(index));
129
+ return Status::OK();
130
+ case PythonType::PY2INT:
131
+ case PythonType::INT: {
132
+ *result = PyLong_FromSsize_t(checked_cast<const Int64Array&>(arr).Value(index));
133
+ return Status::OK();
134
+ }
135
+ case PythonType::BYTES: {
136
+ auto view = checked_cast<const BinaryArray&>(arr).GetView(index);
137
+ *result = PyBytes_FromStringAndSize(view.data(), view.length());
138
+ return CheckPyError();
139
+ }
140
+ case PythonType::STRING: {
141
+ auto view = checked_cast<const StringArray&>(arr).GetView(index);
142
+ *result = PyUnicode_FromStringAndSize(view.data(), view.length());
143
+ return CheckPyError();
144
+ }
145
+ case PythonType::HALF_FLOAT: {
146
+ *result = PyHalf_FromHalf(checked_cast<const HalfFloatArray&>(arr).Value(index));
147
+ RETURN_IF_PYERROR();
148
+ return Status::OK();
149
+ }
150
+ case PythonType::FLOAT:
151
+ *result = PyFloat_FromDouble(checked_cast<const FloatArray&>(arr).Value(index));
152
+ return Status::OK();
153
+ case PythonType::DOUBLE:
154
+ *result = PyFloat_FromDouble(checked_cast<const DoubleArray&>(arr).Value(index));
155
+ return Status::OK();
156
+ case PythonType::DATE64: {
157
+ RETURN_NOT_OK(internal::PyDateTime_from_int(
158
+ checked_cast<const Date64Array&>(arr).Value(index), TimeUnit::MICRO, result));
159
+ RETURN_IF_PYERROR();
160
+ return Status::OK();
161
+ }
162
+ case PythonType::LIST: {
163
+ const auto& l = checked_cast<const ListArray&>(arr);
164
+ return DeserializeList(context, *l.values(), l.value_offset(index),
165
+ l.value_offset(index + 1), base, blobs, result);
166
+ }
167
+ case PythonType::DICT: {
168
+ const auto& l = checked_cast<const ListArray&>(arr);
169
+ return DeserializeDict(context, *l.values(), l.value_offset(index),
170
+ l.value_offset(index + 1), base, blobs, result);
171
+ }
172
+ case PythonType::TUPLE: {
173
+ const auto& l = checked_cast<const ListArray&>(arr);
174
+ return DeserializeTuple(context, *l.values(), l.value_offset(index),
175
+ l.value_offset(index + 1), base, blobs, result);
176
+ }
177
+ case PythonType::SET: {
178
+ const auto& l = checked_cast<const ListArray&>(arr);
179
+ return DeserializeSet(context, *l.values(), l.value_offset(index),
180
+ l.value_offset(index + 1), base, blobs, result);
181
+ }
182
+ case PythonType::TENSOR: {
183
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
184
+ *result = wrap_tensor(blobs.tensors[ref]);
185
+ return Status::OK();
186
+ }
187
+ case PythonType::SPARSECOOTENSOR: {
188
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
189
+ const std::shared_ptr<SparseCOOTensor>& sparse_coo_tensor =
190
+ arrow::internal::checked_pointer_cast<SparseCOOTensor>(
191
+ blobs.sparse_tensors[ref]);
192
+ *result = wrap_sparse_coo_tensor(sparse_coo_tensor);
193
+ return Status::OK();
194
+ }
195
+ case PythonType::SPARSECSRMATRIX: {
196
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
197
+ const std::shared_ptr<SparseCSRMatrix>& sparse_csr_matrix =
198
+ arrow::internal::checked_pointer_cast<SparseCSRMatrix>(
199
+ blobs.sparse_tensors[ref]);
200
+ *result = wrap_sparse_csr_matrix(sparse_csr_matrix);
201
+ return Status::OK();
202
+ }
203
+ case PythonType::SPARSECSCMATRIX: {
204
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
205
+ const std::shared_ptr<SparseCSCMatrix>& sparse_csc_matrix =
206
+ arrow::internal::checked_pointer_cast<SparseCSCMatrix>(
207
+ blobs.sparse_tensors[ref]);
208
+ *result = wrap_sparse_csc_matrix(sparse_csc_matrix);
209
+ return Status::OK();
210
+ }
211
+ case PythonType::SPARSECSFTENSOR: {
212
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
213
+ const std::shared_ptr<SparseCSFTensor>& sparse_csf_tensor =
214
+ arrow::internal::checked_pointer_cast<SparseCSFTensor>(
215
+ blobs.sparse_tensors[ref]);
216
+ *result = wrap_sparse_csf_tensor(sparse_csf_tensor);
217
+ return Status::OK();
218
+ }
219
+ case PythonType::NDARRAY: {
220
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
221
+ return DeserializeArray(ref, base, blobs, result);
222
+ }
223
+ case PythonType::BUFFER: {
224
+ int32_t ref = checked_cast<const Int32Array&>(arr).Value(index);
225
+ *result = wrap_buffer(blobs.buffers[ref]);
226
+ return Status::OK();
227
+ }
228
+ default: {
229
+ ARROW_CHECK(false) << "union tag " << type << "' not recognized";
230
+ }
231
+ }
232
+ return Status::OK();
233
+ }
234
+
235
+ Status GetPythonTypes(const UnionArray& data, std::vector<int8_t>* result) {
236
+ ARROW_CHECK(result != nullptr);
237
+ auto type = data.type();
238
+ for (int i = 0; i < type->num_fields(); ++i) {
239
+ int8_t tag = 0;
240
+ const std::string& data = type->field(i)->name();
241
+ if (!ParseValue<Int8Type>(data.c_str(), data.size(), &tag)) {
242
+ return Status::SerializationError("Cannot convert string: \"",
243
+ type->field(i)->name(), "\" to int8_t");
244
+ }
245
+ result->push_back(tag);
246
+ }
247
+ return Status::OK();
248
+ }
249
+
250
+ template <typename CreateSequenceFn, typename SetItemFn>
251
+ Status DeserializeSequence(PyObject* context, const Array& array, int64_t start_idx,
252
+ int64_t stop_idx, PyObject* base,
253
+ const SerializedPyObject& blobs,
254
+ CreateSequenceFn&& create_sequence, SetItemFn&& set_item,
255
+ PyObject** out) {
256
+ const auto& data = checked_cast<const DenseUnionArray&>(array);
257
+ OwnedRef result(create_sequence(stop_idx - start_idx));
258
+ RETURN_IF_PYERROR();
259
+ const int8_t* type_codes = data.raw_type_codes();
260
+ const int32_t* value_offsets = data.raw_value_offsets();
261
+ std::vector<int8_t> python_types;
262
+ RETURN_NOT_OK(GetPythonTypes(data, &python_types));
263
+ for (int64_t i = start_idx; i < stop_idx; ++i) {
264
+ const int64_t offset = value_offsets[i];
265
+ const uint8_t type = type_codes[i];
266
+ PyObject* value;
267
+ RETURN_NOT_OK(GetValue(context, *data.field(type), offset, python_types[type], base,
268
+ blobs, &value));
269
+ RETURN_NOT_OK(set_item(result.obj(), i - start_idx, value));
270
+ }
271
+ *out = result.detach();
272
+ return Status::OK();
273
+ }
274
+
275
+ Status DeserializeList(PyObject* context, const Array& array, int64_t start_idx,
276
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
277
+ PyObject** out) {
278
+ return DeserializeSequence(
279
+ context, array, start_idx, stop_idx, base, blobs,
280
+ [](int64_t size) { return PyList_New(size); },
281
+ [](PyObject* seq, int64_t index, PyObject* item) {
282
+ PyList_SET_ITEM(seq, index, item);
283
+ return Status::OK();
284
+ },
285
+ out);
286
+ }
287
+
288
+ Status DeserializeTuple(PyObject* context, const Array& array, int64_t start_idx,
289
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
290
+ PyObject** out) {
291
+ return DeserializeSequence(
292
+ context, array, start_idx, stop_idx, base, blobs,
293
+ [](int64_t size) { return PyTuple_New(size); },
294
+ [](PyObject* seq, int64_t index, PyObject* item) {
295
+ PyTuple_SET_ITEM(seq, index, item);
296
+ return Status::OK();
297
+ },
298
+ out);
299
+ }
300
+
301
+ Status DeserializeSet(PyObject* context, const Array& array, int64_t start_idx,
302
+ int64_t stop_idx, PyObject* base, const SerializedPyObject& blobs,
303
+ PyObject** out) {
304
+ return DeserializeSequence(
305
+ context, array, start_idx, stop_idx, base, blobs,
306
+ [](int64_t size) { return PySet_New(nullptr); },
307
+ [](PyObject* seq, int64_t index, PyObject* item) {
308
+ int err = PySet_Add(seq, item);
309
+ Py_DECREF(item);
310
+ if (err < 0) {
311
+ RETURN_IF_PYERROR();
312
+ }
313
+ return Status::OK();
314
+ },
315
+ out);
316
+ }
317
+
318
+ Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out) {
319
+ int32_t num_tensors;
320
+ int32_t num_sparse_tensors;
321
+ int32_t num_ndarrays;
322
+ int32_t num_buffers;
323
+
324
+ // Read number of tensors
325
+ RETURN_NOT_OK(src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_tensors)));
326
+ RETURN_NOT_OK(
327
+ src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_sparse_tensors)));
328
+ RETURN_NOT_OK(src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_ndarrays)));
329
+ RETURN_NOT_OK(src->Read(sizeof(int32_t), reinterpret_cast<uint8_t*>(&num_buffers)));
330
+
331
+ // Align stream to 8-byte offset
332
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kArrowIpcAlignment));
333
+ std::shared_ptr<RecordBatchReader> reader;
334
+ ARROW_ASSIGN_OR_RAISE(reader, ipc::RecordBatchStreamReader::Open(src));
335
+ RETURN_NOT_OK(reader->ReadNext(&out->batch));
336
+
337
+ /// Skip EOS marker
338
+ RETURN_NOT_OK(src->Advance(4));
339
+
340
+ /// Align stream so tensor bodies are 64-byte aligned
341
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
342
+
343
+ for (int i = 0; i < num_tensors; ++i) {
344
+ std::shared_ptr<Tensor> tensor;
345
+ ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(src));
346
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
347
+ out->tensors.push_back(tensor);
348
+ }
349
+
350
+ for (int i = 0; i < num_sparse_tensors; ++i) {
351
+ std::shared_ptr<SparseTensor> sparse_tensor;
352
+ ARROW_ASSIGN_OR_RAISE(sparse_tensor, ipc::ReadSparseTensor(src));
353
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
354
+ out->sparse_tensors.push_back(sparse_tensor);
355
+ }
356
+
357
+ for (int i = 0; i < num_ndarrays; ++i) {
358
+ std::shared_ptr<Tensor> ndarray;
359
+ ARROW_ASSIGN_OR_RAISE(ndarray, ipc::ReadTensor(src));
360
+ RETURN_NOT_OK(ipc::AlignStream(src, ipc::kTensorAlignment));
361
+ out->ndarrays.push_back(ndarray);
362
+ }
363
+
364
+ ARROW_ASSIGN_OR_RAISE(int64_t offset, src->Tell());
365
+ for (int i = 0; i < num_buffers; ++i) {
366
+ int64_t size;
367
+ RETURN_NOT_OK(src->ReadAt(offset, sizeof(int64_t), &size));
368
+ offset += sizeof(int64_t);
369
+ ARROW_ASSIGN_OR_RAISE(auto buffer, src->ReadAt(offset, size));
370
+ out->buffers.push_back(buffer);
371
+ offset += size;
372
+ }
373
+
374
+ return Status::OK();
375
+ }
376
+
377
+ Status DeserializeObject(PyObject* context, const SerializedPyObject& obj, PyObject* base,
378
+ PyObject** out) {
379
+ PyAcquireGIL lock;
380
+ return DeserializeList(context, *obj.batch->column(0), 0, obj.batch->num_rows(), base,
381
+ obj, out);
382
+ }
383
+
384
+ Status GetSerializedFromComponents(int num_tensors,
385
+ const SparseTensorCounts& num_sparse_tensors,
386
+ int num_ndarrays, int num_buffers, PyObject* data,
387
+ SerializedPyObject* out) {
388
+ PyAcquireGIL gil;
389
+ const Py_ssize_t data_length = PyList_Size(data);
390
+ RETURN_IF_PYERROR();
391
+
392
+ const Py_ssize_t expected_data_length = 1 + num_tensors * 2 +
393
+ num_sparse_tensors.num_total_buffers() +
394
+ num_ndarrays * 2 + num_buffers;
395
+ if (data_length != expected_data_length) {
396
+ return Status::Invalid("Invalid number of buffers in data");
397
+ }
398
+
399
+ auto GetBuffer = [&data](Py_ssize_t index, std::shared_ptr<Buffer>* out) {
400
+ ARROW_CHECK_LE(index, PyList_Size(data));
401
+ PyObject* py_buf = PyList_GET_ITEM(data, index);
402
+ return unwrap_buffer(py_buf).Value(out);
403
+ };
404
+
405
+ Py_ssize_t buffer_index = 0;
406
+
407
+ // Read the union batch describing object structure
408
+ {
409
+ std::shared_ptr<Buffer> data_buffer;
410
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &data_buffer));
411
+ gil.release();
412
+ io::BufferReader buf_reader(data_buffer);
413
+ std::shared_ptr<RecordBatchReader> reader;
414
+ ARROW_ASSIGN_OR_RAISE(reader, ipc::RecordBatchStreamReader::Open(&buf_reader));
415
+ RETURN_NOT_OK(reader->ReadNext(&out->batch));
416
+ gil.acquire();
417
+ }
418
+
419
+ // Zero-copy reconstruct tensors
420
+ for (int i = 0; i < num_tensors; ++i) {
421
+ std::shared_ptr<Buffer> metadata;
422
+ std::shared_ptr<Buffer> body;
423
+ std::shared_ptr<Tensor> tensor;
424
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &metadata));
425
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &body));
426
+
427
+ ipc::Message message(metadata, body);
428
+
429
+ ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(message));
430
+ out->tensors.emplace_back(std::move(tensor));
431
+ }
432
+
433
+ // Zero-copy reconstruct sparse tensors
434
+ for (int i = 0, n = num_sparse_tensors.num_total_tensors(); i < n; ++i) {
435
+ ipc::IpcPayload payload;
436
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &payload.metadata));
437
+
438
+ ARROW_ASSIGN_OR_RAISE(
439
+ size_t num_bodies,
440
+ ipc::internal::ReadSparseTensorBodyBufferCount(*payload.metadata));
441
+
442
+ payload.body_buffers.reserve(num_bodies);
443
+ for (size_t i = 0; i < num_bodies; ++i) {
444
+ std::shared_ptr<Buffer> body;
445
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &body));
446
+ payload.body_buffers.emplace_back(body);
447
+ }
448
+
449
+ std::shared_ptr<SparseTensor> sparse_tensor;
450
+ ARROW_ASSIGN_OR_RAISE(sparse_tensor, ipc::internal::ReadSparseTensorPayload(payload));
451
+ out->sparse_tensors.emplace_back(std::move(sparse_tensor));
452
+ }
453
+
454
+ // Zero-copy reconstruct tensors for numpy ndarrays
455
+ for (int i = 0; i < num_ndarrays; ++i) {
456
+ std::shared_ptr<Buffer> metadata;
457
+ std::shared_ptr<Buffer> body;
458
+ std::shared_ptr<Tensor> tensor;
459
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &metadata));
460
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &body));
461
+
462
+ ipc::Message message(metadata, body);
463
+
464
+ ARROW_ASSIGN_OR_RAISE(tensor, ipc::ReadTensor(message));
465
+ out->ndarrays.emplace_back(std::move(tensor));
466
+ }
467
+
468
+ // Unwrap and append buffers
469
+ for (int i = 0; i < num_buffers; ++i) {
470
+ std::shared_ptr<Buffer> buffer;
471
+ RETURN_NOT_OK(GetBuffer(buffer_index++, &buffer));
472
+ out->buffers.emplace_back(std::move(buffer));
473
+ }
474
+
475
+ return Status::OK();
476
+ }
477
+
478
+ Status DeserializeNdarray(const SerializedPyObject& object,
479
+ std::shared_ptr<Tensor>* out) {
480
+ if (object.ndarrays.size() != 1) {
481
+ return Status::Invalid("Object is not an Ndarray");
482
+ }
483
+ *out = object.ndarrays[0];
484
+ return Status::OK();
485
+ }
486
+
487
+ Status NdarrayFromBuffer(std::shared_ptr<Buffer> src, std::shared_ptr<Tensor>* out) {
488
+ io::BufferReader reader(src);
489
+ SerializedPyObject object;
490
+ RETURN_NOT_OK(ReadSerializedObject(&reader, &object));
491
+ return DeserializeNdarray(object, out);
492
+ }
493
+
494
+ } // namespace py
495
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/deserialize.h ADDED
@@ -0,0 +1,106 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <cstdint>
+ #include <memory>
+ #include <vector>
+
+ #include "arrow/python/serialize.h"
+ #include "arrow/python/visibility.h"
+ #include "arrow/status.h"
+
+ namespace arrow {
+
+ class RecordBatch;
+ class Tensor;
+
+ namespace io {
+
+ class RandomAccessFile;
+
+ } // namespace io
+
+ namespace py {
+
+ struct ARROW_PYTHON_EXPORT SparseTensorCounts {
+   int coo;
+   int csr;
+   int csc;
+   int csf;
+   int ndim_csf;
+
+   int num_total_tensors() const { return coo + csr + csc + csf; }
+   int num_total_buffers() const {
+     return coo * 3 + csr * 4 + csc * 4 + 2 * ndim_csf + csf;
+   }
+ };
+
+ /// \brief Read serialized Python sequence from file interface using Arrow IPC
+ /// \param[in] src a RandomAccessFile
+ /// \param[out] out the reconstructed data
+ /// \return Status
+ ARROW_PYTHON_EXPORT
+ Status ReadSerializedObject(io::RandomAccessFile* src, SerializedPyObject* out);
+
+ /// \brief Reconstruct SerializedPyObject from representation produced by
+ /// SerializedPyObject::GetComponents.
+ ///
+ /// \param[in] num_tensors number of tensors in the object
+ /// \param[in] num_sparse_tensors number of sparse tensors in the object
+ /// \param[in] num_ndarrays number of numpy Ndarrays in the object
+ /// \param[in] num_buffers number of buffers in the object
+ /// \param[in] data a list containing pyarrow.Buffer instances. It must be 1 +
+ /// num_tensors * 2 + num_coo_tensors * 3 + num_csr_tensors * 4 + num_csc_tensors * 4 +
+ /// num_csf_tensors * (2 * ndim_csf + 3) + num_buffers in length
+ /// \param[out] out the reconstructed object
+ /// \return Status
+ ARROW_PYTHON_EXPORT
+ Status GetSerializedFromComponents(int num_tensors,
+                                    const SparseTensorCounts& num_sparse_tensors,
+                                    int num_ndarrays, int num_buffers, PyObject* data,
+                                    SerializedPyObject* out);
+
+ /// \brief Reconstruct Python object from Arrow-serialized representation
+ /// \param[in] context Serialization context which contains custom serialization
+ /// and deserialization callbacks. Can be any Python object with a
+ /// _serialize_callback method for serialization and a _deserialize_callback
+ /// method for deserialization. If context is None, no custom serialization
+ /// will be attempted.
+ /// \param[in] object Object to deserialize
+ /// \param[in] base a Python object holding the underlying data that any NumPy
+ /// arrays will reference, to avoid premature deallocation
+ /// \param[out] out The returned object
+ /// \return Status
+ /// This acquires the GIL
+ ARROW_PYTHON_EXPORT
+ Status DeserializeObject(PyObject* context, const SerializedPyObject& object,
+                          PyObject* base, PyObject** out);
+
+ /// \brief Reconstruct Ndarray from Arrow-serialized representation
+ /// \param[in] object Object to deserialize
+ /// \param[out] out The deserialized tensor
+ /// \return Status
+ ARROW_PYTHON_EXPORT
+ Status DeserializeNdarray(const SerializedPyObject& object, std::shared_ptr<Tensor>* out);
+
+ ARROW_PYTHON_EXPORT
+ Status NdarrayFromBuffer(std::shared_ptr<Buffer> src, std::shared_ptr<Tensor>* out);
+
+ } // namespace py
+ } // namespace arrow
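To make the buffer accounting in SparseTensorCounts concrete, here is a small illustrative sketch. The counts are made up, and the reading of ndim_csf as the summed dimensionality of all CSF tensors in the object is an inference from the formula, not something this header states.

#include "arrow/python/deserialize.h"

int ExampleBufferCount() {
  // Hypothetical object: two COO tensors plus one CSF tensor of 3 dimensions.
  arrow::py::SparseTensorCounts counts{/*coo=*/2, /*csr=*/0, /*csc=*/0,
                                       /*csf=*/1, /*ndim_csf=*/3};
  // coo * 3 == 6 buffers, and 2 * ndim_csf + csf == 7 for the CSF tensor,
  // so num_total_buffers() == 13 while num_total_tensors() == 3.
  return counts.num_total_buffers();
}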
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/extension_type.cc ADDED
@@ -0,0 +1,217 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #include <memory>
+ #include <sstream>
+ #include <utility>
+
+ #include "arrow/python/extension_type.h"
+ #include "arrow/python/helpers.h"
+ #include "arrow/python/pyarrow.h"
+ #include "arrow/util/checked_cast.h"
+ #include "arrow/util/logging.h"
+
+ namespace arrow {
+
+ using internal::checked_cast;
+
+ namespace py {
+
+ namespace {
+
+ // Serialize a Python ExtensionType instance
+ Status SerializeExtInstance(PyObject* type_instance, std::string* out) {
+   OwnedRef res(
+       cpp_PyObject_CallMethod(type_instance, "__arrow_ext_serialize__", nullptr));
+   if (!res) {
+     return ConvertPyError();
+   }
+   if (!PyBytes_Check(res.obj())) {
+     return Status::TypeError(
+         "__arrow_ext_serialize__ should return bytes object, "
+         "got ",
+         internal::PyObject_StdStringRepr(res.obj()));
+   }
+   *out = internal::PyBytes_AsStdString(res.obj());
+   return Status::OK();
+ }
+
+ // Deserialize a Python ExtensionType instance
+ PyObject* DeserializeExtInstance(PyObject* type_class,
+                                  std::shared_ptr<DataType> storage_type,
+                                  const std::string& serialized_data) {
+   OwnedRef storage_ref(wrap_data_type(storage_type));
+   if (!storage_ref) {
+     return nullptr;
+   }
+   OwnedRef data_ref(PyBytes_FromStringAndSize(
+       serialized_data.data(), static_cast<Py_ssize_t>(serialized_data.size())));
+   if (!data_ref) {
+     return nullptr;
+   }
+
+   return cpp_PyObject_CallMethod(type_class, "__arrow_ext_deserialize__", "OO",
+                                  storage_ref.obj(), data_ref.obj());
+ }
+
+ } // namespace
+
+ static const char* kExtensionName = "arrow.py_extension_type";
+
+ std::string PyExtensionType::ToString() const {
+   PyAcquireGIL lock;
+
+   std::stringstream ss;
+   OwnedRef instance(GetInstance());
+   ss << "extension<" << this->extension_name() << "<" << Py_TYPE(instance.obj())->tp_name
+      << ">>";
+   return ss.str();
+ }
+
+ PyExtensionType::PyExtensionType(std::shared_ptr<DataType> storage_type, PyObject* typ,
+                                  PyObject* inst)
+     : ExtensionType(storage_type),
+       extension_name_(kExtensionName),
+       type_class_(typ),
+       type_instance_(inst) {}
+
+ PyExtensionType::PyExtensionType(std::shared_ptr<DataType> storage_type,
+                                  std::string extension_name, PyObject* typ,
+                                  PyObject* inst)
+     : ExtensionType(storage_type),
+       extension_name_(std::move(extension_name)),
+       type_class_(typ),
+       type_instance_(inst) {}
+
+ bool PyExtensionType::ExtensionEquals(const ExtensionType& other) const {
+   PyAcquireGIL lock;
+
+   if (other.extension_name() != extension_name()) {
+     return false;
+   }
+   const auto& other_ext = checked_cast<const PyExtensionType&>(other);
+   int res = -1;
+   if (!type_instance_) {
+     if (other_ext.type_instance_) {
+       return false;
+     }
+     // Compare Python types
+     res = PyObject_RichCompareBool(type_class_.obj(), other_ext.type_class_.obj(), Py_EQ);
+   } else {
+     if (!other_ext.type_instance_) {
+       return false;
+     }
+     // Compare Python instances
+     OwnedRef left(GetInstance());
+     OwnedRef right(other_ext.GetInstance());
+     if (!left || !right) {
+       goto error;
+     }
+     res = PyObject_RichCompareBool(left.obj(), right.obj(), Py_EQ);
+   }
+   if (res == -1) {
+     goto error;
+   }
+   return res == 1;
+
+ error:
+   // Cannot propagate error
+   PyErr_WriteUnraisable(nullptr);
+   return false;
+ }
+
+ std::shared_ptr<Array> PyExtensionType::MakeArray(std::shared_ptr<ArrayData> data) const {
+   DCHECK_EQ(data->type->id(), Type::EXTENSION);
+   return std::make_shared<ExtensionArray>(data);
+ }
+
+ std::string PyExtensionType::Serialize() const {
+   DCHECK(type_instance_);
+   return serialized_;
+ }
+
+ Result<std::shared_ptr<DataType>> PyExtensionType::Deserialize(
+     std::shared_ptr<DataType> storage_type, const std::string& serialized_data) const {
+   PyAcquireGIL lock;
+
+   if (import_pyarrow()) {
+     return ConvertPyError();
+   }
+   OwnedRef res(DeserializeExtInstance(type_class_.obj(), storage_type, serialized_data));
+   if (!res) {
+     return ConvertPyError();
+   }
+   return unwrap_data_type(res.obj());
+ }
+
+ PyObject* PyExtensionType::GetInstance() const {
+   if (!type_instance_) {
+     PyErr_SetString(PyExc_TypeError, "Not an instance");
+     return nullptr;
+   }
+   DCHECK(PyWeakref_CheckRef(type_instance_.obj()));
+   PyObject* inst = PyWeakref_GET_OBJECT(type_instance_.obj());
+   if (inst != Py_None) {
+     // Cached instance still alive
+     Py_INCREF(inst);
+     return inst;
+   } else {
+     // Must reconstruct from serialized form
+     // XXX cache again?
+     return DeserializeExtInstance(type_class_.obj(), storage_type_, serialized_);
+   }
+ }
+
+ Status PyExtensionType::SetInstance(PyObject* inst) const {
+   // Check we have the right type
+   PyObject* typ = reinterpret_cast<PyObject*>(Py_TYPE(inst));
+   if (typ != type_class_.obj()) {
+     return Status::TypeError("Unexpected Python ExtensionType class ",
+                              internal::PyObject_StdStringRepr(typ), " expected ",
+                              internal::PyObject_StdStringRepr(type_class_.obj()));
+   }
+
+   PyObject* wr = PyWeakref_NewRef(inst, nullptr);
+   if (wr == NULL) {
+     return ConvertPyError();
+   }
+   type_instance_.reset(wr);
+   return SerializeExtInstance(inst, &serialized_);
+ }
+
+ Status PyExtensionType::FromClass(const std::shared_ptr<DataType> storage_type,
+                                   const std::string extension_name, PyObject* typ,
+                                   std::shared_ptr<ExtensionType>* out) {
+   Py_INCREF(typ);
+   out->reset(new PyExtensionType(storage_type, std::move(extension_name), typ));
+   return Status::OK();
+ }
+
+ Status RegisterPyExtensionType(const std::shared_ptr<DataType>& type) {
+   DCHECK_EQ(type->id(), Type::EXTENSION);
+   auto ext_type = std::dynamic_pointer_cast<ExtensionType>(type);
+   return RegisterExtensionType(ext_type);
+ }
+
+ Status UnregisterPyExtensionType(const std::string& type_name) {
+   return UnregisterExtensionType(type_name);
+ }
+
+ std::string PyExtensionName() { return kExtensionName; }
+
+ } // namespace py
+ } // namespace arrow
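A sketch of how the Cython layer is expected to drive this file: create the C++ wrapper for a Python type class via FromClass, then register it globally. The storage type and the name "example.ext" are placeholders, and `py_class` is assumed to be a borrowed reference to a class honoring the __arrow_ext_serialize__ / __arrow_ext_deserialize__ contract above.

#include "arrow/python/extension_type.h"

arrow::Status RegisterFromPython(PyObject* py_class) {
  std::shared_ptr<arrow::ExtensionType> ext_type;
  // FromClass takes its own strong reference to py_class.
  ARROW_RETURN_NOT_OK(arrow::py::PyExtensionType::FromClass(
      arrow::int64(), "example.ext", py_class, &ext_type));
  // Registration is global; UnregisterPyExtensionType("example.ext") undoes it.
  return arrow::py::RegisterPyExtensionType(ext_type);
}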
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/extension_type.h ADDED
@@ -0,0 +1,85 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+
+ #include "arrow/extension_type.h"
+ #include "arrow/python/common.h"
+ #include "arrow/python/visibility.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace py {
+
+ class ARROW_PYTHON_EXPORT PyExtensionType : public ExtensionType {
+  public:
+   // Implement the ExtensionType API
+   std::string extension_name() const override { return extension_name_; }
+
+   std::string ToString() const override;
+
+   bool ExtensionEquals(const ExtensionType& other) const override;
+
+   std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override;
+
+   Result<std::shared_ptr<DataType>> Deserialize(
+       std::shared_ptr<DataType> storage_type,
+       const std::string& serialized) const override;
+
+   std::string Serialize() const override;
+
+   // For use from Cython
+   // Assumes that `typ` is borrowed
+   static Status FromClass(const std::shared_ptr<DataType> storage_type,
+                           const std::string extension_name, PyObject* typ,
+                           std::shared_ptr<ExtensionType>* out);
+
+   // Return new ref
+   PyObject* GetInstance() const;
+   Status SetInstance(PyObject*) const;
+
+  protected:
+   PyExtensionType(std::shared_ptr<DataType> storage_type, PyObject* typ,
+                   PyObject* inst = NULLPTR);
+   PyExtensionType(std::shared_ptr<DataType> storage_type, std::string extension_name,
+                   PyObject* typ, PyObject* inst = NULLPTR);
+
+   std::string extension_name_;
+
+   // These fields are mutable because of two-step initialization.
+   mutable OwnedRefNoGIL type_class_;
+   // A weakref or null. Storing a strong reference to the Python extension type
+   // instance would create an unreclaimable reference cycle between Python and C++
+   // (the Python instance has to keep a strong reference to the C++ ExtensionType
+   // in the other direction). Instead, we store a weakref to the instance.
+   // If the weakref is dead, we reconstruct the instance from its serialized form.
+   mutable OwnedRefNoGIL type_instance_;
+   // Empty if type_instance_ is null
+   mutable std::string serialized_;
+ };
+
+ ARROW_PYTHON_EXPORT std::string PyExtensionName();
+
+ ARROW_PYTHON_EXPORT Status RegisterPyExtensionType(const std::shared_ptr<DataType>&);
+
+ ARROW_PYTHON_EXPORT Status UnregisterPyExtensionType(const std::string& type_name);
+
+ } // namespace py
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/filesystem.cc ADDED
@@ -0,0 +1,206 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #include "arrow/python/filesystem.h"
+ #include "arrow/util/logging.h"
+
+ namespace arrow {
+
+ using fs::FileInfo;
+ using fs::FileSelector;
+
+ namespace py {
+ namespace fs {
+
+ PyFileSystem::PyFileSystem(PyObject* handler, PyFileSystemVtable vtable)
+     : handler_(handler), vtable_(std::move(vtable)) {
+   Py_INCREF(handler);
+ }
+
+ PyFileSystem::~PyFileSystem() {}
+
+ std::shared_ptr<PyFileSystem> PyFileSystem::Make(PyObject* handler,
+                                                  PyFileSystemVtable vtable) {
+   return std::make_shared<PyFileSystem>(handler, std::move(vtable));
+ }
+
+ std::string PyFileSystem::type_name() const {
+   std::string result;
+   auto st = SafeCallIntoPython([&]() -> Status {
+     vtable_.get_type_name(handler_.obj(), &result);
+     if (PyErr_Occurred()) {
+       PyErr_WriteUnraisable(handler_.obj());
+     }
+     return Status::OK();
+   });
+   ARROW_UNUSED(st);
+   return result;
+ }
+
+ bool PyFileSystem::Equals(const FileSystem& other) const {
+   bool result = false;  // keep a well-defined value if the Python call fails
+   auto st = SafeCallIntoPython([&]() -> Status {
+     result = vtable_.equals(handler_.obj(), other);
+     if (PyErr_Occurred()) {
+       PyErr_WriteUnraisable(handler_.obj());
+     }
+     return Status::OK();
+   });
+   ARROW_UNUSED(st);
+   return result;
+ }
+
+ Result<FileInfo> PyFileSystem::GetFileInfo(const std::string& path) {
+   FileInfo info;
+
+   auto st = SafeCallIntoPython([&]() -> Status {
+     vtable_.get_file_info(handler_.obj(), path, &info);
+     return CheckPyError();
+   });
+   RETURN_NOT_OK(st);
+   return info;
+ }
+
+ Result<std::vector<FileInfo>> PyFileSystem::GetFileInfo(
+     const std::vector<std::string>& paths) {
+   std::vector<FileInfo> infos;
+
+   auto st = SafeCallIntoPython([&]() -> Status {
+     vtable_.get_file_info_vector(handler_.obj(), paths, &infos);
+     return CheckPyError();
+   });
+   RETURN_NOT_OK(st);
+   return infos;
+ }
+
+ Result<std::vector<FileInfo>> PyFileSystem::GetFileInfo(const FileSelector& select) {
+   std::vector<FileInfo> infos;
+
+   auto st = SafeCallIntoPython([&]() -> Status {
+     vtable_.get_file_info_selector(handler_.obj(), select, &infos);
+     return CheckPyError();
+   });
+   RETURN_NOT_OK(st);
+   return infos;
+ }
+
+ Status PyFileSystem::CreateDir(const std::string& path, bool recursive) {
+   return SafeCallIntoPython([&]() -> Status {
+     vtable_.create_dir(handler_.obj(), path, recursive);
+     return CheckPyError();
+   });
+ }
+
+ Status PyFileSystem::DeleteDir(const std::string& path) {
+   return SafeCallIntoPython([&]() -> Status {
+     vtable_.delete_dir(handler_.obj(), path);
+     return CheckPyError();
+   });
+ }
+
+ Status PyFileSystem::DeleteDirContents(const std::string& path, bool missing_dir_ok) {
+   return SafeCallIntoPython([&]() -> Status {
+     vtable_.delete_dir_contents(handler_.obj(), path, missing_dir_ok);
+     return CheckPyError();
+   });
+ }
+
+ Status PyFileSystem::DeleteRootDirContents() {
+   return SafeCallIntoPython([&]() -> Status {
+     vtable_.delete_root_dir_contents(handler_.obj());
+     return CheckPyError();
+   });
+ }
+
+ Status PyFileSystem::DeleteFile(const std::string& path) {
+   return SafeCallIntoPython([&]() -> Status {
+     vtable_.delete_file(handler_.obj(), path);
+     return CheckPyError();
+   });
+ }
+
+ Status PyFileSystem::Move(const std::string& src, const std::string& dest) {
+   return SafeCallIntoPython([&]() -> Status {
+     vtable_.move(handler_.obj(), src, dest);
+     return CheckPyError();
+   });
+ }
+
+ Status PyFileSystem::CopyFile(const std::string& src, const std::string& dest) {
+   return SafeCallIntoPython([&]() -> Status {
+     vtable_.copy_file(handler_.obj(), src, dest);
+     return CheckPyError();
+   });
+ }
+
+ Result<std::shared_ptr<io::InputStream>> PyFileSystem::OpenInputStream(
+     const std::string& path) {
+   std::shared_ptr<io::InputStream> stream;
+   auto st = SafeCallIntoPython([&]() -> Status {
+     vtable_.open_input_stream(handler_.obj(), path, &stream);
+     return CheckPyError();
+   });
+   RETURN_NOT_OK(st);
+   return stream;
+ }
+
+ Result<std::shared_ptr<io::RandomAccessFile>> PyFileSystem::OpenInputFile(
+     const std::string& path) {
+   std::shared_ptr<io::RandomAccessFile> stream;
+   auto st = SafeCallIntoPython([&]() -> Status {
+     vtable_.open_input_file(handler_.obj(), path, &stream);
+     return CheckPyError();
+   });
+   RETURN_NOT_OK(st);
+   return stream;
+ }
+
+ Result<std::shared_ptr<io::OutputStream>> PyFileSystem::OpenOutputStream(
+     const std::string& path, const std::shared_ptr<const KeyValueMetadata>& metadata) {
+   std::shared_ptr<io::OutputStream> stream;
+   auto st = SafeCallIntoPython([&]() -> Status {
+     vtable_.open_output_stream(handler_.obj(), path, metadata, &stream);
+     return CheckPyError();
+   });
+   RETURN_NOT_OK(st);
+   return stream;
+ }
+
+ Result<std::shared_ptr<io::OutputStream>> PyFileSystem::OpenAppendStream(
+     const std::string& path, const std::shared_ptr<const KeyValueMetadata>& metadata) {
+   std::shared_ptr<io::OutputStream> stream;
+   auto st = SafeCallIntoPython([&]() -> Status {
+     vtable_.open_append_stream(handler_.obj(), path, metadata, &stream);
+     return CheckPyError();
+   });
+   RETURN_NOT_OK(st);
+   return stream;
+ }
+
+ Result<std::string> PyFileSystem::NormalizePath(std::string path) {
+   std::string normalized;
+   auto st = SafeCallIntoPython([&]() -> Status {
+     vtable_.normalize_path(handler_.obj(), path, &normalized);
+     return CheckPyError();
+   });
+   RETURN_NOT_OK(st);
+   return normalized;
+ }
+
+ } // namespace fs
+ } // namespace py
+ } // namespace arrow
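The PyFileSystemVtable filled in by the Cython bindings is not part of this diff; below is a no-op sketch of the shape implied by the call sites above, assuming (as with the Flight vtables later in this commit) that the slots are std::function members. The handler name "example-py-fs" and the wrapper function are placeholders.

#include "arrow/python/filesystem.h"

std::shared_ptr<arrow::py::fs::PyFileSystem> MakeExampleFs(PyObject* handler) {
  arrow::py::fs::PyFileSystemVtable vtable;
  // Slots report Python-level failures through the error indicator, which
  // the methods above convert via CheckPyError() / PyErr_WriteUnraisable().
  vtable.get_type_name = [](PyObject* /*handler*/, std::string* out) {
    *out = "example-py-fs";  // a real binding would query the Python object
  };
  // ... the remaining slots (get_file_info, create_dir, ...) follow the same
  // pattern and are omitted here.
  return arrow::py::fs::PyFileSystem::Make(handler, std::move(vtable));
}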
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/flight.cc ADDED
@@ -0,0 +1,388 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #include <signal.h>
+ #include <utility>
+
+ #include "arrow/python/flight.h"
+ #include "arrow/util/io_util.h"
+ #include "arrow/util/logging.h"
+
+ using arrow::flight::FlightPayload;
+
+ namespace arrow {
+ namespace py {
+ namespace flight {
+
+ const char* kPyServerMiddlewareName = "arrow.py_server_middleware";
+
+ PyServerAuthHandler::PyServerAuthHandler(PyObject* handler,
+                                          const PyServerAuthHandlerVtable& vtable)
+     : vtable_(vtable) {
+   Py_INCREF(handler);
+   handler_.reset(handler);
+ }
+
+ Status PyServerAuthHandler::Authenticate(arrow::flight::ServerAuthSender* outgoing,
+                                          arrow::flight::ServerAuthReader* incoming) {
+   return SafeCallIntoPython([=] {
+     const Status status = vtable_.authenticate(handler_.obj(), outgoing, incoming);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ Status PyServerAuthHandler::IsValid(const std::string& token,
+                                     std::string* peer_identity) {
+   return SafeCallIntoPython([=] {
+     const Status status = vtable_.is_valid(handler_.obj(), token, peer_identity);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ PyClientAuthHandler::PyClientAuthHandler(PyObject* handler,
+                                          const PyClientAuthHandlerVtable& vtable)
+     : vtable_(vtable) {
+   Py_INCREF(handler);
+   handler_.reset(handler);
+ }
+
+ Status PyClientAuthHandler::Authenticate(arrow::flight::ClientAuthSender* outgoing,
+                                          arrow::flight::ClientAuthReader* incoming) {
+   return SafeCallIntoPython([=] {
+     const Status status = vtable_.authenticate(handler_.obj(), outgoing, incoming);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ Status PyClientAuthHandler::GetToken(std::string* token) {
+   return SafeCallIntoPython([=] {
+     const Status status = vtable_.get_token(handler_.obj(), token);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ PyFlightServer::PyFlightServer(PyObject* server, const PyFlightServerVtable& vtable)
+     : vtable_(vtable) {
+   Py_INCREF(server);
+   server_.reset(server);
+ }
+
+ Status PyFlightServer::ListFlights(
+     const arrow::flight::ServerCallContext& context,
+     const arrow::flight::Criteria* criteria,
+     std::unique_ptr<arrow::flight::FlightListing>* listings) {
+   return SafeCallIntoPython([&] {
+     const Status status =
+         vtable_.list_flights(server_.obj(), context, criteria, listings);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ Status PyFlightServer::GetFlightInfo(const arrow::flight::ServerCallContext& context,
+                                      const arrow::flight::FlightDescriptor& request,
+                                      std::unique_ptr<arrow::flight::FlightInfo>* info) {
+   return SafeCallIntoPython([&] {
+     const Status status = vtable_.get_flight_info(server_.obj(), context, request, info);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ Status PyFlightServer::GetSchema(const arrow::flight::ServerCallContext& context,
+                                  const arrow::flight::FlightDescriptor& request,
+                                  std::unique_ptr<arrow::flight::SchemaResult>* result) {
+   return SafeCallIntoPython([&] {
+     const Status status = vtable_.get_schema(server_.obj(), context, request, result);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ Status PyFlightServer::DoGet(const arrow::flight::ServerCallContext& context,
+                              const arrow::flight::Ticket& request,
+                              std::unique_ptr<arrow::flight::FlightDataStream>* stream) {
+   return SafeCallIntoPython([&] {
+     const Status status = vtable_.do_get(server_.obj(), context, request, stream);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ Status PyFlightServer::DoPut(
+     const arrow::flight::ServerCallContext& context,
+     std::unique_ptr<arrow::flight::FlightMessageReader> reader,
+     std::unique_ptr<arrow::flight::FlightMetadataWriter> writer) {
+   return SafeCallIntoPython([&] {
+     const Status status =
+         vtable_.do_put(server_.obj(), context, std::move(reader), std::move(writer));
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ Status PyFlightServer::DoExchange(
+     const arrow::flight::ServerCallContext& context,
+     std::unique_ptr<arrow::flight::FlightMessageReader> reader,
+     std::unique_ptr<arrow::flight::FlightMessageWriter> writer) {
+   return SafeCallIntoPython([&] {
+     const Status status =
+         vtable_.do_exchange(server_.obj(), context, std::move(reader), std::move(writer));
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ Status PyFlightServer::DoAction(const arrow::flight::ServerCallContext& context,
+                                 const arrow::flight::Action& action,
+                                 std::unique_ptr<arrow::flight::ResultStream>* result) {
+   return SafeCallIntoPython([&] {
+     const Status status = vtable_.do_action(server_.obj(), context, action, result);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ Status PyFlightServer::ListActions(const arrow::flight::ServerCallContext& context,
+                                    std::vector<arrow::flight::ActionType>* actions) {
+   return SafeCallIntoPython([&] {
+     const Status status = vtable_.list_actions(server_.obj(), context, actions);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ Status PyFlightServer::ServeWithSignals() {
+   // Respect the current Python settings, i.e. only interrupt the server if there is
+   // an active signal handler for SIGINT and SIGTERM.
+   std::vector<int> signals;
+   for (const int signum : {SIGINT, SIGTERM}) {
+     ARROW_ASSIGN_OR_RAISE(auto handler, ::arrow::internal::GetSignalHandler(signum));
+     auto cb = handler.callback();
+     if (cb != SIG_DFL && cb != SIG_IGN) {
+       signals.push_back(signum);
+     }
+   }
+   RETURN_NOT_OK(SetShutdownOnSignals(signals));
+
+   // Serve until we got told to shutdown or a signal interrupted us
+   RETURN_NOT_OK(Serve());
+   int signum = GotSignal();
+   if (signum != 0) {
+     // Issue the signal again with Python's signal handlers restored
+     PyAcquireGIL lock;
+     raise(signum);
+     // XXX Ideally we would loop and serve again if no exception was raised.
+     // Unfortunately, gRPC will return immediately if Serve() is called again.
+     ARROW_UNUSED(PyErr_CheckSignals());
+   }
+
+   return Status::OK();
+ }
+
+ PyFlightResultStream::PyFlightResultStream(PyObject* generator,
+                                            PyFlightResultStreamCallback callback)
+     : callback_(callback) {
+   Py_INCREF(generator);
+   generator_.reset(generator);
+ }
+
+ arrow::Result<std::unique_ptr<arrow::flight::Result>> PyFlightResultStream::Next() {
+   return SafeCallIntoPython(
+       [=]() -> arrow::Result<std::unique_ptr<arrow::flight::Result>> {
+         std::unique_ptr<arrow::flight::Result> result;
+         const Status status = callback_(generator_.obj(), &result);
+         RETURN_NOT_OK(CheckPyError());
+         RETURN_NOT_OK(status);
+         return result;
+       });
+ }
+
+ PyFlightDataStream::PyFlightDataStream(
+     PyObject* data_source, std::unique_ptr<arrow::flight::FlightDataStream> stream)
+     : stream_(std::move(stream)) {
+   Py_INCREF(data_source);
+   data_source_.reset(data_source);
+ }
+
+ std::shared_ptr<Schema> PyFlightDataStream::schema() { return stream_->schema(); }
+
+ arrow::Result<FlightPayload> PyFlightDataStream::GetSchemaPayload() {
+   return stream_->GetSchemaPayload();
+ }
+
+ arrow::Result<FlightPayload> PyFlightDataStream::Next() { return stream_->Next(); }
+
+ PyGeneratorFlightDataStream::PyGeneratorFlightDataStream(
+     PyObject* generator, std::shared_ptr<arrow::Schema> schema,
+     PyGeneratorFlightDataStreamCallback callback, const ipc::IpcWriteOptions& options)
+     : schema_(schema), mapper_(*schema_), options_(options), callback_(callback) {
+   Py_INCREF(generator);
+   generator_.reset(generator);
+ }
+
+ std::shared_ptr<Schema> PyGeneratorFlightDataStream::schema() { return schema_; }
+
+ arrow::Result<FlightPayload> PyGeneratorFlightDataStream::GetSchemaPayload() {
+   FlightPayload payload;
+   RETURN_NOT_OK(ipc::GetSchemaPayload(*schema_, options_, mapper_, &payload.ipc_message));
+   return payload;
+ }
+
+ arrow::Result<FlightPayload> PyGeneratorFlightDataStream::Next() {
+   return SafeCallIntoPython([=]() -> arrow::Result<FlightPayload> {
+     FlightPayload payload;
+     const Status status = callback_(generator_.obj(), &payload);
+     RETURN_NOT_OK(CheckPyError());
+     RETURN_NOT_OK(status);
+     return payload;
+   });
+ }
+
+ // Flight Server Middleware
+
+ PyServerMiddlewareFactory::PyServerMiddlewareFactory(PyObject* factory,
+                                                      StartCallCallback start_call)
+     : start_call_(start_call) {
+   Py_INCREF(factory);
+   factory_.reset(factory);
+ }
+
+ Status PyServerMiddlewareFactory::StartCall(
+     const arrow::flight::CallInfo& info,
+     const arrow::flight::CallHeaders& incoming_headers,
+     std::shared_ptr<arrow::flight::ServerMiddleware>* middleware) {
+   return SafeCallIntoPython([&] {
+     const Status status = start_call_(factory_.obj(), info, incoming_headers, middleware);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+ }
+
+ PyServerMiddleware::PyServerMiddleware(PyObject* middleware, Vtable vtable)
+     : vtable_(vtable) {
+   Py_INCREF(middleware);
+   middleware_.reset(middleware);
+ }
+
+ void PyServerMiddleware::SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) {
+   const Status& status = SafeCallIntoPython([&] {
+     const Status status = vtable_.sending_headers(middleware_.obj(), outgoing_headers);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+
+   ARROW_WARN_NOT_OK(status, "Python server middleware failed in SendingHeaders");
+ }
+
+ void PyServerMiddleware::CallCompleted(const Status& call_status) {
+   const Status& status = SafeCallIntoPython([&] {
+     const Status status = vtable_.call_completed(middleware_.obj(), call_status);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+
+   ARROW_WARN_NOT_OK(status, "Python server middleware failed in CallCompleted");
+ }
+
+ std::string PyServerMiddleware::name() const { return kPyServerMiddlewareName; }
+
+ PyObject* PyServerMiddleware::py_object() const { return middleware_.obj(); }
+
+ // Flight Client Middleware
+
+ PyClientMiddlewareFactory::PyClientMiddlewareFactory(PyObject* factory,
+                                                      StartCallCallback start_call)
+     : start_call_(start_call) {
+   Py_INCREF(factory);
+   factory_.reset(factory);
+ }
+
+ void PyClientMiddlewareFactory::StartCall(
+     const arrow::flight::CallInfo& info,
+     std::unique_ptr<arrow::flight::ClientMiddleware>* middleware) {
+   const Status& status = SafeCallIntoPython([&] {
+     const Status status = start_call_(factory_.obj(), info, middleware);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+
+   ARROW_WARN_NOT_OK(status, "Python client middleware failed in StartCall");
+ }
+
+ PyClientMiddleware::PyClientMiddleware(PyObject* middleware, Vtable vtable)
+     : vtable_(vtable) {
+   Py_INCREF(middleware);
+   middleware_.reset(middleware);
+ }
+
+ void PyClientMiddleware::SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) {
+   const Status& status = SafeCallIntoPython([&] {
+     const Status status = vtable_.sending_headers(middleware_.obj(), outgoing_headers);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+
+   ARROW_WARN_NOT_OK(status, "Python client middleware failed in SendingHeaders");
+ }
+
+ void PyClientMiddleware::ReceivedHeaders(
+     const arrow::flight::CallHeaders& incoming_headers) {
+   const Status& status = SafeCallIntoPython([&] {
+     const Status status = vtable_.received_headers(middleware_.obj(), incoming_headers);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+
+   ARROW_WARN_NOT_OK(status, "Python client middleware failed in ReceivedHeaders");
+ }
+
+ void PyClientMiddleware::CallCompleted(const Status& call_status) {
+   const Status& status = SafeCallIntoPython([&] {
+     const Status status = vtable_.call_completed(middleware_.obj(), call_status);
+     RETURN_NOT_OK(CheckPyError());
+     return status;
+   });
+
+   ARROW_WARN_NOT_OK(status, "Python client middleware failed in CallCompleted");
+ }
+
+ Status CreateFlightInfo(const std::shared_ptr<arrow::Schema>& schema,
+                         const arrow::flight::FlightDescriptor& descriptor,
+                         const std::vector<arrow::flight::FlightEndpoint>& endpoints,
+                         int64_t total_records, int64_t total_bytes,
+                         std::unique_ptr<arrow::flight::FlightInfo>* out) {
+   ARROW_ASSIGN_OR_RAISE(auto result,
+                         arrow::flight::FlightInfo::Make(*schema, descriptor, endpoints,
+                                                         total_records, total_bytes));
+   *out = std::unique_ptr<arrow::flight::FlightInfo>(
+       new arrow::flight::FlightInfo(std::move(result)));
+   return Status::OK();
+ }
+
+ Status CreateSchemaResult(const std::shared_ptr<arrow::Schema>& schema,
+                           std::unique_ptr<arrow::flight::SchemaResult>* out) {
+   return arrow::flight::SchemaResult::Make(*schema).Value(out);
+ }
+
+ } // namespace flight
+ } // namespace py
+ } // namespace arrow
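For reference, CreateFlightInfo at the end of this file is the piece most likely to be called directly from the bindings; a hedged usage sketch follows, with placeholder descriptor contents, an empty endpoint list, and unknown totals signalled by -1. The wrapper function name is illustrative only.

#include "arrow/python/flight.h"

arrow::Status MakeExampleInfo(const std::shared_ptr<arrow::Schema>& schema,
                              std::unique_ptr<arrow::flight::FlightInfo>* out) {
  auto descriptor = arrow::flight::FlightDescriptor::Path({"example", "path"});
  std::vector<arrow::flight::FlightEndpoint> endpoints;  // none yet
  // -1 tells clients the record/byte totals are unknown.
  return arrow::py::flight::CreateFlightInfo(schema, descriptor, endpoints,
                                             /*total_records=*/-1,
                                             /*total_bytes=*/-1, out);
}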
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/flight.h ADDED
@@ -0,0 +1,350 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ #include "arrow/flight/api.h"
+ #include "arrow/ipc/dictionary.h"
+ #include "arrow/python/common.h"
+
+ #if defined(_WIN32) || defined(__CYGWIN__) // Windows
+ #if defined(_MSC_VER)
+ #pragma warning(disable : 4251)
+ #else
+ #pragma GCC diagnostic ignored "-Wattributes"
+ #endif
+
+ #ifdef ARROW_PYTHON_STATIC
+ #define ARROW_PYFLIGHT_EXPORT
+ #elif defined(ARROW_PYFLIGHT_EXPORTING)
+ #define ARROW_PYFLIGHT_EXPORT __declspec(dllexport)
+ #else
+ #define ARROW_PYFLIGHT_EXPORT __declspec(dllimport)
+ #endif
+
+ #else // Not Windows
+ #ifndef ARROW_PYFLIGHT_EXPORT
+ #define ARROW_PYFLIGHT_EXPORT __attribute__((visibility("default")))
+ #endif
+ #endif // Non-Windows
+
+ namespace arrow {
+
+ namespace py {
+
+ namespace flight {
+
+ ARROW_PYFLIGHT_EXPORT
+ extern const char* kPyServerMiddlewareName;
+
+ /// \brief A table of function pointers for calling from C++ into
+ /// Python.
+ class ARROW_PYFLIGHT_EXPORT PyFlightServerVtable {
+  public:
+   std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
+                        const arrow::flight::Criteria*,
+                        std::unique_ptr<arrow::flight::FlightListing>*)>
+       list_flights;
+   std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
+                        const arrow::flight::FlightDescriptor&,
+                        std::unique_ptr<arrow::flight::FlightInfo>*)>
+       get_flight_info;
+   std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
+                        const arrow::flight::FlightDescriptor&,
+                        std::unique_ptr<arrow::flight::SchemaResult>*)>
+       get_schema;
+   std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
+                        const arrow::flight::Ticket&,
+                        std::unique_ptr<arrow::flight::FlightDataStream>*)>
+       do_get;
+   std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
+                        std::unique_ptr<arrow::flight::FlightMessageReader>,
+                        std::unique_ptr<arrow::flight::FlightMetadataWriter>)>
+       do_put;
+   std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
+                        std::unique_ptr<arrow::flight::FlightMessageReader>,
+                        std::unique_ptr<arrow::flight::FlightMessageWriter>)>
+       do_exchange;
+   std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
+                        const arrow::flight::Action&,
+                        std::unique_ptr<arrow::flight::ResultStream>*)>
+       do_action;
+   std::function<Status(PyObject*, const arrow::flight::ServerCallContext&,
+                        std::vector<arrow::flight::ActionType>*)>
+       list_actions;
+ };
+
+ class ARROW_PYFLIGHT_EXPORT PyServerAuthHandlerVtable {
+  public:
+   std::function<Status(PyObject*, arrow::flight::ServerAuthSender*,
+                        arrow::flight::ServerAuthReader*)>
+       authenticate;
+   std::function<Status(PyObject*, const std::string&, std::string*)> is_valid;
+ };
+
+ class ARROW_PYFLIGHT_EXPORT PyClientAuthHandlerVtable {
+  public:
+   std::function<Status(PyObject*, arrow::flight::ClientAuthSender*,
+                        arrow::flight::ClientAuthReader*)>
+       authenticate;
+   std::function<Status(PyObject*, std::string*)> get_token;
+ };
+
+ /// \brief A helper to implement an auth mechanism in Python.
+ class ARROW_PYFLIGHT_EXPORT PyServerAuthHandler
+     : public arrow::flight::ServerAuthHandler {
+  public:
+   explicit PyServerAuthHandler(PyObject* handler,
+                                const PyServerAuthHandlerVtable& vtable);
+   Status Authenticate(arrow::flight::ServerAuthSender* outgoing,
+                       arrow::flight::ServerAuthReader* incoming) override;
+   Status IsValid(const std::string& token, std::string* peer_identity) override;
+
+  private:
+   OwnedRefNoGIL handler_;
+   PyServerAuthHandlerVtable vtable_;
+ };
+
+ /// \brief A helper to implement an auth mechanism in Python.
+ class ARROW_PYFLIGHT_EXPORT PyClientAuthHandler
+     : public arrow::flight::ClientAuthHandler {
+  public:
+   explicit PyClientAuthHandler(PyObject* handler,
+                                const PyClientAuthHandlerVtable& vtable);
+   Status Authenticate(arrow::flight::ClientAuthSender* outgoing,
+                       arrow::flight::ClientAuthReader* incoming) override;
+   Status GetToken(std::string* token) override;
+
+  private:
+   OwnedRefNoGIL handler_;
+   PyClientAuthHandlerVtable vtable_;
+ };
+
+ class ARROW_PYFLIGHT_EXPORT PyFlightServer : public arrow::flight::FlightServerBase {
+  public:
+   explicit PyFlightServer(PyObject* server, const PyFlightServerVtable& vtable);
+
+   // Like Serve(), but set up signals and invoke Python signal handlers
+   // if necessary. This function may return with a Python exception set.
+   Status ServeWithSignals();
+
+   Status ListFlights(const arrow::flight::ServerCallContext& context,
+                      const arrow::flight::Criteria* criteria,
+                      std::unique_ptr<arrow::flight::FlightListing>* listings) override;
+   Status GetFlightInfo(const arrow::flight::ServerCallContext& context,
+                        const arrow::flight::FlightDescriptor& request,
+                        std::unique_ptr<arrow::flight::FlightInfo>* info) override;
+   Status GetSchema(const arrow::flight::ServerCallContext& context,
+                    const arrow::flight::FlightDescriptor& request,
+                    std::unique_ptr<arrow::flight::SchemaResult>* result) override;
+   Status DoGet(const arrow::flight::ServerCallContext& context,
+                const arrow::flight::Ticket& request,
+                std::unique_ptr<arrow::flight::FlightDataStream>* stream) override;
+   Status DoPut(const arrow::flight::ServerCallContext& context,
+                std::unique_ptr<arrow::flight::FlightMessageReader> reader,
+                std::unique_ptr<arrow::flight::FlightMetadataWriter> writer) override;
+   Status DoExchange(const arrow::flight::ServerCallContext& context,
+                     std::unique_ptr<arrow::flight::FlightMessageReader> reader,
+                     std::unique_ptr<arrow::flight::FlightMessageWriter> writer) override;
+   Status DoAction(const arrow::flight::ServerCallContext& context,
+                   const arrow::flight::Action& action,
+                   std::unique_ptr<arrow::flight::ResultStream>* result) override;
+   Status ListActions(const arrow::flight::ServerCallContext& context,
+                      std::vector<arrow::flight::ActionType>* actions) override;
+
+  private:
+   OwnedRefNoGIL server_;
+   PyFlightServerVtable vtable_;
+ };
+
+ /// \brief A callback that obtains the next result from a Flight action.
+ typedef std::function<Status(PyObject*, std::unique_ptr<arrow::flight::Result>*)>
+     PyFlightResultStreamCallback;
+
+ /// \brief A ResultStream built around a Python callback.
+ class ARROW_PYFLIGHT_EXPORT PyFlightResultStream : public arrow::flight::ResultStream {
+  public:
+   /// \brief Construct a FlightResultStream from a Python object and callback.
+   /// Must only be called while holding the GIL.
+   explicit PyFlightResultStream(PyObject* generator,
+                                 PyFlightResultStreamCallback callback);
+   arrow::Result<std::unique_ptr<arrow::flight::Result>> Next() override;
+
+  private:
+   OwnedRefNoGIL generator_;
+   PyFlightResultStreamCallback callback_;
+ };
+
+ /// \brief A wrapper around a FlightDataStream that keeps alive a
+ /// Python object backing it.
+ class ARROW_PYFLIGHT_EXPORT PyFlightDataStream : public arrow::flight::FlightDataStream {
+  public:
+   /// \brief Construct a FlightDataStream from a Python object and underlying stream.
+   /// Must only be called while holding the GIL.
+   explicit PyFlightDataStream(PyObject* data_source,
+                               std::unique_ptr<arrow::flight::FlightDataStream> stream);
+
+   std::shared_ptr<Schema> schema() override;
+   arrow::Result<arrow::flight::FlightPayload> GetSchemaPayload() override;
+   arrow::Result<arrow::flight::FlightPayload> Next() override;
+
+  private:
+   OwnedRefNoGIL data_source_;
+   std::unique_ptr<arrow::flight::FlightDataStream> stream_;
+ };
+
+ class ARROW_PYFLIGHT_EXPORT PyServerMiddlewareFactory
+     : public arrow::flight::ServerMiddlewareFactory {
+  public:
+   /// \brief A callback to create the middleware instance in Python
+   typedef std::function<Status(
+       PyObject*, const arrow::flight::CallInfo& info,
+       const arrow::flight::CallHeaders& incoming_headers,
+       std::shared_ptr<arrow::flight::ServerMiddleware>* middleware)>
+       StartCallCallback;
+
+   /// \brief Must only be called while holding the GIL.
+   explicit PyServerMiddlewareFactory(PyObject* factory, StartCallCallback start_call);
+
+   Status StartCall(const arrow::flight::CallInfo& info,
+                    const arrow::flight::CallHeaders& incoming_headers,
+                    std::shared_ptr<arrow::flight::ServerMiddleware>* middleware) override;
+
+  private:
+   OwnedRefNoGIL factory_;
+   StartCallCallback start_call_;
+ };
+
+ class ARROW_PYFLIGHT_EXPORT PyServerMiddleware : public arrow::flight::ServerMiddleware {
+  public:
+   typedef std::function<Status(PyObject*,
+                                arrow::flight::AddCallHeaders* outgoing_headers)>
+       SendingHeadersCallback;
+   typedef std::function<Status(PyObject*, const Status& status)> CallCompletedCallback;
+
+   struct Vtable {
+     SendingHeadersCallback sending_headers;
+     CallCompletedCallback call_completed;
+   };
+
+   /// \brief Must only be called while holding the GIL.
+   explicit PyServerMiddleware(PyObject* middleware, Vtable vtable);
+
+   void SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) override;
+   void CallCompleted(const Status& status) override;
+   std::string name() const override;
+   /// \brief Get the underlying Python object.
+   PyObject* py_object() const;
+
+  private:
+   OwnedRefNoGIL middleware_;
+   Vtable vtable_;
+ };
+
+ class ARROW_PYFLIGHT_EXPORT PyClientMiddlewareFactory
+     : public arrow::flight::ClientMiddlewareFactory {
+  public:
+   /// \brief A callback to create the middleware instance in Python
+   typedef std::function<Status(
+       PyObject*, const arrow::flight::CallInfo& info,
+       std::unique_ptr<arrow::flight::ClientMiddleware>* middleware)>
+       StartCallCallback;
+
+   /// \brief Must only be called while holding the GIL.
+   explicit PyClientMiddlewareFactory(PyObject* factory, StartCallCallback start_call);
+
+   void StartCall(const arrow::flight::CallInfo& info,
+                  std::unique_ptr<arrow::flight::ClientMiddleware>* middleware) override;
+
+  private:
+   OwnedRefNoGIL factory_;
+   StartCallCallback start_call_;
+ };
+
+ class ARROW_PYFLIGHT_EXPORT PyClientMiddleware : public arrow::flight::ClientMiddleware {
+  public:
+   typedef std::function<Status(PyObject*,
+                                arrow::flight::AddCallHeaders* outgoing_headers)>
+       SendingHeadersCallback;
+   typedef std::function<Status(PyObject*,
+                                const arrow::flight::CallHeaders& incoming_headers)>
+       ReceivedHeadersCallback;
+   typedef std::function<Status(PyObject*, const Status& status)> CallCompletedCallback;
+
+   struct Vtable {
+     SendingHeadersCallback sending_headers;
+     ReceivedHeadersCallback received_headers;
+     CallCompletedCallback call_completed;
+   };
+
+   /// \brief Must only be called while holding the GIL.
+   explicit PyClientMiddleware(PyObject* middleware, Vtable vtable);
+
+   void SendingHeaders(arrow::flight::AddCallHeaders* outgoing_headers) override;
+   void ReceivedHeaders(const arrow::flight::CallHeaders& incoming_headers) override;
+   void CallCompleted(const Status& status) override;
+
+  private:
+   OwnedRefNoGIL middleware_;
+   Vtable vtable_;
+ };
+
+ /// \brief A callback that obtains the next payload from a Flight result stream.
+ typedef std::function<Status(PyObject*, arrow::flight::FlightPayload*)>
+     PyGeneratorFlightDataStreamCallback;
+
+ /// \brief A FlightDataStream built around a Python callback.
+ class ARROW_PYFLIGHT_EXPORT PyGeneratorFlightDataStream
+     : public arrow::flight::FlightDataStream {
+  public:
+   /// \brief Construct a FlightDataStream from a Python generator and callback.
+   /// Must only be called while holding the GIL.
+   explicit PyGeneratorFlightDataStream(PyObject* generator,
+                                        std::shared_ptr<arrow::Schema> schema,
+                                        PyGeneratorFlightDataStreamCallback callback,
+                                        const ipc::IpcWriteOptions& options);
+   std::shared_ptr<Schema> schema() override;
+   arrow::Result<arrow::flight::FlightPayload> GetSchemaPayload() override;
+   arrow::Result<arrow::flight::FlightPayload> Next() override;
+
+  private:
+   OwnedRefNoGIL generator_;
+   std::shared_ptr<arrow::Schema> schema_;
+   ipc::DictionaryFieldMapper mapper_;
+   ipc::IpcWriteOptions options_;
+   PyGeneratorFlightDataStreamCallback callback_;
+ };
+
+ ARROW_PYFLIGHT_EXPORT
+ Status CreateFlightInfo(const std::shared_ptr<arrow::Schema>& schema,
+                         const arrow::flight::FlightDescriptor& descriptor,
+                         const std::vector<arrow::flight::FlightEndpoint>& endpoints,
+                         int64_t total_records, int64_t total_bytes,
+                         std::unique_ptr<arrow::flight::FlightInfo>* out);
+
+ /// \brief Create a SchemaResult from schema.
+ ARROW_PYFLIGHT_EXPORT
+ Status CreateSchemaResult(const std::shared_ptr<arrow::Schema>& schema,
+                           std::unique_ptr<arrow::flight::SchemaResult>* out);
+
+ } // namespace flight
+ } // namespace py
+ } // namespace arrow
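To make the middleware vtable contract concrete, a no-op PyServerMiddleware could be assembled as below; real bindings would route both slots into Python instead of returning OK. This is a sketch, not code from the commit, and the wrapper function name is a placeholder.

#include "arrow/python/flight.h"

std::shared_ptr<arrow::py::flight::PyServerMiddleware> MakeNoopMiddleware(
    PyObject* py_middleware) {
  arrow::py::flight::PyServerMiddleware::Vtable vtable;
  vtable.sending_headers = [](PyObject*, arrow::flight::AddCallHeaders*) {
    return arrow::Status::OK();
  };
  vtable.call_completed = [](PyObject*, const arrow::Status&) {
    return arrow::Status::OK();
  };
  // The constructor takes its own reference to py_middleware; per the header,
  // it must be invoked while holding the GIL.
  return std::make_shared<arrow::py::flight::PyServerMiddleware>(py_middleware,
                                                                 vtable);
}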
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/gdb.cc ADDED
@@ -0,0 +1,530 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #include <cstdlib>
+ #include <memory>
+ #include <utility>
+
+ #include "arrow/array.h"
+ #include "arrow/chunked_array.h"
+ #include "arrow/datum.h"
+ #include "arrow/extension_type.h"
+ #include "arrow/ipc/json_simple.h"
+ #include "arrow/python/gdb.h"
+ #include "arrow/record_batch.h"
+ #include "arrow/scalar.h"
+ #include "arrow/table.h"
+ #include "arrow/type.h"
+ #include "arrow/util/debug.h"
+ #include "arrow/util/decimal.h"
+ #include "arrow/util/key_value_metadata.h"
+ #include "arrow/util/logging.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+
+ using ipc::internal::json::ArrayFromJSON;
+ using ipc::internal::json::ChunkedArrayFromJSON;
+ using ipc::internal::json::ScalarFromJSON;
+
+ namespace gdb {
+
+ // Add a nested `arrow` namespace to exercise type lookup from GDB (ARROW-15652)
+ namespace arrow {
+ void DummyFunction() {}
+ }  // namespace arrow
+
+ namespace {
+
+ class CustomStatusDetail : public StatusDetail {
+  public:
+   const char* type_id() const override { return "custom-detail-id"; }
+   std::string ToString() const override { return "This is a detail"; }
+ };
+
+ class UuidType : public ExtensionType {
+  public:
+   UuidType() : ExtensionType(fixed_size_binary(16)) {}
+
+   std::string extension_name() const override { return "uuid"; }
+
+   bool ExtensionEquals(const ExtensionType& other) const override {
+     return (other.extension_name() == this->extension_name());
+   }
+
+   std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override {
+     return std::make_shared<ExtensionArray>(data);
+   }
+
+   Result<std::shared_ptr<DataType>> Deserialize(
+       std::shared_ptr<DataType> storage_type,
+       const std::string& serialized) const override {
+     return Status::NotImplemented("");
+   }
+
+   std::string Serialize() const override { return "uuid-serialized"; }
+ };
+
+ std::shared_ptr<Array> SliceArrayFromJSON(const std::shared_ptr<DataType>& ty,
+                                           std::string_view json, int64_t offset = 0,
+                                           int64_t length = -1) {
+   auto array = *ArrayFromJSON(ty, json);
+   if (length != -1) {
+     return array->Slice(offset, length);
+   } else {
+     return array->Slice(offset);
+   }
+ }
+
+ }  // namespace
+
+ void TestSession() {
+   // We define local variables for all types for which we want to test
+   // pretty-printing.
+   // Then, at the end of this function, we trap to the debugger, so that
+   // test instrumentation can print values from this frame by interacting
+   // with the debugger.
+   // The test instrumentation is in pyarrow/tests/test_gdb.py
+
+ #ifdef __clang__
+   _Pragma("clang diagnostic push");
+   _Pragma("clang diagnostic ignored \"-Wunused-variable\"");
+ #elif defined(__GNUC__)
+   _Pragma("GCC diagnostic push");
+   _Pragma("GCC diagnostic ignored \"-Wunused-variable\"");
+ #endif
+
+   arrow::DummyFunction();
+
+   // Status & Result
+   auto ok_status = Status::OK();
+   auto error_status = Status::IOError("This is an error");
+   auto error_detail_status =
+       error_status.WithDetail(std::make_shared<CustomStatusDetail>());
+   auto ok_result = Result<int>(42);
+   auto error_result = Result<int>(error_status);
+   auto error_detail_result = Result<int>(error_detail_status);
+
+   // String views
+   std::string_view string_view_abc{"abc"};
+   std::string special_chars = std::string("foo\"bar") + '\x00' + "\r\n\t\x1f";
+   std::string_view string_view_special_chars(special_chars);
+
+   // Buffers
+   Buffer buffer_null{nullptr, 0};
+   Buffer buffer_abc{string_view_abc};
+   Buffer buffer_special_chars{string_view_special_chars};
+   char mutable_array[3] = {'a', 'b', 'c'};
+   MutableBuffer buffer_mutable{reinterpret_cast<uint8_t*>(mutable_array), 3};
+   auto heap_buffer = std::make_shared<Buffer>(string_view_abc);
+   auto heap_buffer_mutable = *AllocateBuffer(buffer_abc.size());
+   memcpy(heap_buffer_mutable->mutable_data(), buffer_abc.data(), buffer_abc.size());
+
+   // KeyValueMetadata
+   auto empty_metadata = key_value_metadata({}, {});
+   auto metadata = key_value_metadata(
+       {"key_text", "key_binary"}, {"some value", std::string("z") + '\x00' + "\x1f\xff"});
+
+   // Decimals
+   Decimal128 decimal128_zero{};
+   Decimal128 decimal128_pos{"98765432109876543210987654321098765432"};
+   Decimal128 decimal128_neg{"-98765432109876543210987654321098765432"};
+   BasicDecimal128 basic_decimal128_zero{};
+   BasicDecimal128 basic_decimal128_pos{decimal128_pos.native_endian_array()};
+   BasicDecimal128 basic_decimal128_neg{decimal128_neg.native_endian_array()};
+   Decimal256 decimal256_zero{};
+   Decimal256 decimal256_pos{
+       "9876543210987654321098765432109876543210987654321098765432109876543210987654"};
+   Decimal256 decimal256_neg{
+       "-9876543210987654321098765432109876543210987654321098765432109876543210987654"};
+   BasicDecimal256 basic_decimal256_zero{};
+   BasicDecimal256 basic_decimal256_pos{decimal256_pos.native_endian_array()};
+   BasicDecimal256 basic_decimal256_neg{decimal256_neg.native_endian_array()};
+
+   // Data types
+   NullType null_type;
+   auto heap_null_type = null();
+   BooleanType bool_type;
+   auto heap_bool_type = boolean();
+
+   Date32Type date32_type;
+   Date64Type date64_type;
+   Time32Type time_type_s(TimeUnit::SECOND);
+   Time32Type time_type_ms(TimeUnit::MILLI);
+   Time64Type time_type_us(TimeUnit::MICRO);
+   Time64Type time_type_ns(TimeUnit::NANO);
+   auto heap_time_type_ns = time64(TimeUnit::NANO);
+
+   TimestampType timestamp_type_s(TimeUnit::SECOND);
+   TimestampType timestamp_type_ms_timezone(TimeUnit::MILLI, "Europe/Paris");
+   TimestampType timestamp_type_us(TimeUnit::MICRO);
+   TimestampType timestamp_type_ns_timezone(TimeUnit::NANO, "Europe/Paris");
+   auto heap_timestamp_type_ns_timezone = timestamp(TimeUnit::NANO, "Europe/Paris");
+
+   DayTimeIntervalType day_time_interval_type;
+   MonthIntervalType month_interval_type;
+   MonthDayNanoIntervalType month_day_nano_interval_type;
+
+   DurationType duration_type_s(TimeUnit::SECOND);
+   DurationType duration_type_ns(TimeUnit::NANO);
+
+   BinaryType binary_type;
+   StringType string_type;
+   LargeBinaryType large_binary_type;
+   LargeStringType large_string_type;
+   FixedSizeBinaryType fixed_size_binary_type(10);
+   auto heap_fixed_size_binary_type = fixed_size_binary(10);
+
+   Decimal128Type decimal128_type(16, 5);
+   Decimal256Type decimal256_type(42, 12);
+   auto heap_decimal128_type = decimal128(16, 5);
+
+   ListType list_type(uint8());
+   LargeListType large_list_type(large_utf8());
+   auto heap_list_type = list(uint8());
+   auto heap_large_list_type = large_list(large_utf8());
+
+   FixedSizeListType fixed_size_list_type(float64(), 3);
+   auto heap_fixed_size_list_type = fixed_size_list(float64(), 3);
+
+   DictionaryType dict_type_unordered(int16(), utf8());
+   DictionaryType dict_type_ordered(int16(), utf8(), /*ordered=*/true);
+   auto heap_dict_type = dictionary(int16(), utf8());
+
+   MapType map_type_unsorted(utf8(), binary());
+   MapType map_type_sorted(utf8(), binary(), /*keys_sorted=*/true);
+   auto heap_map_type = map(utf8(), binary());
+
+   StructType struct_type_empty({});
+   StructType struct_type(
+       {field("ints", int8()), field("strs", utf8(), /*nullable=*/false)});
+   auto heap_struct_type =
+       struct_({field("ints", int8()), field("strs", utf8(), /*nullable=*/false)});
+
+   std::vector<int8_t> union_type_codes({7, 42});
+   FieldVector union_fields(
+       {field("ints", int8()), field("strs", utf8(), /*nullable=*/false)});
+   SparseUnionType sparse_union_type(union_fields, union_type_codes);
+   DenseUnionType dense_union_type(union_fields, union_type_codes);
+
+   UuidType uuid_type{};
+   std::shared_ptr<DataType> heap_uuid_type = std::make_shared<UuidType>();
+
+   // Schema
+   auto schema_empty = schema({});
+   auto schema_non_empty = schema({field("ints", int8()), field("strs", utf8())});
+   auto schema_with_metadata = schema_non_empty->WithMetadata(
+       key_value_metadata({"key1", "key2"}, {"value1", "value2"}));
+
+   // Fields
+   Field int_field("ints", int64());
+   Field float_field("floats", float32(), /*nullable=*/false);
+   auto heap_int_field = field("ints", int64());
+
+   // Scalars
+   NullScalar null_scalar;
+   auto heap_null_scalar = MakeNullScalar(null());
+
+   BooleanScalar bool_scalar_null{};
+   BooleanScalar bool_scalar{true};
+   auto heap_bool_scalar = *MakeScalar(boolean(), true);
+
+   Int8Scalar int8_scalar_null{};
+   UInt8Scalar uint8_scalar_null{};
+   Int64Scalar int64_scalar_null{};
+   UInt64Scalar uint64_scalar_null{};
+   Int8Scalar int8_scalar{-42};
+   UInt8Scalar uint8_scalar{234};
+   Int64Scalar int64_scalar{-9223372036854775807LL - 1};
+   UInt64Scalar uint64_scalar{18446744073709551615ULL};
+   HalfFloatScalar half_float_scalar{48640};  // -1.5
+   FloatScalar float_scalar{1.25f};
+   DoubleScalar double_scalar{2.5};
+
+   Time32Scalar time_scalar_s{100, TimeUnit::SECOND};
+   Time32Scalar time_scalar_ms{1000, TimeUnit::MILLI};
+   Time64Scalar time_scalar_us{10000, TimeUnit::MICRO};
+   Time64Scalar time_scalar_ns{100000, TimeUnit::NANO};
+   Time64Scalar time_scalar_null{time64(TimeUnit::NANO)};
+
+   DurationScalar duration_scalar_s{-100, TimeUnit::SECOND};
+   DurationScalar duration_scalar_ms{-1000, TimeUnit::MILLI};
+   DurationScalar duration_scalar_us{-10000, TimeUnit::MICRO};
+   DurationScalar duration_scalar_ns{-100000, TimeUnit::NANO};
+   DurationScalar duration_scalar_null{duration(TimeUnit::NANO)};
+
+   TimestampScalar timestamp_scalar_s{12345, timestamp(TimeUnit::SECOND)};
+   TimestampScalar timestamp_scalar_ms{-123456, timestamp(TimeUnit::MILLI)};
+   TimestampScalar timestamp_scalar_us{1234567, timestamp(TimeUnit::MICRO)};
+   TimestampScalar timestamp_scalar_ns{-12345678, timestamp(TimeUnit::NANO)};
+   TimestampScalar timestamp_scalar_null{timestamp(TimeUnit::NANO)};
+
+   TimestampScalar timestamp_scalar_s_tz{12345,
+                                         timestamp(TimeUnit::SECOND, "Europe/Paris")};
+   TimestampScalar timestamp_scalar_ms_tz{-123456,
+                                          timestamp(TimeUnit::MILLI, "Europe/Paris")};
+   TimestampScalar timestamp_scalar_us_tz{1234567,
+                                          timestamp(TimeUnit::MICRO, "Europe/Paris")};
+   TimestampScalar timestamp_scalar_ns_tz{-12345678,
+                                          timestamp(TimeUnit::NANO, "Europe/Paris")};
+   TimestampScalar timestamp_scalar_null_tz{timestamp(TimeUnit::NANO, "Europe/Paris")};
+
+   MonthIntervalScalar month_interval_scalar{23};
+   MonthIntervalScalar month_interval_scalar_null{};
+   DayTimeIntervalScalar day_time_interval_scalar{{23, -456}};
+   DayTimeIntervalScalar day_time_interval_scalar_null{};
+   MonthDayNanoIntervalScalar month_day_nano_interval_scalar{{1, 23, -456}};
+   MonthDayNanoIntervalScalar month_day_nano_interval_scalar_null{};
+
+   Date32Scalar date32_scalar{23};
+   Date32Scalar date32_scalar_null{};
+   Date64Scalar date64_scalar{45 * 86400000LL};
+   Date64Scalar date64_scalar_null{};
+
+   Decimal128Scalar decimal128_scalar_pos_scale_pos{Decimal128("1234567"),
+                                                    decimal128(10, 4)};
+   Decimal128Scalar decimal128_scalar_pos_scale_neg{Decimal128("-1234567"),
+                                                    decimal128(10, 4)};
+   Decimal128Scalar decimal128_scalar_neg_scale_pos{Decimal128("1234567"),
+                                                    decimal128(10, -4)};
+   Decimal128Scalar decimal128_scalar_neg_scale_neg{Decimal128("-1234567"),
+                                                    decimal128(10, -4)};
+   Decimal128Scalar decimal128_scalar_null{decimal128(10, 4)};
+   auto heap_decimal128_scalar = *MakeScalar(decimal128(10, 4), Decimal128("1234567"));
+
+   Decimal256Scalar decimal256_scalar_pos_scale_pos{
+       Decimal256("1234567890123456789012345678901234567890123456"), decimal256(50, 4)};
+   Decimal256Scalar decimal256_scalar_pos_scale_neg{
+       Decimal256("-1234567890123456789012345678901234567890123456"), decimal256(50, 4)};
+   Decimal256Scalar decimal256_scalar_neg_scale_pos{
+       Decimal256("1234567890123456789012345678901234567890123456"), decimal256(50, -4)};
+   Decimal256Scalar decimal256_scalar_neg_scale_neg{
+       Decimal256("-1234567890123456789012345678901234567890123456"), decimal256(50, -4)};
+   Decimal256Scalar decimal256_scalar_null{decimal256(50, 4)};
+   auto heap_decimal256_scalar = *MakeScalar(
+       decimal256(50, 4), Decimal256("1234567890123456789012345678901234567890123456"));
+
+   BinaryScalar binary_scalar_null{};
+   BinaryScalar binary_scalar_unallocated{std::shared_ptr<Buffer>{nullptr}};
+   BinaryScalar binary_scalar_empty{Buffer::FromString("")};
+   BinaryScalar binary_scalar_abc{Buffer::FromString("abc")};
+   BinaryScalar binary_scalar_bytes{
+       Buffer::FromString(std::string() + '\x00' + "\x1f\xff")};
+
+   StringScalar string_scalar_null{};
+   StringScalar string_scalar_unallocated{std::shared_ptr<Buffer>{nullptr}};
+   StringScalar string_scalar_empty{Buffer::FromString("")};
+   StringScalar string_scalar_hehe{Buffer::FromString("héhé")};
+   StringScalar string_scalar_invalid_chars{
+       Buffer::FromString(std::string("abc") + '\x00' + "def\xffghi")};
+
+   LargeBinaryScalar large_binary_scalar_abc{Buffer::FromString("abc")};
+   LargeStringScalar large_string_scalar_hehe{Buffer::FromString("héhé")};
+
+   FixedSizeBinaryScalar fixed_size_binary_scalar{Buffer::FromString("abc"),
+                                                  fixed_size_binary(3)};
+   FixedSizeBinaryScalar fixed_size_binary_scalar_null{
+       Buffer::FromString("   "), fixed_size_binary(3), /*is_valid=*/false};
+
+   std::shared_ptr<Array> dict_array;
+   dict_array = *ArrayFromJSON(utf8(), R"(["foo", "bar", "quux"])");
+   DictionaryScalar dict_scalar{{std::make_shared<Int8Scalar>(42), dict_array},
+                                dictionary(int8(), utf8())};
+   DictionaryScalar dict_scalar_null{dictionary(int8(), utf8())};
+
+   std::shared_ptr<Array> list_value_array = *ArrayFromJSON(int32(), R"([4, 5, 6])");
+   std::shared_ptr<Array> list_zero_length = *ArrayFromJSON(int32(), R"([])");
+   ListScalar list_scalar{list_value_array};
+   ListScalar list_scalar_null{list_zero_length, list(int32()), /*is_valid=*/false};
+   LargeListScalar large_list_scalar{list_value_array};
+   LargeListScalar large_list_scalar_null{list_zero_length, large_list(int32()),
+                                          /*is_valid=*/false};
+   FixedSizeListScalar fixed_size_list_scalar{list_value_array};
+   FixedSizeListScalar fixed_size_list_scalar_null{
+       list_value_array, fixed_size_list(int32(), 3), /*is_valid=*/false};
+
+   auto struct_scalar_type = struct_({field("ints", int32()), field("strs", utf8())});
+   StructScalar struct_scalar{
+       ScalarVector{MakeScalar(int32_t(42)), MakeScalar("some text")}, struct_scalar_type};
+   StructScalar struct_scalar_null{struct_scalar.value, struct_scalar_type,
+                                   /*is_valid=*/false};
+
+   auto sparse_union_scalar_type =
+       sparse_union(FieldVector{field("ints", int32()), field("strs", utf8())}, {7, 42});
+   auto dense_union_scalar_type =
+       dense_union(FieldVector{field("ints", int32()), field("strs", utf8())}, {7, 42});
+   std::vector<std::shared_ptr<Scalar>> union_values = {MakeScalar(int32_t(43)),
+                                                        MakeNullScalar(utf8())};
+   SparseUnionScalar sparse_union_scalar{union_values, 7, sparse_union_scalar_type};
+   DenseUnionScalar dense_union_scalar{union_values[0], 7, dense_union_scalar_type};
+
+   union_values[0] = MakeNullScalar(int32());
+   SparseUnionScalar sparse_union_scalar_null{union_values, 7, sparse_union_scalar_type};
+   DenseUnionScalar dense_union_scalar_null{union_values[0], 7, dense_union_scalar_type};
+
+   auto extension_scalar_type = std::make_shared<UuidType>();
+   ExtensionScalar extension_scalar{
+       std::make_shared<FixedSizeBinaryScalar>(Buffer::FromString("0123456789abcdef"),
+                                               extension_scalar_type->storage_type()),
+       extension_scalar_type};
+   ExtensionScalar extension_scalar_null{extension_scalar.value, extension_scalar_type,
+                                         /*is_valid=*/false};
+
+   std::shared_ptr<Scalar> heap_map_scalar;
+   ARROW_CHECK_OK(
+       ScalarFromJSON(map(utf8(), int32()), R"([["a", 5], ["b", 6]])", &heap_map_scalar));
+   auto heap_map_scalar_null = MakeNullScalar(heap_map_scalar->type);
+
+   // Array and ArrayData
+   auto heap_null_array = SliceArrayFromJSON(null(), "[null, null]");
+
+   auto heap_int32_array = SliceArrayFromJSON(int32(), "[-5, 6, null, 42]");
+   ArrayData int32_array_data{*heap_int32_array->data()};
+   Int32Array int32_array{heap_int32_array->data()->Copy()};
+
+   auto heap_int32_array_no_nulls = SliceArrayFromJSON(int32(), "[-5, 6, 3, 42]");
+
+   const char* json_int32_array = "[-1, 2, -3, 4, null, -5, 6, -7, 8, null, -9, -10]";
+   auto heap_int32_array_sliced_1_9 = SliceArrayFromJSON(int32(), json_int32_array, 1, 9);
+   auto heap_int32_array_sliced_2_6 = SliceArrayFromJSON(int32(), json_int32_array, 2, 6);
+   auto heap_int32_array_sliced_8_4 = SliceArrayFromJSON(int32(), json_int32_array, 8, 4);
+   auto heap_int32_array_sliced_empty =
+       SliceArrayFromJSON(int32(), json_int32_array, 6, 0);
+
+   const char* json_bool_array =
+       "[false, false, true, true, null, null, false, false, true, true, "
+       "null, null, false, false, true, true, null, null]";
+   auto heap_bool_array = SliceArrayFromJSON(boolean(), json_bool_array);
+   auto heap_bool_array_sliced_1_9 = SliceArrayFromJSON(boolean(), json_bool_array, 1, 9);
+   auto heap_bool_array_sliced_2_6 = SliceArrayFromJSON(boolean(), json_bool_array, 2, 6);
+   auto heap_bool_array_sliced_empty =
+       SliceArrayFromJSON(boolean(), json_bool_array, 6, 0);
+
+   auto heap_list_array = SliceArrayFromJSON(list(int64()), "[[1, 2], null, []]");
+   ListArray list_array{heap_list_array->data()};
+
+   const char* json_double_array = "[-1.5, null]";
+   auto heap_double_array = SliceArrayFromJSON(float64(), json_double_array);
+
+   const char* json_float16_array = "[0, 48640]";
+   auto heap_float16_array =
+       *SliceArrayFromJSON(uint16(), json_float16_array)->View(float16());
+
+   auto heap_date32_array =
+       SliceArrayFromJSON(date32(), "[0, null, 18336, -9004, -719162, -719163]");
+   auto heap_date64_array = SliceArrayFromJSON(
+       date64(), "[1584230400000, -777945600000, -62135596800000, -62135683200000, 123]");
+
+   const char* json_time_array = "[null, -123, 456]";
+   auto heap_time32_array_s =
+       SliceArrayFromJSON(time32(TimeUnit::SECOND), json_time_array);
+   auto heap_time32_array_ms =
+       SliceArrayFromJSON(time32(TimeUnit::MILLI), json_time_array);
+   auto heap_time64_array_us =
+       SliceArrayFromJSON(time64(TimeUnit::MICRO), json_time_array);
+   auto heap_time64_array_ns = SliceArrayFromJSON(time64(TimeUnit::NANO), json_time_array);
+
+   auto heap_month_interval_array =
+       SliceArrayFromJSON(month_interval(), "[123, -456, null]");
+   auto heap_day_time_interval_array =
+       SliceArrayFromJSON(day_time_interval(), "[[1, -600], null]");
+   auto heap_month_day_nano_interval_array =
+       SliceArrayFromJSON(month_day_nano_interval(), "[[1, -600, 5000], null]");
+
+   const char* json_duration_array = "[null, -1234567890123456789]";
+   auto heap_duration_array_s =
+       SliceArrayFromJSON(duration(TimeUnit::SECOND), json_duration_array);
+   auto heap_duration_array_ns =
+       SliceArrayFromJSON(duration(TimeUnit::NANO), json_duration_array);
+
+   auto heap_timestamp_array_s = SliceArrayFromJSON(
+       timestamp(TimeUnit::SECOND),
+       R"([null, "1970-01-01 00:00:00", "1900-02-28 12:34:56", "3989-07-14 00:00:00"])");
+   auto heap_timestamp_array_ms = SliceArrayFromJSON(
+       timestamp(TimeUnit::MILLI),
+       R"([null, "1900-02-28 12:34:56.123", "3989-07-14 00:00:00.789"])");
+   auto heap_timestamp_array_us = SliceArrayFromJSON(
+       timestamp(TimeUnit::MICRO),
+       R"([null, "1900-02-28 12:34:56.654321", "3989-07-14 00:00:00.456789"])");
+   auto heap_timestamp_array_ns = SliceArrayFromJSON(
+       timestamp(TimeUnit::NANO), R"([null, "1900-02-28 12:34:56.987654321"])");
+
+   auto heap_decimal128_array = SliceArrayFromJSON(
+       decimal128(30, 6),
+       R"([null, "-1234567890123456789.012345", "1234567890123456789.012345"])");
+   auto heap_decimal256_array = SliceArrayFromJSON(
+       decimal256(50, 6), R"([null, "-123456789012345678901234567890123456789.012345"])");
+   auto heap_decimal128_array_sliced = heap_decimal128_array->Slice(1, 1);
+
+   auto heap_fixed_size_binary_array =
+       SliceArrayFromJSON(fixed_size_binary(3), "[null, \"abc\", \"\\u0000\\u001f\xff\"]");
+   auto heap_fixed_size_binary_array_zero_width =
+       SliceArrayFromJSON(fixed_size_binary(0), R"([null, ""])");
+   auto heap_fixed_size_binary_array_sliced = heap_fixed_size_binary_array->Slice(1, 1);
+
+   const char* json_binary_array = "[null, \"abcd\", \"\\u0000\\u001f\xff\"]";
+   auto heap_binary_array = SliceArrayFromJSON(binary(), json_binary_array);
+   auto heap_large_binary_array = SliceArrayFromJSON(large_binary(), json_binary_array);
+   const char* json_string_array = "[null, \"héhé\", \"invalid \xff char\"]";
+   auto heap_string_array = SliceArrayFromJSON(utf8(), json_string_array);
+   auto heap_large_string_array = SliceArrayFromJSON(large_utf8(), json_string_array);
+   auto heap_binary_array_sliced = heap_binary_array->Slice(1, 1);
+
+   // ChunkedArray
+   ArrayVector array_chunks(2);
+   array_chunks[0] = *ArrayFromJSON(int32(), "[1, 2]");
+   array_chunks[1] = *ArrayFromJSON(int32(), "[3, null, 4]");
+   ChunkedArray chunked_array{array_chunks};
+
+   // RecordBatch
+   auto batch_schema = schema({field("ints", int32()), field("strs", utf8())});
+   ArrayVector batch_columns{2};
+   batch_columns[0] = *ArrayFromJSON(int32(), "[1, 2, 3]");
+   batch_columns[1] = *ArrayFromJSON(utf8(), R"(["abc", null, "def"])");
+   auto batch = RecordBatch::Make(batch_schema, /*num_rows=*/3, batch_columns);
+   auto batch_with_metadata = batch->ReplaceSchemaMetadata(
+       key_value_metadata({"key1", "key2", "key3"}, {"value1", "value2", "value3"}));
+
+   // Table
+   ChunkedArrayVector table_columns{2};
+   ARROW_CHECK_OK(
+       ChunkedArrayFromJSON(int32(), {"[1, 2, 3]", "[4, 5]"}, &table_columns[0]));
+   ARROW_CHECK_OK(ChunkedArrayFromJSON(
+       utf8(), {R"(["abc", null])", R"(["def"])", R"(["ghi", "jkl"])"},
+       &table_columns[1]));
+   auto table = Table::Make(batch_schema, table_columns);
+
+   // Datum
+   Datum empty_datum{};
+   Datum scalar_datum{MakeNullScalar(boolean())};
+   Datum array_datum{heap_int32_array};
+   Datum chunked_array_datum{chunked_array};
+   Datum batch_datum{batch};
+   Datum table_datum{table};
+
+ #ifdef __clang__
+   _Pragma("clang diagnostic pop");
+ #elif defined(__GNUC__)
+   _Pragma("GCC diagnostic pop");
+ #endif
+
+   // Hook into debugger
+   ::arrow::internal::DebugTrap();
+ }
+
+ }  // namespace gdb
+ }  // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/helpers.cc ADDED
@@ -0,0 +1,470 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // helpers.h includes a NumPy header, so we include this first
+ #include "arrow/python/numpy_interop.h"
+
+ #include "arrow/python/helpers.h"
+
+ #include <cmath>
+ #include <limits>
+ #include <sstream>
+ #include <type_traits>
+
+ #include "arrow/python/common.h"
+ #include "arrow/python/decimal.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/checked_cast.h"
+ #include "arrow/util/logging.h"
+
+ namespace arrow {
+
+ using internal::checked_cast;
+
+ namespace py {
+
+ #define GET_PRIMITIVE_TYPE(NAME, FACTORY) \
+   case Type::NAME:                        \
+     return FACTORY()
+
+ std::shared_ptr<DataType> GetPrimitiveType(Type::type type) {
+   switch (type) {
+     case Type::NA:
+       return null();
+       GET_PRIMITIVE_TYPE(UINT8, uint8);
+       GET_PRIMITIVE_TYPE(INT8, int8);
+       GET_PRIMITIVE_TYPE(UINT16, uint16);
+       GET_PRIMITIVE_TYPE(INT16, int16);
+       GET_PRIMITIVE_TYPE(UINT32, uint32);
+       GET_PRIMITIVE_TYPE(INT32, int32);
+       GET_PRIMITIVE_TYPE(UINT64, uint64);
+       GET_PRIMITIVE_TYPE(INT64, int64);
+       GET_PRIMITIVE_TYPE(DATE32, date32);
+       GET_PRIMITIVE_TYPE(DATE64, date64);
+       GET_PRIMITIVE_TYPE(BOOL, boolean);
+       GET_PRIMITIVE_TYPE(HALF_FLOAT, float16);
+       GET_PRIMITIVE_TYPE(FLOAT, float32);
+       GET_PRIMITIVE_TYPE(DOUBLE, float64);
+       GET_PRIMITIVE_TYPE(BINARY, binary);
+       GET_PRIMITIVE_TYPE(STRING, utf8);
+       GET_PRIMITIVE_TYPE(LARGE_BINARY, large_binary);
+       GET_PRIMITIVE_TYPE(LARGE_STRING, large_utf8);
+       GET_PRIMITIVE_TYPE(INTERVAL_MONTH_DAY_NANO, month_day_nano_interval);
+     default:
+       return nullptr;
+   }
+ }
+
+ PyObject* PyHalf_FromHalf(npy_half value) {
+   PyObject* result = PyArrayScalar_New(Half);
+   if (result != NULL) {
+     PyArrayScalar_ASSIGN(result, Half, value);
+   }
+   return result;
+ }
+
+ Status PyFloat_AsHalf(PyObject* obj, npy_half* out) {
+   if (PyArray_IsScalar(obj, Half)) {
+     *out = PyArrayScalar_VAL(obj, Half);
+     return Status::OK();
+   } else {
+     // XXX: cannot use npy_double_to_half() without linking with Numpy
+     return Status::TypeError("Expected np.float16 instance");
+   }
+ }
+
+ namespace internal {
+
+ std::string PyBytes_AsStdString(PyObject* obj) {
+   DCHECK(PyBytes_Check(obj));
+   return std::string(PyBytes_AS_STRING(obj), PyBytes_GET_SIZE(obj));
+ }
+
+ Status PyUnicode_AsStdString(PyObject* obj, std::string* out) {
+   DCHECK(PyUnicode_Check(obj));
+   Py_ssize_t size;
+   // The utf-8 representation is cached on the unicode object
+   const char* data = PyUnicode_AsUTF8AndSize(obj, &size);
+   RETURN_IF_PYERROR();
+   *out = std::string(data, size);
+   return Status::OK();
+ }
+
+ std::string PyObject_StdStringRepr(PyObject* obj) {
+   OwnedRef unicode_ref(PyObject_Repr(obj));
+   OwnedRef bytes_ref;
+
+   if (unicode_ref) {
+     bytes_ref.reset(
+         PyUnicode_AsEncodedString(unicode_ref.obj(), "utf8", "backslashreplace"));
+   }
+   if (!bytes_ref) {
+     PyErr_Clear();
+     std::stringstream ss;
+     ss << "<object of type '" << Py_TYPE(obj)->tp_name << "' repr() failed>";
+     return ss.str();
+   }
+   return PyBytes_AsStdString(bytes_ref.obj());
+ }
+
+ Status PyObject_StdStringStr(PyObject* obj, std::string* out) {
+   OwnedRef string_ref(PyObject_Str(obj));
+   RETURN_IF_PYERROR();
+   return PyUnicode_AsStdString(string_ref.obj(), out);
+ }
+
+ Result<bool> IsModuleImported(const std::string& module_name) {
+   // PyImport_GetModuleDict returns with a borrowed reference
+   OwnedRef key(PyUnicode_FromString(module_name.c_str()));
+   auto is_imported = PyDict_Contains(PyImport_GetModuleDict(), key.obj());
+   RETURN_IF_PYERROR();
+   return is_imported;
+ }
+
+ Status ImportModule(const std::string& module_name, OwnedRef* ref) {
+   PyObject* module = PyImport_ImportModule(module_name.c_str());
+   RETURN_IF_PYERROR();
+   ref->reset(module);
+   return Status::OK();
+ }
+
+ Status ImportFromModule(PyObject* module, const std::string& name, OwnedRef* ref) {
+   PyObject* attr = PyObject_GetAttrString(module, name.c_str());
+   RETURN_IF_PYERROR();
+   ref->reset(attr);
+   return Status::OK();
+ }
+
+ namespace {
+
+ Status IntegerOverflowStatus(PyObject* obj, const std::string& overflow_message) {
+   if (overflow_message.empty()) {
+     std::string obj_as_stdstring;
+     RETURN_NOT_OK(PyObject_StdStringStr(obj, &obj_as_stdstring));
+     return Status::Invalid("Value ", obj_as_stdstring,
+                            " too large to fit in C integer type");
+   } else {
+     return Status::Invalid(overflow_message);
+   }
+ }
+
+ Result<OwnedRef> PyObjectToPyInt(PyObject* obj) {
+   // Try to call __index__ or __int__ on `obj`
+   // (starting from Python 3.10, the latter isn't done anymore by PyLong_AsLong*).
+   OwnedRef ref(PyNumber_Index(obj));
+   if (ref) {
+     return std::move(ref);
+   }
+   PyErr_Clear();
+   const auto nb = Py_TYPE(obj)->tp_as_number;
+   if (nb && nb->nb_int) {
+     ref.reset(nb->nb_int(obj));
+     if (!ref) {
+       RETURN_IF_PYERROR();
+     }
+     DCHECK(ref);
+     return std::move(ref);
+   }
+   return Status::TypeError(
+       "object of type ",
+       PyObject_StdStringRepr(reinterpret_cast<PyObject*>(Py_TYPE(obj))),
+       " cannot be converted to int");
+ }
+
+ // Extract C signed int from Python object
+ template <typename Int, enable_if_t<std::is_signed<Int>::value, Int> = 0>
+ Status CIntFromPythonImpl(PyObject* obj, Int* out, const std::string& overflow_message) {
+   static_assert(sizeof(Int) <= sizeof(long long),  // NOLINT
+                 "integer type larger than long long");
+
+   OwnedRef ref;
+   if (!PyLong_Check(obj)) {
+     ARROW_ASSIGN_OR_RAISE(ref, PyObjectToPyInt(obj));
+     obj = ref.obj();
+   }
+
+   if (sizeof(Int) > sizeof(long)) {  // NOLINT
+     const auto value = PyLong_AsLongLong(obj);
+     if (ARROW_PREDICT_FALSE(value == -1)) {
+       RETURN_IF_PYERROR();
+     }
+     if (ARROW_PREDICT_FALSE(value < std::numeric_limits<Int>::min() ||
+                             value > std::numeric_limits<Int>::max())) {
+       return IntegerOverflowStatus(obj, overflow_message);
+     }
+     *out = static_cast<Int>(value);
+   } else {
+     const auto value = PyLong_AsLong(obj);
+     if (ARROW_PREDICT_FALSE(value == -1)) {
+       RETURN_IF_PYERROR();
+     }
+     if (ARROW_PREDICT_FALSE(value < std::numeric_limits<Int>::min() ||
+                             value > std::numeric_limits<Int>::max())) {
+       return IntegerOverflowStatus(obj, overflow_message);
+     }
+     *out = static_cast<Int>(value);
+   }
+   return Status::OK();
+ }
+
+ // Extract C unsigned int from Python object
+ template <typename Int, enable_if_t<std::is_unsigned<Int>::value, Int> = 0>
+ Status CIntFromPythonImpl(PyObject* obj, Int* out, const std::string& overflow_message) {
+   static_assert(sizeof(Int) <= sizeof(unsigned long long),  // NOLINT
+                 "integer type larger than unsigned long long");
+
+   OwnedRef ref;
+   if (!PyLong_Check(obj)) {
+     ARROW_ASSIGN_OR_RAISE(ref, PyObjectToPyInt(obj));
+     obj = ref.obj();
+   }
+
+   if (sizeof(Int) > sizeof(unsigned long)) {  // NOLINT
+     const auto value = PyLong_AsUnsignedLongLong(obj);
+     if (ARROW_PREDICT_FALSE(value == static_cast<decltype(value)>(-1))) {
+       RETURN_IF_PYERROR();
+     }
+     if (ARROW_PREDICT_FALSE(value > std::numeric_limits<Int>::max())) {
+       return IntegerOverflowStatus(obj, overflow_message);
+     }
+     *out = static_cast<Int>(value);
+   } else {
+     const auto value = PyLong_AsUnsignedLong(obj);
+     if (ARROW_PREDICT_FALSE(value == static_cast<decltype(value)>(-1))) {
+       RETURN_IF_PYERROR();
+     }
+     if (ARROW_PREDICT_FALSE(value > std::numeric_limits<Int>::max())) {
+       return IntegerOverflowStatus(obj, overflow_message);
+     }
+     *out = static_cast<Int>(value);
+   }
+   return Status::OK();
+ }
+
+ }  // namespace
+
+ template <typename Int>
+ Status CIntFromPython(PyObject* obj, Int* out, const std::string& overflow_message) {
+   if (PyBool_Check(obj)) {
+     return Status::TypeError("Expected integer, got bool");
+   }
+   return CIntFromPythonImpl(obj, out, overflow_message);
+ }
+
+ template Status CIntFromPython(PyObject*, int8_t*, const std::string&);
+ template Status CIntFromPython(PyObject*, int16_t*, const std::string&);
+ template Status CIntFromPython(PyObject*, int32_t*, const std::string&);
+ template Status CIntFromPython(PyObject*, int64_t*, const std::string&);
+ template Status CIntFromPython(PyObject*, uint8_t*, const std::string&);
+ template Status CIntFromPython(PyObject*, uint16_t*, const std::string&);
+ template Status CIntFromPython(PyObject*, uint32_t*, const std::string&);
+ template Status CIntFromPython(PyObject*, uint64_t*, const std::string&);
+
+ inline bool MayHaveNaN(PyObject* obj) {
+   // Some core types can be very quickly type-checked and do not allow NaN values
+   const int64_t non_nan_tpflags = Py_TPFLAGS_LONG_SUBCLASS | Py_TPFLAGS_LIST_SUBCLASS |
+                                   Py_TPFLAGS_TUPLE_SUBCLASS | Py_TPFLAGS_BYTES_SUBCLASS |
+                                   Py_TPFLAGS_UNICODE_SUBCLASS | Py_TPFLAGS_DICT_SUBCLASS |
+                                   Py_TPFLAGS_BASE_EXC_SUBCLASS | Py_TPFLAGS_TYPE_SUBCLASS;
+   return !PyType_HasFeature(Py_TYPE(obj), non_nan_tpflags);
+ }
+
+ bool PyFloat_IsNaN(PyObject* obj) {
+   return PyFloat_Check(obj) && std::isnan(PyFloat_AsDouble(obj));
+ }
+
+ namespace {
+
+ static bool pandas_static_initialized = false;
+
+ // Once initialized, these variables hold borrowed references to Pandas static data.
+ // We should not use OwnedRef here because Python destructors would be
+ // called on a finalized interpreter.
+ static PyObject* pandas_NA = nullptr;
+ static PyObject* pandas_NaT = nullptr;
+ static PyObject* pandas_Timedelta = nullptr;
+ static PyObject* pandas_Timestamp = nullptr;
+ static PyTypeObject* pandas_NaTType = nullptr;
+ static PyObject* pandas_DateOffset = nullptr;
+
+ }  // namespace
+
+ void InitPandasStaticData() {
+   // NOTE: This is called with the GIL held. We needn't (and shouldn't,
+   // to avoid deadlocks) use an additional C++ lock (ARROW-10519).
+   if (pandas_static_initialized) {
+     return;
+   }
+
+   OwnedRef pandas;
+
+   // Import pandas
+   Status s = ImportModule("pandas", &pandas);
+   if (!s.ok()) {
+     return;
+   }
+
+   // Since ImportModule can release the GIL, another thread could have
+   // already initialized the static data.
+   if (pandas_static_initialized) {
+     return;
+   }
+   OwnedRef ref;
+
+   // set NaT sentinel and its type
+   if (ImportFromModule(pandas.obj(), "NaT", &ref).ok()) {
+     pandas_NaT = ref.obj();
+     // PyObject_Type returns a new reference but we trust that pandas.NaT will
+     // outlive our use of this PyObject*
+     pandas_NaTType = Py_TYPE(ref.obj());
+   }
+
+   // retain a reference to Timedelta
+   if (ImportFromModule(pandas.obj(), "Timedelta", &ref).ok()) {
+     pandas_Timedelta = ref.obj();
+   }
+
+   // retain a reference to Timestamp
+   if (ImportFromModule(pandas.obj(), "Timestamp", &ref).ok()) {
+     pandas_Timestamp = ref.obj();
+   }
+
+   // if pandas.NA exists, retain a reference to it
+   if (ImportFromModule(pandas.obj(), "NA", &ref).ok()) {
+     pandas_NA = ref.obj();
+   }
+
+   // Import DateOffset type
+   if (ImportFromModule(pandas.obj(), "DateOffset", &ref).ok()) {
+     pandas_DateOffset = ref.obj();
+   }
+
+   pandas_static_initialized = true;
+ }
+
+ bool PandasObjectIsNull(PyObject* obj) {
+   if (!MayHaveNaN(obj)) {
+     return false;
+   }
+   if (obj == Py_None) {
+     return true;
+   }
+   if (PyFloat_IsNaN(obj) || (pandas_NA && obj == pandas_NA) ||
+       (pandas_NaTType && PyObject_TypeCheck(obj, pandas_NaTType)) ||
+       (internal::PyDecimal_Check(obj) && internal::PyDecimal_ISNAN(obj))) {
+     return true;
+   }
+   return false;
+ }
+
+ bool IsPandasTimedelta(PyObject* obj) {
+   return pandas_Timedelta && PyObject_IsInstance(obj, pandas_Timedelta);
+ }
+
+ bool IsPandasTimestamp(PyObject* obj) {
+   return pandas_Timestamp && PyObject_IsInstance(obj, pandas_Timestamp);
+ }
+
+ PyObject* BorrowPandasDataOffsetType() { return pandas_DateOffset; }
+
+ Status InvalidValue(PyObject* obj, const std::string& why) {
+   auto obj_as_str = PyObject_StdStringRepr(obj);
+   return Status::Invalid("Could not convert ", std::move(obj_as_str), " with type ",
+                          Py_TYPE(obj)->tp_name, ": ", why);
+ }
+
+ Status InvalidType(PyObject* obj, const std::string& why) {
+   auto obj_as_str = PyObject_StdStringRepr(obj);
+   return Status::TypeError("Could not convert ", std::move(obj_as_str), " with type ",
+                            Py_TYPE(obj)->tp_name, ": ", why);
+ }
+
+ Status UnboxIntegerAsInt64(PyObject* obj, int64_t* out) {
+   if (PyLong_Check(obj)) {
+     int overflow = 0;
+     *out = PyLong_AsLongLongAndOverflow(obj, &overflow);
+     if (overflow) {
+       return Status::Invalid("PyLong is too large to fit int64");
+     }
+   } else if (PyArray_IsScalar(obj, Byte)) {
+     *out = reinterpret_cast<PyByteScalarObject*>(obj)->obval;
+   } else if (PyArray_IsScalar(obj, UByte)) {
+     *out = reinterpret_cast<PyUByteScalarObject*>(obj)->obval;
+   } else if (PyArray_IsScalar(obj, Short)) {
+     *out = reinterpret_cast<PyShortScalarObject*>(obj)->obval;
+   } else if (PyArray_IsScalar(obj, UShort)) {
+     *out = reinterpret_cast<PyUShortScalarObject*>(obj)->obval;
+   } else if (PyArray_IsScalar(obj, Int)) {
+     *out = reinterpret_cast<PyIntScalarObject*>(obj)->obval;
+   } else if (PyArray_IsScalar(obj, UInt)) {
+     *out = reinterpret_cast<PyUIntScalarObject*>(obj)->obval;
+   } else if (PyArray_IsScalar(obj, Long)) {
+     *out = reinterpret_cast<PyLongScalarObject*>(obj)->obval;
+   } else if (PyArray_IsScalar(obj, ULong)) {
+     *out = reinterpret_cast<PyULongScalarObject*>(obj)->obval;
+   } else if (PyArray_IsScalar(obj, LongLong)) {
+     *out = reinterpret_cast<PyLongLongScalarObject*>(obj)->obval;
+   } else if (PyArray_IsScalar(obj, Int64)) {
+     *out = reinterpret_cast<PyInt64ScalarObject*>(obj)->obval;
+   } else if (PyArray_IsScalar(obj, ULongLong)) {
+     *out = reinterpret_cast<PyULongLongScalarObject*>(obj)->obval;
+   } else if (PyArray_IsScalar(obj, UInt64)) {
+     *out = reinterpret_cast<PyUInt64ScalarObject*>(obj)->obval;
+   } else {
+     return Status::Invalid("Integer scalar type not recognized");
+   }
+   return Status::OK();
+ }
+
+ Status IntegerScalarToDoubleSafe(PyObject* obj, double* out) {
+   int64_t value = 0;
+   RETURN_NOT_OK(UnboxIntegerAsInt64(obj, &value));
+
+   constexpr int64_t kDoubleMax = 1LL << 53;
+   constexpr int64_t kDoubleMin = -(1LL << 53);
+
+   if (value < kDoubleMin || value > kDoubleMax) {
+     return Status::Invalid("Integer value ", value, " is outside of the range exactly",
+                            " representable by an IEEE 754 double precision value");
+   }
+   *out = static_cast<double>(value);
+   return Status::OK();
+ }
+
+ Status IntegerScalarToFloat32Safe(PyObject* obj, float* out) {
+   int64_t value = 0;
+   RETURN_NOT_OK(UnboxIntegerAsInt64(obj, &value));
+
+   constexpr int64_t kFloatMax = 1LL << 24;
+   constexpr int64_t kFloatMin = -(1LL << 24);
+
+   if (value < kFloatMin || value > kFloatMax) {
+     return Status::Invalid("Integer value ", value, " is outside of the range exactly",
+                            " representable by an IEEE 754 single precision value");
+   }
+   *out = static_cast<float>(value);
+   return Status::OK();
+ }
+
+ void DebugPrint(PyObject* obj) {
+   std::string repr = PyObject_StdStringRepr(obj);
+   PySys_WriteStderr("%s\n", repr.c_str());
+ }
+
+ }  // namespace internal
+ }  // namespace py
+ }  // namespace arrow
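
As a quick orientation for the conversion helpers in the file above, the sketch below shows one way CIntFromPython might be driven from an embedded CPython interpreter. It is an illustrative example only, not part of this commit: the file name, the chosen values and the overflow message are invented, and the overflow branch shown assumes an LP64 platform where long is 64 bits.

// cint_from_python_example.cc -- illustrative sketch, not part of this commit.
#include <cstdint>
#include <iostream>

#include "arrow/python/helpers.h"  // declares arrow::py::internal::CIntFromPython

int main() {
  Py_Initialize();  // the helpers header pulls in the CPython API
  {
    int32_t out = 0;
    // 42 fits in int32_t, so the conversion succeeds and out is filled in.
    PyObject* small = PyLong_FromLong(42);
    arrow::Status st = arrow::py::internal::CIntFromPython(small, &out);
    std::cout << st.ToString() << " out=" << out << std::endl;
    Py_DECREF(small);

    // 3000000000 fits in a 64-bit long but not in int32_t, so the custom
    // overflow message comes back as an Invalid status.
    PyObject* big = PyLong_FromString("3000000000", nullptr, 10);
    st = arrow::py::internal::CIntFromPython(big, &out, "value too large for int32");
    std::cout << st.ToString() << std::endl;
    Py_DECREF(big);
  }
  Py_Finalize();
  return 0;
}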
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/helpers.h ADDED
@@ -0,0 +1,159 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include "arrow/python/platform.h"
+
+ #include <limits>
+ #include <memory>
+ #include <string>
+ #include <utility>
+
+ #include "arrow/python/numpy_interop.h"
+
+ #include <numpy/halffloat.h>
+
+ #include "arrow/python/visibility.h"
+ #include "arrow/type.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+
+ namespace py {
+
+ class OwnedRef;
+
+ // \brief Get an arrow DataType instance from Arrow's Type::type enum
+ // \param[in] type One of the values of Arrow's Type::type enum
+ // \return A shared pointer to DataType
+ ARROW_PYTHON_EXPORT std::shared_ptr<DataType> GetPrimitiveType(Type::type type);
+
+ // \brief Construct a np.float16 object from a npy_half value.
+ ARROW_PYTHON_EXPORT PyObject* PyHalf_FromHalf(npy_half value);
+
+ // \brief Convert a Python object to a npy_half value.
+ ARROW_PYTHON_EXPORT Status PyFloat_AsHalf(PyObject* obj, npy_half* out);
+
+ namespace internal {
+
+ // \brief Check whether a Python module has already been imported
+ // \param[in] module_name The name of the module
+ Result<bool> IsModuleImported(const std::string& module_name);
+
+ // \brief Import a Python module
+ // \param[in] module_name The name of the module
+ // \param[out] ref The OwnedRef containing the module PyObject*
+ ARROW_PYTHON_EXPORT
+ Status ImportModule(const std::string& module_name, OwnedRef* ref);
+
+ // \brief Import an object from a Python module
+ // \param[in] module A Python module
+ // \param[in] name The name of the object to import
+ // \param[out] ref The OwnedRef containing the \c name attribute of the Python module \c
+ // module
+ ARROW_PYTHON_EXPORT
+ Status ImportFromModule(PyObject* module, const std::string& name, OwnedRef* ref);
+
+ // \brief Check whether obj is an integer, independent of Python versions.
+ inline bool IsPyInteger(PyObject* obj) { return PyLong_Check(obj); }
+
+ // \brief Import symbols from pandas that we need for various type-checking,
+ // like pandas.NaT or pandas.NA
+ void InitPandasStaticData();
+
+ // \brief Use pandas missing value semantics to check if a value is null
+ ARROW_PYTHON_EXPORT
+ bool PandasObjectIsNull(PyObject* obj);
+
+ // \brief Check that obj is a pandas.Timedelta instance
+ ARROW_PYTHON_EXPORT
+ bool IsPandasTimedelta(PyObject* obj);
+
+ // \brief Check that obj is a pandas.Timestamp instance
+ bool IsPandasTimestamp(PyObject* obj);
+
+ // \brief Return a borrowed reference to the pandas.tseries.offsets.DateOffset type
+ PyObject* BorrowPandasDataOffsetType();
+
+ // \brief Check whether obj is a floating-point NaN
+ ARROW_PYTHON_EXPORT
+ bool PyFloat_IsNaN(PyObject* obj);
+
+ inline bool IsPyBinary(PyObject* obj) {
+   return PyBytes_Check(obj) || PyByteArray_Check(obj) || PyMemoryView_Check(obj);
+ }
+
+ // \brief Convert a Python integer into a C integer
+ // \param[in] obj A Python integer
+ // \param[out] out A pointer to a C integer to hold the result of the conversion
+ // \return The status of the operation
+ template <typename Int>
+ Status CIntFromPython(PyObject* obj, Int* out, const std::string& overflow_message = "");
+
+ // \brief Convert a Python unicode string to a std::string
+ ARROW_PYTHON_EXPORT
+ Status PyUnicode_AsStdString(PyObject* obj, std::string* out);
+
+ // \brief Convert a Python bytes object to a std::string
+ ARROW_PYTHON_EXPORT
+ std::string PyBytes_AsStdString(PyObject* obj);
+
+ // \brief Call str() on the given object and return the result as a std::string
+ ARROW_PYTHON_EXPORT
+ Status PyObject_StdStringStr(PyObject* obj, std::string* out);
+
+ // \brief Return the repr() of the given object (always succeeds)
+ ARROW_PYTHON_EXPORT
+ std::string PyObject_StdStringRepr(PyObject* obj);
+
+ // \brief Cast the given size to int32_t, with error checking
+ inline Status CastSize(Py_ssize_t size, int32_t* out,
+                        const char* error_msg = "Maximum size exceeded (2GB)") {
+   // size is assumed to be positive
+   if (size > std::numeric_limits<int32_t>::max()) {
+     return Status::Invalid(error_msg);
+   }
+   *out = static_cast<int32_t>(size);
+   return Status::OK();
+ }
+
+ inline Status CastSize(Py_ssize_t size, int64_t* out, const char* error_msg = NULLPTR) {
+   // size is assumed to be positive
+   *out = static_cast<int64_t>(size);
+   return Status::OK();
+ }
+
+ // \brief Print the Python object's __str__ form along with the passed error
+ // message
+ ARROW_PYTHON_EXPORT
+ Status InvalidValue(PyObject* obj, const std::string& why);
+
+ ARROW_PYTHON_EXPORT
+ Status InvalidType(PyObject* obj, const std::string& why);
+
+ ARROW_PYTHON_EXPORT
+ Status IntegerScalarToDoubleSafe(PyObject* obj, double* result);
+ ARROW_PYTHON_EXPORT
+ Status IntegerScalarToFloat32Safe(PyObject* obj, float* result);
+
+ // \brief Print Python object __repr__
+ void DebugPrint(PyObject* obj);
+
+ }  // namespace internal
+ }  // namespace py
+ }  // namespace arrow
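
A similarly illustrative sketch (again not part of this commit) of the GetPrimitiveType mapping declared in the header above: enum values covered by the switch come back as the corresponding type instance, while anything unhandled, such as nested types, yields a null pointer rather than an error status.

// get_primitive_type_example.cc -- illustrative sketch, not part of this commit.
#include <cassert>

#include "arrow/api.h"
#include "arrow/python/helpers.h"

int main() {
  // A mapped enum value returns the matching DataType instance.
  auto ty = arrow::py::GetPrimitiveType(arrow::Type::DOUBLE);
  assert(ty != nullptr && ty->Equals(*arrow::float64()));
  // Nested types are not handled by this helper and yield nullptr.
  assert(arrow::py::GetPrimitiveType(arrow::Type::STRUCT) == nullptr);
  return 0;
}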
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/inference.cc ADDED
@@ -0,0 +1,745 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #include "arrow/python/inference.h"
+ #include "arrow/python/numpy_interop.h"
+
+ #include <datetime.h>
+
+ #include <algorithm>
+ #include <limits>
+ #include <map>
+ #include <string>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/scalar.h"
+ #include "arrow/status.h"
+ #include "arrow/util/decimal.h"
+ #include "arrow/util/logging.h"
+
+ #include "arrow/python/datetime.h"
+ #include "arrow/python/decimal.h"
+ #include "arrow/python/helpers.h"
+ #include "arrow/python/iterators.h"
+ #include "arrow/python/numpy_convert.h"
+
+ namespace arrow {
+ namespace py {
+ namespace {
+ // Assigns a tuple to interval_types_tuple containing the namedtuple for
+ // MonthDayNanoIntervalType and, if present, dateutil's relativedelta and
+ // pandas DateOffset.
+ Status ImportPresentIntervalTypes(OwnedRefNoGIL* interval_types_tuple) {
+   OwnedRef relative_delta_module;
+   // These are optional imports, so swallow errors.
+   OwnedRef relative_delta_type;
+   // Try to import pandas to get types.
+   internal::InitPandasStaticData();
+   if (internal::ImportModule("dateutil.relativedelta", &relative_delta_module).ok()) {
+     RETURN_NOT_OK(internal::ImportFromModule(relative_delta_module.obj(), "relativedelta",
+                                              &relative_delta_type));
+   }
+
+   PyObject* date_offset_type = internal::BorrowPandasDataOffsetType();
+   interval_types_tuple->reset(
+       PyTuple_New(1 + (date_offset_type != nullptr ? 1 : 0) +
+                   (relative_delta_type.obj() != nullptr ? 1 : 0)));
+   RETURN_IF_PYERROR();
+   int index = 0;
+   PyTuple_SetItem(interval_types_tuple->obj(), index++,
+                   internal::NewMonthDayNanoTupleType());
+   RETURN_IF_PYERROR();
+   if (date_offset_type != nullptr) {
+     Py_XINCREF(date_offset_type);
+     PyTuple_SetItem(interval_types_tuple->obj(), index++, date_offset_type);
+     RETURN_IF_PYERROR();
+   }
+   if (relative_delta_type.obj() != nullptr) {
+     PyTuple_SetItem(interval_types_tuple->obj(), index++, relative_delta_type.detach());
+     RETURN_IF_PYERROR();
+   }
+   return Status::OK();
+ }
+
+ }  // namespace
+
+ #define _NUMPY_UNIFY_NOOP(DTYPE) \
+   case NPY_##DTYPE:              \
+     return OK;
+
+ #define _NUMPY_UNIFY_PROMOTE(DTYPE) \
+   case NPY_##DTYPE:                 \
+     current_type_num_ = dtype;      \
+     current_dtype_ = descr;         \
+     return OK;
+
+ #define _NUMPY_UNIFY_PROMOTE_TO(DTYPE, NEW_TYPE)               \
+   case NPY_##DTYPE:                                            \
+     current_type_num_ = NPY_##NEW_TYPE;                        \
+     current_dtype_ = PyArray_DescrFromType(current_type_num_); \
+     return OK;
+
+ // Form a consensus NumPy dtype to use for Arrow conversion for a
+ // collection of dtype objects observed one at a time
+ class NumPyDtypeUnifier {
+  public:
+   enum Action { OK, INVALID };
+
+   NumPyDtypeUnifier() : current_type_num_(-1), current_dtype_(nullptr) {}
+
+   Status InvalidMix(int new_dtype) {
+     return Status::Invalid("Cannot mix NumPy dtypes ",
+                            GetNumPyTypeName(current_type_num_), " and ",
+                            GetNumPyTypeName(new_dtype));
+   }
+
+   int Observe_BOOL(PyArray_Descr* descr, int dtype) { return INVALID; }
+
+   int Observe_INT8(PyArray_Descr* descr, int dtype) {
+     switch (dtype) {
+       _NUMPY_UNIFY_PROMOTE(INT16);
+       _NUMPY_UNIFY_PROMOTE(INT32);
+       _NUMPY_UNIFY_PROMOTE(INT64);
+       _NUMPY_UNIFY_PROMOTE(FLOAT32);
+       _NUMPY_UNIFY_PROMOTE(FLOAT64);
+       default:
+         return INVALID;
+     }
+   }
+
+   int Observe_INT16(PyArray_Descr* descr, int dtype) {
+     switch (dtype) {
+       _NUMPY_UNIFY_NOOP(INT8);
+       _NUMPY_UNIFY_PROMOTE(INT32);
+       _NUMPY_UNIFY_PROMOTE(INT64);
+       _NUMPY_UNIFY_NOOP(UINT8);
+       _NUMPY_UNIFY_PROMOTE(FLOAT32);
+       _NUMPY_UNIFY_PROMOTE(FLOAT64);
+       default:
+         return INVALID;
+     }
+   }
+
+   int Observe_INT32(PyArray_Descr* descr, int dtype) {
+     switch (dtype) {
+       _NUMPY_UNIFY_NOOP(INT8);
+       _NUMPY_UNIFY_NOOP(INT16);
+       _NUMPY_UNIFY_PROMOTE(INT32);
+       _NUMPY_UNIFY_PROMOTE(INT64);
+       _NUMPY_UNIFY_NOOP(UINT8);
+       _NUMPY_UNIFY_NOOP(UINT16);
+       _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
+       _NUMPY_UNIFY_PROMOTE(FLOAT64);
+       default:
+         return INVALID;
+     }
+   }
+
+   int Observe_INT64(PyArray_Descr* descr, int dtype) {
+     switch (dtype) {
+       _NUMPY_UNIFY_NOOP(INT8);
+       _NUMPY_UNIFY_NOOP(INT16);
+       _NUMPY_UNIFY_NOOP(INT32);
+       _NUMPY_UNIFY_NOOP(INT64);
+       _NUMPY_UNIFY_NOOP(UINT8);
+       _NUMPY_UNIFY_NOOP(UINT16);
+       _NUMPY_UNIFY_NOOP(UINT32);
+       _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
+       _NUMPY_UNIFY_PROMOTE(FLOAT64);
+       default:
+         return INVALID;
+     }
+   }
+
+   int Observe_UINT8(PyArray_Descr* descr, int dtype) {
+     switch (dtype) {
+       _NUMPY_UNIFY_PROMOTE(UINT16);
+       _NUMPY_UNIFY_PROMOTE(UINT32);
+       _NUMPY_UNIFY_PROMOTE(UINT64);
+       _NUMPY_UNIFY_PROMOTE(FLOAT32);
+       _NUMPY_UNIFY_PROMOTE(FLOAT64);
+       default:
+         return INVALID;
+     }
+   }
+
+   int Observe_UINT16(PyArray_Descr* descr, int dtype) {
+     switch (dtype) {
+       _NUMPY_UNIFY_NOOP(UINT8);
+       _NUMPY_UNIFY_PROMOTE(UINT32);
+       _NUMPY_UNIFY_PROMOTE(UINT64);
+       _NUMPY_UNIFY_PROMOTE(FLOAT32);
+       _NUMPY_UNIFY_PROMOTE(FLOAT64);
+       default:
+         return INVALID;
+     }
+   }
+
+   int Observe_UINT32(PyArray_Descr* descr, int dtype) {
+     switch (dtype) {
+       _NUMPY_UNIFY_NOOP(UINT8);
+       _NUMPY_UNIFY_NOOP(UINT16);
+       _NUMPY_UNIFY_PROMOTE(UINT64);
+       _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
+       _NUMPY_UNIFY_PROMOTE(FLOAT64);
+       default:
+         return INVALID;
+     }
+   }
+
+   int Observe_UINT64(PyArray_Descr* descr, int dtype) {
+     switch (dtype) {
+       _NUMPY_UNIFY_NOOP(UINT8);
+       _NUMPY_UNIFY_NOOP(UINT16);
+       _NUMPY_UNIFY_NOOP(UINT32);
+       _NUMPY_UNIFY_PROMOTE_TO(FLOAT32, FLOAT64);
+       _NUMPY_UNIFY_PROMOTE(FLOAT64);
+       default:
+         return INVALID;
+     }
+   }
+
+   int Observe_FLOAT16(PyArray_Descr* descr, int dtype) {
+     switch (dtype) {
+       _NUMPY_UNIFY_PROMOTE(FLOAT32);
+       _NUMPY_UNIFY_PROMOTE(FLOAT64);
+       default:
+         return INVALID;
+     }
+   }
+
+   int Observe_FLOAT32(PyArray_Descr* descr, int dtype) {
+     switch (dtype) {
+       _NUMPY_UNIFY_NOOP(INT8);
+       _NUMPY_UNIFY_NOOP(INT16);
+       _NUMPY_UNIFY_NOOP(INT32);
+       _NUMPY_UNIFY_NOOP(INT64);
+       _NUMPY_UNIFY_NOOP(UINT8);
+       _NUMPY_UNIFY_NOOP(UINT16);
+       _NUMPY_UNIFY_NOOP(UINT32);
+       _NUMPY_UNIFY_NOOP(UINT64);
+       _NUMPY_UNIFY_PROMOTE(FLOAT64);
+       default:
+         return INVALID;
+     }
+   }
+
+   int Observe_FLOAT64(PyArray_Descr* descr, int dtype) {
+     switch (dtype) {
+       _NUMPY_UNIFY_NOOP(INT8);
+       _NUMPY_UNIFY_NOOP(INT16);
+       _NUMPY_UNIFY_NOOP(INT32);
+       _NUMPY_UNIFY_NOOP(INT64);
+       _NUMPY_UNIFY_NOOP(UINT8);
+       _NUMPY_UNIFY_NOOP(UINT16);
+       _NUMPY_UNIFY_NOOP(UINT32);
+       _NUMPY_UNIFY_NOOP(UINT64);
+       default:
+         return INVALID;
+     }
+   }
+
+   int Observe_DATETIME(PyArray_Descr* dtype_obj) {
+     // TODO: check that units are all the same
+     return OK;
+   }
+
+   Status Observe(PyArray_Descr* descr) {
+     int dtype = fix_numpy_type_num(descr->type_num);
+
+     if (current_type_num_ == -1) {
+       current_dtype_ = descr;
+       current_type_num_ = dtype;
+       return Status::OK();
+     } else if (current_type_num_ == dtype) {
+       return Status::OK();
+     }
+
+ #define OBSERVE_CASE(DTYPE)                 \
+   case NPY_##DTYPE:                         \
+     action = Observe_##DTYPE(descr, dtype); \
+     break;
+
+     int action = OK;
+     switch (current_type_num_) {
+       OBSERVE_CASE(BOOL);
+       OBSERVE_CASE(INT8);
+       OBSERVE_CASE(INT16);
+       OBSERVE_CASE(INT32);
+       OBSERVE_CASE(INT64);
+       OBSERVE_CASE(UINT8);
+       OBSERVE_CASE(UINT16);
+       OBSERVE_CASE(UINT32);
+       OBSERVE_CASE(UINT64);
+       OBSERVE_CASE(FLOAT16);
+       OBSERVE_CASE(FLOAT32);
+       OBSERVE_CASE(FLOAT64);
+       case NPY_DATETIME:
+         action = Observe_DATETIME(descr);
+         break;
+       default:
+         return Status::NotImplemented("Unsupported numpy type ", GetNumPyTypeName(dtype));
+     }
+
+     if (action == INVALID) {
+       return InvalidMix(dtype);
+     }
+     return Status::OK();
+   }
+
+   bool dtype_was_observed() const { return current_type_num_ != -1; }
+
+   PyArray_Descr* current_dtype() const { return current_dtype_; }
+
+   int current_type_num() const { return current_type_num_; }
+
+  private:
+   int current_type_num_;
+   PyArray_Descr* current_dtype_;
+ };
+
+ class TypeInferrer {
+   // A type inference visitor for Python values
+  public:
+   // \param validate_interval the number of elements to observe before checking
+   // whether the data is mixed type or has other problems. This helps avoid
+   // excess computation for each element while also making sure we "bail out"
+   // early with long sequences that may have problems up front
+   // \param make_unions permit mixed-type data by creating union types (not yet
+   // implemented)
+   explicit TypeInferrer(bool pandas_null_sentinels = false,
+                         int64_t validate_interval = 100, bool make_unions = false)
+       : pandas_null_sentinels_(pandas_null_sentinels),
+         validate_interval_(validate_interval),
+         make_unions_(make_unions),
+         total_count_(0),
+         none_count_(0),
+         bool_count_(0),
+         int_count_(0),
+         date_count_(0),
+         time_count_(0),
+         timestamp_micro_count_(0),
+         duration_count_(0),
+         float_count_(0),
+         binary_count_(0),
+         unicode_count_(0),
+         decimal_count_(0),
+         list_count_(0),
+         struct_count_(0),
+         arrow_scalar_count_(0),
+         numpy_dtype_count_(0),
+         interval_count_(0),
+         max_decimal_metadata_(std::numeric_limits<int32_t>::min(),
+                               std::numeric_limits<int32_t>::min()),
+         decimal_type_() {
+     ARROW_CHECK_OK(internal::ImportDecimalType(&decimal_type_));
+     ARROW_CHECK_OK(ImportPresentIntervalTypes(&interval_types_));
+   }
+
+   /// \param[in] obj a Python object in the sequence
+   /// \param[out] keep_going if sufficient information has been gathered to
+   /// attempt to begin converting the sequence, *keep_going will be set to true
+   /// to signal to the calling visitor loop to terminate
+   Status Visit(PyObject* obj, bool* keep_going) {
+     ++total_count_;
+
+     if (obj == Py_None || (pandas_null_sentinels_ && internal::PandasObjectIsNull(obj))) {
+       ++none_count_;
+     } else if (PyBool_Check(obj)) {
+       ++bool_count_;
+       *keep_going = make_unions_;
+     } else if (PyFloat_Check(obj)) {
+       ++float_count_;
+       *keep_going = make_unions_;
+     } else if (internal::IsPyInteger(obj)) {
+       ++int_count_;
+     } else if (PyDateTime_Check(obj)) {
+       // infer timezone from the first encountered datetime object
+       if (!timestamp_micro_count_) {
+         OwnedRef tzinfo(PyObject_GetAttrString(obj, "tzinfo"));
+         if (tzinfo.obj() != nullptr && tzinfo.obj() != Py_None) {
+           ARROW_ASSIGN_OR_RAISE(timezone_, internal::TzinfoToString(tzinfo.obj()));
+         }
+       }
+       ++timestamp_micro_count_;
+       *keep_going = make_unions_;
+     } else if (PyDelta_Check(obj)) {
+       ++duration_count_;
+       *keep_going = make_unions_;
+     } else if (PyDate_Check(obj)) {
+       ++date_count_;
+       *keep_going = make_unions_;
+     } else if (PyTime_Check(obj)) {
+       ++time_count_;
+       *keep_going = make_unions_;
390
+ } else if (internal::IsPyBinary(obj)) {
391
+ ++binary_count_;
392
+ *keep_going = make_unions_;
393
+ } else if (PyUnicode_Check(obj)) {
394
+ ++unicode_count_;
395
+ *keep_going = make_unions_;
396
+ } else if (arrow::py::is_scalar(obj)) {
397
+ RETURN_NOT_OK(VisitArrowScalar(obj, keep_going));
398
+ } else if (PyArray_CheckAnyScalarExact(obj)) {
399
+ RETURN_NOT_OK(VisitDType(PyArray_DescrFromScalar(obj), keep_going));
400
+ } else if (PySet_Check(obj) || (Py_TYPE(obj) == &PyDictValues_Type)) {
401
+ RETURN_NOT_OK(VisitSet(obj, keep_going));
402
+ } else if (PyArray_Check(obj)) {
403
+ RETURN_NOT_OK(VisitNdarray(obj, keep_going));
404
+ } else if (PyDict_Check(obj)) {
405
+ RETURN_NOT_OK(VisitDict(obj));
406
+ } else if (PyList_Check(obj) ||
407
+ (PyTuple_Check(obj) &&
408
+ !PyObject_IsInstance(obj, PyTuple_GetItem(interval_types_.obj(), 0)))) {
409
+ RETURN_NOT_OK(VisitList(obj, keep_going));
410
+ } else if (PyObject_IsInstance(obj, decimal_type_.obj())) {
411
+ RETURN_NOT_OK(max_decimal_metadata_.Update(obj));
412
+ ++decimal_count_;
413
+ } else if (PyObject_IsInstance(obj, interval_types_.obj())) {
414
+ ++interval_count_;
415
+ } else {
416
+ return internal::InvalidValue(obj,
417
+ "did not recognize Python value type when inferring "
418
+ "an Arrow data type");
419
+ }
420
+
421
+ if (total_count_ % validate_interval_ == 0) {
422
+ RETURN_NOT_OK(Validate());
423
+ }
424
+
425
+ return Status::OK();
426
+ }
427
+
428
+ // Infer value type from a sequence of values
429
+ Status VisitSequence(PyObject* obj, PyObject* mask = nullptr) {
430
+ if (mask == nullptr || mask == Py_None) {
431
+ return internal::VisitSequence(
432
+ obj, /*offset=*/0,
433
+ [this](PyObject* value, bool* keep_going) { return Visit(value, keep_going); });
434
+ } else {
435
+ return internal::VisitSequenceMasked(
436
+ obj, mask, /*offset=*/0,
437
+ [this](PyObject* value, uint8_t masked, bool* keep_going) {
438
+ if (!masked) {
439
+ return Visit(value, keep_going);
440
+ } else {
441
+ return Status::OK();
442
+ }
443
+ });
444
+ }
445
+ }
446
+
447
+ // Infer value type from a sequence of values
448
+ Status VisitIterable(PyObject* obj) {
449
+ return internal::VisitIterable(obj, [this](PyObject* value, bool* keep_going) {
450
+ return Visit(value, keep_going);
451
+ });
452
+ }
453
+
454
+ Status GetType(std::shared_ptr<DataType>* out) {
455
+ // TODO(wesm): handling forming unions
456
+ if (make_unions_) {
457
+ return Status::NotImplemented("Creating union types not yet supported");
458
+ }
459
+
460
+ RETURN_NOT_OK(Validate());
461
+
462
+ if (arrow_scalar_count_ > 0 && arrow_scalar_count_ + none_count_ != total_count_) {
463
+ return Status::Invalid(
464
+ "pyarrow scalars cannot be mixed "
465
+ "with other Python scalar values currently");
466
+ }
467
+
468
+ if (numpy_dtype_count_ > 0) {
469
+ // All NumPy scalars and Nones/nulls
470
+ if (numpy_dtype_count_ + none_count_ == total_count_) {
471
+ return NumPyDtypeToArrow(numpy_unifier_.current_dtype()).Value(out);
472
+ }
473
+
474
+ // The "bad path": data contains a mix of NumPy scalars and
475
+ // other kinds of scalars. Note this can happen innocuously
476
+ // because numpy.nan is not a NumPy scalar (it's a built-in
477
+ // PyFloat)
478
+
479
+ // TODO(ARROW-5564): Merge together type unification so this
480
+ // hack is not necessary
481
+ switch (numpy_unifier_.current_type_num()) {
482
+ case NPY_BOOL:
483
+ bool_count_ += numpy_dtype_count_;
484
+ break;
485
+ case NPY_INT8:
486
+ case NPY_INT16:
487
+ case NPY_INT32:
488
+ case NPY_INT64:
489
+ case NPY_UINT8:
490
+ case NPY_UINT16:
491
+ case NPY_UINT32:
492
+ case NPY_UINT64:
493
+ int_count_ += numpy_dtype_count_;
494
+ break;
495
+ case NPY_FLOAT32:
496
+ case NPY_FLOAT64:
497
+ float_count_ += numpy_dtype_count_;
498
+ break;
499
+ case NPY_DATETIME:
500
+ return Status::Invalid(
501
+ "numpy.datetime64 scalars cannot be mixed "
502
+ "with other Python scalar values currently");
503
+ }
504
+ }
505
+
506
+ if (list_count_) {
507
+ std::shared_ptr<DataType> value_type;
508
+ RETURN_NOT_OK(list_inferrer_->GetType(&value_type));
509
+ *out = list(value_type);
510
+ } else if (struct_count_) {
511
+ RETURN_NOT_OK(GetStructType(out));
512
+ } else if (decimal_count_) {
513
+ if (max_decimal_metadata_.precision() > Decimal128Type::kMaxPrecision) {
514
+ // the default constructor does not validate the precision and scale
515
+ ARROW_ASSIGN_OR_RAISE(*out,
516
+ Decimal256Type::Make(max_decimal_metadata_.precision(),
517
+ max_decimal_metadata_.scale()));
518
+ } else {
519
+ ARROW_ASSIGN_OR_RAISE(*out,
520
+ Decimal128Type::Make(max_decimal_metadata_.precision(),
521
+ max_decimal_metadata_.scale()));
522
+ }
523
+ } else if (float_count_) {
524
+ // Prioritize floats before integers
525
+ *out = float64();
526
+ } else if (int_count_) {
527
+ *out = int64();
528
+ } else if (date_count_) {
529
+ *out = date32();
530
+ } else if (time_count_) {
531
+ *out = time64(TimeUnit::MICRO);
532
+ } else if (timestamp_micro_count_) {
533
+ *out = timestamp(TimeUnit::MICRO, timezone_);
534
+ } else if (duration_count_) {
535
+ *out = duration(TimeUnit::MICRO);
536
+ } else if (bool_count_) {
537
+ *out = boolean();
538
+ } else if (binary_count_) {
539
+ *out = binary();
540
+ } else if (unicode_count_) {
541
+ *out = utf8();
542
+ } else if (interval_count_) {
543
+ *out = month_day_nano_interval();
544
+ } else if (arrow_scalar_count_) {
545
+ *out = scalar_type_;
546
+ } else {
547
+ *out = null();
548
+ }
549
+ return Status::OK();
550
+ }
551
+
552
+ int64_t total_count() const { return total_count_; }
553
+
554
+ protected:
555
+ Status Validate() const {
556
+ if (list_count_ > 0) {
557
+ if (list_count_ + none_count_ != total_count_) {
558
+ return Status::Invalid("cannot mix list and non-list, non-null values");
559
+ }
560
+ RETURN_NOT_OK(list_inferrer_->Validate());
561
+ } else if (struct_count_ > 0) {
562
+ if (struct_count_ + none_count_ != total_count_) {
563
+ return Status::Invalid("cannot mix struct and non-struct, non-null values");
564
+ }
565
+ for (const auto& it : struct_inferrers_) {
566
+ RETURN_NOT_OK(it.second.Validate());
567
+ }
568
+ }
569
+ return Status::OK();
570
+ }
571
+
572
+ Status VisitArrowScalar(PyObject* obj, bool* keep_going /* unused */) {
573
+ ARROW_ASSIGN_OR_RAISE(auto scalar, arrow::py::unwrap_scalar(obj));
574
+ // Check that all the scalar types for the sequence are the same
575
+ if (arrow_scalar_count_ > 0 && *scalar->type != *scalar_type_) {
576
+ return internal::InvalidValue(obj, "cannot mix scalars with different types");
577
+ }
578
+ scalar_type_ = scalar->type;
579
+ ++arrow_scalar_count_;
580
+ return Status::OK();
581
+ }
582
+
583
+ Status VisitDType(PyArray_Descr* dtype, bool* keep_going) {
584
+ // Continue visiting dtypes for now.
585
+ // TODO(wesm): devise approach for unions
586
+ ++numpy_dtype_count_;
587
+ *keep_going = true;
588
+ return numpy_unifier_.Observe(dtype);
589
+ }
590
+
591
+ Status VisitList(PyObject* obj, bool* keep_going /* unused */) {
592
+ if (!list_inferrer_) {
593
+ list_inferrer_.reset(
594
+ new TypeInferrer(pandas_null_sentinels_, validate_interval_, make_unions_));
595
+ }
596
+ ++list_count_;
597
+ return list_inferrer_->VisitSequence(obj);
598
+ }
599
+
600
+ Status VisitSet(PyObject* obj, bool* keep_going /* unused */) {
601
+ if (!list_inferrer_) {
602
+ list_inferrer_.reset(
603
+ new TypeInferrer(pandas_null_sentinels_, validate_interval_, make_unions_));
604
+ }
605
+ ++list_count_;
606
+ return list_inferrer_->VisitIterable(obj);
607
+ }
608
+
609
+ Status VisitNdarray(PyObject* obj, bool* keep_going) {
610
+ PyArray_Descr* dtype = PyArray_DESCR(reinterpret_cast<PyArrayObject*>(obj));
611
+ if (dtype->type_num == NPY_OBJECT) {
612
+ return VisitList(obj, keep_going);
613
+ }
614
+ // Not an object array: infer child Arrow type from dtype
615
+ if (!list_inferrer_) {
616
+ list_inferrer_.reset(
617
+ new TypeInferrer(pandas_null_sentinels_, validate_interval_, make_unions_));
618
+ }
619
+ ++list_count_;
620
+
621
+ // XXX(wesm): In ARROW-4324 I added accounting to check whether
622
+ // all of the non-null values have NumPy dtypes, but the
623
+ // total_count not being properly incremented here
624
+ ++(*list_inferrer_).total_count_;
625
+ return list_inferrer_->VisitDType(dtype, keep_going);
626
+ }
627
+
628
+ Status VisitDict(PyObject* obj) {
629
+ PyObject* key_obj;
630
+ PyObject* value_obj;
631
+ Py_ssize_t pos = 0;
632
+
633
+ while (PyDict_Next(obj, &pos, &key_obj, &value_obj)) {
634
+ std::string key;
635
+ if (PyUnicode_Check(key_obj)) {
636
+ RETURN_NOT_OK(internal::PyUnicode_AsStdString(key_obj, &key));
637
+ } else if (PyBytes_Check(key_obj)) {
638
+ key = internal::PyBytes_AsStdString(key_obj);
639
+ } else {
640
+ return Status::TypeError("Expected dict key of type str or bytes, got '",
641
+ Py_TYPE(key_obj)->tp_name, "'");
642
+ }
643
+ // Get or create visitor for this key
644
+ auto it = struct_inferrers_.find(key);
645
+ if (it == struct_inferrers_.end()) {
646
+ it = struct_inferrers_
647
+ .insert(
648
+ std::make_pair(key, TypeInferrer(pandas_null_sentinels_,
649
+ validate_interval_, make_unions_)))
650
+ .first;
651
+ }
652
+ TypeInferrer* visitor = &it->second;
653
+
654
+ // We ignore termination signals from child visitors for now
655
+ //
656
+ // TODO(wesm): keep track of whether type inference has terminated for
657
+ // the child visitors to avoid doing unneeded work
658
+ bool keep_going = true;
659
+ RETURN_NOT_OK(visitor->Visit(value_obj, &keep_going));
660
+ }
661
+
662
+ // We do not terminate visiting dicts since we want the union of all
663
+ // observed keys
664
+ ++struct_count_;
665
+ return Status::OK();
666
+ }
667
+
668
+ Status GetStructType(std::shared_ptr<DataType>* out) {
669
+ std::vector<std::shared_ptr<Field>> fields;
670
+ for (auto&& it : struct_inferrers_) {
671
+ std::shared_ptr<DataType> field_type;
672
+ RETURN_NOT_OK(it.second.GetType(&field_type));
673
+ fields.emplace_back(field(it.first, field_type));
674
+ }
675
+ *out = struct_(fields);
676
+ return Status::OK();
677
+ }
678
+
679
+ private:
680
+ bool pandas_null_sentinels_;
681
+ int64_t validate_interval_;
682
+ bool make_unions_;
683
+ int64_t total_count_;
684
+ int64_t none_count_;
685
+ int64_t bool_count_;
686
+ int64_t int_count_;
687
+ int64_t date_count_;
688
+ int64_t time_count_;
689
+ int64_t timestamp_micro_count_;
690
+ std::string timezone_;
691
+ int64_t duration_count_;
692
+ int64_t float_count_;
693
+ int64_t binary_count_;
694
+ int64_t unicode_count_;
695
+ int64_t decimal_count_;
696
+ int64_t list_count_;
697
+ int64_t struct_count_;
698
+ int64_t arrow_scalar_count_;
699
+ int64_t numpy_dtype_count_;
700
+ int64_t interval_count_;
701
+ std::unique_ptr<TypeInferrer> list_inferrer_;
702
+ std::map<std::string, TypeInferrer> struct_inferrers_;
703
+ std::shared_ptr<DataType> scalar_type_;
704
+
705
+ // If we observe a strongly-typed value in e.g. a NumPy array, we can store
706
+ // it here to skip the type counting logic above
707
+ NumPyDtypeUnifier numpy_unifier_;
708
+
709
+ internal::DecimalMetadata max_decimal_metadata_;
710
+
711
+ OwnedRefNoGIL decimal_type_;
712
+ OwnedRefNoGIL interval_types_;
713
+ };
714
+
715
+ // Non-exhaustive type inference
716
+ Result<std::shared_ptr<DataType>> InferArrowType(PyObject* obj, PyObject* mask,
717
+ bool pandas_null_sentinels) {
718
+ if (pandas_null_sentinels) {
719
+ // ARROW-842: If pandas is not installed then null checks will be less
720
+ // comprehensive, but that is okay.
721
+ internal::InitPandasStaticData();
722
+ }
723
+
724
+ std::shared_ptr<DataType> out_type;
725
+ TypeInferrer inferrer(pandas_null_sentinels);
726
+ RETURN_NOT_OK(inferrer.VisitSequence(obj, mask));
727
+ RETURN_NOT_OK(inferrer.GetType(&out_type));
728
+ if (out_type == nullptr) {
729
+ return Status::TypeError("Unable to determine data type");
730
+ } else {
731
+ return std::move(out_type);
732
+ }
733
+ }
734
+
735
+ ARROW_PYTHON_EXPORT
736
+ bool IsPyBool(PyObject* obj) { return internal::PyBoolScalar_Check(obj); }
737
+
738
+ ARROW_PYTHON_EXPORT
739
+ bool IsPyInt(PyObject* obj) { return internal::PyIntScalar_Check(obj); }
740
+
741
+ ARROW_PYTHON_EXPORT
742
+ bool IsPyFloat(PyObject* obj) { return internal::PyFloatScalar_Check(obj); }
743
+
744
+ } // namespace py
745
+ } // namespace arrow
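
As a usage sketch (not part of the commit), embedding C++ code could call the inference entry point added above roughly as follows; it assumes the GIL is held and that NumPy has already been imported via arrow_init_numpy() (see init.h below). The helper name is illustrative only:

    #include "arrow/python/inference.h"

    // Hypothetical helper: infer an Arrow type for a Python sequence.
    // Assumes the caller holds the GIL and NumPy is already imported.
    arrow::Result<std::shared_ptr<arrow::DataType>> InferFromSequence(PyObject* seq) {
      // No validity mask; pandas null sentinels (np.nan, pd.NaT) disabled.
      return arrow::py::InferArrowType(seq, /*mask=*/nullptr,
                                       /*pandas_null_sentinels=*/false);
    }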
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/inference.h ADDED
@@ -0,0 +1,64 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // Functions for converting between CPython built-in data structures and Arrow
+ // data structures
+
+ #pragma once
+
+ #include "arrow/python/platform.h"
+
+ #include <memory>
+
+ #include "arrow/python/visibility.h"
+ #include "arrow/type.h"
+ #include "arrow/util/macros.h"
+
+ #include "common.h"
+
+ namespace arrow {
+
+ class Array;
+ class Status;
+
+ namespace py {
+
+ // These functions take a sequence input, not arbitrary iterables
+
+ /// \brief Infer Arrow type from a Python sequence
+ /// \param[in] obj the sequence of values
+ /// \param[in] mask an optional mask where True values are null. May
+ /// be nullptr
+ /// \param[in] pandas_null_sentinels use pandas's null value markers
+ ARROW_PYTHON_EXPORT
+ Result<std::shared_ptr<arrow::DataType>> InferArrowType(PyObject* obj, PyObject* mask,
+ bool pandas_null_sentinels);
+
+ /// Checks whether the passed Python object is a boolean scalar
+ ARROW_PYTHON_EXPORT
+ bool IsPyBool(PyObject* obj);
+
+ /// Checks whether the passed Python object is an integer scalar
+ ARROW_PYTHON_EXPORT
+ bool IsPyInt(PyObject* obj);
+
+ /// Checks whether the passed Python object is a float scalar
+ ARROW_PYTHON_EXPORT
+ bool IsPyFloat(PyObject* obj);
+
+ } // namespace py
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/init.cc ADDED
@@ -0,0 +1,24 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // Trigger the array import (inversion of NO_IMPORT_ARRAY)
+ #define NUMPY_IMPORT_ARRAY
+
+ #include "arrow/python/init.h"
+ #include "arrow/python/numpy_interop.h"
+
+ int arrow_init_numpy() { return arrow::py::import_numpy(); }
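
A minimal initialization sketch for an embedder, assuming the usual NumPy convention that the import returns 0 on success (the exact return value is NumPy's, not spelled out in this diff); arrow_init_numpy() must run, with the GIL held, before any of the NumPy-touching code in these files is used:

    extern "C" int arrow_init_numpy();

    // Hypothetical startup hook; returns true if NumPy was imported.
    bool InitArrowPython() {
      return arrow_init_numpy() == 0;  // assumption: 0 signals success
    }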
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/init.h ADDED
@@ -0,0 +1,26 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include "arrow/python/platform.h"
+ #include "arrow/python/visibility.h"
+
+ extern "C" {
+ ARROW_PYTHON_EXPORT
+ int arrow_init_numpy();
+ }
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/io.cc ADDED
@@ -0,0 +1,384 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #include "io.h"
+
+ #include <cstdint>
+ #include <cstdlib>
+ #include <memory>
+ #include <mutex>
+ #include <string>
+
+ #include "arrow/io/memory.h"
+ #include "arrow/memory_pool.h"
+ #include "arrow/status.h"
+ #include "arrow/util/logging.h"
+
+ #include "arrow/python/common.h"
+ #include "arrow/python/pyarrow.h"
+
+ namespace arrow {
+
+ using arrow::io::TransformInputStream;
+
+ namespace py {
+
+ // ----------------------------------------------------------------------
+ // Python file
+
+ // A common interface to a Python file-like object. Must acquire GIL before
+ // calling any methods
+ class PythonFile {
+ public:
+ explicit PythonFile(PyObject* file) : file_(file), checked_read_buffer_(false) {
+ Py_INCREF(file);
+ }
+
+ Status CheckClosed() const {
+ if (!file_) {
+ return Status::Invalid("operation on closed Python file");
+ }
+ return Status::OK();
+ }
+
+ Status Close() {
+ if (file_) {
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "close", "()");
+ Py_XDECREF(result);
+ file_.reset();
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
+ }
+ return Status::OK();
+ }
+
+ Status Abort() {
+ file_.reset();
+ return Status::OK();
+ }
+
+ bool closed() const {
+ if (!file_) {
+ return true;
+ }
+ PyObject* result = PyObject_GetAttrString(file_.obj(), "closed");
+ if (result == NULL) {
+ // Can't propagate the error, so write it out and return an arbitrary value
+ PyErr_WriteUnraisable(NULL);
+ return true;
+ }
+ int ret = PyObject_IsTrue(result);
+ Py_XDECREF(result);
+ if (ret < 0) {
+ PyErr_WriteUnraisable(NULL);
+ return true;
+ }
+ return ret != 0;
+ }
+
+ Status Seek(int64_t position, int whence) {
+ RETURN_NOT_OK(CheckClosed());
+
+ // whence: 0 for relative to start of file, 2 for end of file
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "seek", "(ni)",
+ static_cast<Py_ssize_t>(position), whence);
+ Py_XDECREF(result);
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
+ return Status::OK();
+ }
+
+ Status Read(int64_t nbytes, PyObject** out) {
+ RETURN_NOT_OK(CheckClosed());
+
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "read", "(n)",
+ static_cast<Py_ssize_t>(nbytes));
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
+ *out = result;
+ return Status::OK();
+ }
+
+ Status ReadBuffer(int64_t nbytes, PyObject** out) {
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "read_buffer", "(n)",
+ static_cast<Py_ssize_t>(nbytes));
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
+ *out = result;
+ return Status::OK();
+ }
+
+ Status Write(const void* data, int64_t nbytes) {
+ RETURN_NOT_OK(CheckClosed());
+
+ // Since the data isn't owned, we have to make a copy
+ PyObject* py_data =
+ PyBytes_FromStringAndSize(reinterpret_cast<const char*>(data), nbytes);
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
+
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "write", "(O)", py_data);
+ Py_XDECREF(py_data);
+ Py_XDECREF(result);
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
+ return Status::OK();
+ }
+
+ Status Write(const std::shared_ptr<Buffer>& buffer) {
+ RETURN_NOT_OK(CheckClosed());
+
+ PyObject* py_data = wrap_buffer(buffer);
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
+
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "write", "(O)", py_data);
+ Py_XDECREF(py_data);
+ Py_XDECREF(result);
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
+ return Status::OK();
+ }
+
+ Result<int64_t> Tell() {
+ RETURN_NOT_OK(CheckClosed());
+
+ PyObject* result = cpp_PyObject_CallMethod(file_.obj(), "tell", "()");
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
+
+ int64_t position = PyLong_AsLongLong(result);
+ Py_DECREF(result);
+
+ // PyLong_AsLongLong can raise OverflowError
+ PY_RETURN_IF_ERROR(StatusCode::IOError);
+ return position;
+ }
+
+ std::mutex& lock() { return lock_; }
+
+ bool HasReadBuffer() {
+ if (!checked_read_buffer_) { // we don't want to check this each time
+ has_read_buffer_ = PyObject_HasAttrString(file_.obj(), "read_buffer") == 1;
+ checked_read_buffer_ = true;
+ }
+ return has_read_buffer_;
+ }
+
+ private:
+ std::mutex lock_;
+ OwnedRefNoGIL file_;
+ bool has_read_buffer_;
+ bool checked_read_buffer_;
+ };
+
+ // ----------------------------------------------------------------------
+ // Seekable input stream
+
+ PyReadableFile::PyReadableFile(PyObject* file) { file_.reset(new PythonFile(file)); }
+
+ // The destructor does not close the underlying Python file object, as
+ // there may be multiple references to it. Instead let the Python
+ // destructor do its job.
+ PyReadableFile::~PyReadableFile() {}
+
+ Status PyReadableFile::Abort() {
+ return SafeCallIntoPython([this]() { return file_->Abort(); });
+ }
+
+ Status PyReadableFile::Close() {
+ return SafeCallIntoPython([this]() { return file_->Close(); });
+ }
+
+ bool PyReadableFile::closed() const {
+ bool res;
+ Status st = SafeCallIntoPython([this, &res]() {
+ res = file_->closed();
+ return Status::OK();
+ });
+ return res;
+ }
+
+ Status PyReadableFile::Seek(int64_t position) {
+ return SafeCallIntoPython([=] { return file_->Seek(position, 0); });
+ }
+
+ Result<int64_t> PyReadableFile::Tell() const {
+ return SafeCallIntoPython([=]() -> Result<int64_t> { return file_->Tell(); });
+ }
+
+ Result<int64_t> PyReadableFile::Read(int64_t nbytes, void* out) {
+ return SafeCallIntoPython([=]() -> Result<int64_t> {
+ OwnedRef bytes;
+ RETURN_NOT_OK(file_->Read(nbytes, bytes.ref()));
+ PyObject* bytes_obj = bytes.obj();
+ DCHECK(bytes_obj != NULL);
+
+ Py_buffer py_buf;
+ if (!PyObject_GetBuffer(bytes_obj, &py_buf, PyBUF_ANY_CONTIGUOUS)) {
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(py_buf.buf);
+ std::memcpy(out, data, py_buf.len);
+ int64_t len = py_buf.len;
+ PyBuffer_Release(&py_buf);
+ return len;
+ } else {
+ return Status::TypeError(
+ "Python file read() should have returned a bytes object or an object "
+ "supporting the buffer protocol, got '",
+ Py_TYPE(bytes_obj)->tp_name, "' (did you open the file in binary mode?)");
+ }
+ });
+ }
+
+ Result<std::shared_ptr<Buffer>> PyReadableFile::Read(int64_t nbytes) {
+ return SafeCallIntoPython([=]() -> Result<std::shared_ptr<Buffer>> {
+ OwnedRef buffer_obj;
+ if (file_->HasReadBuffer()) {
+ RETURN_NOT_OK(file_->ReadBuffer(nbytes, buffer_obj.ref()));
+ } else {
+ RETURN_NOT_OK(file_->Read(nbytes, buffer_obj.ref()));
+ }
+ DCHECK(buffer_obj.obj() != NULL);
+
+ return PyBuffer::FromPyObject(buffer_obj.obj());
+ });
+ }
+
+ Result<int64_t> PyReadableFile::ReadAt(int64_t position, int64_t nbytes, void* out) {
+ std::lock_guard<std::mutex> guard(file_->lock());
+ return SafeCallIntoPython([=]() -> Result<int64_t> {
+ RETURN_NOT_OK(Seek(position));
+ return Read(nbytes, out);
+ });
+ }
+
+ Result<std::shared_ptr<Buffer>> PyReadableFile::ReadAt(int64_t position, int64_t nbytes) {
+ std::lock_guard<std::mutex> guard(file_->lock());
+ return SafeCallIntoPython([=]() -> Result<std::shared_ptr<Buffer>> {
+ RETURN_NOT_OK(Seek(position));
+ return Read(nbytes);
+ });
+ }
+
+ Result<int64_t> PyReadableFile::GetSize() {
+ return SafeCallIntoPython([=]() -> Result<int64_t> {
+ ARROW_ASSIGN_OR_RAISE(int64_t current_position, file_->Tell());
+ RETURN_NOT_OK(file_->Seek(0, 2));
+
+ ARROW_ASSIGN_OR_RAISE(int64_t file_size, file_->Tell());
+ // Restore previous file position
+ RETURN_NOT_OK(file_->Seek(current_position, 0));
+
+ return file_size;
+ });
+ }
+
+ // ----------------------------------------------------------------------
+ // Output stream
+
+ PyOutputStream::PyOutputStream(PyObject* file) : position_(0) {
+ file_.reset(new PythonFile(file));
+ }
+
+ // The destructor does not close the underlying Python file object, as
+ // there may be multiple references to it. Instead let the Python
+ // destructor do its job.
+ PyOutputStream::~PyOutputStream() {}
+
+ Status PyOutputStream::Abort() {
+ return SafeCallIntoPython([=]() { return file_->Abort(); });
+ }
+
+ Status PyOutputStream::Close() {
+ return SafeCallIntoPython([=]() { return file_->Close(); });
+ }
+
+ bool PyOutputStream::closed() const {
+ bool res;
+ Status st = SafeCallIntoPython([this, &res]() {
+ res = file_->closed();
+ return Status::OK();
+ });
+ return res;
+ }
+
+ Result<int64_t> PyOutputStream::Tell() const { return position_; }
+
+ Status PyOutputStream::Write(const void* data, int64_t nbytes) {
+ return SafeCallIntoPython([=]() {
+ position_ += nbytes;
+ return file_->Write(data, nbytes);
+ });
+ }
+
+ Status PyOutputStream::Write(const std::shared_ptr<Buffer>& buffer) {
+ return SafeCallIntoPython([=]() {
+ position_ += buffer->size();
+ return file_->Write(buffer);
+ });
+ }
+
+ // ----------------------------------------------------------------------
+ // Foreign buffer
+
+ Status PyForeignBuffer::Make(const uint8_t* data, int64_t size, PyObject* base,
+ std::shared_ptr<Buffer>* out) {
+ PyForeignBuffer* buf = new PyForeignBuffer(data, size, base);
+ if (buf == NULL) {
+ return Status::OutOfMemory("could not allocate foreign buffer object");
+ } else {
+ *out = std::shared_ptr<Buffer>(buf);
+ return Status::OK();
+ }
+ }
+
+ // ----------------------------------------------------------------------
+ // TransformInputStream::TransformFunc wrapper
+
+ struct TransformFunctionWrapper {
+ TransformFunctionWrapper(TransformCallback cb, PyObject* arg)
+ : cb_(std::move(cb)), arg_(std::make_shared<OwnedRefNoGIL>(arg)) {
+ Py_INCREF(arg);
+ }
+
+ Result<std::shared_ptr<Buffer>> operator()(const std::shared_ptr<Buffer>& src) {
+ return SafeCallIntoPython([=]() -> Result<std::shared_ptr<Buffer>> {
+ std::shared_ptr<Buffer> dest;
+ cb_(arg_->obj(), src, &dest);
+ RETURN_NOT_OK(CheckPyError());
+ return dest;
+ });
+ }
+
+ protected:
+ // Need to wrap OwnedRefNoGIL because std::function needs the callable
+ // to be copy-constructible...
+ TransformCallback cb_;
+ std::shared_ptr<OwnedRefNoGIL> arg_;
+ };
+
+ std::shared_ptr<::arrow::io::InputStream> MakeTransformInputStream(
+ std::shared_ptr<::arrow::io::InputStream> wrapped, TransformInputStreamVTable vtable,
+ PyObject* handler) {
+ TransformInputStream::TransformFunc transform(
+ TransformFunctionWrapper{std::move(vtable.transform), handler});
+ return std::make_shared<TransformInputStream>(std::move(wrapped), std::move(transform));
+ }
+
+ std::shared_ptr<StreamWrapFunc> MakeStreamTransformFunc(TransformInputStreamVTable vtable,
+ PyObject* handler) {
+ TransformInputStream::TransformFunc transform(
+ TransformFunctionWrapper{std::move(vtable.transform), handler});
+ StreamWrapFunc func = [transform](std::shared_ptr<::arrow::io::InputStream> wrapped) {
+ return std::make_shared<TransformInputStream>(wrapped, transform);
+ };
+ return std::make_shared<StreamWrapFunc>(func);
+ }
+
+ } // namespace py
+ } // namespace arrow
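
As a brief usage sketch (not part of the commit), wrapping an already-opened Python binary file in the PyReadableFile added above might look like this; the caller is assumed to hold the GIL during construction, and the wrapper takes its own reference so the Python object stays alive for the wrapper's lifetime:

    #include "arrow/python/io.h"

    // `py_file` is a PyObject* for a file opened in binary mode on the
    // Python side, e.g. open(path, "rb").
    std::shared_ptr<arrow::io::RandomAccessFile> WrapPyFile(PyObject* py_file) {
      return std::make_shared<arrow::py::PyReadableFile>(py_file);
    }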
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/io.h ADDED
@@ -0,0 +1,121 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+
+ #include "arrow/io/interfaces.h"
+ #include "arrow/io/transform.h"
+
+ #include "arrow/python/common.h"
+ #include "arrow/python/visibility.h"
+
+ namespace arrow {
+ namespace py {
+
+ class ARROW_NO_EXPORT PythonFile;
+
+ class ARROW_PYTHON_EXPORT PyReadableFile : public io::RandomAccessFile {
+ public:
+ explicit PyReadableFile(PyObject* file);
+ ~PyReadableFile() override;
+
+ Status Close() override;
+ Status Abort() override;
+ bool closed() const override;
+
+ Result<int64_t> Read(int64_t nbytes, void* out) override;
+ Result<std::shared_ptr<Buffer>> Read(int64_t nbytes) override;
+
+ // Thread-safe version
+ Result<int64_t> ReadAt(int64_t position, int64_t nbytes, void* out) override;
+
+ // Thread-safe version
+ Result<std::shared_ptr<Buffer>> ReadAt(int64_t position, int64_t nbytes) override;
+
+ Result<int64_t> GetSize() override;
+
+ Status Seek(int64_t position) override;
+
+ Result<int64_t> Tell() const override;
+
+ private:
+ std::unique_ptr<PythonFile> file_;
+ };
+
+ class ARROW_PYTHON_EXPORT PyOutputStream : public io::OutputStream {
+ public:
+ explicit PyOutputStream(PyObject* file);
+ ~PyOutputStream() override;
+
+ Status Close() override;
+ Status Abort() override;
+ bool closed() const override;
+ Result<int64_t> Tell() const override;
+ Status Write(const void* data, int64_t nbytes) override;
+ Status Write(const std::shared_ptr<Buffer>& buffer) override;
+
+ private:
+ std::unique_ptr<PythonFile> file_;
+ int64_t position_;
+ };
+
+ // TODO(wesm): seekable output files
+
+ // A Buffer subclass that keeps a PyObject reference throughout its
+ // lifetime, such that the Python object is kept alive as long as the
+ // C++ buffer is still needed.
+ // Keeping the reference in a Python wrapper would be incorrect as
+ // the Python wrapper can get destroyed even though the wrapped C++
+ // buffer is still alive (ARROW-2270).
+ class ARROW_PYTHON_EXPORT PyForeignBuffer : public Buffer {
+ public:
+ static Status Make(const uint8_t* data, int64_t size, PyObject* base,
+ std::shared_ptr<Buffer>* out);
+
+ private:
+ PyForeignBuffer(const uint8_t* data, int64_t size, PyObject* base)
+ : Buffer(data, size) {
+ Py_INCREF(base);
+ base_.reset(base);
+ }
+
+ OwnedRefNoGIL base_;
+ };
+
+ // All this rigamarole because Cython is really poor with std::function<>
+
+ using TransformCallback = std::function<void(
+ PyObject*, const std::shared_ptr<Buffer>& src, std::shared_ptr<Buffer>* out)>;
+
+ struct TransformInputStreamVTable {
+ TransformCallback transform;
+ };
+
+ ARROW_PYTHON_EXPORT
+ std::shared_ptr<::arrow::io::InputStream> MakeTransformInputStream(
+ std::shared_ptr<::arrow::io::InputStream> wrapped, TransformInputStreamVTable vtable,
+ PyObject* arg);
+
+ using StreamWrapFunc = std::function<Result<std::shared_ptr<io::InputStream>>(
+ std::shared_ptr<io::InputStream>)>;
+ ARROW_PYTHON_EXPORT
+ std::shared_ptr<StreamWrapFunc> MakeStreamTransformFunc(TransformInputStreamVTable vtable,
+ PyObject* handler);
+ } // namespace py
+ } // namespace arrow
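
To illustrate the vtable indirection above (which exists because Cython copes poorly with std::function), a hedged sketch of assembling a transform stream directly from C++; the pass-through callback is purely illustrative, and a real handler would call back into Python through the first argument:

    // Hypothetical no-op transform: forwards each buffer unchanged.
    std::shared_ptr<arrow::io::InputStream> PassThrough(
        std::shared_ptr<arrow::io::InputStream> wrapped, PyObject* handler) {
      arrow::py::TransformInputStreamVTable vtable;
      vtable.transform = [](PyObject* /*handler*/,
                            const std::shared_ptr<arrow::Buffer>& src,
                            std::shared_ptr<arrow::Buffer>* out) { *out = src; };
      return arrow::py::MakeTransformInputStream(std::move(wrapped),
                                                 std::move(vtable), handler);
    }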
env-llmeval/lib/python3.10/site-packages/pyarrow/src/arrow/python/ipc.cc ADDED
@@ -0,0 +1,67 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #include "ipc.h"
+
+ #include <memory>
+
+ #include "arrow/python/pyarrow.h"
+
+ namespace arrow {
+ namespace py {
+
+ PyRecordBatchReader::PyRecordBatchReader() {}
+
+ Status PyRecordBatchReader::Init(std::shared_ptr<Schema> schema, PyObject* iterable) {
+ schema_ = std::move(schema);
+
+ iterator_.reset(PyObject_GetIter(iterable));
+ return CheckPyError();
+ }
+
+ std::shared_ptr<Schema> PyRecordBatchReader::schema() const { return schema_; }
+
+ Status PyRecordBatchReader::ReadNext(std::shared_ptr<RecordBatch>* batch) {
+ PyAcquireGIL lock;
+
+ if (!iterator_) {
+ // End of stream
+ batch->reset();
+ return Status::OK();
+ }
+
+ OwnedRef py_batch(PyIter_Next(iterator_.obj()));
+ if (!py_batch) {
+ RETURN_IF_PYERROR();
+ // End of stream
+ batch->reset();
+ iterator_.reset();
+ return Status::OK();
+ }
+
+ return unwrap_batch(py_batch.obj()).Value(batch);
+ }
+
+ Result<std::shared_ptr<RecordBatchReader>> PyRecordBatchReader::Make(
+ std::shared_ptr<Schema> schema, PyObject* iterable) {
+ auto reader = std::shared_ptr<PyRecordBatchReader>(new PyRecordBatchReader());
+ RETURN_NOT_OK(reader->Init(std::move(schema), iterable));
+ return reader;
+ }
+
+ } // namespace py
+ } // namespace arrow
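
Finally, a short sketch of the intended use of PyRecordBatchReader::Make: adapting a Python iterable of pyarrow.RecordBatch objects into a C++ RecordBatchReader. ReadNext acquires the GIL itself, but Make calls PyObject_GetIter, so it is assumed to be invoked with the GIL held; the adapter name is illustrative only:

    #include "arrow/python/ipc.h"

    // Hypothetical adapter; each item yielded by `batch_iterable` must be a
    // pyarrow.RecordBatch matching `schema` (unwrapped via unwrap_batch()).
    arrow::Result<std::shared_ptr<arrow::RecordBatchReader>> MakeReader(
        std::shared_ptr<arrow::Schema> schema, PyObject* batch_iterable) {
      return arrow::py::PyRecordBatchReader::Make(std::move(schema), batch_iterable);
    }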