applied-ai-018 committed
Commit e0df13e · verified · 1 Parent(s): 0468ab7

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. .gitattributes +5 -0
  2. env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so +3 -0
  3. env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so +3 -0
  4. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/api.h +22 -0
  5. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/pch.h +23 -0
  6. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/api.h +26 -0
  7. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_set.h +477 -0
  8. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_types.h +80 -0
  9. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/options.h +135 -0
  10. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/relation.h +71 -0
  11. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/serde.h +331 -0
  12. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_plan_builder.h +76 -0
  13. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_util.h +45 -0
  14. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/type_fwd.h +32 -0
  15. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/util.h +83 -0
  16. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/visibility.h +52 -0
  17. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h +146 -0
  18. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/decimal.h +128 -0
  19. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h +85 -0
  20. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h +26 -0
  21. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_convert.h +122 -0
  22. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h +80 -0
  23. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h +39 -0
  24. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h +33 -0
  25. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h +221 -0
  26. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h +71 -0
  27. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h +35 -0
  28. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h +211 -0
  29. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h +95 -0
  30. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h +515 -0
  31. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h +370 -0
  32. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h +466 -0
  33. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h +43 -0
  34. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h +112 -0
  35. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_ops.h +244 -0
  36. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h +286 -0
  37. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h +34 -0
  38. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h +0 -0
  39. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h +61 -0
  40. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h +241 -0
  41. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h +411 -0
  42. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h +36 -0
  43. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h +29 -0
  44. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h +181 -0
  45. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h +115 -0
  46. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/endian.h +245 -0
  47. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/functional.h +160 -0
  48. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h +944 -0
  49. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h +137 -0
  50. env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h +420 -0
.gitattributes CHANGED
@@ -176,3 +176,8 @@ env-llmeval/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310
  env-llmeval/lib/python3.10/site-packages/pandas/_libs/algos.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/pandas/_libs/groupby.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1500 filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/lib/python3.10/site-packages/pyarrow/libparquet.so.1500 filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67b17e479891bf5d919a8ca7ce7ca0535b8858e8e58bcb1fbd154778451aa7cd
+ size 1369152
env-llmeval/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3370617840e9ef04c74cd6f7e140d291a6709ba4e68e9c677b0306fbf92b431
+ size 1328288
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/api.h ADDED
@@ -0,0 +1,22 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include "arrow/engine/substrait/api.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/pch.h ADDED
@@ -0,0 +1,23 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // Often-used headers, for precompiling.
+ // If updating this header, please make sure you check compilation speed
+ // before checking in. Adding headers which are not used extremely often
+ // may incur a slowdown, since it makes the precompiled header heavier to load.
+
+ #include "arrow/pch.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/api.h ADDED
@@ -0,0 +1,26 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include "arrow/engine/substrait/extension_set.h"
+ #include "arrow/engine/substrait/extension_types.h"
+ #include "arrow/engine/substrait/options.h"
+ #include "arrow/engine/substrait/relation.h"
+ #include "arrow/engine/substrait/serde.h"
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_set.h ADDED
@@ -0,0 +1,477 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <cstddef>
+ #include <cstdint>
+ #include <functional>
+ #include <memory>
+ #include <optional>
+ #include <string>
+ #include <string_view>
+ #include <unordered_map>
+ #include <utility>
+ #include <vector>
+
+ #include "arrow/compute/api_aggregate.h"
+ #include "arrow/compute/expression.h"
+ #include "arrow/engine/substrait/type_fwd.h"
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/result.h"
+ #include "arrow/status.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace engine {
+
+ constexpr const char* kSubstraitArithmeticFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_arithmetic.yaml";
+ constexpr const char* kSubstraitBooleanFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_boolean.yaml";
+ constexpr const char* kSubstraitComparisonFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_comparison.yaml";
+ constexpr const char* kSubstraitDatetimeFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_datetime.yaml";
+ constexpr const char* kSubstraitLogarithmicFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_logarithmic.yaml";
+ constexpr const char* kSubstraitRoundingFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_rounding.yaml";
+ constexpr const char* kSubstraitStringFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_string.yaml";
+ constexpr const char* kSubstraitAggregateGenericFunctionsUri =
+ "https://github.com/substrait-io/substrait/blob/main/extensions/"
+ "functions_aggregate_generic.yaml";
+
+ /// If a function call contains this URI then the function is looked up
+ /// in the registry directly, all arguments are mapped as value arguments,
+ /// and any options are ignored.
+ constexpr const char* kArrowSimpleExtensionFunctionsUri =
+ "urn:arrow:substrait_simple_extension_function";
+
+ struct ARROW_ENGINE_EXPORT Id {
+ std::string_view uri, name;
+ bool empty() const { return uri.empty() && name.empty(); }
+ std::string ToString() const;
+ };
+ struct ARROW_ENGINE_EXPORT IdHashEq {
+ size_t operator()(Id id) const;
+ bool operator()(Id l, Id r) const;
+ };
+
+ /// \brief Owning storage for ids
+ ///
+ /// Substrait plans may reuse URIs and names in many places. For convenience
+ /// and performance Substrait ids are typically passed around as views. As we
+ /// convert a plan from Substrait to Arrow we need to copy these strings out of
+ /// the Substrait buffer and into owned storage. This class serves as that owned
+ /// storage.
+ class ARROW_ENGINE_EXPORT IdStorage {
+ public:
+ virtual ~IdStorage() = default;
+ /// \brief Get an equivalent id pointing into this storage
+ ///
+ /// This operation will copy the ids into storage if they do not already exist
+ virtual Id Emplace(Id id) = 0;
+ /// \brief Get an equivalent view pointing into this storage for a URI
+ ///
+ /// If no URI is found then the uri will be copied into storage
+ virtual std::string_view EmplaceUri(std::string_view uri) = 0;
+ /// \brief Get an equivalent id pointing into this storage
+ ///
+ /// If no id is found then nullopt will be returned
+ virtual std::optional<Id> Find(Id id) const = 0;
+ /// \brief Get an equivalent view pointing into this storage for a URI
+ ///
+ /// If no URI is found then nullopt will be returned
+ virtual std::optional<std::string_view> FindUri(std::string_view uri) const = 0;
+
+ static std::unique_ptr<IdStorage> Make();
+ };
+
+ /// \brief Describes a Substrait call
+ ///
+ /// Substrait call expressions contain a list of arguments which can either
+ /// be enum arguments (which are serialized as strings), value arguments (which)
+ /// are Arrow expressions, or type arguments (not yet implemented)
+ class ARROW_ENGINE_EXPORT SubstraitCall {
+ public:
+ SubstraitCall(Id id, std::shared_ptr<DataType> output_type, bool output_nullable,
+ bool is_hash = false)
+ : id_(id),
+ output_type_(std::move(output_type)),
+ output_nullable_(output_nullable),
+ is_hash_(is_hash) {}
+
+ const Id& id() const { return id_; }
+ const std::shared_ptr<DataType>& output_type() const { return output_type_; }
+ bool output_nullable() const { return output_nullable_; }
+ bool is_hash() const { return is_hash_; }
+ const std::unordered_map<std::string, std::vector<std::string>>& options() const {
+ return options_;
+ }
+
+ bool HasEnumArg(int index) const;
+ Result<std::string_view> GetEnumArg(int index) const;
+ void SetEnumArg(int index, std::string enum_arg);
+ Result<compute::Expression> GetValueArg(int index) const;
+ bool HasValueArg(int index) const;
+ void SetValueArg(int index, compute::Expression value_arg);
+ std::optional<std::vector<std::string> const*> GetOption(
+ std::string_view option_name) const;
+ void SetOption(std::string_view option_name,
+ const std::vector<std::string_view>& option_preferences);
+ bool HasOptions() const;
+ int size() const { return size_; }
+
+ private:
+ Id id_;
+ std::shared_ptr<DataType> output_type_;
+ bool output_nullable_;
+ // Only needed when converting from Substrait -> Arrow aggregates. The
+ // Arrow function name depends on whether or not there are any groups
+ bool is_hash_;
+ std::unordered_map<int, std::string> enum_args_;
+ std::unordered_map<int, compute::Expression> value_args_;
+ std::unordered_map<std::string, std::vector<std::string>> options_;
+ int size_ = 0;
+ };
+
+ /// Substrait identifies functions and custom data types using a (uri, name) pair.
+ ///
+ /// This registry is a bidirectional mapping between Substrait IDs and their
+ /// corresponding Arrow counterparts (arrow::DataType and function names in a function
+ /// registry)
+ ///
+ /// Substrait extension types and variations must be registered with their
+ /// corresponding arrow::DataType before they can be used!
+ ///
+ /// Conceptually this can be thought of as two pairs of `unordered_map`s. One pair to
+ /// go back and forth between Substrait ID and arrow::DataType and another pair to go
+ /// back and forth between Substrait ID and Arrow function names.
+ ///
+ /// Unlike an ExtensionSet this registry is not created automatically when consuming
+ /// Substrait plans and must be configured ahead of time (although there is a default
+ /// instance).
+ class ARROW_ENGINE_EXPORT ExtensionIdRegistry {
+ public:
+ using ArrowToSubstraitCall =
+ std::function<Result<SubstraitCall>(const arrow::compute::Expression::Call&)>;
+ using SubstraitCallToArrow =
+ std::function<Result<arrow::compute::Expression>(const SubstraitCall&)>;
+ using ArrowToSubstraitAggregate =
+ std::function<Result<SubstraitCall>(const arrow::compute::Aggregate&)>;
+ using SubstraitAggregateToArrow =
+ std::function<Result<arrow::compute::Aggregate>(const SubstraitCall&)>;
+
+ /// \brief A mapping between a Substrait ID and an arrow::DataType
+ struct TypeRecord {
+ Id id;
+ const std::shared_ptr<DataType>& type;
+ };
+
+ /// \brief Return a uri view owned by this registry
+ ///
+ /// If the URI has never been emplaced it will return nullopt
+ virtual std::optional<std::string_view> FindUri(std::string_view uri) const = 0;
+ /// \brief Return a id view owned by this registry
+ ///
+ /// If the id has never been emplaced it will return nullopt
+ virtual std::optional<Id> FindId(Id id) const = 0;
+ virtual std::optional<TypeRecord> GetType(const DataType&) const = 0;
+ virtual std::optional<TypeRecord> GetType(Id) const = 0;
+ virtual Status CanRegisterType(Id, const std::shared_ptr<DataType>& type) const = 0;
+ virtual Status RegisterType(Id, std::shared_ptr<DataType>) = 0;
+ /// \brief Register a converter that converts an Arrow call to a Substrait call
+ ///
+ /// Note that there may not be 1:1 parity between ArrowToSubstraitCall and
+ /// SubstraitCallToArrow because some standard functions (e.g. add) may map to
+ /// multiple Arrow functions (e.g. add, add_checked)
+ virtual Status AddArrowToSubstraitCall(std::string arrow_function_name,
+ ArrowToSubstraitCall conversion_func) = 0;
+ /// \brief Check to see if a converter can be registered
+ ///
+ /// \return Status::OK if there are no conflicts, otherwise an error is returned
+ virtual Status CanAddArrowToSubstraitCall(
+ const std::string& arrow_function_name) const = 0;
+
+ /// \brief Register a converter that converts an Arrow aggregate to a Substrait
+ /// aggregate
+ virtual Status AddArrowToSubstraitAggregate(
+ std::string arrow_function_name, ArrowToSubstraitAggregate conversion_func) = 0;
+ /// \brief Check to see if a converter can be registered
+ ///
+ /// \return Status::OK if there are no conflicts, otherwise an error is returned
+ virtual Status CanAddArrowToSubstraitAggregate(
+ const std::string& arrow_function_name) const = 0;
+
+ /// \brief Register a converter that converts a Substrait call to an Arrow call
+ virtual Status AddSubstraitCallToArrow(Id substrait_function_id,
+ SubstraitCallToArrow conversion_func) = 0;
+ /// \brief Check to see if a converter can be registered
+ ///
+ /// \return Status::OK if there are no conflicts, otherwise an error is returned
+ virtual Status CanAddSubstraitCallToArrow(Id substrait_function_id) const = 0;
+ /// \brief Register a simple mapping function
+ ///
+ /// All calls to the function must pass only value arguments. The arguments
+ /// will be converted to expressions and passed to the Arrow function
+ virtual Status AddSubstraitCallToArrow(Id substrait_function_id,
+ std::string arrow_function_name) = 0;
+
+ /// \brief Register a converter that converts a Substrait aggregate to an Arrow
+ /// aggregate
+ virtual Status AddSubstraitAggregateToArrow(
+ Id substrait_function_id, SubstraitAggregateToArrow conversion_func) = 0;
+ /// \brief Check to see if a converter can be registered
+ ///
+ /// \return Status::OK if there are no conflicts, otherwise an error is returned
+ virtual Status CanAddSubstraitAggregateToArrow(Id substrait_function_id) const = 0;
+
+ /// \brief Return a list of Substrait functions that have a converter
+ ///
+ /// The function ids are encoded as strings using the pattern {uri}#{name}
+ virtual std::vector<std::string> GetSupportedSubstraitFunctions() const = 0;
+
+ /// \brief Find a converter to map Arrow calls to Substrait calls
+ /// \return A converter function or an invalid status if no converter is registered
+ virtual Result<ArrowToSubstraitCall> GetArrowToSubstraitCall(
+ const std::string& arrow_function_name) const = 0;
+
+ /// \brief Find a converter to map Arrow aggregates to Substrait aggregates
+ /// \return A converter function or an invalid status if no converter is registered
+ virtual Result<ArrowToSubstraitAggregate> GetArrowToSubstraitAggregate(
+ const std::string& arrow_function_name) const = 0;
+
+ /// \brief Find a converter to map a Substrait aggregate to an Arrow aggregate
+ /// \return A converter function or an invalid status if no converter is registered
+ virtual Result<SubstraitAggregateToArrow> GetSubstraitAggregateToArrow(
+ Id substrait_function_id) const = 0;
+
+ /// \brief Find a converter to map a Substrait call to an Arrow call
+ /// \return A converter function or an invalid status if no converter is registered
+ virtual Result<SubstraitCallToArrow> GetSubstraitCallToArrow(
+ Id substrait_function_id) const = 0;
+
+ /// \brief Similar to \see GetSubstraitCallToArrow but only uses the name
+ ///
+ /// There may be multiple functions with the same name and this will return
+ /// the first. This is slower than GetSubstraitCallToArrow and should only
+ /// be used when the plan does not include a URI (or the URI is "/")
+ virtual Result<SubstraitCallToArrow> GetSubstraitCallToArrowFallback(
+ std::string_view function_name) const = 0;
+
+ /// \brief Similar to \see GetSubstraitAggregateToArrow but only uses the name
+ ///
+ /// \see GetSubstraitCallToArrowFallback for details on the fallback behavior
+ virtual Result<SubstraitAggregateToArrow> GetSubstraitAggregateToArrowFallback(
+ std::string_view function_name) const = 0;
+ };
+
+ constexpr std::string_view kArrowExtTypesUri =
+ "https://github.com/apache/arrow/blob/main/format/substrait/"
+ "extension_types.yaml";
+
+ /// A default registry with all supported functions and data types registered
+ ///
+ /// Note: Function support is currently very minimal, see ARROW-15538
+ ARROW_ENGINE_EXPORT ExtensionIdRegistry* default_extension_id_registry();
+
+ /// \brief Make a nested registry with a given parent.
+ ///
+ /// A nested registry supports registering types and functions other and on top of those
+ /// already registered in its parent registry. No conflicts in IDs and names used for
+ /// lookup are allowed. Normally, the given parent is the default registry.
+ ///
+ /// One use case for a nested registry is for dynamic registration of functions defined
+ /// within a Substrait plan while keeping these registrations specific to the plan. When
+ /// the Substrait plan is disposed of, normally after its execution, the nested registry
+ /// can be disposed of as well.
+ ARROW_ENGINE_EXPORT std::shared_ptr<ExtensionIdRegistry> nested_extension_id_registry(
+ const ExtensionIdRegistry* parent);
+
+ /// \brief A set of extensions used within a plan
+ ///
+ /// Each time an extension is used within a Substrait plan the extension
+ /// must be included in an extension set that is defined at the root of the
+ /// plan.
+ ///
+ /// The plan refers to a specific extension using an "anchor" which is an
+ /// arbitrary integer invented by the producer that has no meaning beyond a
+ /// plan but which should be consistent within a plan.
+ ///
+ /// To support serialization and deserialization this type serves as a
+ /// bidirectional map between Substrait ID and "anchor"s.
+ ///
+ /// When deserializing a Substrait plan the extension set should be extracted
+ /// after the plan has been converted from Protobuf and before the plan
+ /// is converted to an execution plan.
+ ///
+ /// The extension set can be kept and reused during serialization if a perfect
+ /// round trip is required. If serialization is not needed or round tripping
+ /// is not required then the extension set can be safely discarded after the
+ /// plan has been converted into an execution plan.
+ ///
+ /// When converting an execution plan into a Substrait plan an extension set
+ /// can be automatically generated or a previously generated extension set can
+ /// be used.
+ ///
+ /// ExtensionSet does not own strings; it only refers to strings in an
+ /// ExtensionIdRegistry.
+ class ARROW_ENGINE_EXPORT ExtensionSet {
+ public:
+ struct FunctionRecord {
+ Id id;
+ std::string_view name;
+ };
+
+ struct TypeRecord {
+ Id id;
+ std::shared_ptr<DataType> type;
+ };
+
+ /// Construct an empty ExtensionSet to be populated during serialization.
+ explicit ExtensionSet(const ExtensionIdRegistry* = default_extension_id_registry());
+ ARROW_DEFAULT_MOVE_AND_ASSIGN(ExtensionSet);
+
+ /// Construct an ExtensionSet with explicit extension ids for efficient referencing
+ /// during deserialization. Note that input vectors need not be densely packed; an empty
+ /// (default constructed) Id may be used as a placeholder to indicate an unused
+ /// _anchor/_reference. This factory will be used to wrap the extensions declared in a
+ /// substrait::Plan before deserializing the plan's relations.
+ ///
+ /// Views will be replaced with equivalent views pointing to memory owned by the
+ /// registry.
+ ///
+ /// Note: This is an advanced operation. The order of the ids, types, and functions
+ /// must match the anchor numbers chosen for a plan.
+ ///
+ /// An extension set should instead be created using
+ /// arrow::engine::GetExtensionSetFromPlan
+ static Result<ExtensionSet> Make(
+ std::unordered_map<uint32_t, std::string_view> uris,
+ std::unordered_map<uint32_t, Id> type_ids,
+ std::unordered_map<uint32_t, Id> function_ids,
+ const ConversionOptions& conversion_options,
+ const ExtensionIdRegistry* = default_extension_id_registry());
+
+ const std::unordered_map<uint32_t, std::string_view>& uris() const { return uris_; }
+
+ /// \brief Returns a data type given an anchor
+ ///
+ /// This is used when converting a Substrait plan to an Arrow execution plan.
+ ///
+ /// If the anchor does not exist in this extension set an error will be returned.
+ Result<TypeRecord> DecodeType(uint32_t anchor) const;
+
+ /// \brief Returns the number of custom type records in this extension set
+ ///
+ /// Note: the types are currently stored as a sparse vector, so this may return a value
+ /// larger than the actual number of types. This behavior may change in the future; see
+ /// ARROW-15583.
+ std::size_t num_types() const { return types_.size(); }
+
+ /// \brief Lookup the anchor for a given type
+ ///
+ /// This operation is used when converting an Arrow execution plan to a Substrait plan.
+ /// If the type has been previously encoded then the same anchor value will returned.
+ ///
+ /// If the type has not been previously encoded then a new anchor value will be created.
+ ///
+ /// If the type does not exist in the extension id registry then an error will be
+ /// returned.
+ ///
+ /// \return An anchor that can be used to refer to the type within a plan
+ Result<uint32_t> EncodeType(const DataType& type);
+
+ /// \brief Return a function id given an anchor
+ ///
+ /// This is used when converting a Substrait plan to an Arrow execution plan.
+ ///
+ /// If the anchor does not exist in this extension set an error will be returned.
+ Result<Id> DecodeFunction(uint32_t anchor) const;
+
+ /// \brief Lookup the anchor for a given function
+ ///
+ /// This operation is used when converting an Arrow execution plan to a Substrait plan.
+ /// If the function has been previously encoded then the same anchor value will be
+ /// returned.
+ ///
+ /// If the function has not been previously encoded then a new anchor value will be
+ /// created.
+ ///
+ /// If the function name is not in the extension id registry then an error will be
+ /// returned.
+ ///
+ /// \return An anchor that can be used to refer to the function within a plan
+ Result<uint32_t> EncodeFunction(Id function_id);
+
+ /// \brief Stores a plan-specific id that is not known to the registry
+ ///
+ /// This is used when converting an Arrow execution plan to a Substrait plan.
+ ///
+ /// If the function is a UDF, something that wasn't known to the registry,
+ /// then we need long term storage of the function name (the ids are just
+ /// views)
+ Id RegisterPlanSpecificId(Id id);
+
+ /// \brief Return the number of custom functions in this extension set
+ std::size_t num_functions() const { return functions_.size(); }
+
+ const ExtensionIdRegistry* registry() const { return registry_; }
+
+ private:
+ const ExtensionIdRegistry* registry_;
+ // If the registry is not aware of an id then we probably can't do anything
+ // with it. However, in some cases, these may represent extensions or features
+ // that we can safely ignore. For example, we can usually safely ignore
+ // extension type variations if we assume the plan is valid. These ignorable
+ // ids are stored here.
+ std::unique_ptr<IdStorage> plan_specific_ids_ = IdStorage::Make();
+
+ // Map from anchor values to URI values referenced by this extension set
+ std::unordered_map<uint32_t, std::string_view> uris_;
+ // Map from anchor values to type definitions, used during Substrait->Arrow
+ // and populated from the Substrait extension set
+ std::unordered_map<uint32_t, TypeRecord> types_;
+ // Map from anchor values to function ids, used during Substrait->Arrow
+ // and populated from the Substrait extension set
+ std::unordered_map<uint32_t, Id> functions_;
+ // Map from type names to anchor values. Used during Arrow->Substrait
+ // and built as the plan is created.
+ std::unordered_map<Id, uint32_t, IdHashEq, IdHashEq> types_map_;
+ // Map from function names to anchor values. Used during Arrow->Substrait
+ // and built as the plan is created.
+ std::unordered_map<Id, uint32_t, IdHashEq, IdHashEq> functions_map_;
+
+ Status CheckHasUri(std::string_view uri);
+ void AddUri(std::pair<uint32_t, std::string_view> uri);
+ Status AddUri(Id id);
+ };
+
+ } // namespace engine
+ } // namespace arrow
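For orientation, the sketch below (illustrative only, not part of the committed file) uses the registry API declared in this header to list the Substrait functions known to the default registry and to look up one id; the assumption that an "add" function is registered under the arithmetic-functions URI is hedged with an explicit check.

#include <iostream>
#include <string>
#include "arrow/engine/substrait/extension_set.h"

// Print the Substrait functions the default registry can convert, then check
// whether a converter exists for one particular (uri, name) id.
void InspectDefaultRegistry() {
  arrow::engine::ExtensionIdRegistry* registry =
      arrow::engine::default_extension_id_registry();

  // Ids are reported as "{uri}#{name}" strings, per the header docs.
  for (const std::string& fn : registry->GetSupportedSubstraitFunctions()) {
    std::cout << fn << std::endl;
  }

  // Hypothetical lookup: "add" from the arithmetic extension (may or may not
  // be registered in a given Arrow build, hence the ok() check).
  arrow::engine::Id add_id{arrow::engine::kSubstraitArithmeticFunctionsUri, "add"};
  auto converter = registry->GetSubstraitCallToArrow(add_id);
  if (converter.ok()) {
    std::cout << "'add' has a registered Substrait->Arrow converter" << std::endl;
  }
}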
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/extension_types.h ADDED
@@ -0,0 +1,80 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <cstdint>
+ #include <memory>
+ #include <optional>
+
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/type_fwd.h"
+
+ namespace arrow {
+ namespace engine {
+
+ // arrow::ExtensionTypes are provided to wrap uuid, fixed_char, varchar, interval_year,
+ // and interval_day which are first-class types in substrait but do not appear in
+ // the arrow type system.
+ //
+ // Note that these are not automatically registered with arrow::RegisterExtensionType(),
+ // which means among other things that serialization of these types to IPC would fail.
+
+ /// fixed_size_binary(16) for storing Universally Unique IDentifiers
+ ARROW_ENGINE_EXPORT
+ std::shared_ptr<DataType> uuid();
+
+ /// fixed_size_binary(length) constrained to contain only valid UTF-8
+ ARROW_ENGINE_EXPORT
+ std::shared_ptr<DataType> fixed_char(int32_t length);
+
+ /// utf8() constrained to be shorter than `length`
+ ARROW_ENGINE_EXPORT
+ std::shared_ptr<DataType> varchar(int32_t length);
+
+ /// fixed_size_list(int32(), 2) storing a number of [years, months]
+ ARROW_ENGINE_EXPORT
+ std::shared_ptr<DataType> interval_year();
+
+ /// fixed_size_list(int32(), 2) storing a number of [days, seconds]
+ ARROW_ENGINE_EXPORT
+ std::shared_ptr<DataType> interval_day();
+
+ /// Return true if t is Uuid, otherwise false
+ ARROW_ENGINE_EXPORT
+ bool UnwrapUuid(const DataType&);
+
+ /// Return FixedChar length if t is FixedChar, otherwise nullopt
+ ARROW_ENGINE_EXPORT
+ std::optional<int32_t> UnwrapFixedChar(const DataType&);
+
+ /// Return Varchar (max) length if t is VarChar, otherwise nullopt
+ ARROW_ENGINE_EXPORT
+ std::optional<int32_t> UnwrapVarChar(const DataType& t);
+
+ /// Return true if t is IntervalYear, otherwise false
+ ARROW_ENGINE_EXPORT
+ bool UnwrapIntervalYear(const DataType&);
+
+ /// Return true if t is IntervalDay, otherwise false
+ ARROW_ENGINE_EXPORT
+ bool UnwrapIntervalDay(const DataType&);
+
+ } // namespace engine
+ } // namespace arrow
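To illustrate the factories above, here is a minimal sketch (not part of the committed file) that creates two of the Substrait-oriented extension types and unwraps them again; the length 10 is arbitrary.

#include <iostream>
#include <memory>
#include <optional>
#include "arrow/engine/substrait/extension_types.h"

// Build a FixedChar(10) and a Uuid type, then recover their properties via
// the Unwrap helpers declared in this header.
void ExtensionTypeRoundTrip() {
  std::shared_ptr<arrow::DataType> fc = arrow::engine::fixed_char(/*length=*/10);
  std::shared_ptr<arrow::DataType> u = arrow::engine::uuid();

  if (std::optional<int32_t> len = arrow::engine::UnwrapFixedChar(*fc)) {
    std::cout << "fixed_char length: " << *len << std::endl;  // expected: 10
  }
  std::cout << "is uuid: " << arrow::engine::UnwrapUuid(*u) << std::endl;  // expected: 1
}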
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/options.h ADDED
@@ -0,0 +1,135 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <functional>
+ #include <string>
+ #include <vector>
+
+ #include "arrow/acero/exec_plan.h"
+ #include "arrow/acero/options.h"
+ #include "arrow/compute/type_fwd.h"
+ #include "arrow/engine/substrait/type_fwd.h"
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/type_fwd.h"
+
+ namespace arrow {
+ namespace engine {
+
+ /// How strictly to adhere to the input structure when converting between Substrait and
+ /// Acero representations of a plan. This allows the user to trade conversion accuracy
+ /// for performance and lenience.
+ enum class ARROW_ENGINE_EXPORT ConversionStrictness {
+ /// When a primitive is used at the input that doesn't have an exact match at the
+ /// output, reject the conversion. This effectively asserts that there is no (known)
+ /// information loss in the conversion, and that plans should either round-trip back and
+ /// forth exactly or not at all. This option is primarily intended for testing and
+ /// debugging.
+ EXACT_ROUNDTRIP,
+
+ /// When a primitive is used at the input that doesn't have an exact match at the
+ /// output, attempt to model it with some collection of primitives at the output. This
+ /// means that even if the incoming plan is completely optimal by some metric, the
+ /// returned plan is fairly likely to not be optimal anymore, and round-trips back and
+ /// forth may make the plan increasingly suboptimal. However, every primitive at the
+ /// output can be (manually) traced back to exactly one primitive at the input, which
+ /// may be useful when debugging.
+ PRESERVE_STRUCTURE,
+
+ /// Behaves like PRESERVE_STRUCTURE, but prefers performance over structural accuracy.
+ /// Basic optimizations *may* be applied, in order to attempt to not regress in terms of
+ /// plan performance: if the incoming plan was already aggressively optimized, the goal
+ /// is for the output plan to not be less performant. In practical use cases, this is
+ /// probably the option you want.
+ ///
+ /// Note that no guarantees are made on top of PRESERVE_STRUCTURE. Past and future
+ /// versions of Arrow may even ignore this option entirely and treat it exactly like
+ /// PRESERVE_STRUCTURE.
+ BEST_EFFORT,
+ };
+
+ using NamedTableProvider = std::function<Result<acero::Declaration>(
+ const std::vector<std::string>&, const Schema&)>;
+ static NamedTableProvider kDefaultNamedTableProvider;
+
+ using NamedTapProvider = std::function<Result<acero::Declaration>(
+ const std::string&, std::vector<acero::Declaration::Input>, const std::string&,
+ std::shared_ptr<Schema>)>;
+
+ class ARROW_ENGINE_EXPORT ExtensionDetails {
+ public:
+ virtual ~ExtensionDetails() = default;
+ };
+
+ class ARROW_ENGINE_EXPORT ExtensionProvider {
+ public:
+ virtual ~ExtensionProvider() = default;
+ virtual Result<DeclarationInfo> MakeRel(const ConversionOptions& conv_opts,
+ const std::vector<DeclarationInfo>& inputs,
+ const ExtensionDetails& ext_details,
+ const ExtensionSet& ext_set) = 0;
+ };
+
+ /// \brief Get the default extension provider
+ ARROW_ENGINE_EXPORT std::shared_ptr<ExtensionProvider> default_extension_provider();
+ /// \brief Set the default extension provider
+ ///
+ /// \param[in] provider the new provider to be set as default
+ ARROW_ENGINE_EXPORT void set_default_extension_provider(
+ const std::shared_ptr<ExtensionProvider>& provider);
+
+ ARROW_ENGINE_EXPORT NamedTapProvider default_named_tap_provider();
+
+ ARROW_ENGINE_EXPORT void set_default_named_tap_provider(NamedTapProvider provider);
+
+ /// Options that control the conversion between Substrait and Acero representations of a
+ /// plan.
+ struct ARROW_ENGINE_EXPORT ConversionOptions {
+ ConversionOptions()
+ : strictness(ConversionStrictness::BEST_EFFORT),
+ named_table_provider(kDefaultNamedTableProvider),
+ named_tap_provider(default_named_tap_provider()),
+ extension_provider(default_extension_provider()),
+ allow_arrow_extensions(false) {}
+
+ /// \brief How strictly the converter should adhere to the structure of the input.
+ ConversionStrictness strictness;
+ /// \brief A custom strategy to be used for providing named tables
+ ///
+ /// The default behavior will return an invalid status if the plan has any
+ /// named table relations.
+ NamedTableProvider named_table_provider;
+ /// \brief A custom strategy to be used for obtaining a tap declaration
+ ///
+ /// The default provider returns an error
+ NamedTapProvider named_tap_provider;
+ /// \brief A custom strategy to be used for providing relation infos.
+ ///
+ /// The default behavior will provide for relations known to Arrow.
+ std::shared_ptr<ExtensionProvider> extension_provider;
+ /// \brief If true then Arrow-specific types and functions will be allowed
+ ///
+ /// Set to false to create plans that are more likely to be compatible with non-Arrow
+ /// engines
+ bool allow_arrow_extensions;
+ };
+
+ } // namespace engine
+ } // namespace arrow
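As an illustration of ConversionOptions, the hypothetical sketch below (not part of the committed file) installs a named_table_provider that resolves a made-up table name "events" to an in-memory table via an Acero table_source declaration; the table itself is supplied by the caller.

#include <memory>
#include <string>
#include <vector>
#include "arrow/acero/options.h"
#include "arrow/engine/substrait/options.h"
#include "arrow/status.h"
#include "arrow/table.h"

// Build conversion options whose named-table provider serves one table.
arrow::engine::ConversionOptions MakeOptions(std::shared_ptr<arrow::Table> events) {
  arrow::engine::ConversionOptions options;
  options.strictness = arrow::engine::ConversionStrictness::BEST_EFFORT;
  options.named_table_provider =
      [events](const std::vector<std::string>& names,
               const arrow::Schema&) -> arrow::Result<arrow::acero::Declaration> {
    // "events" is a hypothetical name used only for this sketch.
    if (names.size() == 1 && names[0] == "events") {
      return arrow::acero::Declaration("table_source",
                                       arrow::acero::TableSourceNodeOptions(events));
    }
    return arrow::Status::Invalid("Unknown named table");
  };
  return options;
}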
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/relation.h ADDED
@@ -0,0 +1,71 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ #pragma once
+
+ #include <memory>
+
+ #include "arrow/acero/exec_plan.h"
+ #include "arrow/compute/api_aggregate.h"
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/type_fwd.h"
+
+ namespace arrow {
+ namespace engine {
+
+ /// Execution information resulting from converting a Substrait relation.
+ struct ARROW_ENGINE_EXPORT DeclarationInfo {
+ /// The compute declaration produced thus far.
+ acero::Declaration declaration;
+
+ std::shared_ptr<Schema> output_schema;
+ };
+
+ /// Information resulting from converting a Substrait plan
+ struct ARROW_ENGINE_EXPORT PlanInfo {
+ /// The root declaration.
+ ///
+ /// Only plans containing a single top-level relation are supported and so this will
+ /// represent that relation.
+ ///
+ /// This should technically be a RelRoot but some producers use a simple Rel here and so
+ /// Acero currently supports that case.
+ DeclarationInfo root;
+ /// The names of the output fields
+ ///
+ /// If `root` was created from a simple Rel then this will be empty
+ std::vector<std::string> names;
+ };
+
+ /// An expression whose output has a name
+ struct ARROW_ENGINE_EXPORT NamedExpression {
+ /// An expression
+ compute::Expression expression;
+ // An optional name to assign to the output, may be the empty string
+ std::string name;
+ };
+
+ /// A collection of expressions bound to a common schema
+ struct ARROW_ENGINE_EXPORT BoundExpressions {
+ /// The expressions
+ std::vector<NamedExpression> named_expressions;
+ /// The schema that all the expressions are bound to
+ std::shared_ptr<Schema> schema;
+ };
+
+ } // namespace engine
+ } // namespace arrow
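A minimal sketch (not part of the committed file) showing how the BoundExpressions structure above might be populated; the schema, the field name "x", and the expressions are invented for illustration.

#include <cstdint>
#include "arrow/api.h"
#include "arrow/compute/expression.h"
#include "arrow/engine/substrait/relation.h"

// Fill a BoundExpressions value with two named expressions over one schema.
arrow::engine::BoundExpressions MakeExampleExpressions() {
  namespace cp = arrow::compute;
  arrow::engine::BoundExpressions bound;
  bound.schema = arrow::schema({arrow::field("x", arrow::int64())});
  bound.named_expressions.push_back(
      {cp::call("add", {cp::field_ref("x"), cp::literal(int64_t{1})}), "x_plus_one"});
  bound.named_expressions.push_back({cp::field_ref("x"), ""});  // name may be empty
  return bound;
}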
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/serde.h ADDED
@@ -0,0 +1,331 @@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements. See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership. The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License. You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied. See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+
+ // This API is EXPERIMENTAL.
+
+ #pragma once
+
+ #include <functional>
+ #include <memory>
+ #include <string>
+ #include <string_view>
+ #include <vector>
+
+ #include "arrow/compute/type_fwd.h"
+ #include "arrow/dataset/type_fwd.h"
+ #include "arrow/engine/substrait/options.h"
+ #include "arrow/engine/substrait/relation.h"
+ #include "arrow/engine/substrait/type_fwd.h"
+ #include "arrow/engine/substrait/visibility.h"
+ #include "arrow/result.h"
+ #include "arrow/status.h"
+ #include "arrow/type_fwd.h"
+ #include "arrow/util/macros.h"
+
+ namespace arrow {
+ namespace engine {
+
+ /// \brief Serialize an Acero Plan to a binary protobuf Substrait message
+ ///
+ /// \param[in] declaration the Acero declaration to serialize.
+ /// This declaration is the sink relation of the Acero plan.
+ /// \param[in,out] ext_set the extension mapping to use; may be updated to add
+ /// \param[in] conversion_options options to control how the conversion is done
+ ///
+ /// \return a buffer containing the protobuf serialization of the Acero relation
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<Buffer>> SerializePlan(
+ const acero::Declaration& declaration, ExtensionSet* ext_set,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Serialize expressions to a Substrait message
+ ///
+ /// \param[in] bound_expressions the expressions to serialize.
+ /// \param[in] conversion_options options to control how the conversion is done
+ /// \param[in,out] ext_set the extension mapping to use, optional, only needed
+ /// if you want to control the value of function anchors
+ /// to mirror a previous serialization / deserialization.
+ /// Will be updated if new functions are encountered
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<Buffer>> SerializeExpressions(
+ const BoundExpressions& bound_expressions,
+ const ConversionOptions& conversion_options = {}, ExtensionSet* ext_set = NULLPTR);
+
+ /// Factory function type for generating the node that consumes the batches produced by
+ /// each toplevel Substrait relation when deserializing a Substrait Plan.
+ using ConsumerFactory = std::function<std::shared_ptr<acero::SinkNodeConsumer>()>;
+
+ /// \brief Deserializes a Substrait Plan message to a list of ExecNode declarations
+ ///
+ /// The output of each top-level Substrait relation will be sent to a caller supplied
+ /// consumer function provided by consumer_factory
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+ /// message
+ /// \param[in] consumer_factory factory function for generating the node that consumes
+ /// the batches produced by each toplevel Substrait relation
+ /// \param[in] registry an extension-id-registry to use, or null for the default one.
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// Plan is returned here.
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return a vector of ExecNode declarations, one for each toplevel relation in the
+ /// Substrait Plan
+ ARROW_ENGINE_EXPORT Result<std::vector<acero::Declaration>> DeserializePlans(
+ const Buffer& buf, const ConsumerFactory& consumer_factory,
+ const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Deserializes a single-relation Substrait Plan message to an execution plan
+ ///
+ /// The output of each top-level Substrait relation will be sent to a caller supplied
+ /// consumer function provided by consumer_factory
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+ /// message
+ /// \param[in] consumer node that consumes the batches produced by each toplevel Substrait
+ /// relation
+ /// \param[in] registry an extension-id-registry to use, or null for the default one.
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// Plan is returned here.
+ /// \return an ExecPlan for the Substrait Plan
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<acero::ExecPlan>> DeserializePlan(
+ const Buffer& buf, const std::shared_ptr<acero::SinkNodeConsumer>& consumer,
+ const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+ const ConversionOptions& conversion_options = {});
+
+ /// Factory function type for generating the write options of a node consuming the batches
+ /// produced by each toplevel Substrait relation when deserializing a Substrait Plan.
+ using WriteOptionsFactory = std::function<std::shared_ptr<dataset::WriteNodeOptions>()>;
+
+ /// \brief Deserializes a Substrait Plan message to a list of ExecNode declarations
+ ///
+ /// The output of each top-level Substrait relation will be written to a filesystem.
+ /// `write_options_factory` can be used to control write behavior.
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+ /// message
+ /// \param[in] write_options_factory factory function for generating the write options of
+ /// a node consuming the batches produced by each toplevel Substrait relation
+ /// \param[in] registry an extension-id-registry to use, or null for the default one.
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// Plan is returned here.
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return a vector of ExecNode declarations, one for each toplevel relation in the
+ /// Substrait Plan
+ ARROW_ENGINE_EXPORT Result<std::vector<acero::Declaration>> DeserializePlans(
+ const Buffer& buf, const WriteOptionsFactory& write_options_factory,
+ const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Deserializes a single-relation Substrait Plan message to an execution plan
+ ///
+ /// The output of the single Substrait relation will be written to a filesystem.
+ /// `write_options_factory` can be used to control write behavior.
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+ /// message
+ /// \param[in] write_options write options of a node consuming the batches produced by
+ /// each toplevel Substrait relation
+ /// \param[in] registry an extension-id-registry to use, or null for the default one.
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// Plan is returned here.
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return an ExecPlan for the Substrait Plan
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<acero::ExecPlan>> DeserializePlan(
+ const Buffer& buf, const std::shared_ptr<dataset::WriteNodeOptions>& write_options,
+ const ExtensionIdRegistry* registry = NULLPTR, ExtensionSet* ext_set_out = NULLPTR,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Deserializes a Substrait Plan message to a Declaration
+ ///
+ /// The plan will not contain any sink nodes and will be suitable for use in any
+ /// of the arrow::compute::DeclarationToXyz methods.
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Plan
+ /// message
+ /// \param[in] registry an extension-id-registry to use, or null for the default one.
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// Plan is returned here.
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return A declaration representing the Substrait plan
+ ARROW_ENGINE_EXPORT Result<PlanInfo> DeserializePlan(
+ const Buffer& buf, const ExtensionIdRegistry* registry = NULLPTR,
+ ExtensionSet* ext_set_out = NULLPTR,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Deserialize a Substrait ExtendedExpression message to the corresponding Arrow
+ /// type
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a collection of bound
+ /// expressions
+ /// \param[in] registry an extension-id-registry to use, or null for the default one
+ /// \param[in] conversion_options options to control how the conversion is done
+ /// \param[out] ext_set_out if non-null, the extension mapping used by the Substrait
+ /// message is returned here.
+ /// \return A collection of expressions and a common input schema they are bound to
+ ARROW_ENGINE_EXPORT Result<BoundExpressions> DeserializeExpressions(
+ const Buffer& buf, const ExtensionIdRegistry* registry = NULLPTR,
+ const ConversionOptions& conversion_options = {},
+ ExtensionSet* ext_set_out = NULLPTR);
+
+ /// \brief Deserializes a Substrait Type message to the corresponding Arrow type
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait Type
+ /// message
+ /// \param[in] ext_set the extension mapping to use, normally provided by the
+ /// surrounding Plan message
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return the corresponding Arrow data type
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<DataType>> DeserializeType(
+ const Buffer& buf, const ExtensionSet& ext_set,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Serializes an Arrow type to a Substrait Type message
+ ///
+ /// \param[in] type the Arrow data type to serialize
+ /// \param[in,out] ext_set the extension mapping to use; may be updated to add a
+ /// mapping for the given type
+ /// \param[in] conversion_options options to control how the conversion is to be done.
+ /// \return a buffer containing the protobuf serialization of the corresponding Substrait
+ /// Type message
+ ARROW_ENGINE_EXPORT
+ Result<std::shared_ptr<Buffer>> SerializeType(
+ const DataType& type, ExtensionSet* ext_set,
+ const ConversionOptions& conversion_options = {});
+
+ /// \brief Deserializes a Substrait NamedStruct message to an Arrow schema
+ ///
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait
215
+ /// NamedStruct message
216
+ /// \param[in] ext_set the extension mapping to use, normally provided by the
217
+ /// surrounding Plan message
218
+ /// \param[in] conversion_options options to control how the conversion is to be done.
219
+ /// \return the corresponding Arrow schema
220
+ ARROW_ENGINE_EXPORT
221
+ Result<std::shared_ptr<Schema>> DeserializeSchema(
222
+ const Buffer& buf, const ExtensionSet& ext_set,
223
+ const ConversionOptions& conversion_options = {});
224
+
225
+ /// \brief Serializes an Arrow schema to a Substrait NamedStruct message
226
+ ///
227
+ /// \param[in] schema the Arrow schema to serialize
228
+ /// \param[in,out] ext_set the extension mapping to use; may be updated to add
229
+ /// mappings for the types used in the schema
230
+ /// \param[in] conversion_options options to control how the conversion is to be done.
231
+ /// \return a buffer containing the protobuf serialization of the corresponding Substrait
232
+ /// NamedStruct message
233
+ ARROW_ENGINE_EXPORT
234
+ Result<std::shared_ptr<Buffer>> SerializeSchema(
235
+ const Schema& schema, ExtensionSet* ext_set,
236
+ const ConversionOptions& conversion_options = {});
237
+
238
+ /// \brief Deserializes a Substrait Expression message to a compute expression
239
+ ///
240
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait
241
+ /// Expression message
242
+ /// \param[in] ext_set the extension mapping to use, normally provided by the
243
+ /// surrounding Plan message
244
+ /// \param[in] conversion_options options to control how the conversion is to be done.
245
+ /// \return the corresponding Arrow compute expression
246
+ ARROW_ENGINE_EXPORT
247
+ Result<compute::Expression> DeserializeExpression(
248
+ const Buffer& buf, const ExtensionSet& ext_set,
249
+ const ConversionOptions& conversion_options = {});
250
+
251
+ /// \brief Serializes an Arrow compute expression to a Substrait Expression message
252
+ ///
253
+ /// \param[in] expr the Arrow compute expression to serialize
254
+ /// \param[in,out] ext_set the extension mapping to use; may be updated to add
255
+ /// mappings for the types used in the expression
256
+ /// \param[in] conversion_options options to control how the conversion is to be done.
257
+ /// \return a buffer containing the protobuf serialization of the corresponding Substrait
258
+ /// Expression message
259
+ ARROW_ENGINE_EXPORT
260
+ Result<std::shared_ptr<Buffer>> SerializeExpression(
261
+ const compute::Expression& expr, ExtensionSet* ext_set,
262
+ const ConversionOptions& conversion_options = {});
263
+
264
+ /// \brief Serialize an Acero Declaration to a binary protobuf Substrait message
265
+ ///
266
+ /// \param[in] declaration the Acero declaration to serialize
267
+ /// \param[in,out] ext_set the extension mapping to use; may be updated to add
268
+ /// \param[in] conversion_options options to control how the conversion is done
269
+ ///
270
+ /// \return a buffer containing the protobuf serialization of the Acero relation
271
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> SerializeRelation(
272
+ const acero::Declaration& declaration, ExtensionSet* ext_set,
273
+ const ConversionOptions& conversion_options = {});
274
+
275
+ /// \brief Deserializes a Substrait Rel (relation) message to an ExecNode declaration
276
+ ///
277
+ /// \param[in] buf a buffer containing the protobuf serialization of a Substrait
278
+ /// Rel message
279
+ /// \param[in] ext_set the extension mapping to use, normally provided by the
280
+ /// surrounding Plan message
281
+ /// \param[in] conversion_options options to control how the conversion is to be done.
282
+ /// \return the corresponding ExecNode declaration
283
+ ARROW_ENGINE_EXPORT Result<acero::Declaration> DeserializeRelation(
284
+ const Buffer& buf, const ExtensionSet& ext_set,
285
+ const ConversionOptions& conversion_options = {});
286
+
287
+ namespace internal {
288
+
289
+ /// \brief Checks whether two protobuf serializations of a particular Substrait message
290
+ /// type are equivalent
291
+ ///
292
+ /// Note that a binary comparison of the two buffers is insufficient. One reason for this
293
+ /// is that the fields of a message can be specified in any order in the serialization.
294
+ ///
295
+ /// \param[in] message_name the name of the Substrait message type to check
296
+ /// \param[in] l_buf buffer containing the first protobuf serialization to compare
297
+ /// \param[in] r_buf buffer containing the second protobuf serialization to compare
298
+ /// \return success if equivalent, failure if not
299
+ ARROW_ENGINE_EXPORT
300
+ Status CheckMessagesEquivalent(std::string_view message_name, const Buffer& l_buf,
301
+ const Buffer& r_buf);
302
+
303
+ /// \brief Utility function to convert a JSON serialization of a Substrait message to
304
+ /// its binary serialization
305
+ ///
306
+ /// \param[in] type_name the name of the Substrait message type to convert
307
+ /// \param[in] json the JSON string to convert
308
+ /// \param[in] ignore_unknown_fields if true then unknown fields will be ignored and
309
+ /// will not cause an error
310
+ ///
311
+ /// This should generally be true to allow consumption of plans from newer
312
+ /// producers but setting to false can be useful if you are testing
313
+ /// conformance to a specific Substrait version
314
+ /// \return a buffer filled with the binary protobuf serialization of message
315
+ ARROW_ENGINE_EXPORT
316
+ Result<std::shared_ptr<Buffer>> SubstraitFromJSON(std::string_view type_name,
317
+ std::string_view json,
318
+ bool ignore_unknown_fields = true);
319
+
320
+ /// \brief Utility function to convert a binary protobuf serialization of a Substrait
321
+ /// message to JSON
322
+ ///
323
+ /// \param[in] type_name the name of the Substrait message type to convert
324
+ /// \param[in] buf the buffer containing the binary protobuf serialization of the message
325
+ /// \return a JSON string representing the message
326
+ ARROW_ENGINE_EXPORT
327
+ Result<std::string> SubstraitToJSON(std::string_view type_name, const Buffer& buf);
328
+
329
+ } // namespace internal
330
+ } // namespace engine
331
+ } // namespace arrow
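A minimal usage sketch of the schema round-trip declared above (SerializeSchema / DeserializeSchema). The example field names are invented for illustration, and ExtensionSet is assumed to be default-constructible with the default extension-id registry:

#include "arrow/api.h"
#include "arrow/engine/substrait/extension_set.h"
#include "arrow/engine/substrait/serde.h"

// Round-trip an Arrow schema through a Substrait NamedStruct message.
arrow::Status RoundTripSchema() {
  auto schema = arrow::schema(
      {arrow::field("x", arrow::int32()), arrow::field("y", arrow::utf8())});
  arrow::engine::ExtensionSet ext_set;  // assumed: default constructor uses the default registry
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> buf,
                        arrow::engine::SerializeSchema(*schema, &ext_set));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Schema> restored,
                        arrow::engine::DeserializeSchema(*buf, ext_set));
  return restored->Equals(*schema)
             ? arrow::Status::OK()
             : arrow::Status::Invalid("schema did not round-trip");
}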
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_plan_builder.h ADDED
@@ -0,0 +1,76 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // These utilities are for internal / unit test use only.
19
+ // They allow for the construction of simple Substrait plans
20
+ // programmatically without first requiring the construction
21
+ // of an ExecPlan
22
+
23
+ // These utilities have to be here, and not in a test_util.cc
24
+ // file (or in a unit test) because only one .so is allowed
25
+ // to include each .pb.h file or else protobuf will encounter
26
+ // global namespace conflicts.
27
+
28
+ #include <memory>
29
+ #include <string>
30
+ #include <unordered_map>
31
+ #include <vector>
32
+
33
+ #include "arrow/engine/substrait/visibility.h"
34
+ #include "arrow/result.h"
35
+ #include "arrow/type_fwd.h"
36
+
37
+ namespace arrow {
38
+ namespace engine {
39
+
40
+ struct Id;
41
+
42
+ namespace internal {
43
+
44
+ /// \brief Create a scan->project->sink plan for tests
45
+ ///
46
+ /// The plan will project one additional column using the function
47
+ /// defined by `function_id`, `arguments`, and data_types. `arguments`
48
+ /// and `data_types` should have the same length but only one of each
49
+ /// should be defined at each index.
50
+ ///
51
+ /// If `data_types` is defined at an index then the plan will create a
52
+ /// direct reference (starting at index 0 and increasing by 1 for each
53
+ /// argument of this type).
54
+ ///
55
+ /// If `arguments` is defined at an index then the plan will create an
56
+ /// enum argument with that value.
57
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> CreateScanProjectSubstrait(
58
+ Id function_id, const std::shared_ptr<Table>& input_table,
59
+ const std::vector<std::string>& arguments,
60
+ const std::unordered_map<std::string, std::vector<std::string>>& options,
61
+ const std::vector<std::shared_ptr<DataType>>& data_types,
62
+ const DataType& output_type);
63
+
64
+ /// \brief Create a scan->aggregate->sink plan for tests
65
+ ///
66
+ /// The plan will create an aggregate with one grouping set (defined by
67
+ /// key_idxs) and one measure. The measure will be a function
68
+ /// defined by `function_id` and direct references to `arg_idxs`.
69
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> CreateScanAggSubstrait(
70
+ Id function_id, const std::shared_ptr<Table>& input_table,
71
+ const std::vector<int>& key_idxs, const std::vector<int>& arg_idxs,
72
+ const DataType& output_type);
73
+
74
+ } // namespace internal
75
+ } // namespace engine
76
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/test_util.h ADDED
@@ -0,0 +1,45 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/testing/gtest_util.h"
21
+ #include "arrow/util/vector.h"
22
+
23
+ #include <functional>
24
+ #include <random>
25
+ #include <string>
26
+ #include <string_view>
27
+ #include <vector>
28
+
29
+ #include "arrow/acero/exec_plan.h"
30
+ #include "arrow/compute/exec.h"
31
+ #include "arrow/compute/kernel.h"
32
+ #include "arrow/testing/visibility.h"
33
+ #include "arrow/util/async_generator.h"
34
+ #include "arrow/util/pcg_random.h"
35
+
36
+ namespace arrow {
37
+ namespace engine {
38
+
39
+ Result<std::shared_ptr<Table>> SortTableOnAllFields(const std::shared_ptr<Table>& tab);
40
+
41
+ void AssertTablesEqualIgnoringOrder(const std::shared_ptr<Table>& exp,
42
+ const std::shared_ptr<Table>& act);
43
+
44
+ } // namespace engine
45
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/type_fwd.h ADDED
@@ -0,0 +1,32 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // This API is EXPERIMENTAL.
19
+
20
+ #pragma once
21
+
22
+ namespace arrow {
23
+ namespace engine {
24
+
25
+ class ExtensionIdRegistry;
26
+ class ExtensionSet;
27
+
28
+ struct ConversionOptions;
29
+ struct DeclarationInfo;
30
+
31
+ } // namespace engine
32
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/util.h ADDED
@@ -0,0 +1,83 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <vector>
24
+
25
+ #include "arrow/compute/type_fwd.h"
26
+ #include "arrow/engine/substrait/options.h"
27
+ #include "arrow/engine/substrait/type_fwd.h"
28
+ #include "arrow/engine/substrait/visibility.h"
29
+ #include "arrow/result.h"
30
+ #include "arrow/type_fwd.h"
31
+ #include "arrow/util/iterator.h"
32
+ #include "arrow/util/macros.h"
33
+
34
+ namespace arrow {
35
+
36
+ namespace engine {
37
+
38
+ using PythonTableProvider =
39
+ std::function<Result<std::shared_ptr<Table>>(const std::vector<std::string>&)>;
40
+
41
+ /// \brief Utility method to run a Substrait plan
42
+ /// \param substrait_buffer The plan to run, must be in binary protobuf format
43
+ /// \param registry A registry of extension functions to make available to the plan
44
+ /// If null then the default registry will be used.
45
+ /// \param memory_pool The memory pool the plan should use to make allocations.
46
+ /// \param func_registry A registry of functions used for executing expressions.
47
+ /// `registry` maps from Substrait function IDs to "names". These
48
+ /// names will be provided to `func_registry` to get the actual
49
+ /// kernel.
50
+ /// \param conversion_options Options to control plan deserialization
51
+ /// \param use_threads If True then the CPU thread pool will be used for CPU work. If
52
+ /// False then all work will be done on the calling thread.
53
+ /// \return A record batch reader that will read out the results
54
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<RecordBatchReader>> ExecuteSerializedPlan(
55
+ const Buffer& substrait_buffer, const ExtensionIdRegistry* registry = NULLPTR,
56
+ compute::FunctionRegistry* func_registry = NULLPTR,
57
+ const ConversionOptions& conversion_options = {}, bool use_threads = true,
58
+ MemoryPool* memory_pool = default_memory_pool());
59
+
60
+ /// \brief Get a Serialized Plan from a Substrait JSON plan.
61
+ /// This is a helper method for Python tests.
62
+ ARROW_ENGINE_EXPORT Result<std::shared_ptr<Buffer>> SerializeJsonPlan(
63
+ const std::string& substrait_json);
64
+
65
+ /// \brief Make a nested registry with the default registry as parent.
66
+ /// See arrow::engine::nested_extension_id_registry for details.
67
+ ARROW_ENGINE_EXPORT std::shared_ptr<ExtensionIdRegistry> MakeExtensionIdRegistry();
68
+
69
+ ARROW_ENGINE_EXPORT const std::string& default_extension_types_uri();
70
+
71
+ // TODO(ARROW-18145) Populate these from cmake files
72
+ constexpr uint32_t kSubstraitMajorVersion = 0;
73
+ constexpr uint32_t kSubstraitMinorVersion = 27;
74
+ constexpr uint32_t kSubstraitPatchVersion = 0;
75
+
76
+ constexpr uint32_t kSubstraitMinimumMajorVersion = 0;
77
+ constexpr uint32_t kSubstraitMinimumMinorVersion = 20;
78
+
79
+ Status CheckVersion(uint32_t major_version, uint32_t minor_version);
80
+
81
+ } // namespace engine
82
+
83
+ } // namespace arrow
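For context, a sketch of how SerializeJsonPlan and ExecuteSerializedPlan declared above can be combined; the JSON plan itself is assumed to be supplied by the caller, and the results are drained with the standard RecordBatchReader::ReadNext API:

#include <memory>
#include <string>

#include "arrow/buffer.h"
#include "arrow/engine/substrait/util.h"
#include "arrow/record_batch.h"
#include "arrow/result.h"
#include "arrow/status.h"

// Compile a Substrait plan given as JSON, execute it, and drain the results.
arrow::Status RunJsonPlan(const std::string& substrait_json) {
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> plan_buf,
                        arrow::engine::SerializeJsonPlan(substrait_json));
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::RecordBatchReader> reader,
                        arrow::engine::ExecuteSerializedPlan(*plan_buf));
  std::shared_ptr<arrow::RecordBatch> batch;
  do {
    ARROW_RETURN_NOT_OK(reader->ReadNext(&batch));
    // ... consume `batch` here; a null batch signals end of stream ...
  } while (batch != nullptr);
  return arrow::Status::OK();
}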
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/engine/substrait/visibility.h ADDED
@@ -0,0 +1,52 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // TODO(westonpace): Once we have a proper engine module this file
19
+ // should be renamed arrow/engine/visibility.h
20
+ // This API is EXPERIMENTAL.
21
+
22
+ #pragma once
23
+
24
+ #if defined(_WIN32) || defined(__CYGWIN__)
25
+ #if defined(_MSC_VER)
26
+ #pragma warning(push)
27
+ #pragma warning(disable : 4251)
28
+ #else
29
+ #pragma GCC diagnostic ignored "-Wattributes"
30
+ #endif
31
+
32
+ #ifdef ARROW_ENGINE_STATIC
33
+ #define ARROW_ENGINE_EXPORT
34
+ #elif defined(ARROW_ENGINE_EXPORTING)
35
+ #define ARROW_ENGINE_EXPORT __declspec(dllexport)
36
+ #else
37
+ #define ARROW_ENGINE_EXPORT __declspec(dllimport)
38
+ #endif
39
+
40
+ #define ARROW_ENGINE_NO_EXPORT
41
+ #else // Not Windows
42
+ #ifndef ARROW_ENGINE_EXPORT
43
+ #define ARROW_ENGINE_EXPORT __attribute__((visibility("default")))
44
+ #endif
45
+ #ifndef ARROW_ENGINE_NO_EXPORT
46
+ #define ARROW_ENGINE_NO_EXPORT __attribute__((visibility("hidden")))
47
+ #endif
48
+ #endif // Non-Windows
49
+
50
+ #if defined(_MSC_VER)
51
+ #pragma warning(pop)
52
+ #endif
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/arrow_to_pandas.h ADDED
@@ -0,0 +1,146 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between pandas's NumPy-based data representation
19
+ // and Arrow data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <memory>
26
+ #include <string>
27
+ #include <unordered_set>
28
+
29
+ #include "arrow/memory_pool.h"
30
+ #include "arrow/python/visibility.h"
31
+
32
+ namespace arrow {
33
+
34
+ class Array;
35
+ class ChunkedArray;
36
+ class Column;
37
+ class DataType;
38
+ class MemoryPool;
39
+ class Status;
40
+ class Table;
41
+
42
+ namespace py {
43
+
44
+ enum class MapConversionType {
45
+ DEFAULT, // convert arrow maps to assoc lists (list of key-value tuples) in Pandas
46
+ LOSSY, // report warnings when lossiness is encountered due to duplicate keys
47
+ STRICT_, // raise a Python exception when lossiness is encountered due to duplicate
48
+ // keys
49
+ };
50
+
51
+ struct PandasOptions {
52
+ /// arrow::MemoryPool to use for memory allocations
53
+ MemoryPool* pool = default_memory_pool();
54
+
55
+ /// If true, we will convert all string columns to categoricals
56
+ bool strings_to_categorical = false;
57
+ bool zero_copy_only = false;
58
+ bool integer_object_nulls = false;
59
+ bool date_as_object = false;
60
+ bool timestamp_as_object = false;
61
+ bool use_threads = false;
62
+
63
+ /// Coerce all date and timestamp to datetime64[ns]
64
+ bool coerce_temporal_nanoseconds = false;
65
+
66
+ /// Used to maintain backwards compatibility for
67
+ /// timezone bugs (see ARROW-9528). Should be removed
68
+ /// after Arrow 2.0 release.
69
+ bool ignore_timezone = false;
70
+
71
+ /// \brief If true, do not create duplicate PyObject versions of equal
72
+ /// objects. This only applies to immutable objects like strings or datetime
73
+ /// objects
74
+ bool deduplicate_objects = false;
75
+
76
+ /// \brief For certain data types, a cast is needed in order to store the
77
+ /// data in a pandas DataFrame or Series (e.g. timestamps are always stored
78
+ /// as nanoseconds in pandas). This option controls whether it is a safe
79
+ /// cast or not.
80
+ bool safe_cast = true;
81
+
82
+ /// \brief If true, create one block per column rather than consolidated
83
+ /// blocks (1 per data type). Do zero-copy wrapping when there are no
84
+ /// nulls. pandas currently will consolidate the blocks on its own, causing
85
+ /// increased memory use, so keep this in mind if you are working on a
86
+ /// memory-constrained situation.
87
+ bool split_blocks = false;
88
+
89
+ /// \brief If true, allow non-writable zero-copy views to be created for
90
+ /// single column blocks. This option is also used to provide zero copy for
91
+ /// Series data
92
+ bool allow_zero_copy_blocks = false;
93
+
94
+ /// \brief If true, attempt to deallocate buffers in passed Arrow object if
95
+ /// it is the only remaining shared_ptr copy of it. See ARROW-3789 for
96
+ /// original context for this feature. Only currently implemented for Table
97
+ /// conversions
98
+ bool self_destruct = false;
99
+
100
+ /// \brief The default behavior (DEFAULT), is to convert Arrow Map arrays to
101
+ /// Python association lists (list-of-tuples) in the same order as the Arrow
102
+ /// Map, as in [(key1, value1), (key2, value2), ...]
103
+ /// If LOSSY or STRICT, convert Arrow Map arrays to native Python dicts.
104
+ /// This can change the ordering of (key, value) pairs, and will deduplicate
105
+ /// multiple keys, resulting in a possible loss of data.
106
+ /// If 'lossy', this key deduplication results in a warning printed
107
+ /// when detected. If 'strict', this instead results in an exception
108
+ /// being raised when detected.
109
+ MapConversionType maps_as_pydicts = MapConversionType::DEFAULT;
110
+
111
+ // Used internally for nested arrays.
112
+ bool decode_dictionaries = false;
113
+
114
+ // Columns that should be casted to categorical
115
+ std::unordered_set<std::string> categorical_columns;
116
+
117
+ // Columns that should be passed through to be converted to
118
+ // ExtensionArray/Block
119
+ std::unordered_set<std::string> extension_columns;
120
+
121
+ // Used internally to distinguish between to_numpy() and to_pandas() when
122
+ // the expected output differs
123
+ bool to_numpy = false;
124
+ };
125
+
126
+ ARROW_PYTHON_EXPORT
127
+ Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr<Array> arr,
128
+ PyObject* py_ref, PyObject** out);
129
+
130
+ ARROW_PYTHON_EXPORT
131
+ Status ConvertChunkedArrayToPandas(const PandasOptions& options,
132
+ std::shared_ptr<ChunkedArray> col, PyObject* py_ref,
133
+ PyObject** out);
134
+
135
+ // Convert a whole table as efficiently as possible to a pandas.DataFrame.
136
+ //
137
+ // The returned Python object is a list of tuples consisting of the exact 2D
138
+ // BlockManager structure of the pandas.DataFrame used as of pandas 0.19.x.
139
+ //
140
+ // tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2])
141
+ ARROW_PYTHON_EXPORT
142
+ Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr<Table> table,
143
+ PyObject** out);
144
+
145
+ } // namespace py
146
+ } // namespace arrow
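A brief sketch of how PandasOptions might be configured and passed to ConvertTableToPandas; it assumes the embedding application has already initialized the Python interpreter and NumPy and that the calling thread holds the GIL:

#include <memory>

#include "arrow/python/arrow_to_pandas.h"
#include "arrow/table.h"

// Convert a Table into the pandas BlockManager-style structure described above.
arrow::Status TableToPandasBlocks(const std::shared_ptr<arrow::Table>& table,
                                  PyObject** out_blocks) {
  arrow::py::PandasOptions options;
  options.split_blocks = true;         // one block per column
  options.self_destruct = true;        // release Arrow buffers as they are converted
  options.deduplicate_objects = true;  // share PyObjects for equal immutable values
  return arrow::py::ConvertTableToPandas(options, table, out_blocks);
}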
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/decimal.h ADDED
@@ -0,0 +1,128 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+
22
+ #include "arrow/python/visibility.h"
23
+ #include "arrow/type.h"
24
+
25
+ namespace arrow {
26
+
27
+ class Decimal128;
28
+ class Decimal256;
29
+
30
+ namespace py {
31
+
32
+ class OwnedRef;
33
+
34
+ //
35
+ // Python Decimal support
36
+ //
37
+
38
+ namespace internal {
39
+
40
+ // \brief Import the Python Decimal type
41
+ ARROW_PYTHON_EXPORT
42
+ Status ImportDecimalType(OwnedRef* decimal_type);
43
+
44
+ // \brief Convert a Python Decimal object to a C++ string
45
+ // \param[in] python_decimal A Python decimal.Decimal instance
46
+ // \param[out] The string representation of the Python Decimal instance
47
+ // \return The status of the operation
48
+ ARROW_PYTHON_EXPORT
49
+ Status PythonDecimalToString(PyObject* python_decimal, std::string* out);
50
+
51
+ // \brief Convert a C++ std::string to a Python Decimal instance
52
+ // \param[in] decimal_constructor The decimal type object
53
+ // \param[in] decimal_string A decimal string
54
+ // \return An instance of decimal.Decimal
55
+ ARROW_PYTHON_EXPORT
56
+ PyObject* DecimalFromString(PyObject* decimal_constructor,
57
+ const std::string& decimal_string);
58
+
59
+ // \brief Convert a Python decimal to an Arrow Decimal128 object
60
+ // \param[in] python_decimal A Python decimal.Decimal instance
61
+ // \param[in] arrow_type An instance of arrow::DecimalType
62
+ // \param[out] out A pointer to a Decimal128
63
+ // \return The status of the operation
64
+ ARROW_PYTHON_EXPORT
65
+ Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
66
+ Decimal128* out);
67
+
68
+ // \brief Convert a Python object to an Arrow Decimal128 object
69
+ // \param[in] python_decimal A Python int or decimal.Decimal instance
70
+ // \param[in] arrow_type An instance of arrow::DecimalType
71
+ // \param[out] out A pointer to a Decimal128
72
+ // \return The status of the operation
73
+ ARROW_PYTHON_EXPORT
74
+ Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal128* out);
75
+
76
+ // \brief Convert a Python decimal to an Arrow Decimal256 object
77
+ // \param[in] python_decimal A Python decimal.Decimal instance
78
+ // \param[in] arrow_type An instance of arrow::DecimalType
79
+ // \param[out] out A pointer to a Decimal256
80
+ // \return The status of the operation
81
+ ARROW_PYTHON_EXPORT
82
+ Status DecimalFromPythonDecimal(PyObject* python_decimal, const DecimalType& arrow_type,
83
+ Decimal256* out);
84
+
85
+ // \brief Convert a Python object to an Arrow Decimal256 object
86
+ // \param[in] python_decimal A Python int or decimal.Decimal instance
87
+ // \param[in] arrow_type An instance of arrow::DecimalType
88
+ // \param[out] out A pointer to a Decimal256
89
+ // \return The status of the operation
90
+ ARROW_PYTHON_EXPORT
91
+ Status DecimalFromPyObject(PyObject* obj, const DecimalType& arrow_type, Decimal256* out);
92
+
93
+ // \brief Check whether obj is an instance of Decimal
94
+ ARROW_PYTHON_EXPORT
95
+ bool PyDecimal_Check(PyObject* obj);
96
+
97
+ // \brief Check whether obj is nan. This function will abort the program if the argument
98
+ // is not a Decimal instance
99
+ ARROW_PYTHON_EXPORT
100
+ bool PyDecimal_ISNAN(PyObject* obj);
101
+
102
+ // \brief Helper class to track and update the precision and scale of a decimal
103
+ class ARROW_PYTHON_EXPORT DecimalMetadata {
104
+ public:
105
+ DecimalMetadata();
106
+ DecimalMetadata(int32_t precision, int32_t scale);
107
+
108
+ // \brief Adjust the precision and scale of a decimal type given a new precision and a
+ // new scale
+ // \param[in] suggested_precision A candidate precision
+ // \param[in] suggested_scale A candidate scale
+ // \return The status of the operation
111
+ Status Update(int32_t suggested_precision, int32_t suggested_scale);
112
+
113
+ // \brief A convenient interface for updating the precision and scale based on a Python
+ // Decimal object
+ // \param object A Python Decimal object
+ // \return The status of the operation
116
+ Status Update(PyObject* object);
117
+
118
+ int32_t precision() const { return precision_; }
119
+ int32_t scale() const { return scale_; }
120
+
121
+ private:
122
+ int32_t precision_;
123
+ int32_t scale_;
124
+ };
125
+
126
+ } // namespace internal
127
+ } // namespace py
128
+ } // namespace arrow
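As an illustration of DecimalMetadata, a sketch that infers a decimal128 type wide enough for a batch of Python decimal.Decimal objects; the values vector is assumed to be supplied by the caller with the GIL held:

#include <memory>
#include <vector>

#include "arrow/python/decimal.h"
#include "arrow/result.h"
#include "arrow/type.h"

// Widen precision/scale over all inputs, then build the matching Arrow type.
arrow::Result<std::shared_ptr<arrow::DataType>> InferDecimalType(
    const std::vector<PyObject*>& values) {
  arrow::py::internal::DecimalMetadata metadata;
  for (PyObject* value : values) {
    ARROW_RETURN_NOT_OK(metadata.Update(value));
  }
  return arrow::decimal128(metadata.precision(), metadata.scale());
}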
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/extension_type.h ADDED
@@ -0,0 +1,85 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <string>
22
+
23
+ #include "arrow/extension_type.h"
24
+ #include "arrow/python/common.h"
25
+ #include "arrow/python/visibility.h"
26
+ #include "arrow/util/macros.h"
27
+
28
+ namespace arrow {
29
+ namespace py {
30
+
31
+ class ARROW_PYTHON_EXPORT PyExtensionType : public ExtensionType {
32
+ public:
33
+ // Implement the ExtensionType API
34
+ std::string extension_name() const override { return extension_name_; }
35
+
36
+ std::string ToString() const override;
37
+
38
+ bool ExtensionEquals(const ExtensionType& other) const override;
39
+
40
+ std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override;
41
+
42
+ Result<std::shared_ptr<DataType>> Deserialize(
43
+ std::shared_ptr<DataType> storage_type,
44
+ const std::string& serialized) const override;
45
+
46
+ std::string Serialize() const override;
47
+
48
+ // For use from Cython
49
+ // Assumes that `typ` is borrowed
50
+ static Status FromClass(const std::shared_ptr<DataType> storage_type,
51
+ const std::string extension_name, PyObject* typ,
52
+ std::shared_ptr<ExtensionType>* out);
53
+
54
+ // Return new ref
55
+ PyObject* GetInstance() const;
56
+ Status SetInstance(PyObject*) const;
57
+
58
+ protected:
59
+ PyExtensionType(std::shared_ptr<DataType> storage_type, PyObject* typ,
60
+ PyObject* inst = NULLPTR);
61
+ PyExtensionType(std::shared_ptr<DataType> storage_type, std::string extension_name,
62
+ PyObject* typ, PyObject* inst = NULLPTR);
63
+
64
+ std::string extension_name_;
65
+
66
+ // These fields are mutable because of two-step initialization.
67
+ mutable OwnedRefNoGIL type_class_;
68
+ // A weakref or null. Storing a strong reference to the Python extension type
69
+ // instance would create an unreclaimable reference cycle between Python and C++
70
+ // (the Python instance has to keep a strong reference to the C++ ExtensionType
71
+ // in the other direction). Instead, we store a weakref to the instance.
72
+ // If the weakref is dead, we reconstruct the instance from its serialized form.
73
+ mutable OwnedRefNoGIL type_instance_;
74
+ // Empty if type_instance_ is null
75
+ mutable std::string serialized_;
76
+ };
77
+
78
+ ARROW_PYTHON_EXPORT std::string PyExtensionName();
79
+
80
+ ARROW_PYTHON_EXPORT Status RegisterPyExtensionType(const std::shared_ptr<DataType>&);
81
+
82
+ ARROW_PYTHON_EXPORT Status UnregisterPyExtensionType(const std::string& type_name);
83
+
84
+ } // namespace py
85
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/init.h ADDED
@@ -0,0 +1,26 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/python/platform.h"
21
+ #include "arrow/python/visibility.h"
22
+
23
+ extern "C" {
24
+ ARROW_PYTHON_EXPORT
25
+ int arrow_init_numpy();
26
+ }
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/numpy_convert.h ADDED
@@ -0,0 +1,122 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between pandas's NumPy-based data representation
19
+ // and Arrow data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <memory>
26
+ #include <string>
27
+ #include <vector>
28
+
29
+ #include "arrow/buffer.h"
30
+ #include "arrow/python/visibility.h"
31
+ #include "arrow/sparse_tensor.h"
32
+
33
+ namespace arrow {
34
+
35
+ class DataType;
36
+ class MemoryPool;
37
+ class Status;
38
+ class Tensor;
39
+
40
+ namespace py {
41
+
42
+ class ARROW_PYTHON_EXPORT NumPyBuffer : public Buffer {
43
+ public:
44
+ explicit NumPyBuffer(PyObject* arr);
45
+ virtual ~NumPyBuffer();
46
+
47
+ private:
48
+ PyObject* arr_;
49
+ };
50
+
51
+ ARROW_PYTHON_EXPORT
52
+ Result<std::shared_ptr<DataType>> NumPyDtypeToArrow(PyObject* dtype);
53
+ ARROW_PYTHON_EXPORT
54
+ Result<std::shared_ptr<DataType>> NumPyDtypeToArrow(PyArray_Descr* descr);
55
+ ARROW_PYTHON_EXPORT
56
+ Result<std::shared_ptr<DataType>> NumPyScalarToArrowDataType(PyObject* scalar);
57
+
58
+ ARROW_PYTHON_EXPORT Status NdarrayToTensor(MemoryPool* pool, PyObject* ao,
59
+ const std::vector<std::string>& dim_names,
60
+ std::shared_ptr<Tensor>* out);
61
+
62
+ ARROW_PYTHON_EXPORT Status TensorToNdarray(const std::shared_ptr<Tensor>& tensor,
63
+ PyObject* base, PyObject** out);
64
+
65
+ ARROW_PYTHON_EXPORT Status
66
+ SparseCOOTensorToNdarray(const std::shared_ptr<SparseCOOTensor>& sparse_tensor,
67
+ PyObject* base, PyObject** out_data, PyObject** out_coords);
68
+
69
+ Status SparseCSXMatrixToNdarray(const std::shared_ptr<SparseTensor>& sparse_tensor,
70
+ PyObject* base, PyObject** out_data,
71
+ PyObject** out_indptr, PyObject** out_indices);
72
+
73
+ ARROW_PYTHON_EXPORT Status SparseCSRMatrixToNdarray(
74
+ const std::shared_ptr<SparseCSRMatrix>& sparse_tensor, PyObject* base,
75
+ PyObject** out_data, PyObject** out_indptr, PyObject** out_indices);
76
+
77
+ ARROW_PYTHON_EXPORT Status SparseCSCMatrixToNdarray(
78
+ const std::shared_ptr<SparseCSCMatrix>& sparse_tensor, PyObject* base,
79
+ PyObject** out_data, PyObject** out_indptr, PyObject** out_indices);
80
+
81
+ ARROW_PYTHON_EXPORT Status SparseCSFTensorToNdarray(
82
+ const std::shared_ptr<SparseCSFTensor>& sparse_tensor, PyObject* base,
83
+ PyObject** out_data, PyObject** out_indptr, PyObject** out_indices);
84
+
85
+ ARROW_PYTHON_EXPORT Status NdarraysToSparseCOOTensor(
86
+ MemoryPool* pool, PyObject* data_ao, PyObject* coords_ao,
87
+ const std::vector<int64_t>& shape, const std::vector<std::string>& dim_names,
88
+ std::shared_ptr<SparseCOOTensor>* out);
89
+
90
+ ARROW_PYTHON_EXPORT Status NdarraysToSparseCSRMatrix(
91
+ MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao, PyObject* indices_ao,
92
+ const std::vector<int64_t>& shape, const std::vector<std::string>& dim_names,
93
+ std::shared_ptr<SparseCSRMatrix>* out);
94
+
95
+ ARROW_PYTHON_EXPORT Status NdarraysToSparseCSCMatrix(
96
+ MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao, PyObject* indices_ao,
97
+ const std::vector<int64_t>& shape, const std::vector<std::string>& dim_names,
98
+ std::shared_ptr<SparseCSCMatrix>* out);
99
+
100
+ ARROW_PYTHON_EXPORT Status NdarraysToSparseCSFTensor(
101
+ MemoryPool* pool, PyObject* data_ao, PyObject* indptr_ao, PyObject* indices_ao,
102
+ const std::vector<int64_t>& shape, const std::vector<int64_t>& axis_order,
103
+ const std::vector<std::string>& dim_names, std::shared_ptr<SparseCSFTensor>* out);
104
+
105
+ ARROW_PYTHON_EXPORT Status
106
+ TensorToSparseCOOTensor(const std::shared_ptr<Tensor>& tensor,
107
+ std::shared_ptr<SparseCOOTensor>* csparse_tensor);
108
+
109
+ ARROW_PYTHON_EXPORT Status
110
+ TensorToSparseCSRMatrix(const std::shared_ptr<Tensor>& tensor,
111
+ std::shared_ptr<SparseCSRMatrix>* csparse_tensor);
112
+
113
+ ARROW_PYTHON_EXPORT Status
114
+ TensorToSparseCSCMatrix(const std::shared_ptr<Tensor>& tensor,
115
+ std::shared_ptr<SparseCSCMatrix>* csparse_tensor);
116
+
117
+ ARROW_PYTHON_EXPORT Status
118
+ TensorToSparseCSFTensor(const std::shared_ptr<Tensor>& tensor,
119
+ std::shared_ptr<SparseCSFTensor>* csparse_tensor);
120
+
121
+ } // namespace py
122
+ } // namespace arrow
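A small sketch of the Tensor wrapper declared above; it assumes the caller holds the GIL and passes a genuine NumPy ndarray object:

#include <memory>

#include "arrow/memory_pool.h"
#include "arrow/python/numpy_convert.h"
#include "arrow/tensor.h"

// Wrap a NumPy ndarray as an arrow::Tensor (zero-copy where the layout allows).
arrow::Status WrapNdarray(PyObject* ndarray, std::shared_ptr<arrow::Tensor>* out) {
  return arrow::py::NdarrayToTensor(arrow::default_memory_pool(), ndarray,
                                    /*dim_names=*/{}, out);
}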
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/python_to_arrow.h ADDED
@@ -0,0 +1,80 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Functions for converting between CPython built-in data structures and Arrow
19
+ // data structures
20
+
21
+ #pragma once
22
+
23
+ #include "arrow/python/platform.h"
24
+
25
+ #include <cstdint>
26
+ #include <memory>
27
+
28
+ #include "arrow/python/visibility.h"
29
+ #include "arrow/type.h"
30
+ #include "arrow/util/macros.h"
31
+
32
+ #include "arrow/python/common.h"
33
+
34
+ namespace arrow {
35
+
36
+ class Array;
37
+ class Status;
38
+
39
+ namespace py {
40
+
41
+ struct PyConversionOptions {
42
+ PyConversionOptions() = default;
43
+
44
+ PyConversionOptions(const std::shared_ptr<DataType>& type, int64_t size,
45
+ MemoryPool* pool, bool from_pandas)
46
+ : type(type), size(size), from_pandas(from_pandas) {}
47
+
48
+ // Set to null if to be inferred
49
+ std::shared_ptr<DataType> type;
50
+
51
+ // Default is -1, which indicates the size should be the same as the input sequence
52
+ int64_t size = -1;
53
+
54
+ bool from_pandas = false;
55
+
56
+ /// Used to maintain backwards compatibility for
57
+ /// timezone bugs (see ARROW-9528). Should be removed
58
+ /// after Arrow 2.0 release.
59
+ bool ignore_timezone = false;
60
+
61
+ bool strict = false;
62
+ };
63
+
64
+ /// \brief Convert sequence (list, generator, NumPy array with dtype object) of
65
+ /// Python objects.
66
+ /// \param[in] obj the sequence to convert
67
+ /// \param[in] mask a NumPy array of true/false values to indicate whether
68
+ /// values in the sequence are null (true) or not null (false). This parameter
69
+ /// may be null
70
+ /// \param[in] options various conversion options
71
+ /// \param[in] pool MemoryPool to use for allocations
72
+ /// \return Result ChunkedArray
73
+ ARROW_PYTHON_EXPORT
74
+ Result<std::shared_ptr<ChunkedArray>> ConvertPySequence(
75
+ PyObject* obj, PyObject* mask, PyConversionOptions options,
76
+ MemoryPool* pool = default_memory_pool());
77
+
78
+ } // namespace py
79
+
80
+ } // namespace arrow
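A minimal sketch of ConvertPySequence with an explicit element type; fixing the type to int64 is an assumption made for the example (leaving options.type null lets the converter infer it):

#include <memory>

#include "arrow/python/python_to_arrow.h"
#include "arrow/result.h"

// Convert a Python sequence of integers into a ChunkedArray.
arrow::Result<std::shared_ptr<arrow::ChunkedArray>> Int64FromPySequence(PyObject* seq) {
  arrow::py::PyConversionOptions options;
  options.type = arrow::int64();  // skip inference for this example
  return arrow::py::ConvertPySequence(seq, /*mask=*/nullptr, options);
}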
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/python/visibility.h ADDED
@@ -0,0 +1,39 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #if defined(_WIN32) || defined(__CYGWIN__) // Windows
21
+ #if defined(_MSC_VER)
22
+ #pragma warning(disable : 4251)
23
+ #else
24
+ #pragma GCC diagnostic ignored "-Wattributes"
25
+ #endif
26
+
27
+ #ifdef ARROW_PYTHON_STATIC
28
+ #define ARROW_PYTHON_EXPORT
29
+ #elif defined(ARROW_PYTHON_EXPORTING)
30
+ #define ARROW_PYTHON_EXPORT __declspec(dllexport)
31
+ #else
32
+ #define ARROW_PYTHON_EXPORT __declspec(dllimport)
33
+ #endif
34
+
35
+ #else // Not Windows
36
+ #ifndef ARROW_PYTHON_EXPORT
37
+ #define ARROW_PYTHON_EXPORT __attribute__((visibility("default")))
38
+ #endif
39
+ #endif // Non-Windows
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/algorithm.h ADDED
@@ -0,0 +1,33 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/result.h"
21
+
22
+ namespace arrow {
23
+
24
+ template <typename InputIterator, typename OutputIterator, typename UnaryOperation>
25
+ Status MaybeTransform(InputIterator first, InputIterator last, OutputIterator out,
26
+ UnaryOperation unary_op) {
27
+ for (; first != last; ++first, (void)++out) {
28
+ ARROW_ASSIGN_OR_RAISE(*out, unary_op(*first));
29
+ }
30
+ return Status::OK();
31
+ }
32
+
33
+ } // namespace arrow
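To show how MaybeTransform propagates element-level failures, a short sketch; the overflow guard is just an example of a fallible per-element operation:

#include <cstdint>
#include <limits>
#include <vector>

#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/util/algorithm.h"

// Double every element, aborting the whole loop on the first failure.
arrow::Status DoubleAll(const std::vector<int64_t>& in, std::vector<int64_t>* out) {
  out->resize(in.size());
  auto double_or_fail = [](int64_t v) -> arrow::Result<int64_t> {
    if (v > std::numeric_limits<int64_t>::max() / 2) {
      return arrow::Status::Invalid("doubling would overflow");
    }
    return v * 2;
  };
  return arrow::MaybeTransform(in.begin(), in.end(), out->begin(), double_or_fail);
}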
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/align_util.h ADDED
@@ -0,0 +1,221 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+
22
+ #include "arrow/memory_pool.h"
23
+ #include "arrow/type_fwd.h"
24
+ #include "arrow/util/bit_util.h"
25
+
26
+ namespace arrow {
27
+ namespace internal {
28
+
29
+ struct BitmapWordAlignParams {
30
+ int64_t leading_bits;
31
+ int64_t trailing_bits;
32
+ int64_t trailing_bit_offset;
33
+ const uint8_t* aligned_start;
34
+ int64_t aligned_bits;
35
+ int64_t aligned_words;
36
+ };
37
+
38
+ // Compute parameters for accessing a bitmap using aligned word instructions.
39
+ // The returned parameters describe:
40
+ // - a leading area of size `leading_bits` before the aligned words
41
+ // - a word-aligned area of size `aligned_bits`
42
+ // - a trailing area of size `trailing_bits` after the aligned words
43
+ template <uint64_t ALIGN_IN_BYTES>
44
+ inline BitmapWordAlignParams BitmapWordAlign(const uint8_t* data, int64_t bit_offset,
45
+ int64_t length) {
46
+ static_assert(bit_util::IsPowerOf2(ALIGN_IN_BYTES),
47
+ "ALIGN_IN_BYTES should be a positive power of two");
48
+ constexpr uint64_t ALIGN_IN_BITS = ALIGN_IN_BYTES * 8;
49
+
50
+ BitmapWordAlignParams p;
51
+
52
+ // Compute a "bit address" that we can align up to ALIGN_IN_BITS.
53
+ // We don't care about losing the upper bits since we are only interested in the
54
+ // difference between both addresses.
55
+ const uint64_t bit_addr =
56
+ reinterpret_cast<size_t>(data) * 8 + static_cast<uint64_t>(bit_offset);
57
+ const uint64_t aligned_bit_addr = bit_util::RoundUpToPowerOf2(bit_addr, ALIGN_IN_BITS);
58
+
59
+ p.leading_bits = std::min<int64_t>(length, aligned_bit_addr - bit_addr);
60
+ p.aligned_words = (length - p.leading_bits) / ALIGN_IN_BITS;
61
+ p.aligned_bits = p.aligned_words * ALIGN_IN_BITS;
62
+ p.trailing_bits = length - p.leading_bits - p.aligned_bits;
63
+ p.trailing_bit_offset = bit_offset + p.leading_bits + p.aligned_bits;
64
+
65
+ p.aligned_start = data + (bit_offset + p.leading_bits) / 8;
66
+ return p;
67
+ }
68
+ } // namespace internal
69
+
70
+ namespace util {
71
+
72
+ // Functions to check if the provided Arrow object is aligned by the specified alignment
73
+
74
+ /// \brief Special alignment value to use data type-specific alignment
75
+ ///
76
+ /// If this is passed as the `alignment` in one of the CheckAlignment or EnsureAlignment
77
+ /// functions, then the function will ensure each buffer is suitably aligned
78
+ /// for the data type of the array. For example, given an int32 buffer the values
79
+ /// buffer's address must be a multiple of 4. Given a large_string buffer the offsets
80
+ /// buffer's address must be a multiple of 8.
81
+ constexpr int64_t kValueAlignment = -3;
82
+
83
+ /// \brief Calculate if the buffer's address is a multiple of `alignment`
84
+ ///
85
+ /// If `alignment` is less than or equal to 0 then this method will always return true
86
+ /// \param buffer the buffer to check
87
+ /// \param alignment the alignment (in bytes) to check for
88
+ ARROW_EXPORT bool CheckAlignment(const Buffer& buffer, int64_t alignment);
89
+ /// \brief Calculate if all buffers in the array data are aligned
90
+ ///
91
+ /// This will also check the buffers in the dictionary and any children
92
+ /// \param array the array data to check
93
+ /// \param alignment the alignment (in bytes) to check for
94
+ ARROW_EXPORT bool CheckAlignment(const ArrayData& array, int64_t alignment);
95
+ /// \brief Calculate if all buffers in the array are aligned
96
+ ///
97
+ /// This will also check the buffers in the dictionary and any children
98
+ /// \param array the array to check
99
+ /// \param alignment the alignment (in bytes) to check for
100
+ ARROW_EXPORT bool CheckAlignment(const Array& array, int64_t alignment);
101
+
102
+ // The following functions require an additional boolean vector which stores the
103
+ // alignment check bits of the constituent objects.
104
+ // For example, needs_alignment vector for a ChunkedArray will contain the
105
+ // check bits of the constituent Arrays.
106
+ // The boolean vector check was introduced to minimize the repetitive checks
107
+ // of the constituent objects during the EnsureAlignment function where certain
108
+ // objects can be ignored for further checking if we already know that they are
109
+ // completely aligned.
110
+
111
+ /// \brief Calculate which (if any) chunks in a chunked array are unaligned
112
+ /// \param array the array to check
113
+ /// \param alignment the alignment (in bytes) to check for
114
+ /// \param needs_alignment an output vector that will store the results of the check
115
+ /// it must be set to a valid vector. Extra elements will be added to the end
116
+ /// of the vector for each chunk that is checked. `true` will be stored if
117
+ /// the chunk is unaligned.
118
+ /// \param offset the index of the chunk to start checking
119
+ /// \return true if all chunks (starting at `offset`) are aligned, false otherwise
120
+ ARROW_EXPORT bool CheckAlignment(const ChunkedArray& array, int64_t alignment,
121
+ std::vector<bool>* needs_alignment, int offset = 0);
122
+
123
+ /// \brief calculate which (if any) columns in a record batch are unaligned
124
+ /// \param batch the batch to check
125
+ /// \param alignment the alignment (in bytes) to check for
126
+ /// \param needs_alignment an output vector that will store the results of the
127
+ /// check. It must be set to a valid vector. Extra elements will be added
128
+ /// to the end of the vector for each column that is checked. `true` will be
129
+ /// stored if the column is unaligned.
130
+ ARROW_EXPORT bool CheckAlignment(const RecordBatch& batch, int64_t alignment,
131
+ std::vector<bool>* needs_alignment);
132
+
133
+ /// \brief calculate which (if any) columns in a table are unaligned
134
+ /// \param table the table to check
135
+ /// \param alignment the alignment (in bytes) to check for
136
+ /// \param needs_alignment an output vector that will store the results of the
137
+ /// check. It must be set to a valid vector. Extra elements will be added
138
+ /// to the end of the vector for each column that is checked. `true` will be
139
+ /// stored if the column is unaligned.
140
+ ARROW_EXPORT bool CheckAlignment(const Table& table, int64_t alignment,
141
+ std::vector<bool>* needs_alignment);
142
+
143
+ /// \brief return a buffer that has the given alignment and the same data as the input
144
+ /// buffer
145
+ ///
146
+ /// If the input buffer is already aligned then this method will return the input buffer
147
+ /// If the input buffer is not already aligned then this method will allocate a new
148
+ /// buffer. The alignment of the new buffer will have at least
149
+ /// max(kDefaultBufferAlignment, alignment) bytes of alignment.
150
+ ///
151
+ /// \param buffer the buffer to check
152
+ /// \param alignment the alignment (in bytes) to check for
153
+ /// \param memory_pool a memory pool that will be used to allocate a new buffer if the
154
+ /// input buffer is not sufficiently aligned
155
+ ARROW_EXPORT Result<std::shared_ptr<Buffer>> EnsureAlignment(
156
+ std::shared_ptr<Buffer> buffer, int64_t alignment, MemoryPool* memory_pool);
157
+
158
+ /// \brief return an array data where all buffers are aligned by the given alignment
159
+ ///
160
+ /// If any input buffer is already aligned then this method will reuse that same input
161
+ /// buffer.
162
+ ///
163
+ /// \param array_data the array data to check
164
+ /// \param alignment the alignment (in bytes) to check for
165
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
166
+ /// input buffer is not sufficiently aligned
167
+ ARROW_EXPORT Result<std::shared_ptr<ArrayData>> EnsureAlignment(
168
+ std::shared_ptr<ArrayData> array_data, int64_t alignment, MemoryPool* memory_pool);
169
+
170
+ /// \brief return an array where all buffers are aligned by the given alignment
171
+ ///
172
+ /// If any input buffer is already aligned then this method will reuse that same input
173
+ /// buffer.
174
+ ///
175
+ /// \param array the array to check
176
+ /// \param alignment the alignment (in bytes) to check for
177
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
178
+ /// input buffer is not sufficiently aligned
179
+ ARROW_EXPORT Result<std::shared_ptr<Array>> EnsureAlignment(std::shared_ptr<Array> array,
180
+ int64_t alignment,
181
+ MemoryPool* memory_pool);
182
+
183
+ /// \brief return a chunked array where all buffers are aligned by the given alignment
184
+ ///
185
+ /// If any input buffer is already aligned then this method will reuse that same input
186
+ /// buffer.
187
+ ///
188
+ /// \param array the chunked array to check
189
+ /// \param alignment the alignment (in bytes) to check for
190
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
191
+ /// input buffer is not sufficiently aligned
192
+ ARROW_EXPORT Result<std::shared_ptr<ChunkedArray>> EnsureAlignment(
193
+ std::shared_ptr<ChunkedArray> array, int64_t alignment, MemoryPool* memory_pool);
194
+
195
+ /// \brief return a record batch where all buffers are aligned by the given alignment
196
+ ///
197
+ /// If any input buffer is already aligned then this method will reuse that same input
198
+ /// buffer.
199
+ ///
200
+ /// \param batch the batch to check
201
+ /// \param alignment the alignment (in bytes) to check for
202
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
203
+ /// input buffer is not sufficiently aligned
204
+ ARROW_EXPORT Result<std::shared_ptr<RecordBatch>> EnsureAlignment(
205
+ std::shared_ptr<RecordBatch> batch, int64_t alignment, MemoryPool* memory_pool);
206
+
207
+ /// \brief return a table where all buffers are aligned by the given alignment
208
+ ///
209
+ /// If any input buffer is already aligned then this method will reuse that same input
210
+ /// buffer.
211
+ ///
212
+ /// \param table the table to check
213
+ /// \param alignment the alignment (in bytes) to check for
214
+ /// \param memory_pool a memory pool that will be used to allocate new buffers if any
215
+ /// input buffer is not sufficiently aligned
216
+ ARROW_EXPORT Result<std::shared_ptr<Table>> EnsureAlignment(std::shared_ptr<Table> table,
217
+ int64_t alignment,
218
+ MemoryPool* memory_pool);
219
+
220
+ } // namespace util
221
+ } // namespace arrow
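A minimal usage sketch for the alignment helpers declared above (not part of the header itself). The wrapper name Align64 and the 64-byte requirement are illustrative assumptions; CheckAlignment, EnsureAlignment and default_memory_pool are the APIs declared or referenced above.

// Returns `buf` unchanged when it is already 64-byte aligned, otherwise
// copies it into a new, sufficiently aligned buffer.
#include <memory>

#include "arrow/buffer.h"
#include "arrow/memory_pool.h"
#include "arrow/result.h"
#include "arrow/util/align_util.h"

arrow::Result<std::shared_ptr<arrow::Buffer>> Align64(
    std::shared_ptr<arrow::Buffer> buf) {
  if (arrow::util::CheckAlignment(*buf, /*alignment=*/64)) {
    return buf;  // already aligned, no copy needed
  }
  // Allocates a buffer aligned to at least max(kDefaultBufferAlignment, 64).
  return arrow::util::EnsureAlignment(std::move(buf), /*alignment=*/64,
                                      arrow::default_memory_pool());
}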
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/async_generator_fwd.h ADDED
@@ -0,0 +1,71 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <functional>
21
+
22
+ #include "arrow/type_fwd.h"
23
+
24
+ namespace arrow {
25
+
26
+ template <typename T>
27
+ using AsyncGenerator = std::function<Future<T>()>;
28
+
29
+ template <typename T, typename V>
30
+ class MappingGenerator;
31
+
32
+ template <typename T, typename ComesAfter, typename IsNext>
33
+ class SequencingGenerator;
34
+
35
+ template <typename T, typename V>
36
+ class TransformingGenerator;
37
+
38
+ template <typename T>
39
+ class SerialReadaheadGenerator;
40
+
41
+ template <typename T>
42
+ class ReadaheadGenerator;
43
+
44
+ template <typename T>
45
+ class PushGenerator;
46
+
47
+ template <typename T>
48
+ class MergedGenerator;
49
+
50
+ template <typename T>
51
+ struct Enumerated;
52
+
53
+ template <typename T>
54
+ class EnumeratingGenerator;
55
+
56
+ template <typename T>
57
+ class TransferringGenerator;
58
+
59
+ template <typename T>
60
+ class BackgroundGenerator;
61
+
62
+ template <typename T>
63
+ class GeneratorIterator;
64
+
65
+ template <typename T>
66
+ struct CancellableGenerator;
67
+
68
+ template <typename T>
69
+ class DefaultIfEmptyGenerator;
70
+
71
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/base64.h ADDED
@@ -0,0 +1,35 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string>
21
+ #include <string_view>
22
+
23
+ #include "arrow/util/visibility.h"
24
+
25
+ namespace arrow {
26
+ namespace util {
27
+
28
+ ARROW_EXPORT
29
+ std::string base64_encode(std::string_view s);
30
+
31
+ ARROW_EXPORT
32
+ std::string base64_decode(std::string_view s);
33
+
34
+ } // namespace util
35
+ } // namespace arrow
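A short round-trip sketch for the two declarations above, assuming the usual base64 semantics; it is not part of the header.

#include <cassert>
#include <string>

#include "arrow/util/base64.h"

int main() {
  const std::string original = "pyarrow";
  const std::string encoded = arrow::util::base64_encode(original);  // "cHlhcnJvdw=="
  assert(arrow::util::base64_decode(encoded) == original);           // round-trips
  return 0;
}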
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/benchmark_util.h ADDED
@@ -0,0 +1,211 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #include <algorithm>
19
+ #include <cstdint>
20
+ #include <string>
21
+
22
+ #include "benchmark/benchmark.h"
23
+
24
+ #include "arrow/memory_pool.h"
25
+ #include "arrow/type_fwd.h"
26
+ #include "arrow/util/cpu_info.h"
27
+ #include "arrow/util/logging.h" // IWYU pragma: keep
28
+
29
+ namespace arrow {
30
+
31
+ // Benchmark changed its parameter type between releases from
32
+ // int to int64_t. As it doesn't have version macros, we need
33
+ // to apply C++ template magic.
34
+
35
+ template <typename Func>
36
+ struct BenchmarkArgsType;
37
+
38
+ // Pattern matching that extracts the vector element type of Benchmark::Args()
39
+ template <typename Values>
40
+ struct BenchmarkArgsType<benchmark::internal::Benchmark* (
41
+ benchmark::internal::Benchmark::*)(const std::vector<Values>&)> {
42
+ using type = Values;
43
+ };
44
+
45
+ using ArgsType =
46
+ typename BenchmarkArgsType<decltype(&benchmark::internal::Benchmark::Args)>::type;
47
+
48
+ using internal::CpuInfo;
49
+
50
+ static const CpuInfo* cpu_info = CpuInfo::GetInstance();
51
+
52
+ static const int64_t kL1Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L1);
53
+ static const int64_t kL2Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L2);
54
+ static const int64_t kL3Size = cpu_info->CacheSize(CpuInfo::CacheLevel::L3);
55
+ static const int64_t kCantFitInL3Size = kL3Size * 4;
56
+ static const std::vector<int64_t> kMemorySizes = {kL1Size, kL2Size, kL3Size,
57
+ kCantFitInL3Size};
58
+ // 0 is treated as "no nulls"
59
+ static const std::vector<ArgsType> kInverseNullProportions = {10000, 100, 10, 2, 1, 0};
60
+
61
+ struct GenericItemsArgs {
62
+ // number of items processed per iteration
63
+ const int64_t size;
64
+
65
+ // proportion of nulls in generated arrays
66
+ double null_proportion;
67
+
68
+ explicit GenericItemsArgs(benchmark::State& state)
69
+ : size(state.range(0)), state_(state) {
70
+ if (state.range(1) == 0) {
71
+ this->null_proportion = 0.0;
72
+ } else {
73
+ this->null_proportion = std::min(1., 1. / static_cast<double>(state.range(1)));
74
+ }
75
+ }
76
+
77
+ ~GenericItemsArgs() {
78
+ state_.counters["size"] = static_cast<double>(size);
79
+ state_.counters["null_percent"] = null_proportion * 100;
80
+ state_.SetItemsProcessed(state_.iterations() * size);
81
+ }
82
+
83
+ private:
84
+ benchmark::State& state_;
85
+ };
86
+
87
+ void BenchmarkSetArgsWithSizes(benchmark::internal::Benchmark* bench,
88
+ const std::vector<int64_t>& sizes = kMemorySizes) {
89
+ bench->Unit(benchmark::kMicrosecond);
90
+
91
+ for (const auto size : sizes) {
92
+ for (const auto inverse_null_proportion : kInverseNullProportions) {
93
+ bench->Args({static_cast<ArgsType>(size), inverse_null_proportion});
94
+ }
95
+ }
96
+ }
97
+
98
+ void BenchmarkSetArgs(benchmark::internal::Benchmark* bench) {
99
+ BenchmarkSetArgsWithSizes(bench, kMemorySizes);
100
+ }
101
+
102
+ void RegressionSetArgs(benchmark::internal::Benchmark* bench) {
103
+ // Regressions do not need to account for the cache hierarchy, thus optimize for
104
+ // the best case.
105
+ BenchmarkSetArgsWithSizes(bench, {kL1Size});
106
+ }
107
+
108
+ // RAII struct to handle some of the boilerplate in regression benchmarks
109
+ struct RegressionArgs {
110
+ // size of memory tested (per iteration) in bytes
111
+ int64_t size;
112
+
113
+ // proportion of nulls in generated arrays
114
+ double null_proportion;
115
+
116
+ // If size_is_bytes is true, then it's a number of bytes, otherwise it's the
117
+ // number of items processed (for reporting)
118
+ explicit RegressionArgs(benchmark::State& state, bool size_is_bytes = true)
119
+ : size(state.range(0)), state_(state), size_is_bytes_(size_is_bytes) {
120
+ if (state.range(1) == 0) {
121
+ this->null_proportion = 0.0;
122
+ } else {
123
+ this->null_proportion = std::min(1., 1. / static_cast<double>(state.range(1)));
124
+ }
125
+ }
126
+
127
+ ~RegressionArgs() {
128
+ state_.counters["size"] = static_cast<double>(size);
129
+ state_.counters["null_percent"] = null_proportion * 100;
130
+ if (size_is_bytes_) {
131
+ state_.SetBytesProcessed(state_.iterations() * size);
132
+ } else {
133
+ state_.SetItemsProcessed(state_.iterations() * size);
134
+ }
135
+ }
136
+
137
+ private:
138
+ benchmark::State& state_;
139
+ bool size_is_bytes_;
140
+ };
141
+
142
+ class MemoryPoolMemoryManager : public benchmark::MemoryManager {
143
+ void Start() override {
144
+ memory_pool = std::make_shared<ProxyMemoryPool>(default_memory_pool());
145
+
146
+ MemoryPool* default_pool = default_memory_pool();
147
+ global_allocations_start = default_pool->num_allocations();
148
+ }
149
+
150
+ // BENCHMARK_DONT_OPTIMIZE is used here to detect Google Benchmark
151
+ // 1.8.0. We can remove this Stop(Result*) when we require Google
152
+ // Benchmark 1.8.0 or later.
153
+ #ifndef BENCHMARK_DONT_OPTIMIZE
154
+ void Stop(Result* result) override { Stop(*result); }
155
+ #endif
156
+
157
+ void Stop(benchmark::MemoryManager::Result& result) override {
158
+ // If num_allocations is still zero, we assume that the memory pool wasn't passed down
159
+ // so we should record them.
160
+ MemoryPool* default_pool = default_memory_pool();
161
+ int64_t new_default_allocations =
162
+ default_pool->num_allocations() - global_allocations_start;
163
+
164
+ // Only record metrics if (1) there were allocations and (2) we
165
+ // recorded at least one.
166
+ if (new_default_allocations > 0 && memory_pool->num_allocations() > 0) {
167
+ if (new_default_allocations > memory_pool->num_allocations()) {
168
+ // If we missed some, let's report that.
169
+ int64_t missed_allocations =
170
+ new_default_allocations - memory_pool->num_allocations();
171
+ ARROW_LOG(WARNING) << "BenchmarkMemoryTracker recorded some allocations "
172
+ << "for a benchmark, but missed " << missed_allocations
173
+ << " allocations.\n";
174
+ }
175
+
176
+ result.max_bytes_used = memory_pool->max_memory();
177
+ result.total_allocated_bytes = memory_pool->total_bytes_allocated();
178
+ result.num_allocs = memory_pool->num_allocations();
179
+ }
180
+ }
181
+
182
+ public:
183
+ std::shared_ptr<::arrow::ProxyMemoryPool> memory_pool;
184
+
185
+ protected:
186
+ int64_t global_allocations_start;
187
+ };
188
+
189
+ /// \brief Track memory pool allocations in benchmarks.
190
+ ///
191
+ /// Instantiate as a global variable to register the hooks into Google Benchmark
192
+ /// to collect memory metrics. Before each benchmark, a new ProxyMemoryPool is
193
+ /// created. It can then be accessed with memory_pool(). Once the benchmark is
194
+ /// complete, the hook will record the maximum memory used, the total bytes
195
+ /// allocated, and the total number of allocations. If no allocations were seen,
196
+ /// (for example, if you forgot to pass down the memory pool), then these metrics
197
+ /// will not be saved.
198
+ ///
199
+ /// Since this is used as one global variable, this will not work if multiple
200
+ /// benchmarks are run concurrently or for multi-threaded benchmarks (ones
201
+ /// that use `->ThreadRange(...)`).
202
+ class BenchmarkMemoryTracker {
203
+ public:
204
+ BenchmarkMemoryTracker() : manager_() { ::benchmark::RegisterMemoryManager(&manager_); }
205
+ ::arrow::MemoryPool* memory_pool() const { return manager_.memory_pool.get(); }
206
+
207
+ protected:
208
+ ::arrow::MemoryPoolMemoryManager manager_;
209
+ };
210
+
211
+ } // namespace arrow
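A hedged sketch of how these helpers are typically wired into a Google Benchmark target; the benchmark name BM_ExampleRegression and its body are placeholders, while RegressionArgs and RegressionSetArgs are the helpers defined above.

#include "benchmark/benchmark.h"

#include "arrow/util/benchmark_util.h"

// Placeholder body: a real kernel would generate `args.size` bytes of data
// with `args.null_proportion` nulls and process it inside the loop.
static void BM_ExampleRegression(benchmark::State& state) {
  arrow::RegressionArgs args(state, /*size_is_bytes=*/true);
  for (auto _ : state) {
    benchmark::DoNotOptimize(args.size);
  }
}
// RegressionSetArgs picks an L1-sized payload and sweeps the null proportions.
BENCHMARK(BM_ExampleRegression)->Apply(arrow::RegressionSetArgs);
BENCHMARK_MAIN();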
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/binary_view_util.h ADDED
@@ -0,0 +1,95 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <string_view>
21
+ #include <utility>
22
+
23
+ #include "arrow/type.h"
24
+ #include "arrow/util/span.h"
25
+
26
+ namespace arrow::util {
27
+
28
+ inline BinaryViewType::c_type ToInlineBinaryView(const void* data, int32_t size) {
29
+ // Small string: inlined. Bytes beyond size are zeroed
30
+ BinaryViewType::c_type out;
31
+ out.inlined = {size, {}};
32
+ memcpy(&out.inlined.data, data, size);
33
+ return out;
34
+ }
35
+
36
+ inline BinaryViewType::c_type ToInlineBinaryView(std::string_view v) {
37
+ return ToInlineBinaryView(v.data(), static_cast<int32_t>(v.size()));
38
+ }
39
+
40
+ inline BinaryViewType::c_type ToBinaryView(const void* data, int32_t size,
41
+ int32_t buffer_index, int32_t offset) {
42
+ if (size <= BinaryViewType::kInlineSize) {
43
+ return ToInlineBinaryView(data, size);
44
+ }
45
+
46
+ // Large string: store index/offset.
47
+ BinaryViewType::c_type out;
48
+ out.ref = {size, {}, buffer_index, offset};
49
+ memcpy(&out.ref.prefix, data, sizeof(out.ref.prefix));
50
+ return out;
51
+ }
52
+
53
+ inline BinaryViewType::c_type ToBinaryView(std::string_view v, int32_t buffer_index,
54
+ int32_t offset) {
55
+ return ToBinaryView(v.data(), static_cast<int32_t>(v.size()), buffer_index, offset);
56
+ }
57
+
58
+ template <typename BufferPtr>
59
+ std::string_view FromBinaryView(const BinaryViewType::c_type& v,
60
+ const BufferPtr* data_buffers) {
61
+ auto* data = v.is_inline() ? v.inlined.data.data()
62
+ : data_buffers[v.ref.buffer_index]->data() + v.ref.offset;
63
+ return {reinterpret_cast<const char*>(data), static_cast<size_t>(v.size())};
64
+ }
65
+ template <typename BufferPtr>
66
+ std::string_view FromBinaryView(BinaryViewType::c_type&&, const BufferPtr*) = delete;
67
+
68
+ template <typename BufferPtr>
69
+ bool EqualBinaryView(BinaryViewType::c_type l, BinaryViewType::c_type r,
70
+ const BufferPtr* l_buffers, const BufferPtr* r_buffers) {
71
+ int64_t l_size_and_prefix, r_size_and_prefix;
72
+ memcpy(&l_size_and_prefix, &l, sizeof(l_size_and_prefix));
73
+ memcpy(&r_size_and_prefix, &r, sizeof(r_size_and_prefix));
74
+
75
+ if (l_size_and_prefix != r_size_and_prefix) return false;
76
+
77
+ if (l.is_inline()) {
78
+ // The columnar spec mandates that the inlined part be zero-padded, so we can compare
79
+ // a word at a time regardless of the exact size.
80
+ int64_t l_inlined, r_inlined;
81
+ memcpy(&l_inlined, l.inline_data() + BinaryViewType::kPrefixSize, sizeof(l_inlined));
82
+ memcpy(&r_inlined, r.inline_data() + BinaryViewType::kPrefixSize, sizeof(r_inlined));
83
+ return l_inlined == r_inlined;
84
+ }
85
+
86
+ // Sizes are equal and this is not inline, therefore both are out
87
+ // of line and have kPrefixSize first in common.
88
+ const uint8_t* l_data = l_buffers[l.ref.buffer_index]->data() + l.ref.offset;
89
+ const uint8_t* r_data = r_buffers[r.ref.buffer_index]->data() + r.ref.offset;
90
+ return memcmp(l_data + BinaryViewType::kPrefixSize,
91
+ r_data + BinaryViewType::kPrefixSize,
92
+ l.size() - BinaryViewType::kPrefixSize) == 0;
93
+ }
94
+
95
+ } // namespace arrow::util
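A small sketch of the inline path of these helpers (strings up to BinaryViewType::kInlineSize bytes need no out-of-line buffer); passing a null buffer array is safe here only because the view is inline. Not part of the header.

#include <cassert>
#include <memory>
#include <string_view>

#include "arrow/buffer.h"
#include "arrow/util/binary_view_util.h"

int main() {
  // "abc" fits in the inline representation, so buffer_index/offset are unused.
  auto view = arrow::util::ToBinaryView(std::string_view("abc"),
                                        /*buffer_index=*/0, /*offset=*/0);
  assert(view.is_inline());

  // For an inline view the data buffers are never dereferenced.
  const std::shared_ptr<arrow::Buffer>* no_buffers = nullptr;
  assert(arrow::util::FromBinaryView(view, no_buffers) == "abc");
  return 0;
}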
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_run_reader.h ADDED
@@ -0,0 +1,515 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cassert>
21
+ #include <cstdint>
22
+ #include <cstring>
23
+ #include <string>
24
+
25
+ #include "arrow/util/bit_util.h"
26
+ #include "arrow/util/bitmap_reader.h"
27
+ #include "arrow/util/endian.h"
28
+ #include "arrow/util/macros.h"
29
+ #include "arrow/util/visibility.h"
30
+
31
+ namespace arrow {
32
+ namespace internal {
33
+
34
+ struct BitRun {
35
+ int64_t length;
36
+ // Whether bits are set at this point.
37
+ bool set;
38
+
39
+ std::string ToString() const {
40
+ return std::string("{Length: ") + std::to_string(length) +
41
+ ", set=" + std::to_string(set) + "}";
42
+ }
43
+ };
44
+
45
+ inline bool operator==(const BitRun& lhs, const BitRun& rhs) {
46
+ return lhs.length == rhs.length && lhs.set == rhs.set;
47
+ }
48
+
49
+ inline bool operator!=(const BitRun& lhs, const BitRun& rhs) {
50
+ return lhs.length != rhs.length || lhs.set != rhs.set;
51
+ }
52
+
53
+ class BitRunReaderLinear {
54
+ public:
55
+ BitRunReaderLinear(const uint8_t* bitmap, int64_t start_offset, int64_t length)
56
+ : reader_(bitmap, start_offset, length) {}
57
+
58
+ BitRun NextRun() {
59
+ BitRun rl = {/*length=*/0, reader_.IsSet()};
60
+ // Advance while the values are equal and not at the end of list.
61
+ while (reader_.position() < reader_.length() && reader_.IsSet() == rl.set) {
62
+ rl.length++;
63
+ reader_.Next();
64
+ }
65
+ return rl;
66
+ }
67
+
68
+ private:
69
+ BitmapReader reader_;
70
+ };
71
+
72
+ #if ARROW_LITTLE_ENDIAN
73
+ /// A convenience class for counting the number of contiguous set/unset bits
74
+ /// in a bitmap.
75
+ class ARROW_EXPORT BitRunReader {
76
+ public:
77
+ /// \brief Constructs new BitRunReader.
78
+ ///
79
+ /// \param[in] bitmap source data
80
+ /// \param[in] start_offset bit offset into the source data
81
+ /// \param[in] length number of bits to read
82
+ BitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length);
83
+
84
+ /// Returns a new BitRun containing the number of contiguous
85
+ /// bits with the same value. length == 0 indicates the
86
+ /// end of the bitmap.
87
+ BitRun NextRun() {
88
+ if (ARROW_PREDICT_FALSE(position_ >= length_)) {
89
+ return {/*length=*/0, false};
90
+ }
91
+ // This implementation relies on an efficient implementation of
93
+ // CountTrailingZeros and assumes that runs occur more often than
94
+ // not. The logic is to incrementally find the next bit change
94
+ // from the current position. This is done by zeroing all
95
+ // bits in word_ up to position_ and using the TrailingZeroCount
96
+ // to find the index of the next set bit.
97
+
98
+ // The runs alternate on each call, so flip the bit.
99
+ current_run_bit_set_ = !current_run_bit_set_;
100
+
101
+ int64_t start_position = position_;
102
+ int64_t start_bit_offset = start_position & 63;
103
+ // Invert the word for proper use of CountTrailingZeros and
104
+ // clear bits so CountTrailingZeros can do it magic.
105
+ word_ = ~word_ & ~bit_util::LeastSignificantBitMask(start_bit_offset);
106
+
107
+ // Go forward until the next change from unset to set.
108
+ int64_t new_bits = bit_util::CountTrailingZeros(word_) - start_bit_offset;
109
+ position_ += new_bits;
110
+
111
+ if (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
112
+ ARROW_PREDICT_TRUE(position_ < length_)) {
113
+ // Continue extending position while we can advance an entire word.
114
+ // (updates position_ accordingly).
115
+ AdvanceUntilChange();
116
+ }
117
+
118
+ return {/*length=*/position_ - start_position, current_run_bit_set_};
119
+ }
120
+
121
+ private:
122
+ void AdvanceUntilChange() {
123
+ int64_t new_bits = 0;
124
+ do {
125
+ // Advance the position of the bitmap for loading.
126
+ bitmap_ += sizeof(uint64_t);
127
+ LoadNextWord();
128
+ new_bits = bit_util::CountTrailingZeros(word_);
129
+ // Continue calculating run length.
130
+ position_ += new_bits;
131
+ } while (ARROW_PREDICT_FALSE(bit_util::IsMultipleOf64(position_)) &&
132
+ ARROW_PREDICT_TRUE(position_ < length_) && new_bits > 0);
133
+ }
134
+
135
+ void LoadNextWord() { return LoadWord(length_ - position_); }
136
+
137
+ // Helper method for Loading the next word.
138
+ void LoadWord(int64_t bits_remaining) {
139
+ word_ = 0;
140
+ // we need at least an extra byte in this case.
141
+ if (ARROW_PREDICT_TRUE(bits_remaining >= 64)) {
142
+ std::memcpy(&word_, bitmap_, 8);
143
+ } else {
144
+ int64_t bytes_to_load = bit_util::BytesForBits(bits_remaining);
145
+ auto word_ptr = reinterpret_cast<uint8_t*>(&word_);
146
+ std::memcpy(word_ptr, bitmap_, bytes_to_load);
147
+ // Ensure stoppage at the last bit in the bitmap by inverting the next higher
148
+ // order bit.
149
+ bit_util::SetBitTo(word_ptr, bits_remaining,
150
+ !bit_util::GetBit(word_ptr, bits_remaining - 1));
151
+ }
152
+
153
+ // Two cases:
154
+ // 1. For unset, CountTrailingZeros works naturally so we don't
155
+ // invert the word.
156
+ // 2. Otherwise invert so we can use CountTrailingZeros.
157
+ if (current_run_bit_set_) {
158
+ word_ = ~word_;
159
+ }
160
+ }
161
+ const uint8_t* bitmap_;
162
+ int64_t position_;
163
+ int64_t length_;
164
+ uint64_t word_;
165
+ bool current_run_bit_set_;
166
+ };
167
+ #else
168
+ using BitRunReader = BitRunReaderLinear;
169
+ #endif
170
+
171
+ struct SetBitRun {
172
+ int64_t position;
173
+ int64_t length;
174
+
175
+ bool AtEnd() const { return length == 0; }
176
+
177
+ std::string ToString() const {
178
+ return std::string("{pos=") + std::to_string(position) +
179
+ ", len=" + std::to_string(length) + "}";
180
+ }
181
+
182
+ bool operator==(const SetBitRun& other) const {
183
+ return position == other.position && length == other.length;
184
+ }
185
+ bool operator!=(const SetBitRun& other) const {
186
+ return position != other.position || length != other.length;
187
+ }
188
+ };
189
+
190
+ template <bool Reverse>
191
+ class BaseSetBitRunReader {
192
+ public:
193
+ /// \brief Constructs new SetBitRunReader.
194
+ ///
195
+ /// \param[in] bitmap source data
196
+ /// \param[in] start_offset bit offset into the source data
197
+ /// \param[in] length number of bits to read
198
+ ARROW_NOINLINE
199
+ BaseSetBitRunReader(const uint8_t* bitmap, int64_t start_offset, int64_t length)
200
+ : bitmap_(util::MakeNonNull(bitmap)),
201
+ length_(length),
202
+ remaining_(length_),
203
+ current_word_(0),
204
+ current_num_bits_(0) {
205
+ if (Reverse) {
206
+ bitmap_ += (start_offset + length) / 8;
207
+ const int8_t end_bit_offset = static_cast<int8_t>((start_offset + length) % 8);
208
+ if (length > 0 && end_bit_offset) {
209
+ // Get LSBs from last byte
210
+ ++bitmap_;
211
+ current_num_bits_ =
212
+ std::min(static_cast<int32_t>(length), static_cast<int32_t>(end_bit_offset));
213
+ current_word_ = LoadPartialWord(8 - end_bit_offset, current_num_bits_);
214
+ }
215
+ } else {
216
+ bitmap_ += start_offset / 8;
217
+ const int8_t bit_offset = static_cast<int8_t>(start_offset % 8);
218
+ if (length > 0 && bit_offset) {
219
+ // Get MSBs from first byte
220
+ current_num_bits_ =
221
+ std::min(static_cast<int32_t>(length), static_cast<int32_t>(8 - bit_offset));
222
+ current_word_ = LoadPartialWord(bit_offset, current_num_bits_);
223
+ }
224
+ }
225
+ }
226
+
227
+ ARROW_NOINLINE
228
+ SetBitRun NextRun() {
229
+ int64_t pos = 0;
230
+ int64_t len = 0;
231
+ if (current_num_bits_) {
232
+ const auto run = FindCurrentRun();
233
+ assert(remaining_ >= 0);
234
+ if (run.length && current_num_bits_) {
235
+ // The run ends in current_word_
236
+ return AdjustRun(run);
237
+ }
238
+ pos = run.position;
239
+ len = run.length;
240
+ }
241
+ if (!len) {
242
+ // We didn't get any ones in current_word_, so we can skip any zeros
243
+ // in the following words
244
+ SkipNextZeros();
245
+ if (remaining_ == 0) {
246
+ return {0, 0};
247
+ }
248
+ assert(current_num_bits_);
249
+ pos = position();
250
+ } else if (!current_num_bits_) {
251
+ if (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
252
+ current_word_ = LoadFullWord();
253
+ current_num_bits_ = 64;
254
+ } else if (remaining_ > 0) {
255
+ current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
256
+ current_num_bits_ = static_cast<int32_t>(remaining_);
257
+ } else {
258
+ // No bits remaining, perhaps we found a run?
259
+ return AdjustRun({pos, len});
260
+ }
261
+ // If current word starts with a zero, we got a full run
262
+ if (!(current_word_ & kFirstBit)) {
263
+ return AdjustRun({pos, len});
264
+ }
265
+ }
266
+ // Current word should now start with a set bit
267
+ len += CountNextOnes();
268
+ return AdjustRun({pos, len});
269
+ }
270
+
271
+ protected:
272
+ int64_t position() const {
273
+ if (Reverse) {
274
+ return remaining_;
275
+ } else {
276
+ return length_ - remaining_;
277
+ }
278
+ }
279
+
280
+ SetBitRun AdjustRun(SetBitRun run) {
281
+ if (Reverse) {
282
+ assert(run.position >= run.length);
283
+ run.position -= run.length;
284
+ }
285
+ return run;
286
+ }
287
+
288
+ uint64_t LoadFullWord() {
289
+ uint64_t word;
290
+ if (Reverse) {
291
+ bitmap_ -= 8;
292
+ }
293
+ memcpy(&word, bitmap_, 8);
294
+ if (!Reverse) {
295
+ bitmap_ += 8;
296
+ }
297
+ return bit_util::ToLittleEndian(word);
298
+ }
299
+
300
+ uint64_t LoadPartialWord(int8_t bit_offset, int64_t num_bits) {
301
+ assert(num_bits > 0);
302
+ uint64_t word = 0;
303
+ const int64_t num_bytes = bit_util::BytesForBits(num_bits);
304
+ if (Reverse) {
305
+ // Read in the most significant bytes of the word
306
+ bitmap_ -= num_bytes;
307
+ memcpy(reinterpret_cast<char*>(&word) + 8 - num_bytes, bitmap_, num_bytes);
308
+ // XXX MostSignificantBitmask
309
+ return (bit_util::ToLittleEndian(word) << bit_offset) &
310
+ ~bit_util::LeastSignificantBitMask(64 - num_bits);
311
+ } else {
312
+ memcpy(&word, bitmap_, num_bytes);
313
+ bitmap_ += num_bytes;
314
+ return (bit_util::ToLittleEndian(word) >> bit_offset) &
315
+ bit_util::LeastSignificantBitMask(num_bits);
316
+ }
317
+ }
318
+
319
+ void SkipNextZeros() {
320
+ assert(current_num_bits_ == 0);
321
+ while (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
322
+ current_word_ = LoadFullWord();
323
+ const auto num_zeros = CountFirstZeros(current_word_);
324
+ if (num_zeros < 64) {
325
+ // Run of zeros ends here
326
+ current_word_ = ConsumeBits(current_word_, num_zeros);
327
+ current_num_bits_ = 64 - num_zeros;
328
+ remaining_ -= num_zeros;
329
+ assert(remaining_ >= 0);
330
+ assert(current_num_bits_ >= 0);
331
+ return;
332
+ }
333
+ remaining_ -= 64;
334
+ }
335
+ // Run of zeros continues in last bitmap word
336
+ if (remaining_ > 0) {
337
+ current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
338
+ current_num_bits_ = static_cast<int32_t>(remaining_);
339
+ const auto num_zeros =
340
+ std::min<int32_t>(current_num_bits_, CountFirstZeros(current_word_));
341
+ current_word_ = ConsumeBits(current_word_, num_zeros);
342
+ current_num_bits_ -= num_zeros;
343
+ remaining_ -= num_zeros;
344
+ assert(remaining_ >= 0);
345
+ assert(current_num_bits_ >= 0);
346
+ }
347
+ }
348
+
349
+ int64_t CountNextOnes() {
350
+ assert(current_word_ & kFirstBit);
351
+
352
+ int64_t len;
353
+ if (~current_word_) {
354
+ const auto num_ones = CountFirstZeros(~current_word_);
355
+ assert(num_ones <= current_num_bits_);
356
+ assert(num_ones <= remaining_);
357
+ remaining_ -= num_ones;
358
+ current_word_ = ConsumeBits(current_word_, num_ones);
359
+ current_num_bits_ -= num_ones;
360
+ if (current_num_bits_) {
361
+ // Run of ones ends here
362
+ return num_ones;
363
+ }
364
+ len = num_ones;
365
+ } else {
366
+ // current_word_ is all ones
367
+ remaining_ -= 64;
368
+ current_num_bits_ = 0;
369
+ len = 64;
370
+ }
371
+
372
+ while (ARROW_PREDICT_TRUE(remaining_ >= 64)) {
373
+ current_word_ = LoadFullWord();
374
+ const auto num_ones = CountFirstZeros(~current_word_);
375
+ len += num_ones;
376
+ remaining_ -= num_ones;
377
+ if (num_ones < 64) {
378
+ // Run of ones ends here
379
+ current_word_ = ConsumeBits(current_word_, num_ones);
380
+ current_num_bits_ = 64 - num_ones;
381
+ return len;
382
+ }
383
+ }
384
+ // Run of ones continues in last bitmap word
385
+ if (remaining_ > 0) {
386
+ current_word_ = LoadPartialWord(/*bit_offset=*/0, remaining_);
387
+ current_num_bits_ = static_cast<int32_t>(remaining_);
388
+ const auto num_ones = CountFirstZeros(~current_word_);
389
+ assert(num_ones <= current_num_bits_);
390
+ assert(num_ones <= remaining_);
391
+ current_word_ = ConsumeBits(current_word_, num_ones);
392
+ current_num_bits_ -= num_ones;
393
+ remaining_ -= num_ones;
394
+ len += num_ones;
395
+ }
396
+ return len;
397
+ }
398
+
399
+ SetBitRun FindCurrentRun() {
400
+ // Skip any pending zeros
401
+ const auto num_zeros = CountFirstZeros(current_word_);
402
+ if (num_zeros >= current_num_bits_) {
403
+ remaining_ -= current_num_bits_;
404
+ current_word_ = 0;
405
+ current_num_bits_ = 0;
406
+ return {0, 0};
407
+ }
408
+ assert(num_zeros <= remaining_);
409
+ current_word_ = ConsumeBits(current_word_, num_zeros);
410
+ current_num_bits_ -= num_zeros;
411
+ remaining_ -= num_zeros;
412
+ const int64_t pos = position();
413
+ // Count any ones
414
+ const auto num_ones = CountFirstZeros(~current_word_);
415
+ assert(num_ones <= current_num_bits_);
416
+ assert(num_ones <= remaining_);
417
+ current_word_ = ConsumeBits(current_word_, num_ones);
418
+ current_num_bits_ -= num_ones;
419
+ remaining_ -= num_ones;
420
+ return {pos, num_ones};
421
+ }
422
+
423
+ inline int CountFirstZeros(uint64_t word);
424
+ inline uint64_t ConsumeBits(uint64_t word, int32_t num_bits);
425
+
426
+ const uint8_t* bitmap_;
427
+ const int64_t length_;
428
+ int64_t remaining_;
429
+ uint64_t current_word_;
430
+ int32_t current_num_bits_;
431
+
432
+ static constexpr uint64_t kFirstBit = Reverse ? 0x8000000000000000ULL : 1;
433
+ };
434
+
435
+ template <>
436
+ inline int BaseSetBitRunReader<false>::CountFirstZeros(uint64_t word) {
437
+ return bit_util::CountTrailingZeros(word);
438
+ }
439
+
440
+ template <>
441
+ inline int BaseSetBitRunReader<true>::CountFirstZeros(uint64_t word) {
442
+ return bit_util::CountLeadingZeros(word);
443
+ }
444
+
445
+ template <>
446
+ inline uint64_t BaseSetBitRunReader<false>::ConsumeBits(uint64_t word, int32_t num_bits) {
447
+ return word >> num_bits;
448
+ }
449
+
450
+ template <>
451
+ inline uint64_t BaseSetBitRunReader<true>::ConsumeBits(uint64_t word, int32_t num_bits) {
452
+ return word << num_bits;
453
+ }
454
+
455
+ using SetBitRunReader = BaseSetBitRunReader</*Reverse=*/false>;
456
+ using ReverseSetBitRunReader = BaseSetBitRunReader</*Reverse=*/true>;
457
+
458
+ // Functional-style bit run visitors.
459
+
460
+ // XXX: Try to make this function small so the compiler can inline and optimize
461
+ // the `visit` function, which is normally a hot loop with vectorizable code.
462
+ // - don't inline SetBitRunReader constructor, it doesn't hurt performance
463
+ // - un-inline NextRun hurts 'many null' cases a bit, but improves normal cases
464
+ template <typename Visit>
465
+ inline Status VisitSetBitRuns(const uint8_t* bitmap, int64_t offset, int64_t length,
466
+ Visit&& visit) {
467
+ if (bitmap == NULLPTR) {
468
+ // Assuming all set (as in a null bitmap)
469
+ return visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
470
+ }
471
+ SetBitRunReader reader(bitmap, offset, length);
472
+ while (true) {
473
+ const auto run = reader.NextRun();
474
+ if (run.length == 0) {
475
+ break;
476
+ }
477
+ ARROW_RETURN_NOT_OK(visit(run.position, run.length));
478
+ }
479
+ return Status::OK();
480
+ }
481
+
482
+ template <typename Visit>
483
+ inline void VisitSetBitRunsVoid(const uint8_t* bitmap, int64_t offset, int64_t length,
484
+ Visit&& visit) {
485
+ if (bitmap == NULLPTR) {
486
+ // Assuming all set (as in a null bitmap)
487
+ visit(static_cast<int64_t>(0), static_cast<int64_t>(length));
488
+ return;
489
+ }
490
+ SetBitRunReader reader(bitmap, offset, length);
491
+ while (true) {
492
+ const auto run = reader.NextRun();
493
+ if (run.length == 0) {
494
+ break;
495
+ }
496
+ visit(run.position, run.length);
497
+ }
498
+ }
499
+
500
+ template <typename Visit>
501
+ inline Status VisitSetBitRuns(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
502
+ int64_t length, Visit&& visit) {
503
+ return VisitSetBitRuns(bitmap ? bitmap->data() : NULLPTR, offset, length,
504
+ std::forward<Visit>(visit));
505
+ }
506
+
507
+ template <typename Visit>
508
+ inline void VisitSetBitRunsVoid(const std::shared_ptr<Buffer>& bitmap, int64_t offset,
509
+ int64_t length, Visit&& visit) {
510
+ VisitSetBitRunsVoid(bitmap ? bitmap->data() : NULLPTR, offset, length,
511
+ std::forward<Visit>(visit));
512
+ }
513
+
514
+ } // namespace internal
515
+ } // namespace arrow
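A brief sketch of the visitor helpers declared above, run over a hand-written 16-bit validity bitmap; the expected runs follow directly from the two bytes chosen. Not part of the header.

#include <cstdint>
#include <iostream>

#include "arrow/util/bit_run_reader.h"

int main() {
  // Bits 0-7 set (0xFF); bits 8-11 clear and bits 12-15 set (0xF0, LSB-first).
  const uint8_t bitmap[] = {0xFF, 0xF0};
  arrow::internal::VisitSetBitRunsVoid(
      bitmap, /*offset=*/0, /*length=*/16,
      [](int64_t position, int64_t length) {
        std::cout << "set-bit run at " << position << ", length " << length << "\n";
      });
  // Prints two runs: {position=0, length=8} and {position=12, length=4}.
  return 0;
}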
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bit_util.h ADDED
@@ -0,0 +1,370 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #if defined(_MSC_VER)
21
+ #if defined(_M_AMD64) || defined(_M_X64)
22
+ #include <intrin.h> // IWYU pragma: keep
23
+ #include <nmmintrin.h>
24
+ #endif
25
+
26
+ #pragma intrinsic(_BitScanReverse)
27
+ #pragma intrinsic(_BitScanForward)
28
+ #define ARROW_POPCOUNT64 __popcnt64
29
+ #define ARROW_POPCOUNT32 __popcnt
30
+ #else
31
+ #define ARROW_POPCOUNT64 __builtin_popcountll
32
+ #define ARROW_POPCOUNT32 __builtin_popcount
33
+ #endif
34
+
35
+ #include <cstdint>
36
+ #include <type_traits>
37
+
38
+ #include "arrow/util/macros.h"
39
+ #include "arrow/util/visibility.h"
40
+
41
+ namespace arrow {
42
+ namespace detail {
43
+
44
+ template <typename Integer>
45
+ typename std::make_unsigned<Integer>::type as_unsigned(Integer x) {
46
+ return static_cast<typename std::make_unsigned<Integer>::type>(x);
47
+ }
48
+
49
+ } // namespace detail
50
+
51
+ namespace bit_util {
52
+
53
+ // The number of set bits in a given unsigned byte value, pre-computed
54
+ //
55
+ // Generated with the following Python code
56
+ // output = 'static constexpr uint8_t kBytePopcount[] = {{{0}}};'
57
+ // popcounts = [str(bin(i).count('1')) for i in range(0, 256)]
58
+ // print(output.format(', '.join(popcounts)))
59
+ static constexpr uint8_t kBytePopcount[] = {
60
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3,
61
+ 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4,
62
+ 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4,
63
+ 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5,
64
+ 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2,
65
+ 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5,
66
+ 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4,
67
+ 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6,
68
+ 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8};
69
+
70
+ static inline uint64_t PopCount(uint64_t bitmap) { return ARROW_POPCOUNT64(bitmap); }
71
+ static inline uint32_t PopCount(uint32_t bitmap) { return ARROW_POPCOUNT32(bitmap); }
72
+
73
+ //
74
+ // Bit-related computations on integer values
75
+ //
76
+
77
+ // Returns the ceil of value/divisor
78
+ constexpr int64_t CeilDiv(int64_t value, int64_t divisor) {
79
+ return (value == 0) ? 0 : 1 + (value - 1) / divisor;
80
+ }
81
+
82
+ // Return the number of bytes needed to fit the given number of bits
83
+ constexpr int64_t BytesForBits(int64_t bits) {
84
+ // This formula avoids integer overflow on very large `bits`
85
+ return (bits >> 3) + ((bits & 7) != 0);
86
+ }
87
+
88
+ constexpr bool IsPowerOf2(int64_t value) {
89
+ return value > 0 && (value & (value - 1)) == 0;
90
+ }
91
+
92
+ constexpr bool IsPowerOf2(uint64_t value) {
93
+ return value > 0 && (value & (value - 1)) == 0;
94
+ }
95
+
96
+ // Returns the smallest power of two that contains v. If v is already a
97
+ // power of two, it is returned as is.
98
+ static inline int64_t NextPower2(int64_t n) {
99
+ // Taken from
100
+ // http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
101
+ n--;
102
+ n |= n >> 1;
103
+ n |= n >> 2;
104
+ n |= n >> 4;
105
+ n |= n >> 8;
106
+ n |= n >> 16;
107
+ n |= n >> 32;
108
+ n++;
109
+ return n;
110
+ }
111
+
112
+ constexpr bool IsMultipleOf64(int64_t n) { return (n & 63) == 0; }
113
+
114
+ constexpr bool IsMultipleOf8(int64_t n) { return (n & 7) == 0; }
115
+
116
+ // Returns a mask for the bit_index lower order bits.
117
+ // Only valid for bit_index in the range [0, 64).
118
+ constexpr uint64_t LeastSignificantBitMask(int64_t bit_index) {
119
+ return (static_cast<uint64_t>(1) << bit_index) - 1;
120
+ }
121
+
122
+ // Returns 'value' rounded up to the nearest multiple of 'factor'
123
+ constexpr int64_t RoundUp(int64_t value, int64_t factor) {
124
+ return CeilDiv(value, factor) * factor;
125
+ }
126
+
127
+ // Returns 'value' rounded down to the nearest multiple of 'factor'
128
+ constexpr int64_t RoundDown(int64_t value, int64_t factor) {
129
+ return (value / factor) * factor;
130
+ }
131
+
132
+ // Returns 'value' rounded up to the nearest multiple of 'factor' when factor
133
+ // is a power of two.
134
+ // The result is undefined on overflow, i.e. if `value > 2**64 - factor`,
135
+ // since we cannot return the correct result which would be 2**64.
136
+ constexpr int64_t RoundUpToPowerOf2(int64_t value, int64_t factor) {
137
+ // DCHECK(value >= 0);
138
+ // DCHECK(IsPowerOf2(factor));
139
+ return (value + (factor - 1)) & ~(factor - 1);
140
+ }
141
+
142
+ constexpr uint64_t RoundUpToPowerOf2(uint64_t value, uint64_t factor) {
143
+ // DCHECK(IsPowerOf2(factor));
144
+ return (value + (factor - 1)) & ~(factor - 1);
145
+ }
146
+
147
+ constexpr int64_t RoundUpToMultipleOf8(int64_t num) { return RoundUpToPowerOf2(num, 8); }
148
+
149
+ constexpr int64_t RoundUpToMultipleOf64(int64_t num) {
150
+ return RoundUpToPowerOf2(num, 64);
151
+ }
152
+
153
+ // Returns the number of bytes covering a sliced bitmap. Find the length
154
+ // rounded to cover full bytes on both extremities.
155
+ //
156
+ // The following example represents a slice (offset=10, length=9)
157
+ //
158
+ // 0 8 16 24
159
+ // |-------|-------|------|
160
+ // [ ] (slice)
161
+ // [ ] (same slice aligned to bytes bounds, length=16)
162
+ //
163
+ // The covering bytes is the length (in bytes) of this new aligned slice.
164
+ constexpr int64_t CoveringBytes(int64_t offset, int64_t length) {
165
+ return (bit_util::RoundUp(length + offset, 8) - bit_util::RoundDown(offset, 8)) / 8;
166
+ }
167
+
168
+ // Returns the 'num_bits' least-significant bits of 'v'.
169
+ static inline uint64_t TrailingBits(uint64_t v, int num_bits) {
170
+ if (ARROW_PREDICT_FALSE(num_bits == 0)) return 0;
171
+ if (ARROW_PREDICT_FALSE(num_bits >= 64)) return v;
172
+ int n = 64 - num_bits;
173
+ return (v << n) >> n;
174
+ }
175
+
176
+ /// \brief Count the number of leading zeros in an unsigned integer.
177
+ static inline int CountLeadingZeros(uint32_t value) {
178
+ #if defined(__clang__) || defined(__GNUC__)
179
+ if (value == 0) return 32;
180
+ return static_cast<int>(__builtin_clz(value));
181
+ #elif defined(_MSC_VER)
182
+ unsigned long index; // NOLINT
183
+ if (_BitScanReverse(&index, static_cast<unsigned long>(value))) { // NOLINT
184
+ return 31 - static_cast<int>(index);
185
+ } else {
186
+ return 32;
187
+ }
188
+ #else
189
+ int bitpos = 0;
190
+ while (value != 0) {
191
+ value >>= 1;
192
+ ++bitpos;
193
+ }
194
+ return 32 - bitpos;
195
+ #endif
196
+ }
197
+
198
+ static inline int CountLeadingZeros(uint64_t value) {
199
+ #if defined(__clang__) || defined(__GNUC__)
200
+ if (value == 0) return 64;
201
+ return static_cast<int>(__builtin_clzll(value));
202
+ #elif defined(_MSC_VER)
203
+ unsigned long index; // NOLINT
204
+ if (_BitScanReverse64(&index, value)) { // NOLINT
205
+ return 63 - static_cast<int>(index);
206
+ } else {
207
+ return 64;
208
+ }
209
+ #else
210
+ int bitpos = 0;
211
+ while (value != 0) {
212
+ value >>= 1;
213
+ ++bitpos;
214
+ }
215
+ return 64 - bitpos;
216
+ #endif
217
+ }
218
+
219
+ static inline int CountTrailingZeros(uint32_t value) {
220
+ #if defined(__clang__) || defined(__GNUC__)
221
+ if (value == 0) return 32;
222
+ return static_cast<int>(__builtin_ctzl(value));
223
+ #elif defined(_MSC_VER)
224
+ unsigned long index; // NOLINT
225
+ if (_BitScanForward(&index, value)) {
226
+ return static_cast<int>(index);
227
+ } else {
228
+ return 32;
229
+ }
230
+ #else
231
+ int bitpos = 0;
232
+ if (value) {
233
+ while (value & 1 == 0) {
234
+ value >>= 1;
235
+ ++bitpos;
236
+ }
237
+ } else {
238
+ bitpos = 32;
239
+ }
240
+ return bitpos;
241
+ #endif
242
+ }
243
+
244
+ static inline int CountTrailingZeros(uint64_t value) {
245
+ #if defined(__clang__) || defined(__GNUC__)
246
+ if (value == 0) return 64;
247
+ return static_cast<int>(__builtin_ctzll(value));
248
+ #elif defined(_MSC_VER)
249
+ unsigned long index; // NOLINT
250
+ if (_BitScanForward64(&index, value)) {
251
+ return static_cast<int>(index);
252
+ } else {
253
+ return 64;
254
+ }
255
+ #else
256
+ int bitpos = 0;
257
+ if (value) {
258
+ while (value & 1 == 0) {
259
+ value >>= 1;
260
+ ++bitpos;
261
+ }
262
+ } else {
263
+ bitpos = 64;
264
+ }
265
+ return bitpos;
266
+ #endif
267
+ }
268
+
269
+ // Returns the minimum number of bits needed to represent an unsigned value
270
+ static inline int NumRequiredBits(uint64_t x) { return 64 - CountLeadingZeros(x); }
271
+
272
+ // Returns ceil(log2(x)).
273
+ static inline int Log2(uint64_t x) {
274
+ // DCHECK_GT(x, 0);
275
+ return NumRequiredBits(x - 1);
276
+ }
277
+
278
+ //
279
+ // Utilities for reading and writing individual bits by their index
280
+ // in a memory area.
281
+ //
282
+
283
+ // Bitmask selecting the k-th bit in a byte
284
+ static constexpr uint8_t kBitmask[] = {1, 2, 4, 8, 16, 32, 64, 128};
285
+
286
+ // the bitwise complement version of kBitmask
287
+ static constexpr uint8_t kFlippedBitmask[] = {254, 253, 251, 247, 239, 223, 191, 127};
288
+
289
+ // Bitmask selecting the (k - 1) preceding bits in a byte
290
+ static constexpr uint8_t kPrecedingBitmask[] = {0, 1, 3, 7, 15, 31, 63, 127};
291
+ static constexpr uint8_t kPrecedingWrappingBitmask[] = {255, 1, 3, 7, 15, 31, 63, 127};
292
+
293
+ // the bitwise complement version of kPrecedingBitmask
294
+ static constexpr uint8_t kTrailingBitmask[] = {255, 254, 252, 248, 240, 224, 192, 128};
295
+
296
+ static constexpr bool GetBit(const uint8_t* bits, uint64_t i) {
297
+ return (bits[i >> 3] >> (i & 0x07)) & 1;
298
+ }
299
+
300
+ // Gets the i-th bit from a byte. Should only be used with i <= 7.
301
+ static constexpr bool GetBitFromByte(uint8_t byte, uint8_t i) {
302
+ return byte & kBitmask[i];
303
+ }
304
+
305
+ static inline void ClearBit(uint8_t* bits, int64_t i) {
306
+ bits[i / 8] &= kFlippedBitmask[i % 8];
307
+ }
308
+
309
+ static inline void SetBit(uint8_t* bits, int64_t i) { bits[i / 8] |= kBitmask[i % 8]; }
310
+
311
+ static inline void SetBitTo(uint8_t* bits, int64_t i, bool bit_is_set) {
312
+ // https://graphics.stanford.edu/~seander/bithacks.html
313
+ // "Conditionally set or clear bits without branching"
314
+ // NOTE: this seems to confuse Valgrind as it reads from potentially
315
+ // uninitialized memory
316
+ bits[i / 8] ^= static_cast<uint8_t>(-static_cast<uint8_t>(bit_is_set) ^ bits[i / 8]) &
317
+ kBitmask[i % 8];
318
+ }
319
+
320
+ /// \brief set or clear a range of bits quickly
321
+ ARROW_EXPORT
322
+ void SetBitsTo(uint8_t* bits, int64_t start_offset, int64_t length, bool bits_are_set);
323
+
324
+ /// \brief Sets all bits in the bitmap to true
325
+ ARROW_EXPORT
326
+ void SetBitmap(uint8_t* data, int64_t offset, int64_t length);
327
+
328
+ /// \brief Clears all bits in the bitmap (set to false)
329
+ ARROW_EXPORT
330
+ void ClearBitmap(uint8_t* data, int64_t offset, int64_t length);
331
+
332
+ /// Returns a mask with lower i bits set to 1. If i >= sizeof(Word)*8, all-ones will be
333
+ /// returned
334
+ /// ex:
335
+ /// ref: https://stackoverflow.com/a/59523400
336
+ template <typename Word>
337
+ constexpr Word PrecedingWordBitmask(unsigned int const i) {
338
+ return static_cast<Word>(static_cast<Word>(i < sizeof(Word) * 8)
339
+ << (i & (sizeof(Word) * 8 - 1))) -
340
+ 1;
341
+ }
342
+ static_assert(PrecedingWordBitmask<uint8_t>(0) == 0x00, "");
343
+ static_assert(PrecedingWordBitmask<uint8_t>(4) == 0x0f, "");
344
+ static_assert(PrecedingWordBitmask<uint8_t>(8) == 0xff, "");
345
+ static_assert(PrecedingWordBitmask<uint16_t>(8) == 0x00ff, "");
346
+
347
+ /// \brief Create a word with low `n` bits from `low` and high `sizeof(Word)-n` bits
348
+ /// from `high`.
349
+ /// Word ret
350
+ /// for (i = 0; i < sizeof(Word)*8; i++){
351
+ /// ret[i]= i < n ? low[i]: high[i];
352
+ /// }
353
+ template <typename Word>
354
+ constexpr Word SpliceWord(int n, Word low, Word high) {
355
+ return (high & ~PrecedingWordBitmask<Word>(n)) | (low & PrecedingWordBitmask<Word>(n));
356
+ }
357
+
358
+ /// \brief Pack integers into a bitmap in batches of 8
359
+ template <int batch_size>
360
+ void PackBits(const uint32_t* values, uint8_t* out) {
361
+ for (int i = 0; i < batch_size / 8; ++i) {
362
+ *out++ = static_cast<uint8_t>(values[0] | values[1] << 1 | values[2] << 2 |
363
+ values[3] << 3 | values[4] << 4 | values[5] << 5 |
364
+ values[6] << 6 | values[7] << 7);
365
+ values += 8;
366
+ }
367
+ }
368
+
369
+ } // namespace bit_util
370
+ } // namespace arrow
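A few spot checks of the arithmetic and bit-access helpers above, written as a hedged sketch; the expected values follow directly from the definitions in the header. Not part of the header.

#include <cassert>
#include <cstdint>

#include "arrow/util/bit_util.h"

int main() {
  namespace bu = arrow::bit_util;
  assert(bu::BytesForBits(17) == 3);                    // 17 bits need 3 bytes
  assert(bu::CeilDiv(10, 4) == 3);
  assert(bu::RoundUpToMultipleOf64(100) == 128);
  assert(bu::CountTrailingZeros(uint64_t{0x80}) == 7);
  assert(bu::NumRequiredBits(255) == 8);

  uint8_t bits[1] = {0};
  bu::SetBit(bits, 5);
  assert(bu::GetBit(bits, 5) && !bu::GetBit(bits, 4));
  return 0;
}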
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap.h ADDED
@@ -0,0 +1,466 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <algorithm>
21
+ #include <array>
22
+ #include <bitset>
23
+ #include <cassert>
24
+ #include <cstdint>
25
+ #include <cstring>
26
+ #include <memory>
27
+ #include <string>
28
+ #include <string_view>
29
+ #include <utility>
30
+
31
+ #include "arrow/buffer.h"
32
+ #include "arrow/util/bit_util.h"
33
+ #include "arrow/util/bitmap_ops.h"
34
+ #include "arrow/util/bitmap_reader.h"
35
+ #include "arrow/util/bitmap_writer.h"
36
+ #include "arrow/util/compare.h"
37
+ #include "arrow/util/endian.h"
38
+ #include "arrow/util/functional.h"
39
+ #include "arrow/util/span.h"
40
+ #include "arrow/util/string_builder.h"
41
+ #include "arrow/util/visibility.h"
42
+
43
+ namespace arrow {
44
+
45
+ class BooleanArray;
46
+
47
+ namespace internal {
48
+
49
+ class ARROW_EXPORT Bitmap : public util::ToStringOstreamable<Bitmap>,
50
+ public util::EqualityComparable<Bitmap> {
51
+ public:
52
+ Bitmap() = default;
53
+
54
+ Bitmap(const std::shared_ptr<Buffer>& buffer, int64_t offset, int64_t length)
55
+ : data_(buffer->data()), offset_(offset), length_(length) {
56
+ if (buffer->is_mutable()) {
57
+ mutable_data_ = buffer->mutable_data();
58
+ }
59
+ }
60
+
61
+ Bitmap(const void* data, int64_t offset, int64_t length)
62
+ : data_(reinterpret_cast<const uint8_t*>(data)), offset_(offset), length_(length) {}
63
+
64
+ Bitmap(void* data, int64_t offset, int64_t length)
65
+ : data_(reinterpret_cast<const uint8_t*>(data)),
66
+ mutable_data_(reinterpret_cast<uint8_t*>(data)),
67
+ offset_(offset),
68
+ length_(length) {}
69
+
70
+ Bitmap Slice(int64_t offset) const {
71
+ if (mutable_data_ != NULLPTR) {
72
+ return {mutable_data_, offset_ + offset, length_ - offset};
73
+ } else {
74
+ return {data_, offset_ + offset, length_ - offset};
75
+ }
76
+ }
77
+
78
+ Bitmap Slice(int64_t offset, int64_t length) const {
79
+ if (mutable_data_ != NULLPTR) {
80
+ return {mutable_data_, offset_ + offset, length};
81
+ } else {
82
+ return {data_, offset_ + offset, length};
83
+ }
84
+ }
85
+
86
+ std::string ToString() const;
87
+
88
+ bool Equals(const Bitmap& other) const;
89
+
90
+ std::string Diff(const Bitmap& other) const;
91
+
92
+ bool GetBit(int64_t i) const { return bit_util::GetBit(data_, i + offset_); }
93
+
94
+ bool operator[](int64_t i) const { return GetBit(i); }
95
+
96
+ void SetBitTo(int64_t i, bool v) const {
97
+ bit_util::SetBitTo(mutable_data_, i + offset_, v);
98
+ }
99
+
100
+ void SetBitsTo(bool v) { bit_util::SetBitsTo(mutable_data_, offset_, length_, v); }
101
+
102
+ void CopyFrom(const Bitmap& other);
103
+ void CopyFromInverted(const Bitmap& other);
104
+
105
+ /// \brief Visit bits from each bitmap as bitset<N>
106
+ ///
107
+ /// All bitmaps must have identical length.
108
+ template <size_t N, typename Visitor>
109
+ static void VisitBits(const Bitmap (&bitmaps)[N], Visitor&& visitor) {
110
+ int64_t bit_length = BitLength(bitmaps, N);
111
+ std::bitset<N> bits;
112
+ for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) {
113
+ for (size_t i = 0; i < N; ++i) {
114
+ bits[i] = bitmaps[i].GetBit(bit_i);
115
+ }
116
+ visitor(bits);
117
+ }
118
+ }
119
+
120
+ /// \brief Visit bits from each bitmap as bitset<N>
121
+ ///
122
+ /// All bitmaps must have identical length.
123
+ template <size_t N, typename Visitor>
124
+ static void VisitBits(const std::array<Bitmap, N>& bitmaps, Visitor&& visitor) {
125
+ int64_t bit_length = BitLength(bitmaps);
126
+ std::bitset<N> bits;
127
+ for (int64_t bit_i = 0; bit_i < bit_length; ++bit_i) {
128
+ for (size_t i = 0; i < N; ++i) {
129
+ bits[i] = bitmaps[i].GetBit(bit_i);
130
+ }
131
+ visitor(bits);
132
+ }
133
+ }
134
+
135
+ /// \brief Visit words of bits from each bitmap as array<Word, N>
136
+ ///
137
+ /// All bitmaps must have identical length. The first bit in a visited bitmap
138
+ /// may be offset within the first visited word, but words will otherwise contain
139
+ /// densely packed bits loaded from the bitmap. That offset within the first word is
140
+ /// returned.
141
+ ///
142
+ /// TODO(bkietz) allow for early termination
143
+ // NOTE: this function is efficient on 3+ sufficiently large bitmaps.
144
+ // It also has a large prolog / epilog overhead and should be used
145
+ // carefully in other cases.
146
+ // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid
147
+ // and BitmapUInt64Reader.
148
+ template <size_t N, typename Visitor,
149
+ typename Word = typename std::decay<
150
+ internal::call_traits::argument_type<0, Visitor&&>>::type::value_type>
151
+ static int64_t VisitWords(const Bitmap (&bitmaps_arg)[N], Visitor&& visitor) {
152
+ constexpr int64_t kBitWidth = sizeof(Word) * 8;
153
+
154
+ // local, mutable variables which will be sliced/decremented to represent consumption:
155
+ Bitmap bitmaps[N];
156
+ int64_t offsets[N];
157
+ int64_t bit_length = BitLength(bitmaps_arg, N);
158
+ util::span<const Word> words[N];
159
+ for (size_t i = 0; i < N; ++i) {
160
+ bitmaps[i] = bitmaps_arg[i];
161
+ offsets[i] = bitmaps[i].template word_offset<Word>();
162
+ assert(offsets[i] >= 0 && offsets[i] < kBitWidth);
163
+ words[i] = bitmaps[i].template words<Word>();
164
+ }
165
+
166
+ auto consume = [&](int64_t consumed_bits) {
167
+ for (size_t i = 0; i < N; ++i) {
168
+ bitmaps[i] = bitmaps[i].Slice(consumed_bits, bit_length - consumed_bits);
169
+ offsets[i] = bitmaps[i].template word_offset<Word>();
170
+ assert(offsets[i] >= 0 && offsets[i] < kBitWidth);
171
+ words[i] = bitmaps[i].template words<Word>();
172
+ }
173
+ bit_length -= consumed_bits;
174
+ };
175
+
176
+ std::array<Word, N> visited_words;
177
+ visited_words.fill(0);
178
+
179
+ if (bit_length <= kBitWidth * 2) {
180
+ // bitmaps fit into one or two words so don't bother with optimization
181
+ while (bit_length > 0) {
182
+ auto leading_bits = std::min(bit_length, kBitWidth);
183
+ SafeLoadWords(bitmaps, 0, leading_bits, false, &visited_words);
184
+ visitor(visited_words);
185
+ consume(leading_bits);
186
+ }
187
+ return 0;
188
+ }
189
+
190
+ int64_t max_offset = *std::max_element(offsets, offsets + N);
191
+ int64_t min_offset = *std::min_element(offsets, offsets + N);
192
+ if (max_offset > 0) {
193
+ // consume leading bits
194
+ auto leading_bits = kBitWidth - min_offset;
195
+ SafeLoadWords(bitmaps, 0, leading_bits, true, &visited_words);
196
+ visitor(visited_words);
197
+ consume(leading_bits);
198
+ }
199
+ assert(*std::min_element(offsets, offsets + N) == 0);
200
+
201
+ int64_t whole_word_count = bit_length / kBitWidth;
202
+ assert(whole_word_count >= 1);
203
+
204
+ if (min_offset == max_offset) {
205
+ // all offsets were identical, all leading bits have been consumed
206
+ assert(
207
+ std::all_of(offsets, offsets + N, [](int64_t offset) { return offset == 0; }));
208
+
209
+ for (int64_t word_i = 0; word_i < whole_word_count; ++word_i) {
210
+ for (size_t i = 0; i < N; ++i) {
211
+ visited_words[i] = words[i][word_i];
212
+ }
213
+ visitor(visited_words);
214
+ }
215
+ consume(whole_word_count * kBitWidth);
216
+ } else {
217
+ // leading bits from potentially incomplete words have been consumed
218
+
219
+ // word_i such that words[i][word_i] and words[i][word_i + 1] lie entirely
220
+ // within the bitmap for all i
221
+ for (int64_t word_i = 0; word_i < whole_word_count - 1; ++word_i) {
222
+ for (size_t i = 0; i < N; ++i) {
223
+ if (offsets[i] == 0) {
224
+ visited_words[i] = words[i][word_i];
225
+ } else {
226
+ auto words0 = bit_util::ToLittleEndian(words[i][word_i]);
227
+ auto words1 = bit_util::ToLittleEndian(words[i][word_i + 1]);
228
+ visited_words[i] = bit_util::FromLittleEndian(
229
+ (words0 >> offsets[i]) | (words1 << (kBitWidth - offsets[i])));
230
+ }
231
+ }
232
+ visitor(visited_words);
233
+ }
234
+ consume((whole_word_count - 1) * kBitWidth);
235
+
236
+ SafeLoadWords(bitmaps, 0, kBitWidth, false, &visited_words);
237
+
238
+ visitor(visited_words);
239
+ consume(kBitWidth);
240
+ }
241
+
242
+ // load remaining bits
243
+ if (bit_length > 0) {
244
+ SafeLoadWords(bitmaps, 0, bit_length, false, &visited_words);
245
+ visitor(visited_words);
246
+ }
247
+
248
+ return min_offset;
249
+ }
250
+
251
+ template <size_t N, size_t M, typename ReaderT, typename WriterT, typename Visitor,
252
+ typename Word = typename std::decay<
253
+ internal::call_traits::argument_type<0, Visitor&&>>::type::value_type>
254
+ static void RunVisitWordsAndWriteLoop(int64_t bit_length,
255
+ std::array<ReaderT, N>& readers,
256
+ std::array<WriterT, M>& writers,
257
+ Visitor&& visitor) {
258
+ constexpr int64_t kBitWidth = sizeof(Word) * 8;
259
+
260
+ std::array<Word, N> visited_words;
261
+ std::array<Word, M> output_words;
262
+
263
+ // every reader will have the same number of words, since they have the same length
264
+ // TODO($JIRA) this will be inefficient in some cases. When there are offsets beyond
265
+ // Word boundary, every Word would have to be created from 2 adjoining Words
266
+ auto n_words = readers[0].words();
267
+ bit_length -= n_words * kBitWidth;
268
+ while (n_words--) {
269
+ // first collect all words to visited_words array
270
+ for (size_t i = 0; i < N; i++) {
271
+ visited_words[i] = readers[i].NextWord();
272
+ }
273
+ visitor(visited_words, &output_words);
274
+ for (size_t i = 0; i < M; i++) {
275
+ writers[i].PutNextWord(output_words[i]);
276
+ }
277
+ }
278
+
279
+ // every reader will have the same number of trailing bytes, for the same reason
280
+ // the trailing portion could be more than one word! (ref: BitmapWordReader constructor)
281
+ // remaining full/partial words to write
282
+
283
+ if (bit_length) {
284
+ // convert the word visitor lambda to a byte_visitor
285
+ auto byte_visitor = [&](const std::array<uint8_t, N>& in,
286
+ std::array<uint8_t, M>* out) {
287
+ std::array<Word, N> in_words;
288
+ std::array<Word, M> out_words;
289
+ std::copy(in.begin(), in.end(), in_words.begin());
290
+ visitor(in_words, &out_words);
291
+ for (size_t i = 0; i < M; i++) {
292
+ out->at(i) = static_cast<uint8_t>(out_words[i]);
293
+ }
294
+ };
295
+
296
+ std::array<uint8_t, N> visited_bytes;
297
+ std::array<uint8_t, M> output_bytes;
298
+ int n_bytes = readers[0].trailing_bytes();
299
+ while (n_bytes--) {
300
+ visited_bytes.fill(0);
301
+ output_bytes.fill(0);
302
+ int valid_bits;
303
+ for (size_t i = 0; i < N; i++) {
304
+ visited_bytes[i] = readers[i].NextTrailingByte(valid_bits);
305
+ }
306
+ byte_visitor(visited_bytes, &output_bytes);
307
+ for (size_t i = 0; i < M; i++) {
308
+ writers[i].PutNextTrailingByte(output_bytes[i], valid_bits);
309
+ }
310
+ }
311
+ }
312
+ }
313
+
314
+ /// \brief Visit words of bits from each input bitmap as array<Word, N> and collects
315
+ /// outputs to an array<Word, M>, to be written into the output bitmaps accordingly.
316
+ ///
317
+ /// All bitmaps must have identical length. The first bit in a visited bitmap
318
+ /// may be offset within the first visited word, but words will otherwise contain
319
+ /// densely packed bits loaded from the bitmap. That offset within the first word is
320
+ /// returned.
321
+ /// Visitor is expected to have the following signature
322
+ /// [](const std::array<Word, N>& in_words, std::array<Word, M>* out_words){...}
323
+ ///
324
+ // NOTE: this function is efficient on 3+ sufficiently large bitmaps.
325
+ // It also has a large prolog / epilog overhead and should be used
326
+ // carefully in other cases.
327
+ // For 2 bitmaps or less, and/or smaller bitmaps, see also VisitTwoBitBlocksVoid
328
+ // and BitmapUInt64Reader.
329
+ template <size_t N, size_t M, typename Visitor,
330
+ typename Word = typename std::decay<
331
+ internal::call_traits::argument_type<0, Visitor&&>>::type::value_type>
332
+ static void VisitWordsAndWrite(const std::array<Bitmap, N>& bitmaps_arg,
333
+ std::array<Bitmap, M>* out_bitmaps_arg,
334
+ Visitor&& visitor) {
335
+ int64_t bit_length = BitLength(bitmaps_arg);
336
+ assert(bit_length == BitLength(*out_bitmaps_arg));
337
+
338
+ // if both input and output bitmaps have no byte offset, then use special template
339
+ if (std::all_of(bitmaps_arg.begin(), bitmaps_arg.end(),
340
+ [](const Bitmap& b) { return b.offset_ % 8 == 0; }) &&
341
+ std::all_of(out_bitmaps_arg->begin(), out_bitmaps_arg->end(),
342
+ [](const Bitmap& b) { return b.offset_ % 8 == 0; })) {
343
+ std::array<BitmapWordReader<Word, /*may_have_byte_offset=*/false>, N> readers;
344
+ for (size_t i = 0; i < N; ++i) {
345
+ const Bitmap& in_bitmap = bitmaps_arg[i];
346
+ readers[i] = BitmapWordReader<Word, /*may_have_byte_offset=*/false>(
347
+ in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_);
348
+ }
349
+
350
+ std::array<BitmapWordWriter<Word, /*may_have_byte_offset=*/false>, M> writers;
351
+ for (size_t i = 0; i < M; ++i) {
352
+ const Bitmap& out_bitmap = out_bitmaps_arg->at(i);
353
+ writers[i] = BitmapWordWriter<Word, /*may_have_byte_offset=*/false>(
354
+ out_bitmap.mutable_data_, out_bitmap.offset_, out_bitmap.length_);
355
+ }
356
+
357
+ RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor);
358
+ } else {
359
+ std::array<BitmapWordReader<Word>, N> readers;
360
+ for (size_t i = 0; i < N; ++i) {
361
+ const Bitmap& in_bitmap = bitmaps_arg[i];
362
+ readers[i] =
363
+ BitmapWordReader<Word>(in_bitmap.data_, in_bitmap.offset_, in_bitmap.length_);
364
+ }
365
+
366
+ std::array<BitmapWordWriter<Word>, M> writers;
367
+ for (size_t i = 0; i < M; ++i) {
368
+ const Bitmap& out_bitmap = out_bitmaps_arg->at(i);
369
+ writers[i] = BitmapWordWriter<Word>(out_bitmap.mutable_data_, out_bitmap.offset_,
370
+ out_bitmap.length_);
371
+ }
372
+
373
+ RunVisitWordsAndWriteLoop(bit_length, readers, writers, visitor);
374
+ }
375
+ }
376
+
377
+ const uint8_t* data() const { return data_; }
378
+ uint8_t* mutable_data() { return mutable_data_; }
379
+
380
+ /// offset of first bit relative to data()
381
+ int64_t offset() const { return offset_; }
382
+
383
+ /// number of bits in this Bitmap
384
+ int64_t length() const { return length_; }
385
+
386
+ /// span of all bytes which contain any bit in this Bitmap
387
+ util::span<const uint8_t> bytes() const {
388
+ auto byte_offset = offset_ / 8;
389
+ auto byte_count = bit_util::CeilDiv(offset_ + length_, 8) - byte_offset;
390
+ return {data_ + byte_offset, static_cast<size_t>(byte_count)};
391
+ }
392
+
393
+ private:
394
+ /// span of all Words which contain any bit in this Bitmap
395
+ ///
396
+ /// For example, given Word=uint16_t and a bitmap spanning bits [20, 36)
397
+ /// words() would span bits [16, 48).
398
+ ///
399
+ /// 0 16 32 48 64
400
+ /// |-------|-------|------|------| (buffer)
401
+ /// [ ] (bitmap)
402
+ /// |-------|------| (returned words)
403
+ ///
404
+ /// \warning The words may contain bytes which lie outside the buffer or are
405
+ /// uninitialized.
406
+ template <typename Word>
407
+ util::span<const Word> words() const {
408
+ auto bytes_addr = reinterpret_cast<intptr_t>(bytes().data());
409
+ auto words_addr = bytes_addr - bytes_addr % sizeof(Word);
410
+ auto word_byte_count =
411
+ bit_util::RoundUpToPowerOf2(static_cast<int64_t>(bytes_addr + bytes().size()),
412
+ static_cast<int64_t>(sizeof(Word))) -
413
+ words_addr;
414
+ return {reinterpret_cast<const Word*>(words_addr),
415
+ static_cast<size_t>(word_byte_count / sizeof(Word))};
416
+ }
417
+
418
+ /// offset of first bit relative to words<Word>().data()
419
+ template <typename Word>
420
+ int64_t word_offset() const {
421
+ return offset_ + 8 * (reinterpret_cast<intptr_t>(data_) -
422
+ reinterpret_cast<intptr_t>(words<Word>().data()));
423
+ }
424
+
425
+ /// load words from bitmaps bitwise
426
+ template <size_t N, typename Word>
427
+ static void SafeLoadWords(const Bitmap (&bitmaps)[N], int64_t offset,
428
+ int64_t out_length, bool set_trailing_bits,
429
+ std::array<Word, N>* out) {
430
+ out->fill(0);
431
+
432
+ int64_t out_offset = set_trailing_bits ? sizeof(Word) * 8 - out_length : 0;
433
+
434
+ Bitmap slices[N], out_bitmaps[N];
435
+ for (size_t i = 0; i < N; ++i) {
436
+ slices[i] = bitmaps[i].Slice(offset, out_length);
437
+ out_bitmaps[i] = Bitmap(&out->at(i), out_offset, out_length);
438
+ }
439
+
440
+ int64_t bit_i = 0;
441
+ Bitmap::VisitBits(slices, [&](std::bitset<N> bits) {
442
+ for (size_t i = 0; i < N; ++i) {
443
+ out_bitmaps[i].SetBitTo(bit_i, bits[i]);
444
+ }
445
+ ++bit_i;
446
+ });
447
+ }
448
+
449
+ /// assert bitmaps have identical length and return that length
450
+ static int64_t BitLength(const Bitmap* bitmaps, size_t N);
451
+
452
+ template <size_t N>
453
+ static int64_t BitLength(const std::array<Bitmap, N>& bitmaps) {
454
+ for (size_t i = 1; i < N; ++i) {
455
+ assert(bitmaps[i].length() == bitmaps[0].length());
456
+ }
457
+ return bitmaps[0].length();
458
+ }
459
+
460
+ const uint8_t* data_ = NULLPTR;
461
+ uint8_t* mutable_data_ = NULLPTR;
462
+ int64_t offset_ = 0, length_ = 0;
463
+ };
464
+
465
+ } // namespace internal
466
+ } // namespace arrow
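
An illustrative sketch of the arrow::internal::Bitmap wrapper added above (not part of the commit). It uses the std::bitset-based VisitBits visitor; linking against the bundled Arrow library is assumed because some Bitmap members are defined out of line:

#include <bitset>
#include <cstdint>
#include <cstdio>

#include "arrow/util/bitmap.h"

int main() {
  const uint8_t left = 0xB2;   // 1011 0010
  const uint8_t right = 0xD6;  // 1101 0110
  arrow::internal::Bitmap bitmaps[2] = {{&left, /*offset=*/0, /*length=*/8},
                                        {&right, /*offset=*/0, /*length=*/8}};
  int64_t both_set = 0;
  // The visitor is called once per bit position, with one bitset entry per bitmap.
  arrow::internal::Bitmap::VisitBits(bitmaps, [&](std::bitset<2> bits) {
    if (bits[0] && bits[1]) ++both_set;
  });
  std::printf("positions set in both bitmaps: %d\n", static_cast<int>(both_set));
  return 0;
}
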
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_builders.h ADDED
@@ -0,0 +1,43 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <vector>
23
+
24
+ #include "arrow/result.h"
25
+ #include "arrow/type_fwd.h"
26
+ #include "arrow/util/visibility.h"
27
+
28
+ namespace arrow {
29
+ namespace internal {
30
+
31
+ /// \brief Generate a Bitmap with all positions set to `value` except for the one found
32
+ /// at `straggler_pos`.
33
+ ARROW_EXPORT
34
+ Result<std::shared_ptr<Buffer>> BitmapAllButOne(MemoryPool* pool, int64_t length,
35
+ int64_t straggler_pos, bool value = true);
36
+
37
+ /// \brief Convert vector of bytes to bitmap buffer
38
+ ARROW_EXPORT
39
+ Result<std::shared_ptr<Buffer>> BytesToBits(const std::vector<uint8_t>&,
40
+ MemoryPool* pool = default_memory_pool());
41
+
42
+ } // namespace internal
43
+ } // namespace arrow
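
A short sketch of BytesToBits from the header above (not part of the commit). It assumes linking against the bundled Arrow library, that ARROW_ASSIGN_OR_RAISE from arrow/result.h is available, and that a nonzero input byte maps to a set bit:

#include <cstdint>
#include <memory>
#include <vector>

#include "arrow/buffer.h"
#include "arrow/result.h"
#include "arrow/util/bit_util.h"
#include "arrow/util/bitmap_builders.h"

arrow::Status BuildValidityBitmap() {
  std::vector<uint8_t> bytes = {1, 0, 1, 1, 0};  // one byte per logical value
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> bitmap,
                        arrow::internal::BytesToBits(bytes));
  // Bit i of the packed result corresponds to bytes[i].
  const bool third = arrow::bit_util::GetBit(bitmap->data(), 2);
  return third ? arrow::Status::OK() : arrow::Status::Invalid("unexpected bit value");
}
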
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_generate.h ADDED
@@ -0,0 +1,112 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+
23
+ #include "arrow/buffer.h"
24
+ #include "arrow/memory_pool.h"
25
+ #include "arrow/result.h"
26
+ #include "arrow/util/bit_util.h"
27
+ #include "arrow/util/visibility.h"
28
+
29
+ namespace arrow {
30
+ namespace internal {
31
+
32
+ // A std::generate() like function to write sequential bits into a bitmap area.
33
+ // Bits preceding the bitmap area are preserved, bits following the bitmap
34
+ // area may be clobbered.
35
+
36
+ template <class Generator>
37
+ void GenerateBits(uint8_t* bitmap, int64_t start_offset, int64_t length, Generator&& g) {
38
+ if (length == 0) {
39
+ return;
40
+ }
41
+ uint8_t* cur = bitmap + start_offset / 8;
42
+ uint8_t bit_mask = bit_util::kBitmask[start_offset % 8];
43
+ uint8_t current_byte = *cur & bit_util::kPrecedingBitmask[start_offset % 8];
44
+
45
+ for (int64_t index = 0; index < length; ++index) {
46
+ const bool bit = g();
47
+ current_byte = bit ? (current_byte | bit_mask) : current_byte;
48
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
49
+ if (bit_mask == 0) {
50
+ bit_mask = 1;
51
+ *cur++ = current_byte;
52
+ current_byte = 0;
53
+ }
54
+ }
55
+ if (bit_mask != 1) {
56
+ *cur++ = current_byte;
57
+ }
58
+ }
59
+
60
+ // Like GenerateBits(), but unrolls its main loop for higher performance.
61
+
62
+ template <class Generator>
63
+ void GenerateBitsUnrolled(uint8_t* bitmap, int64_t start_offset, int64_t length,
64
+ Generator&& g) {
65
+ static_assert(std::is_same<decltype(std::declval<Generator>()()), bool>::value,
66
+ "Functor passed to GenerateBitsUnrolled must return bool");
67
+
68
+ if (length == 0) {
69
+ return;
70
+ }
71
+ uint8_t current_byte;
72
+ uint8_t* cur = bitmap + start_offset / 8;
73
+ const uint64_t start_bit_offset = start_offset % 8;
74
+ uint8_t bit_mask = bit_util::kBitmask[start_bit_offset];
75
+ int64_t remaining = length;
76
+
77
+ if (bit_mask != 0x01) {
78
+ current_byte = *cur & bit_util::kPrecedingBitmask[start_bit_offset];
79
+ while (bit_mask != 0 && remaining > 0) {
80
+ current_byte |= g() * bit_mask;
81
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
82
+ --remaining;
83
+ }
84
+ *cur++ = current_byte;
85
+ }
86
+
87
+ int64_t remaining_bytes = remaining / 8;
88
+ uint8_t out_results[8];
89
+ while (remaining_bytes-- > 0) {
90
+ for (int i = 0; i < 8; ++i) {
91
+ out_results[i] = g();
92
+ }
93
+ *cur++ = static_cast<uint8_t>(out_results[0] | out_results[1] << 1 |
94
+ out_results[2] << 2 | out_results[3] << 3 |
95
+ out_results[4] << 4 | out_results[5] << 5 |
96
+ out_results[6] << 6 | out_results[7] << 7);
97
+ }
98
+
99
+ int64_t remaining_bits = remaining % 8;
100
+ if (remaining_bits) {
101
+ current_byte = 0;
102
+ bit_mask = 0x01;
103
+ while (remaining_bits-- > 0) {
104
+ current_byte |= g() * bit_mask;
105
+ bit_mask = static_cast<uint8_t>(bit_mask << 1);
106
+ }
107
+ *cur++ = current_byte;
108
+ }
109
+ }
110
+
111
+ } // namespace internal
112
+ } // namespace arrow
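
A small sketch of GenerateBitsUnrolled from the header above (not part of the commit). The generator must return bool, matching the static_assert; bits before start_offset are preserved and bits after the written range may be clobbered, as documented:

#include <cstdint>

#include "arrow/util/bitmap_generate.h"

int main() {
  uint8_t bitmap[3] = {0xFF, 0x00, 0x00};  // bits 0..2 of byte 0 will be preserved
  int64_t i = 0;
  // Write 12 alternating bits starting at bit offset 3.
  arrow::internal::GenerateBitsUnrolled(bitmap, /*start_offset=*/3, /*length=*/12,
                                        [&]() -> bool { return (i++ % 2) == 0; });
  return 0;
}
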
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_ops.h ADDED
@@ -0,0 +1,244 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+
23
+ #include "arrow/result.h"
24
+ #include "arrow/util/visibility.h"
25
+
26
+ namespace arrow {
27
+
28
+ class Buffer;
29
+ class MemoryPool;
30
+
31
+ namespace internal {
32
+
33
+ // ----------------------------------------------------------------------
34
+ // Bitmap utilities
35
+
36
+ /// Copy a bit range of an existing bitmap
37
+ ///
38
+ /// \param[in] pool memory pool to allocate memory from
39
+ /// \param[in] bitmap source data
40
+ /// \param[in] offset bit offset into the source data
41
+ /// \param[in] length number of bits to copy
42
+ ///
43
+ /// \return a new buffer containing the copied bits
44
+ ARROW_EXPORT
45
+ Result<std::shared_ptr<Buffer>> CopyBitmap(MemoryPool* pool, const uint8_t* bitmap,
46
+ int64_t offset, int64_t length);
47
+
48
+ /// Copy a bit range of an existing bitmap into an existing bitmap
49
+ ///
50
+ /// \param[in] bitmap source data
51
+ /// \param[in] offset bit offset into the source data
52
+ /// \param[in] length number of bits to copy
53
+ /// \param[in] dest_offset bit offset into the destination
54
+ /// \param[out] dest the destination buffer, must have at least space for
55
+ /// (offset + length) bits
56
+ ARROW_EXPORT
57
+ void CopyBitmap(const uint8_t* bitmap, int64_t offset, int64_t length, uint8_t* dest,
58
+ int64_t dest_offset);
59
+
60
+ /// Invert a bit range of an existing bitmap into an existing bitmap
61
+ ///
62
+ /// \param[in] bitmap source data
63
+ /// \param[in] offset bit offset into the source data
64
+ /// \param[in] length number of bits to copy
65
+ /// \param[in] dest_offset bit offset into the destination
66
+ /// \param[out] dest the destination buffer, must have at least space for
67
+ /// (offset + length) bits
68
+ ARROW_EXPORT
69
+ void InvertBitmap(const uint8_t* bitmap, int64_t offset, int64_t length, uint8_t* dest,
70
+ int64_t dest_offset);
71
+
72
+ /// Invert a bit range of an existing bitmap
73
+ ///
74
+ /// \param[in] pool memory pool to allocate memory from
75
+ /// \param[in] bitmap source data
76
+ /// \param[in] offset bit offset into the source data
77
+ /// \param[in] length number of bits to copy
78
+ ///
79
+ /// \return a new buffer containing the inverted bits
80
+ ARROW_EXPORT
81
+ Result<std::shared_ptr<Buffer>> InvertBitmap(MemoryPool* pool, const uint8_t* bitmap,
82
+ int64_t offset, int64_t length);
83
+
84
+ /// Reverse a bit range of an existing bitmap into an existing bitmap
85
+ ///
86
+ /// \param[in] bitmap source data
87
+ /// \param[in] offset bit offset into the source data
88
+ /// \param[in] length number of bits to reverse
89
+ /// \param[in] dest_offset bit offset into the destination
90
+ /// \param[out] dest the destination buffer, must have at least space for
91
+ /// (offset + length) bits
92
+ ARROW_EXPORT
93
+ void ReverseBitmap(const uint8_t* bitmap, int64_t offset, int64_t length, uint8_t* dest,
94
+ int64_t dest_offset);
95
+
96
+ /// Reverse a bit range of an existing bitmap
97
+ ///
98
+ /// \param[in] pool memory pool to allocate memory from
99
+ /// \param[in] bitmap source data
100
+ /// \param[in] offset bit offset into the source data
101
+ /// \param[in] length number of bits to reverse
102
+ ///
103
+ /// \return Status message
104
+ ARROW_EXPORT
105
+ Result<std::shared_ptr<Buffer>> ReverseBitmap(MemoryPool* pool, const uint8_t* bitmap,
106
+ int64_t offset, int64_t length);
107
+
108
+ /// Compute the number of 1's in the given data array
109
+ ///
110
+ /// \param[in] data a packed LSB-ordered bitmap as a byte array
111
+ /// \param[in] bit_offset a bitwise offset into the bitmap
112
+ /// \param[in] length the number of bits to inspect in the bitmap relative to
113
+ /// the offset
114
+ ///
115
+ /// \return The number of set (1) bits in the range
116
+ ARROW_EXPORT
117
+ int64_t CountSetBits(const uint8_t* data, int64_t bit_offset, int64_t length);
118
+
119
+ /// Compute the number of 1's in the result of an "and" (&) of two bitmaps
120
+ ///
121
+ /// \param[in] left_bitmap a packed LSB-ordered bitmap as a byte array
122
+ /// \param[in] left_offset a bitwise offset into the left bitmap
123
+ /// \param[in] right_bitmap a packed LSB-ordered bitmap as a byte array
124
+ /// \param[in] right_offset a bitwise offset into the right bitmap
125
+ /// \param[in] length the length of the bitmaps (must be the same)
126
+ ///
127
+ /// \return The number of set (1) bits in the "and" of the two bitmaps
128
+ ARROW_EXPORT
129
+ int64_t CountAndSetBits(const uint8_t* left_bitmap, int64_t left_offset,
130
+ const uint8_t* right_bitmap, int64_t right_offset,
131
+ int64_t length);
132
+
133
+ ARROW_EXPORT
134
+ bool BitmapEquals(const uint8_t* left, int64_t left_offset, const uint8_t* right,
135
+ int64_t right_offset, int64_t length);
136
+
137
+ // Same as BitmapEquals, but considers a NULL bitmap pointer the same as an
138
+ // all-ones bitmap.
139
+ ARROW_EXPORT
140
+ bool OptionalBitmapEquals(const uint8_t* left, int64_t left_offset, const uint8_t* right,
141
+ int64_t right_offset, int64_t length);
142
+
143
+ ARROW_EXPORT
144
+ bool OptionalBitmapEquals(const std::shared_ptr<Buffer>& left, int64_t left_offset,
145
+ const std::shared_ptr<Buffer>& right, int64_t right_offset,
146
+ int64_t length);
147
+
148
+ /// \brief Do a "bitmap and" on right and left buffers starting at
149
+ /// their respective bit-offsets for the given bit-length and put
150
+ /// the results in out_buffer starting at the given bit-offset.
151
+ ///
152
+ /// out_buffer will be allocated and initialized to zeros using pool before
153
+ /// the operation.
154
+ ARROW_EXPORT
155
+ Result<std::shared_ptr<Buffer>> BitmapAnd(MemoryPool* pool, const uint8_t* left,
156
+ int64_t left_offset, const uint8_t* right,
157
+ int64_t right_offset, int64_t length,
158
+ int64_t out_offset);
159
+
160
+ /// \brief Do a "bitmap and" on right and left buffers starting at
161
+ /// their respective bit-offsets for the given bit-length and put
162
+ /// the results in out starting at the given bit-offset.
163
+ ARROW_EXPORT
164
+ void BitmapAnd(const uint8_t* left, int64_t left_offset, const uint8_t* right,
165
+ int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out);
166
+
167
+ /// \brief Do a "bitmap or" for the given bit length on right and left buffers
168
+ /// starting at their respective bit-offsets and put the results in out_buffer
169
+ /// starting at the given bit-offset.
170
+ ///
171
+ /// out_buffer will be allocated and initialized to zeros using pool before
172
+ /// the operation.
173
+ ARROW_EXPORT
174
+ Result<std::shared_ptr<Buffer>> BitmapOr(MemoryPool* pool, const uint8_t* left,
175
+ int64_t left_offset, const uint8_t* right,
176
+ int64_t right_offset, int64_t length,
177
+ int64_t out_offset);
178
+
179
+ /// \brief Do a "bitmap or" for the given bit length on right and left buffers
180
+ /// starting at their respective bit-offsets and put the results in out
181
+ /// starting at the given bit-offset.
182
+ ARROW_EXPORT
183
+ void BitmapOr(const uint8_t* left, int64_t left_offset, const uint8_t* right,
184
+ int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out);
185
+
186
+ /// \brief Do a "bitmap xor" for the given bit-length on right and left
187
+ /// buffers starting at their respective bit-offsets and put the results in
188
+ /// out_buffer starting at the given bit offset.
189
+ ///
190
+ /// out_buffer will be allocated and initialized to zeros using pool before
191
+ /// the operation.
192
+ ARROW_EXPORT
193
+ Result<std::shared_ptr<Buffer>> BitmapXor(MemoryPool* pool, const uint8_t* left,
194
+ int64_t left_offset, const uint8_t* right,
195
+ int64_t right_offset, int64_t length,
196
+ int64_t out_offset);
197
+
198
+ /// \brief Do a "bitmap xor" for the given bit-length on right and left
199
+ /// buffers starting at their respective bit-offsets and put the results in
200
+ /// out starting at the given bit offset.
201
+ ARROW_EXPORT
202
+ void BitmapXor(const uint8_t* left, int64_t left_offset, const uint8_t* right,
203
+ int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out);
204
+
205
+ /// \brief Do a "bitmap and not" on right and left buffers starting at
206
+ /// their respective bit-offsets for the given bit-length and put
207
+ /// the results in out_buffer starting at the given bit-offset.
208
+ ///
209
+ /// out_buffer will be allocated and initialized to zeros using pool before
210
+ /// the operation.
211
+ ARROW_EXPORT
212
+ Result<std::shared_ptr<Buffer>> BitmapAndNot(MemoryPool* pool, const uint8_t* left,
213
+ int64_t left_offset, const uint8_t* right,
214
+ int64_t right_offset, int64_t length,
215
+ int64_t out_offset);
216
+
217
+ /// \brief Do a "bitmap and not" on right and left buffers starting at
218
+ /// their respective bit-offsets for the given bit-length and put
219
+ /// the results in out starting at the given bit-offset.
220
+ ARROW_EXPORT
221
+ void BitmapAndNot(const uint8_t* left, int64_t left_offset, const uint8_t* right,
222
+ int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out);
223
+
224
+ /// \brief Do a "bitmap or not" on right and left buffers starting at
225
+ /// their respective bit-offsets for the given bit-length and put
226
+ /// the results in out_buffer starting at the given bit-offset.
227
+ ///
228
+ /// out_buffer will be allocated and initialized to zeros using pool before
229
+ /// the operation.
230
+ ARROW_EXPORT
231
+ Result<std::shared_ptr<Buffer>> BitmapOrNot(MemoryPool* pool, const uint8_t* left,
232
+ int64_t left_offset, const uint8_t* right,
233
+ int64_t right_offset, int64_t length,
234
+ int64_t out_offset);
235
+
236
+ /// \brief Do a "bitmap or not" on right and left buffers starting at
237
+ /// their respective bit-offsets for the given bit-length and put
238
+ /// the results in out starting at the given bit-offset.
239
+ ARROW_EXPORT
240
+ void BitmapOrNot(const uint8_t* left, int64_t left_offset, const uint8_t* right,
241
+ int64_t right_offset, int64_t length, int64_t out_offset, uint8_t* out);
242
+
243
+ } // namespace internal
244
+ } // namespace arrow
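
A short sketch exercising two of the bitmap operations declared above (not part of the commit; linking against the bundled Arrow library is assumed):

#include <cstdint>
#include <cstdio>

#include "arrow/util/bitmap_ops.h"

int main() {
  const uint8_t left = 0xB2;
  const uint8_t right = 0xD6;
  uint8_t out = 0;
  // AND eight bits from each input, writing the result at bit offset 0 of `out`.
  arrow::internal::BitmapAnd(&left, /*left_offset=*/0, &right, /*right_offset=*/0,
                             /*length=*/8, /*out_offset=*/0, &out);
  std::printf("popcount(left & right) = %lld\n",
              static_cast<long long>(arrow::internal::CountSetBits(&out, 0, 8)));
  return 0;
}
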
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bitmap_writer.h ADDED
@@ -0,0 +1,286 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <cstring>
22
+
23
+ #include "arrow/util/bit_util.h"
24
+ #include "arrow/util/endian.h"
25
+ #include "arrow/util/macros.h"
26
+
27
+ namespace arrow {
28
+ namespace internal {
29
+
30
+ class BitmapWriter {
31
+ // A sequential bitwise writer that preserves surrounding bit values.
32
+
33
+ public:
34
+ BitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
35
+ : bitmap_(bitmap), position_(0), length_(length) {
36
+ byte_offset_ = start_offset / 8;
37
+ bit_mask_ = bit_util::kBitmask[start_offset % 8];
38
+ if (length > 0) {
39
+ current_byte_ = bitmap[byte_offset_];
40
+ } else {
41
+ current_byte_ = 0;
42
+ }
43
+ }
44
+
45
+ void Set() { current_byte_ |= bit_mask_; }
46
+
47
+ void Clear() { current_byte_ &= bit_mask_ ^ 0xFF; }
48
+
49
+ void Next() {
50
+ bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
51
+ ++position_;
52
+ if (bit_mask_ == 0) {
53
+ // Finished this byte, need advancing
54
+ bit_mask_ = 0x01;
55
+ bitmap_[byte_offset_++] = current_byte_;
56
+ if (ARROW_PREDICT_TRUE(position_ < length_)) {
57
+ current_byte_ = bitmap_[byte_offset_];
58
+ }
59
+ }
60
+ }
61
+
62
+ void Finish() {
63
+ // Store current byte if we didn't went past bitmap storage
64
+ if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) {
65
+ bitmap_[byte_offset_] = current_byte_;
66
+ }
67
+ }
68
+
69
+ int64_t position() const { return position_; }
70
+
71
+ private:
72
+ uint8_t* bitmap_;
73
+ int64_t position_;
74
+ int64_t length_;
75
+
76
+ uint8_t current_byte_;
77
+ uint8_t bit_mask_;
78
+ int64_t byte_offset_;
79
+ };
80
+
81
+ class FirstTimeBitmapWriter {
82
+ // Like BitmapWriter, but any bit values *following* the bits written
83
+ // might be clobbered. It is hence faster than BitmapWriter, and can
84
+ // also avoid false positives with Valgrind.
85
+
86
+ public:
87
+ FirstTimeBitmapWriter(uint8_t* bitmap, int64_t start_offset, int64_t length)
88
+ : bitmap_(bitmap), position_(0), length_(length) {
89
+ current_byte_ = 0;
90
+ byte_offset_ = start_offset / 8;
91
+ bit_mask_ = bit_util::kBitmask[start_offset % 8];
92
+ if (length > 0) {
93
+ current_byte_ =
94
+ bitmap[byte_offset_] & bit_util::kPrecedingBitmask[start_offset % 8];
95
+ } else {
96
+ current_byte_ = 0;
97
+ }
98
+ }
99
+
100
+ /// Appends number_of_bits from word to valid_bits and valid_bits_offset.
101
+ ///
102
+ /// \param[in] word The LSB bitmap to append. Any bits past number_of_bits are assumed
103
+ /// to be unset (i.e. 0).
104
+ /// \param[in] number_of_bits The number of bits to append from word.
105
+ void AppendWord(uint64_t word, int64_t number_of_bits) {
106
+ if (ARROW_PREDICT_FALSE(number_of_bits == 0)) {
107
+ return;
108
+ }
109
+
110
+ // Location that the first byte needs to be written to.
111
+ uint8_t* append_position = bitmap_ + byte_offset_;
112
+
113
+ // Update state variables except for current_byte_ here.
114
+ position_ += number_of_bits;
115
+ int64_t bit_offset = bit_util::CountTrailingZeros(static_cast<uint32_t>(bit_mask_));
116
+ bit_mask_ = bit_util::kBitmask[(bit_offset + number_of_bits) % 8];
117
+ byte_offset_ += (bit_offset + number_of_bits) / 8;
118
+
119
+ if (bit_offset != 0) {
120
+ // We are in the middle of the byte. This code updates the byte and shifts
121
+ // bits appropriately within word so it can be memcpy'd below.
122
+ int64_t bits_to_carry = 8 - bit_offset;
123
+ // Carry over bits from word to current_byte_. We assume any extra bits in word
124
+ // unset so no additional accounting is needed for when number_of_bits <
125
+ // bits_to_carry.
126
+ current_byte_ |= (word & bit_util::kPrecedingBitmask[bits_to_carry]) << bit_offset;
127
+ // Check if everything is transferred into current_byte_.
128
+ if (ARROW_PREDICT_FALSE(number_of_bits < bits_to_carry)) {
129
+ return;
130
+ }
131
+ *append_position = current_byte_;
132
+ append_position++;
133
+ // Move the carry bits off of word.
134
+ word = word >> bits_to_carry;
135
+ number_of_bits -= bits_to_carry;
136
+ }
137
+ word = bit_util::ToLittleEndian(word);
138
+ int64_t bytes_for_word = ::arrow::bit_util::BytesForBits(number_of_bits);
139
+ std::memcpy(append_position, &word, bytes_for_word);
140
+ // At this point, the previous current_byte_ has been written to bitmap_.
141
+ // The new current_byte_ is either the last relevant byte in 'word'
142
+ // or cleared if the new position is byte aligned (i.e. a fresh byte).
143
+ if (bit_mask_ == 0x1) {
144
+ current_byte_ = 0;
145
+ } else {
146
+ current_byte_ = *(append_position + bytes_for_word - 1);
147
+ }
148
+ }
149
+
150
+ void Set() { current_byte_ |= bit_mask_; }
151
+
152
+ void Clear() {}
153
+
154
+ void Next() {
155
+ bit_mask_ = static_cast<uint8_t>(bit_mask_ << 1);
156
+ ++position_;
157
+ if (bit_mask_ == 0) {
158
+ // Finished this byte, need advancing
159
+ bit_mask_ = 0x01;
160
+ bitmap_[byte_offset_++] = current_byte_;
161
+ current_byte_ = 0;
162
+ }
163
+ }
164
+
165
+ void Finish() {
166
+ // Store current byte if we didn't went go bitmap storage
167
+ if (length_ > 0 && (bit_mask_ != 0x01 || position_ < length_)) {
168
+ bitmap_[byte_offset_] = current_byte_;
169
+ }
170
+ }
171
+
172
+ int64_t position() const { return position_; }
173
+
174
+ private:
175
+ uint8_t* bitmap_;
176
+ int64_t position_;
177
+ int64_t length_;
178
+
179
+ uint8_t current_byte_;
180
+ uint8_t bit_mask_;
181
+ int64_t byte_offset_;
182
+ };
183
+
184
+ template <typename Word, bool may_have_byte_offset = true>
185
+ class BitmapWordWriter {
186
+ public:
187
+ BitmapWordWriter() = default;
188
+ BitmapWordWriter(uint8_t* bitmap, int64_t offset, int64_t length)
189
+ : offset_(static_cast<int64_t>(may_have_byte_offset) * (offset % 8)),
190
+ bitmap_(bitmap + offset / 8),
191
+ bitmap_end_(bitmap_ + bit_util::BytesForBits(offset_ + length)),
192
+ mask_((1U << offset_) - 1) {
193
+ if (offset_) {
194
+ if (length >= static_cast<int>(sizeof(Word) * 8)) {
195
+ current_data.word_ = load<Word>(bitmap_);
196
+ } else if (length > 0) {
197
+ current_data.epi.byte_ = load<uint8_t>(bitmap_);
198
+ }
199
+ }
200
+ }
201
+
202
+ void PutNextWord(Word word) {
203
+ if (may_have_byte_offset && offset_) {
204
+ // split one word into two adjacent words, don't touch unused bits
205
+ // |<------ word ----->|
206
+ // +-----+-------------+
207
+ // | A | B |
208
+ // +-----+-------------+
209
+ // | |
210
+ // v v offset
211
+ // +-------------+-----+-------------+-----+
212
+ // | --- | A | B | --- |
213
+ // +-------------+-----+-------------+-----+
214
+ // |<------ next ----->|<---- current ---->|
215
+ word = (word << offset_) | (word >> (sizeof(Word) * 8 - offset_));
216
+ Word next_word = load<Word>(bitmap_ + sizeof(Word));
217
+ current_data.word_ = (current_data.word_ & mask_) | (word & ~mask_);
218
+ next_word = (next_word & ~mask_) | (word & mask_);
219
+ store<Word>(bitmap_, current_data.word_);
220
+ store<Word>(bitmap_ + sizeof(Word), next_word);
221
+ current_data.word_ = next_word;
222
+ } else {
223
+ store<Word>(bitmap_, word);
224
+ }
225
+ bitmap_ += sizeof(Word);
226
+ }
227
+
228
+ void PutNextTrailingByte(uint8_t byte, int valid_bits) {
229
+ if (valid_bits == 8) {
230
+ if (may_have_byte_offset && offset_) {
231
+ byte = (byte << offset_) | (byte >> (8 - offset_));
232
+ uint8_t next_byte = load<uint8_t>(bitmap_ + 1);
233
+ current_data.epi.byte_ = (current_data.epi.byte_ & mask_) | (byte & ~mask_);
234
+ next_byte = (next_byte & ~mask_) | (byte & mask_);
235
+ store<uint8_t>(bitmap_, current_data.epi.byte_);
236
+ store<uint8_t>(bitmap_ + 1, next_byte);
237
+ current_data.epi.byte_ = next_byte;
238
+ } else {
239
+ store<uint8_t>(bitmap_, byte);
240
+ }
241
+ ++bitmap_;
242
+ } else {
243
+ assert(valid_bits > 0);
244
+ assert(valid_bits < 8);
245
+ assert(bitmap_ + bit_util::BytesForBits(offset_ + valid_bits) <= bitmap_end_);
246
+ internal::BitmapWriter writer(bitmap_, offset_, valid_bits);
247
+ for (int i = 0; i < valid_bits; ++i) {
248
+ (byte & 0x01) ? writer.Set() : writer.Clear();
249
+ writer.Next();
250
+ byte >>= 1;
251
+ }
252
+ writer.Finish();
253
+ }
254
+ }
255
+
256
+ private:
257
+ int64_t offset_;
258
+ uint8_t* bitmap_;
259
+
260
+ const uint8_t* bitmap_end_;
261
+ uint64_t mask_;
262
+ union {
263
+ Word word_;
264
+ struct {
265
+ #if ARROW_LITTLE_ENDIAN == 0
266
+ uint8_t padding_bytes_[sizeof(Word) - 1];
267
+ #endif
268
+ uint8_t byte_;
269
+ } epi;
270
+ } current_data;
271
+
272
+ template <typename DType>
273
+ DType load(const uint8_t* bitmap) {
274
+ assert(bitmap + sizeof(DType) <= bitmap_end_);
275
+ return bit_util::ToLittleEndian(util::SafeLoadAs<DType>(bitmap));
276
+ }
277
+
278
+ template <typename DType>
279
+ void store(uint8_t* bitmap, DType data) {
280
+ assert(bitmap + sizeof(DType) <= bitmap_end_);
281
+ util::SafeStore(bitmap, bit_util::FromLittleEndian(data));
282
+ }
283
+ };
284
+
285
+ } // namespace internal
286
+ } // namespace arrow
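
A minimal sketch of FirstTimeBitmapWriter from the header above (not part of the commit). All of the members used here are defined inline, so only the header is needed:

#include <cstdint>

#include "arrow/util/bitmap_writer.h"

int main() {
  uint8_t bitmap[4] = {0, 0, 0, 0};
  arrow::internal::FirstTimeBitmapWriter writer(bitmap, /*start_offset=*/0,
                                                /*length=*/20);
  for (int i = 0; i < 20; ++i) {
    if (i % 3 == 0) {
      writer.Set();    // mark bit i
    } else {
      writer.Clear();  // no-op: bits default to unset for FirstTimeBitmapWriter
    }
    writer.Next();
  }
  writer.Finish();     // flush the partially filled last byte
  return 0;
}
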
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking.h ADDED
@@ -0,0 +1,34 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/endian.h"
21
+ #include "arrow/util/visibility.h"
22
+
23
+ #include <stdint.h>
24
+
25
+ namespace arrow {
26
+ namespace internal {
27
+
28
+ ARROW_EXPORT
29
+ int unpack32(const uint32_t* in, uint32_t* out, int batch_size, int num_bits);
30
+ ARROW_EXPORT
31
+ int unpack64(const uint8_t* in, uint64_t* out, int batch_size, int num_bits);
32
+
33
+ } // namespace internal
34
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/bpacking64_default.h ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/checked_cast.h ADDED
@@ -0,0 +1,61 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <type_traits>
22
+ #include <utility>
23
+
24
+ namespace arrow {
25
+ namespace internal {
26
+
27
+ template <typename OutputType, typename InputType>
28
+ inline OutputType checked_cast(InputType&& value) {
29
+ static_assert(std::is_class<typename std::remove_pointer<
30
+ typename std::remove_reference<InputType>::type>::type>::value,
31
+ "checked_cast input type must be a class");
32
+ static_assert(std::is_class<typename std::remove_pointer<
33
+ typename std::remove_reference<OutputType>::type>::type>::value,
34
+ "checked_cast output type must be a class");
35
+ #ifdef NDEBUG
36
+ return static_cast<OutputType>(value);
37
+ #else
38
+ return dynamic_cast<OutputType>(value);
39
+ #endif
40
+ }
41
+
42
+ template <class T, class U>
43
+ std::shared_ptr<T> checked_pointer_cast(std::shared_ptr<U> r) noexcept {
44
+ #ifdef NDEBUG
45
+ return std::static_pointer_cast<T>(std::move(r));
46
+ #else
47
+ return std::dynamic_pointer_cast<T>(std::move(r));
48
+ #endif
49
+ }
50
+
51
+ template <class T, class U>
52
+ std::unique_ptr<T> checked_pointer_cast(std::unique_ptr<U> r) noexcept {
53
+ #ifdef NDEBUG
54
+ return std::unique_ptr<T>(static_cast<T*>(r.release()));
55
+ #else
56
+ return std::unique_ptr<T>(dynamic_cast<T*>(r.release()));
57
+ #endif
58
+ }
59
+
60
+ } // namespace internal
61
+ } // namespace arrow
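
A small, self-contained sketch of checked_cast and checked_pointer_cast from the header above (not part of the commit). In debug builds these compile to dynamic_cast; with NDEBUG they become unchecked static casts:

#include <memory>

#include "arrow/util/checked_cast.h"

struct Base {
  virtual ~Base() = default;
};
struct Derived : Base {
  int value = 42;
};

int main() {
  std::shared_ptr<Base> base = std::make_shared<Derived>();
  std::shared_ptr<Derived> derived =
      arrow::internal::checked_pointer_cast<Derived>(base);
  Base& ref = *derived;
  auto& as_derived = arrow::internal::checked_cast<Derived&>(ref);
  return as_derived.value == 42 ? 0 : 1;
}
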
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/compression.h ADDED
@@ -0,0 +1,241 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <limits>
22
+ #include <memory>
23
+ #include <optional>
24
+ #include <string>
25
+
26
+ #include "arrow/result.h"
27
+ #include "arrow/status.h"
28
+ #include "arrow/util/type_fwd.h"
29
+ #include "arrow/util/visibility.h"
30
+
31
+ namespace arrow {
32
+ namespace util {
33
+
34
+ constexpr int kUseDefaultCompressionLevel = std::numeric_limits<int>::min();
35
+
36
+ /// \brief Streaming compressor interface
37
+ ///
38
+ class ARROW_EXPORT Compressor {
39
+ public:
40
+ virtual ~Compressor() = default;
41
+
42
+ struct CompressResult {
43
+ int64_t bytes_read;
44
+ int64_t bytes_written;
45
+ };
46
+ struct FlushResult {
47
+ int64_t bytes_written;
48
+ bool should_retry;
49
+ };
50
+ struct EndResult {
51
+ int64_t bytes_written;
52
+ bool should_retry;
53
+ };
54
+
55
+ /// \brief Compress some input.
56
+ ///
57
+ /// If bytes_read is 0 on return, then a larger output buffer should be supplied.
58
+ virtual Result<CompressResult> Compress(int64_t input_len, const uint8_t* input,
59
+ int64_t output_len, uint8_t* output) = 0;
60
+
61
+ /// \brief Flush part of the compressed output.
62
+ ///
63
+ /// If should_retry is true on return, Flush() should be called again
64
+ /// with a larger buffer.
65
+ virtual Result<FlushResult> Flush(int64_t output_len, uint8_t* output) = 0;
66
+
67
+ /// \brief End compressing, doing whatever is necessary to end the stream.
68
+ ///
69
+ /// If should_retry is true on return, End() should be called again
70
+ /// with a larger buffer. Otherwise, the Compressor should not be used anymore.
71
+ ///
72
+ /// End() implies Flush().
73
+ virtual Result<EndResult> End(int64_t output_len, uint8_t* output) = 0;
74
+
75
+ // XXX add methods for buffer size heuristics?
76
+ };
77
+
78
+ /// \brief Streaming decompressor interface
79
+ ///
80
+ class ARROW_EXPORT Decompressor {
81
+ public:
82
+ virtual ~Decompressor() = default;
83
+
84
+ struct DecompressResult {
85
+ // XXX is need_more_output necessary? (Brotli?)
86
+ int64_t bytes_read;
87
+ int64_t bytes_written;
88
+ bool need_more_output;
89
+ };
90
+
91
+ /// \brief Decompress some input.
92
+ ///
93
+ /// If need_more_output is true on return, a larger output buffer needs
94
+ /// to be supplied.
95
+ virtual Result<DecompressResult> Decompress(int64_t input_len, const uint8_t* input,
96
+ int64_t output_len, uint8_t* output) = 0;
97
+
98
+ /// \brief Return whether the compressed stream is finished.
99
+ ///
100
+ /// This is a heuristic. If true is returned, then it is guaranteed
101
+ /// that the stream is finished. If false is returned, however, it may
102
+ /// simply be that the underlying library isn't able to provide the information.
103
+ virtual bool IsFinished() = 0;
104
+
105
+ /// \brief Reinitialize decompressor, making it ready for a new compressed stream.
106
+ virtual Status Reset() = 0;
107
+
108
+ // XXX add methods for buffer size heuristics?
109
+ };
110
+
111
+ /// \brief Compression codec options
112
+ class ARROW_EXPORT CodecOptions {
113
+ public:
114
+ explicit CodecOptions(int compression_level = kUseDefaultCompressionLevel)
115
+ : compression_level(compression_level) {}
116
+
117
+ virtual ~CodecOptions() = default;
118
+
119
+ int compression_level;
120
+ };
121
+
122
+ // ----------------------------------------------------------------------
123
+ // GZip codec options implementation
124
+
125
+ enum class GZipFormat {
126
+ ZLIB,
127
+ DEFLATE,
128
+ GZIP,
129
+ };
130
+
131
+ class ARROW_EXPORT GZipCodecOptions : public CodecOptions {
132
+ public:
133
+ GZipFormat gzip_format = GZipFormat::GZIP;
134
+ std::optional<int> window_bits;
135
+ };
136
+
137
+ // ----------------------------------------------------------------------
138
+ // brotli codec options implementation
139
+
140
+ class ARROW_EXPORT BrotliCodecOptions : public CodecOptions {
141
+ public:
142
+ std::optional<int> window_bits;
143
+ };
144
+
145
+ /// \brief Compression codec
146
+ class ARROW_EXPORT Codec {
147
+ public:
148
+ virtual ~Codec() = default;
149
+
150
+ /// \brief Return special value to indicate that a codec implementation
151
+ /// should use its default compression level
152
+ static int UseDefaultCompressionLevel();
153
+
154
+ /// \brief Return a string name for compression type
155
+ static const std::string& GetCodecAsString(Compression::type t);
156
+
157
+ /// \brief Return compression type for name (all lower case)
158
+ static Result<Compression::type> GetCompressionType(const std::string& name);
159
+
160
+ /// \brief Create a codec for the given compression algorithm with CodecOptions
161
+ static Result<std::unique_ptr<Codec>> Create(
162
+ Compression::type codec, const CodecOptions& codec_options = CodecOptions{});
163
+
164
+ /// \brief Create a codec for the given compression algorithm
165
+ static Result<std::unique_ptr<Codec>> Create(Compression::type codec,
166
+ int compression_level);
167
+
168
+ /// \brief Return true if support for indicated codec has been enabled
169
+ static bool IsAvailable(Compression::type codec);
170
+
171
+ /// \brief Return true if indicated codec supports setting a compression level
172
+ static bool SupportsCompressionLevel(Compression::type codec);
173
+
174
+ /// \brief Return the smallest supported compression level for the codec
175
+ /// Note: This function creates a temporary Codec instance
176
+ static Result<int> MinimumCompressionLevel(Compression::type codec);
177
+
178
+ /// \brief Return the largest supported compression level for the codec
179
+ /// Note: This function creates a temporary Codec instance
180
+ static Result<int> MaximumCompressionLevel(Compression::type codec);
181
+
182
+ /// \brief Return the default compression level
183
+ /// Note: This function creates a temporary Codec instance
184
+ static Result<int> DefaultCompressionLevel(Compression::type codec);
185
+
186
+ /// \brief Return the smallest supported compression level
187
+ virtual int minimum_compression_level() const = 0;
188
+
189
+ /// \brief Return the largest supported compression level
190
+ virtual int maximum_compression_level() const = 0;
191
+
192
+ /// \brief Return the default compression level
193
+ virtual int default_compression_level() const = 0;
194
+
195
+ /// \brief One-shot decompression function
196
+ ///
197
+ /// output_buffer_len must be correct and therefore be obtained in advance.
198
+ /// The actual decompressed length is returned.
199
+ ///
200
+ /// \note One-shot decompression is not always compatible with streaming
201
+ /// compression. Depending on the codec (e.g. LZ4), different formats may
202
+ /// be used.
203
+ virtual Result<int64_t> Decompress(int64_t input_len, const uint8_t* input,
204
+ int64_t output_buffer_len,
205
+ uint8_t* output_buffer) = 0;
206
+
207
+ /// \brief One-shot compression function
208
+ ///
209
+ /// output_buffer_len must first have been computed using MaxCompressedLen().
210
+ /// The actual compressed length is returned.
211
+ ///
212
+ /// \note One-shot compression is not always compatible with streaming
213
+ /// decompression. Depending on the codec (e.g. LZ4), different formats may
214
+ /// be used.
215
+ virtual Result<int64_t> Compress(int64_t input_len, const uint8_t* input,
216
+ int64_t output_buffer_len, uint8_t* output_buffer) = 0;
217
+
218
+ virtual int64_t MaxCompressedLen(int64_t input_len, const uint8_t* input) = 0;
219
+
220
+ /// \brief Create a streaming compressor instance
221
+ virtual Result<std::shared_ptr<Compressor>> MakeCompressor() = 0;
222
+
223
+ /// \brief Create a streaming decompressor instance
224
+ virtual Result<std::shared_ptr<Decompressor>> MakeDecompressor() = 0;
225
+
226
+ /// \brief This Codec's compression type
227
+ virtual Compression::type compression_type() const = 0;
228
+
229
+ /// \brief The name of this Codec's compression type
230
+ const std::string& name() const { return GetCodecAsString(compression_type()); }
231
+
232
+ /// \brief This Codec's compression level, if applicable
233
+ virtual int compression_level() const { return UseDefaultCompressionLevel(); }
234
+
235
+ private:
236
+ /// \brief Initializes the codec's resources.
237
+ virtual Status Init();
238
+ };
239
+
240
+ } // namespace util
241
+ } // namespace arrow
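
A minimal sketch of the one-shot Codec API declared above, assuming the GZIP codec was enabled in the Arrow C++ build; the function name RoundTripGzip and the sample payload are illustrative only, and error handling is collapsed into the ARROW_* macros.

  #include <cstdint>
  #include <string>
  #include <vector>

  #include "arrow/result.h"
  #include "arrow/status.h"
  #include "arrow/util/compression.h"

  arrow::Status RoundTripGzip() {
    using arrow::util::Codec;
    // Codec::Create returns Result<std::unique_ptr<Codec>>.
    ARROW_ASSIGN_OR_RAISE(auto codec, Codec::Create(arrow::Compression::GZIP));

    const std::string data = "hello hello hello hello";
    const auto* input = reinterpret_cast<const uint8_t*>(data.data());
    const auto input_len = static_cast<int64_t>(data.size());

    // Size the output buffer with MaxCompressedLen(), as Compress() requires.
    std::vector<uint8_t> compressed(codec->MaxCompressedLen(input_len, input));
    ARROW_ASSIGN_OR_RAISE(
        int64_t compressed_len,
        codec->Compress(input_len, input, static_cast<int64_t>(compressed.size()),
                        compressed.data()));

    // One-shot Decompress() needs the exact decompressed length up front.
    std::vector<uint8_t> roundtrip(input_len);
    ARROW_ASSIGN_OR_RAISE(
        int64_t decompressed_len,
        codec->Decompress(compressed_len, compressed.data(),
                          static_cast<int64_t>(roundtrip.size()), roundtrip.data()));

    return decompressed_len == input_len
               ? arrow::Status::OK()
               : arrow::Status::Invalid("unexpected decompressed length");
  }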
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/converter.h ADDED
@@ -0,0 +1,411 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
+ #pragma once
+
18
+ #include <memory>
19
+ #include <string>
20
+ #include <utility>
21
+ #include <vector>
22
+
23
+ #include "arrow/array.h"
24
+ #include "arrow/chunked_array.h"
25
+ #include "arrow/status.h"
26
+ #include "arrow/type.h"
27
+ #include "arrow/type_traits.h"
28
+ #include "arrow/util/checked_cast.h"
29
+ #include "arrow/visit_type_inline.h"
30
+
31
+ namespace arrow {
32
+ namespace internal {
33
+
34
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
35
+ static Result<std::unique_ptr<BaseConverter>> MakeConverter(
36
+ std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
37
+ MemoryPool* pool);
38
+
39
+ template <typename Input, typename Options>
40
+ class Converter {
41
+ public:
42
+ using Self = Converter<Input, Options>;
43
+ using InputType = Input;
44
+ using OptionsType = Options;
45
+
46
+ virtual ~Converter() = default;
47
+
48
+ Status Construct(std::shared_ptr<DataType> type, OptionsType options,
49
+ MemoryPool* pool) {
50
+ type_ = std::move(type);
51
+ options_ = std::move(options);
52
+ return Init(pool);
53
+ }
54
+
55
+ virtual Status Append(InputType value) { return Status::NotImplemented("Append"); }
56
+
57
+ virtual Status Extend(InputType values, int64_t size, int64_t offset = 0) {
58
+ return Status::NotImplemented("Extend");
59
+ }
60
+
61
+ virtual Status ExtendMasked(InputType values, InputType mask, int64_t size,
62
+ int64_t offset = 0) {
63
+ return Status::NotImplemented("ExtendMasked");
64
+ }
65
+
66
+ const std::shared_ptr<ArrayBuilder>& builder() const { return builder_; }
67
+
68
+ const std::shared_ptr<DataType>& type() const { return type_; }
69
+
70
+ OptionsType options() const { return options_; }
71
+
72
+ bool may_overflow() const { return may_overflow_; }
73
+
74
+ bool rewind_on_overflow() const { return rewind_on_overflow_; }
75
+
76
+ virtual Status Reserve(int64_t additional_capacity) {
77
+ return builder_->Reserve(additional_capacity);
78
+ }
79
+
80
+ Status AppendNull() { return builder_->AppendNull(); }
81
+
82
+ virtual Result<std::shared_ptr<Array>> ToArray() { return builder_->Finish(); }
83
+
84
+ virtual Result<std::shared_ptr<Array>> ToArray(int64_t length) {
85
+ ARROW_ASSIGN_OR_RAISE(auto arr, this->ToArray());
86
+ return arr->Slice(0, length);
87
+ }
88
+
89
+ virtual Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
90
+ ARROW_ASSIGN_OR_RAISE(auto array, ToArray());
91
+ std::vector<std::shared_ptr<Array>> chunks = {std::move(array)};
92
+ return std::make_shared<ChunkedArray>(chunks);
93
+ }
94
+
95
+ protected:
96
+ virtual Status Init(MemoryPool* pool) { return Status::OK(); }
97
+
98
+ std::shared_ptr<DataType> type_;
99
+ std::shared_ptr<ArrayBuilder> builder_;
100
+ OptionsType options_;
101
+ bool may_overflow_ = false;
102
+ bool rewind_on_overflow_ = false;
103
+ };
104
+
105
+ template <typename ArrowType, typename BaseConverter>
106
+ class PrimitiveConverter : public BaseConverter {
107
+ public:
108
+ using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
109
+
110
+ protected:
111
+ Status Init(MemoryPool* pool) override {
112
+ this->builder_ = std::make_shared<BuilderType>(this->type_, pool);
113
+ // Narrow variable-sized binary types may overflow
114
+ this->may_overflow_ = is_binary_like(this->type_->id());
115
+ primitive_type_ = checked_cast<const ArrowType*>(this->type_.get());
116
+ primitive_builder_ = checked_cast<BuilderType*>(this->builder_.get());
117
+ return Status::OK();
118
+ }
119
+
120
+ const ArrowType* primitive_type_;
121
+ BuilderType* primitive_builder_;
122
+ };
123
+
124
+ template <typename ArrowType, typename BaseConverter,
125
+ template <typename...> class ConverterTrait>
126
+ class ListConverter : public BaseConverter {
127
+ public:
128
+ using BuilderType = typename TypeTraits<ArrowType>::BuilderType;
129
+ using ConverterType = typename ConverterTrait<ArrowType>::type;
130
+
131
+ protected:
132
+ Status Init(MemoryPool* pool) override {
133
+ list_type_ = checked_cast<const ArrowType*>(this->type_.get());
134
+ ARROW_ASSIGN_OR_RAISE(value_converter_,
135
+ (MakeConverter<BaseConverter, ConverterTrait>(
136
+ list_type_->value_type(), this->options_, pool)));
137
+ this->builder_ =
138
+ std::make_shared<BuilderType>(pool, value_converter_->builder(), this->type_);
139
+ list_builder_ = checked_cast<BuilderType*>(this->builder_.get());
140
+ // Narrow list types may overflow
141
+ this->may_overflow_ = this->rewind_on_overflow_ =
142
+ sizeof(typename ArrowType::offset_type) < sizeof(int64_t);
143
+ return Status::OK();
144
+ }
145
+
146
+ const ArrowType* list_type_;
147
+ BuilderType* list_builder_;
148
+ std::unique_ptr<BaseConverter> value_converter_;
149
+ };
150
+
151
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
152
+ class StructConverter : public BaseConverter {
153
+ public:
154
+ using ConverterType = typename ConverterTrait<StructType>::type;
155
+
156
+ Status Reserve(int64_t additional_capacity) override {
157
+ ARROW_RETURN_NOT_OK(this->builder_->Reserve(additional_capacity));
158
+ for (const auto& child : children_) {
159
+ ARROW_RETURN_NOT_OK(child->Reserve(additional_capacity));
160
+ }
161
+ return Status::OK();
162
+ }
163
+
164
+ protected:
165
+ Status Init(MemoryPool* pool) override {
166
+ std::unique_ptr<BaseConverter> child_converter;
167
+ std::vector<std::shared_ptr<ArrayBuilder>> child_builders;
168
+
169
+ struct_type_ = checked_cast<const StructType*>(this->type_.get());
170
+ for (const auto& field : struct_type_->fields()) {
171
+ ARROW_ASSIGN_OR_RAISE(child_converter,
172
+ (MakeConverter<BaseConverter, ConverterTrait>(
173
+ field->type(), this->options_, pool)));
174
+ this->may_overflow_ |= child_converter->may_overflow();
175
+ this->rewind_on_overflow_ = this->may_overflow_;
176
+ child_builders.push_back(child_converter->builder());
177
+ children_.push_back(std::move(child_converter));
178
+ }
179
+
180
+ this->builder_ =
181
+ std::make_shared<StructBuilder>(this->type_, pool, std::move(child_builders));
182
+ struct_builder_ = checked_cast<StructBuilder*>(this->builder_.get());
183
+
184
+ return Status::OK();
185
+ }
186
+
187
+ const StructType* struct_type_;
188
+ StructBuilder* struct_builder_;
189
+ std::vector<std::unique_ptr<BaseConverter>> children_;
190
+ };
191
+
192
+ template <typename ValueType, typename BaseConverter>
193
+ class DictionaryConverter : public BaseConverter {
194
+ public:
195
+ using BuilderType = DictionaryBuilder<ValueType>;
196
+
197
+ protected:
198
+ Status Init(MemoryPool* pool) override {
199
+ std::unique_ptr<ArrayBuilder> builder;
200
+ ARROW_RETURN_NOT_OK(MakeDictionaryBuilder(pool, this->type_, NULLPTR, &builder));
201
+ this->builder_ = std::move(builder);
202
+ this->may_overflow_ = false;
203
+ dict_type_ = checked_cast<const DictionaryType*>(this->type_.get());
204
+ value_type_ = checked_cast<const ValueType*>(dict_type_->value_type().get());
205
+ value_builder_ = checked_cast<BuilderType*>(this->builder_.get());
206
+ return Status::OK();
207
+ }
208
+
209
+ const DictionaryType* dict_type_;
210
+ const ValueType* value_type_;
211
+ BuilderType* value_builder_;
212
+ };
213
+
214
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
215
+ struct MakeConverterImpl {
216
+ template <typename T, typename ConverterType = typename ConverterTrait<T>::type>
217
+ Status Visit(const T&) {
218
+ out.reset(new ConverterType());
219
+ return out->Construct(std::move(type), std::move(options), pool);
220
+ }
221
+
222
+ Status Visit(const DictionaryType& t) {
223
+ switch (t.value_type()->id()) {
224
+ #define DICTIONARY_CASE(TYPE) \
225
+ case TYPE::type_id: \
226
+ out = std::make_unique< \
227
+ typename ConverterTrait<DictionaryType>::template dictionary_type<TYPE>>(); \
228
+ break;
229
+ DICTIONARY_CASE(BooleanType);
230
+ DICTIONARY_CASE(Int8Type);
231
+ DICTIONARY_CASE(Int16Type);
232
+ DICTIONARY_CASE(Int32Type);
233
+ DICTIONARY_CASE(Int64Type);
234
+ DICTIONARY_CASE(UInt8Type);
235
+ DICTIONARY_CASE(UInt16Type);
236
+ DICTIONARY_CASE(UInt32Type);
237
+ DICTIONARY_CASE(UInt64Type);
238
+ DICTIONARY_CASE(FloatType);
239
+ DICTIONARY_CASE(DoubleType);
240
+ DICTIONARY_CASE(BinaryType);
241
+ DICTIONARY_CASE(StringType);
242
+ DICTIONARY_CASE(FixedSizeBinaryType);
243
+ #undef DICTIONARY_CASE
244
+ default:
245
+ return Status::NotImplemented("DictionaryArray converter for type ", t.ToString(),
246
+ " not implemented");
247
+ }
248
+ return out->Construct(std::move(type), std::move(options), pool);
249
+ }
250
+
251
+ Status Visit(const DataType& t) { return Status::NotImplemented(t.name()); }
252
+
253
+ std::shared_ptr<DataType> type;
254
+ typename BaseConverter::OptionsType options;
255
+ MemoryPool* pool;
256
+ std::unique_ptr<BaseConverter> out;
257
+ };
258
+
259
+ template <typename BaseConverter, template <typename...> class ConverterTrait>
260
+ static Result<std::unique_ptr<BaseConverter>> MakeConverter(
261
+ std::shared_ptr<DataType> type, typename BaseConverter::OptionsType options,
262
+ MemoryPool* pool) {
263
+ MakeConverterImpl<BaseConverter, ConverterTrait> visitor{
264
+ std::move(type), std::move(options), pool, NULLPTR};
265
+ ARROW_RETURN_NOT_OK(VisitTypeInline(*visitor.type, &visitor));
266
+ return std::move(visitor.out);
267
+ }
268
+
269
+ template <typename Converter>
270
+ class Chunker {
271
+ public:
272
+ using InputType = typename Converter::InputType;
273
+
274
+ explicit Chunker(std::unique_ptr<Converter> converter)
275
+ : converter_(std::move(converter)) {}
276
+
277
+ Status Reserve(int64_t additional_capacity) {
278
+ ARROW_RETURN_NOT_OK(converter_->Reserve(additional_capacity));
279
+ reserved_ += additional_capacity;
280
+ return Status::OK();
281
+ }
282
+
283
+ Status AppendNull() {
284
+ auto status = converter_->AppendNull();
285
+ if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
286
+ if (converter_->builder()->length() == 0) {
287
+ // Builder length == 0 means the individual element is too large to append.
288
+ // In this case, no need to try again.
289
+ return status;
290
+ }
291
+ ARROW_RETURN_NOT_OK(FinishChunk());
292
+ return converter_->AppendNull();
293
+ }
294
+ ++length_;
295
+ return status;
296
+ }
297
+
298
+ Status Append(InputType value) {
299
+ auto status = converter_->Append(value);
300
+ if (ARROW_PREDICT_FALSE(status.IsCapacityError())) {
301
+ if (converter_->builder()->length() == 0) {
302
+ return status;
303
+ }
304
+ ARROW_RETURN_NOT_OK(FinishChunk());
305
+ return Append(value);
306
+ }
307
+ ++length_;
308
+ return status;
309
+ }
310
+
311
+ Status Extend(InputType values, int64_t size, int64_t offset = 0) {
312
+ while (offset < size) {
313
+ auto length_before = converter_->builder()->length();
314
+ auto status = converter_->Extend(values, size, offset);
315
+ auto length_after = converter_->builder()->length();
316
+ auto num_converted = length_after - length_before;
317
+
318
+ offset += num_converted;
319
+ length_ += num_converted;
320
+
321
+ if (status.IsCapacityError()) {
322
+ if (converter_->builder()->length() == 0) {
323
+ // Builder length == 0 means the individual element is too large to append.
324
+ // In this case, no need to try again.
325
+ return status;
326
+ } else if (converter_->rewind_on_overflow()) {
327
+ // The list-like and binary-like conversion paths may raise a capacity error, so
328
+ // we need to handle them differently. While the binary-like converters check
329
+ // the capacity before append/extend, the list-like converters just check after
330
+ // append/extend. Thus depending on the implementation semantics we may need
331
+ // to rewind (slice) the output chunk by one.
332
+ length_ -= 1;
333
+ offset -= 1;
334
+ }
335
+ ARROW_RETURN_NOT_OK(FinishChunk());
336
+ } else if (!status.ok()) {
337
+ return status;
338
+ }
339
+ }
340
+ return Status::OK();
341
+ }
342
+
343
+ Status ExtendMasked(InputType values, InputType mask, int64_t size,
344
+ int64_t offset = 0) {
345
+ while (offset < size) {
346
+ auto length_before = converter_->builder()->length();
347
+ auto status = converter_->ExtendMasked(values, mask, size, offset);
348
+ auto length_after = converter_->builder()->length();
349
+ auto num_converted = length_after - length_before;
350
+
351
+ offset += num_converted;
352
+ length_ += num_converted;
353
+
354
+ if (status.IsCapacityError()) {
355
+ if (converter_->builder()->length() == 0) {
356
+ // Builder length == 0 means the individual element is too large to append.
357
+ // In this case, no need to try again.
358
+ return status;
359
+ } else if (converter_->rewind_on_overflow()) {
360
+ // The list-like and binary-like conversion paths may raise a capacity error, so
361
+ // we need to handle them differently. While the binary-like converters check
362
+ // the capacity before append/extend, the list-like converters just check after
363
+ // append/extend. Thus depending on the implementation semantics we may need
364
+ // to rewind (slice) the output chunk by one.
365
+ length_ -= 1;
366
+ offset -= 1;
367
+ }
368
+ ARROW_RETURN_NOT_OK(FinishChunk());
369
+ } else if (!status.ok()) {
370
+ return status;
371
+ }
372
+ }
373
+ return Status::OK();
374
+ }
375
+
376
+ Status FinishChunk() {
377
+ ARROW_ASSIGN_OR_RAISE(auto chunk, converter_->ToArray(length_));
378
+ chunks_.push_back(chunk);
379
+ // Reserve space for the remaining items.
380
+ // Besides being an optimization, it is also required if the converter's
381
+ // implementation relies on unsafe builder methods in converter->Append().
382
+ auto remaining = reserved_ - length_;
383
+ Reset();
384
+ return Reserve(remaining);
385
+ }
386
+
387
+ Result<std::shared_ptr<ChunkedArray>> ToChunkedArray() {
388
+ ARROW_RETURN_NOT_OK(FinishChunk());
389
+ return std::make_shared<ChunkedArray>(chunks_);
390
+ }
391
+
392
+ protected:
393
+ void Reset() {
394
+ converter_->builder()->Reset();
395
+ length_ = 0;
396
+ reserved_ = 0;
397
+ }
398
+
399
+ int64_t length_ = 0;
400
+ int64_t reserved_ = 0;
401
+ std::unique_ptr<Converter> converter_;
402
+ std::vector<std::shared_ptr<Array>> chunks_;
403
+ };
404
+
405
+ template <typename T>
406
+ static Result<std::unique_ptr<Chunker<T>>> MakeChunker(std::unique_ptr<T> converter) {
407
+ return std::make_unique<Chunker<T>>(std::move(converter));
408
+ }
409
+
410
+ } // namespace internal
411
+ } // namespace arrow
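
The Chunker above wraps the templated converter machinery, but the chunk-on-overflow strategy it implements can be shown in isolation. The sketch below applies the same idea directly to an arrow::StringBuilder: on a capacity error, finish the current chunk and retry the append in the freshly reset builder. BuildChunked and its inputs are illustrative and not part of this header.

  #include <memory>
  #include <string>
  #include <utility>
  #include <vector>

  #include "arrow/array.h"
  #include "arrow/array/builder_binary.h"
  #include "arrow/chunked_array.h"
  #include "arrow/result.h"
  #include "arrow/status.h"

  arrow::Result<std::shared_ptr<arrow::ChunkedArray>> BuildChunked(
      const std::vector<std::string>& values) {
    arrow::StringBuilder builder;
    std::vector<std::shared_ptr<arrow::Array>> chunks;
    for (const auto& value : values) {
      auto st = builder.Append(value);
      if (st.IsCapacityError() && builder.length() > 0) {
        // The current chunk is full: finish it and retry in a fresh chunk.
        ARROW_ASSIGN_OR_RAISE(auto chunk, builder.Finish());
        chunks.push_back(std::move(chunk));
        st = builder.Append(value);
      }
      ARROW_RETURN_NOT_OK(st);
    }
    ARROW_ASSIGN_OR_RAISE(auto last, builder.Finish());
    chunks.push_back(std::move(last));
    return std::make_shared<arrow::ChunkedArray>(std::move(chunks));
  }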
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/crc32.h ADDED
@@ -0,0 +1,36 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
+ #pragma once
+
18
+ #include <cstddef>
19
+ #include <cstdint>
20
+
21
+ #include "arrow/util/visibility.h"
22
+
23
+ namespace arrow {
24
+ namespace internal {
25
+
26
+ /// \brief Compute the CRC32 checksum of the given data
27
+ ///
28
+ /// This function computes CRC32 with the polynomial 0x04C11DB7,
29
+ /// as used in zlib and others (note this is different from CRC32C).
30
+ /// To compute a running CRC32, pass the previous value in `prev`,
31
+ /// otherwise `prev` should be 0.
32
+ ARROW_EXPORT
33
+ uint32_t crc32(uint32_t prev, const void* data, size_t length);
34
+
35
+ } // namespace internal
36
+ } // namespace arrow
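
A short sketch of the running-checksum pattern described in the comment above: feeding the buffer in pieces, passing the previous value back in as `prev`, gives the same result as a single call. ChecksumInPieces is an illustrative name.

  #include <cstdint>
  #include <string>

  #include "arrow/util/crc32.h"

  uint32_t ChecksumInPieces() {
    const std::string head = "hello ";
    const std::string tail = "world";
    // Start from 0, then chain the previous value through subsequent calls.
    uint32_t crc = arrow::internal::crc32(0, head.data(), head.size());
    crc = arrow::internal::crc32(crc, tail.data(), tail.size());
    return crc;  // equal to a single crc32(0, ...) over "hello world"
  }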
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/debug.h ADDED
@@ -0,0 +1,29 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include "arrow/util/visibility.h"
21
+
22
+ namespace arrow {
23
+ namespace internal {
24
+
25
+ ARROW_EXPORT
26
+ void DebugTrap();
27
+
28
+ } // namespace internal
29
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/delimiting.h ADDED
@@ -0,0 +1,181 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <memory>
22
+ #include <string_view>
23
+
24
+ #include "arrow/status.h"
25
+ #include "arrow/util/macros.h"
26
+ #include "arrow/util/visibility.h"
27
+
28
+ namespace arrow {
29
+
30
+ class Buffer;
31
+
32
+ class ARROW_EXPORT BoundaryFinder {
33
+ public:
34
+ BoundaryFinder() = default;
35
+
36
+ virtual ~BoundaryFinder();
37
+
38
+ /// \brief Find the position of the first delimiter inside block
39
+ ///
40
+ /// `partial` is taken to be the beginning of the block, and `block`
41
+ /// its continuation. Also, `partial` doesn't contain a delimiter.
42
+ ///
43
+ /// The returned `out_pos` is relative to `block`'s start and should point
44
+ /// to the first character after the first delimiter.
45
+ /// `out_pos` will be -1 if no delimiter is found.
46
+ virtual Status FindFirst(std::string_view partial, std::string_view block,
47
+ int64_t* out_pos) = 0;
48
+
49
+ /// \brief Find the position of the last delimiter inside block
50
+ ///
51
+ /// The returned `out_pos` is relative to `block`'s start and should point
52
+ /// to the first character after the last delimiter.
53
+ /// `out_pos` will be -1 if no delimiter is found.
54
+ virtual Status FindLast(std::string_view block, int64_t* out_pos) = 0;
55
+
56
+ /// \brief Find the position of the Nth delimiter inside the block
57
+ ///
58
+ /// `partial` is taken to be the beginning of the block, and `block`
59
+ /// its continuation. Also, `partial` doesn't contain a delimiter.
60
+ ///
61
+ /// The returned `out_pos` is relative to `block`'s start and should point
62
+ /// to the first character after the first delimiter.
63
+ /// `out_pos` will be -1 if no delimiter is found.
64
+ ///
65
+ /// The returned `num_found` is the number of delimiters actually found
66
+ virtual Status FindNth(std::string_view partial, std::string_view block, int64_t count,
67
+ int64_t* out_pos, int64_t* num_found) = 0;
68
+
69
+ static constexpr int64_t kNoDelimiterFound = -1;
70
+
71
+ protected:
72
+ ARROW_DISALLOW_COPY_AND_ASSIGN(BoundaryFinder);
73
+ };
74
+
75
+ ARROW_EXPORT
76
+ std::shared_ptr<BoundaryFinder> MakeNewlineBoundaryFinder();
77
+
78
+ /// \brief A reusable block-based chunker for delimited data
79
+ ///
80
+ /// The chunker takes a block of delimited data and helps carve a sub-block
81
+ /// which begins and ends on delimiters (suitable for consumption by parsers
82
+ /// which can only parse whole objects).
83
+ class ARROW_EXPORT Chunker {
84
+ public:
85
+ explicit Chunker(std::shared_ptr<BoundaryFinder> delimiter);
86
+ ~Chunker();
87
+
88
+ /// \brief Carve up a chunk in a block of data to contain only whole objects
89
+ ///
90
+ /// Pre-conditions:
91
+ /// - `block` is the start of a valid block of delimited data
92
+ /// (i.e. starts just after a delimiter)
93
+ ///
94
+ /// Post-conditions:
95
+ /// - block == whole + partial
96
+ /// - `whole` is a valid block of delimited data
97
+ /// (i.e. starts just after a delimiter and ends with a delimiter)
98
+ /// - `partial` doesn't contain an entire delimited object
99
+ /// (IOW: `partial` is generally small)
100
+ ///
101
+ /// This method will look for the last delimiter in `block` and may
102
+ /// therefore be costly.
103
+ ///
104
+ /// \param[in] block data to be chunked
105
+ /// \param[out] whole subrange of block containing whole delimited objects
106
+ /// \param[out] partial subrange of block starting with a partial delimited object
107
+ Status Process(std::shared_ptr<Buffer> block, std::shared_ptr<Buffer>* whole,
108
+ std::shared_ptr<Buffer>* partial);
109
+
110
+ /// \brief Carve the completion of a partial object out of a block
111
+ ///
112
+ /// Pre-conditions:
113
+ /// - `partial` is the start of a valid block of delimited data
114
+ /// (i.e. starts just after a delimiter)
115
+ /// - `block` follows `partial` in file order
116
+ ///
117
+ /// Post-conditions:
118
+ /// - block == completion + rest
119
+ /// - `partial + completion` is a valid block of delimited data
120
+ /// (i.e. starts just after a delimiter and ends with a delimiter)
121
+ /// - `completion` doesn't contain an entire delimited object
122
+ /// (IOW: `completion` is generally small)
123
+ ///
124
+ /// This method will look for the first delimiter in `block` and should
125
+ /// therefore be reasonably cheap.
126
+ ///
127
+ /// \param[in] partial incomplete delimited data
128
+ /// \param[in] block delimited data following partial
129
+ /// \param[out] completion subrange of block containing the completion of partial
130
+ /// \param[out] rest subrange of block containing what completion does not cover
131
+ Status ProcessWithPartial(std::shared_ptr<Buffer> partial,
132
+ std::shared_ptr<Buffer> block,
133
+ std::shared_ptr<Buffer>* completion,
134
+ std::shared_ptr<Buffer>* rest);
135
+
136
+ /// \brief Like ProcessWithPartial, but for the last block of a file
137
+ ///
138
+ /// This method allows for a final delimited object without a trailing delimiter
139
+ /// (ProcessWithPartial would return an error in that case).
140
+ ///
141
+ /// Pre-conditions:
142
+ /// - `partial` is the start of a valid block of delimited data
143
+ /// - `block` follows `partial` in file order and is the last data block
144
+ ///
145
+ /// Post-conditions:
146
+ /// - block == completion + rest
147
+ /// - `partial + completion` is a valid block of delimited data
148
+ /// - `completion` doesn't contain an entire delimited object
149
+ /// (IOW: `completion` is generally small)
150
+ ///
151
+ Status ProcessFinal(std::shared_ptr<Buffer> partial, std::shared_ptr<Buffer> block,
152
+ std::shared_ptr<Buffer>* completion, std::shared_ptr<Buffer>* rest);
153
+
154
+ /// \brief Skip `count` rows
155
+ /// Pre-conditions:
156
+ /// - `partial` is the start of a valid block of delimited data
157
+ /// (i.e. starts just after a delimiter)
158
+ /// - `block` follows `partial` in file order
159
+ ///
160
+ /// Post-conditions:
161
+ /// - `count` is updated to indicate the number of rows that still need to be skipped
162
+ /// - If `count` is > 0 then `rest` is an incomplete block that should be a future
163
+ /// `partial`
164
+ /// - Else `rest` could be one or more valid blocks of delimited data which need to be
165
+ /// parsed
166
+ ///
167
+ /// \param[in] partial incomplete delimited data
168
+ /// \param[in] block delimited data following partial
169
+ /// \param[in] final whether this is the final chunk
170
+ /// \param[in,out] count number of rows that need to be skipped
171
+ /// \param[out] rest subrange of block containing what was not skipped
172
+ Status ProcessSkip(std::shared_ptr<Buffer> partial, std::shared_ptr<Buffer> block,
173
+ bool final, int64_t* count, std::shared_ptr<Buffer>* rest);
174
+
175
+ protected:
176
+ ARROW_DISALLOW_COPY_AND_ASSIGN(Chunker);
177
+
178
+ std::shared_ptr<BoundaryFinder> boundary_finder_;
179
+ };
180
+
181
+ } // namespace arrow
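
A hedged sketch of driving the Chunker above with the newline BoundaryFinder: Process() splits a block into whole lines plus a trailing partial line, which would then be fed to ProcessWithPartial() together with the next block. SplitLines and the sample text are illustrative.

  #include <memory>

  #include "arrow/buffer.h"
  #include "arrow/status.h"
  #include "arrow/util/delimiting.h"

  arrow::Status SplitLines() {
    auto block = arrow::Buffer::FromString("alpha\nbeta\ngamma without trailing newline");
    arrow::Chunker chunker(arrow::MakeNewlineBoundaryFinder());

    std::shared_ptr<arrow::Buffer> whole, partial;
    // `whole` ends just after the last '\n'; `partial` holds the rest.
    ARROW_RETURN_NOT_OK(chunker.Process(block, &whole, &partial));
    return arrow::Status::OK();
  }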
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/dispatch.h ADDED
@@ -0,0 +1,115 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <utility>
21
+ #include <vector>
22
+
23
+ #include "arrow/status.h"
24
+ #include "arrow/util/cpu_info.h"
25
+
26
+ namespace arrow {
27
+ namespace internal {
28
+
29
+ enum class DispatchLevel : int {
30
+ // These dispatch levels, corresponding to instruction set features,
31
+ // are sorted in increasing order of preference.
32
+ NONE = 0,
33
+ SSE4_2,
34
+ AVX2,
35
+ AVX512,
36
+ NEON,
37
+ MAX
38
+ };
39
+
40
+ /*
41
+ A facility for dynamic dispatch according to available DispatchLevel.
42
+
43
+ Typical use:
44
+
45
+ static void my_function_default(...);
46
+ static void my_function_avx2(...);
47
+
48
+ struct MyDynamicFunction {
49
+ using FunctionType = decltype(&my_function_default);
50
+
51
+ static std::vector<std::pair<DispatchLevel, FunctionType>> implementations() {
52
+ return {
53
+ { DispatchLevel::NONE, my_function_default }
54
+ #if defined(ARROW_HAVE_RUNTIME_AVX2)
55
+ , { DispatchLevel::AVX2, my_function_avx2 }
56
+ #endif
57
+ };
58
+ }
59
+ };
60
+
61
+ void my_function(...) {
62
+ static DynamicDispatch<MyDynamicFunction> dispatch;
63
+ return dispatch.func(...);
64
+ }
65
+ */
66
+ template <typename DynamicFunction>
67
+ class DynamicDispatch {
68
+ protected:
69
+ using FunctionType = typename DynamicFunction::FunctionType;
70
+ using Implementation = std::pair<DispatchLevel, FunctionType>;
71
+
72
+ public:
73
+ DynamicDispatch() { Resolve(DynamicFunction::implementations()); }
74
+
75
+ FunctionType func = {};
76
+
77
+ protected:
78
+ // Use the Implementation with the highest DispatchLevel
79
+ void Resolve(const std::vector<Implementation>& implementations) {
80
+ Implementation cur{DispatchLevel::NONE, {}};
81
+
82
+ for (const auto& impl : implementations) {
83
+ if (impl.first >= cur.first && IsSupported(impl.first)) {
84
+ // Higher (or same) level than current
85
+ cur = impl;
86
+ }
87
+ }
88
+
89
+ if (!cur.second) {
90
+ Status::Invalid("No appropriate implementation found").Abort();
91
+ }
92
+ func = cur.second;
93
+ }
94
+
95
+ private:
96
+ bool IsSupported(DispatchLevel level) const {
97
+ static const auto cpu_info = arrow::internal::CpuInfo::GetInstance();
98
+
99
+ switch (level) {
100
+ case DispatchLevel::NONE:
101
+ return true;
102
+ case DispatchLevel::SSE4_2:
103
+ return cpu_info->IsSupported(CpuInfo::SSE4_2);
104
+ case DispatchLevel::AVX2:
105
+ return cpu_info->IsSupported(CpuInfo::AVX2);
106
+ case DispatchLevel::AVX512:
107
+ return cpu_info->IsSupported(CpuInfo::AVX512);
108
+ default:
109
+ return false;
110
+ }
111
+ }
112
+ };
113
+
114
+ } // namespace internal
115
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/endian.h ADDED
@@ -0,0 +1,245 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #ifdef _WIN32
21
+ #define ARROW_LITTLE_ENDIAN 1
22
+ #else
23
+ #if defined(__APPLE__) || defined(__FreeBSD__)
24
+ #include <machine/endian.h> // IWYU pragma: keep
25
+ #elif defined(sun) || defined(__sun)
26
+ #include <sys/byteorder.h> // IWYU pragma: keep
27
+ #else
28
+ #include <endian.h> // IWYU pragma: keep
29
+ #endif
30
+ #
31
+ #ifndef __BYTE_ORDER__
32
+ #error "__BYTE_ORDER__ not defined"
33
+ #endif
34
+ #
35
+ #ifndef __ORDER_LITTLE_ENDIAN__
36
+ #error "__ORDER_LITTLE_ENDIAN__ not defined"
37
+ #endif
38
+ #
39
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
40
+ #define ARROW_LITTLE_ENDIAN 1
41
+ #else
42
+ #define ARROW_LITTLE_ENDIAN 0
43
+ #endif
44
+ #endif
45
+
46
+ #if defined(_MSC_VER)
47
+ #include <intrin.h> // IWYU pragma: keep
48
+ #define ARROW_BYTE_SWAP64 _byteswap_uint64
49
+ #define ARROW_BYTE_SWAP32 _byteswap_ulong
50
+ #else
51
+ #define ARROW_BYTE_SWAP64 __builtin_bswap64
52
+ #define ARROW_BYTE_SWAP32 __builtin_bswap32
53
+ #endif
54
+
55
+ #include <algorithm>
56
+ #include <array>
57
+
58
+ #include "arrow/util/type_traits.h"
59
+ #include "arrow/util/ubsan.h"
60
+
61
+ namespace arrow {
62
+ namespace bit_util {
63
+
64
+ //
65
+ // Byte-swap 16-bit, 32-bit and 64-bit values
66
+ //
67
+
68
+ // Swap the byte order (i.e. endianness)
69
+ static inline int64_t ByteSwap(int64_t value) { return ARROW_BYTE_SWAP64(value); }
70
+ static inline uint64_t ByteSwap(uint64_t value) {
71
+ return static_cast<uint64_t>(ARROW_BYTE_SWAP64(value));
72
+ }
73
+ static inline int32_t ByteSwap(int32_t value) { return ARROW_BYTE_SWAP32(value); }
74
+ static inline uint32_t ByteSwap(uint32_t value) {
75
+ return static_cast<uint32_t>(ARROW_BYTE_SWAP32(value));
76
+ }
77
+ static inline int16_t ByteSwap(int16_t value) {
78
+ constexpr auto m = static_cast<int16_t>(0xff);
79
+ return static_cast<int16_t>(((value >> 8) & m) | ((value & m) << 8));
80
+ }
81
+ static inline uint16_t ByteSwap(uint16_t value) {
82
+ return static_cast<uint16_t>(ByteSwap(static_cast<int16_t>(value)));
83
+ }
84
+ static inline uint8_t ByteSwap(uint8_t value) { return value; }
85
+ static inline int8_t ByteSwap(int8_t value) { return value; }
86
+ static inline double ByteSwap(double value) {
87
+ const uint64_t swapped = ARROW_BYTE_SWAP64(util::SafeCopy<uint64_t>(value));
88
+ return util::SafeCopy<double>(swapped);
89
+ }
90
+ static inline float ByteSwap(float value) {
91
+ const uint32_t swapped = ARROW_BYTE_SWAP32(util::SafeCopy<uint32_t>(value));
92
+ return util::SafeCopy<float>(swapped);
93
+ }
94
+
95
+ // Write the swapped bytes into dst. Src and dst cannot overlap.
96
+ static inline void ByteSwap(void* dst, const void* src, int len) {
97
+ switch (len) {
98
+ case 1:
99
+ *reinterpret_cast<int8_t*>(dst) = *reinterpret_cast<const int8_t*>(src);
100
+ return;
101
+ case 2:
102
+ *reinterpret_cast<int16_t*>(dst) = ByteSwap(*reinterpret_cast<const int16_t*>(src));
103
+ return;
104
+ case 4:
105
+ *reinterpret_cast<int32_t*>(dst) = ByteSwap(*reinterpret_cast<const int32_t*>(src));
106
+ return;
107
+ case 8:
108
+ *reinterpret_cast<int64_t*>(dst) = ByteSwap(*reinterpret_cast<const int64_t*>(src));
109
+ return;
110
+ default:
111
+ break;
112
+ }
113
+
114
+ auto d = reinterpret_cast<uint8_t*>(dst);
115
+ auto s = reinterpret_cast<const uint8_t*>(src);
116
+ for (int i = 0; i < len; ++i) {
117
+ d[i] = s[len - i - 1];
118
+ }
119
+ }
120
+
121
+ // Convert to little/big endian format from the machine's native endian format.
122
+ #if ARROW_LITTLE_ENDIAN
123
+ template <typename T, typename = internal::EnableIfIsOneOf<
124
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
125
+ uint8_t, int8_t, float, double, bool>>
126
+ static inline T ToBigEndian(T value) {
127
+ return ByteSwap(value);
128
+ }
129
+
130
+ template <typename T, typename = internal::EnableIfIsOneOf<
131
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
132
+ uint8_t, int8_t, float, double, bool>>
133
+ static inline T ToLittleEndian(T value) {
134
+ return value;
135
+ }
136
+ #else
137
+ template <typename T, typename = internal::EnableIfIsOneOf<
138
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
139
+ uint8_t, int8_t, float, double, bool>>
140
+ static inline T ToBigEndian(T value) {
141
+ return value;
142
+ }
143
+
144
+ template <typename T, typename = internal::EnableIfIsOneOf<
145
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
146
+ uint8_t, int8_t, float, double, bool>>
147
+ static inline T ToLittleEndian(T value) {
148
+ return ByteSwap(value);
149
+ }
150
+ #endif
151
+
152
+ // Convert from big/little endian format to the machine's native endian format.
153
+ #if ARROW_LITTLE_ENDIAN
154
+ template <typename T, typename = internal::EnableIfIsOneOf<
155
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
156
+ uint8_t, int8_t, float, double, bool>>
157
+ static inline T FromBigEndian(T value) {
158
+ return ByteSwap(value);
159
+ }
160
+
161
+ template <typename T, typename = internal::EnableIfIsOneOf<
162
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
163
+ uint8_t, int8_t, float, double, bool>>
164
+ static inline T FromLittleEndian(T value) {
165
+ return value;
166
+ }
167
+ #else
168
+ template <typename T, typename = internal::EnableIfIsOneOf<
169
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
170
+ uint8_t, int8_t, float, double, bool>>
171
+ static inline T FromBigEndian(T value) {
172
+ return value;
173
+ }
174
+
175
+ template <typename T, typename = internal::EnableIfIsOneOf<
176
+ T, int64_t, uint64_t, int32_t, uint32_t, int16_t, uint16_t,
177
+ uint8_t, int8_t, float, double, bool>>
178
+ static inline T FromLittleEndian(T value) {
179
+ return ByteSwap(value);
180
+ }
181
+ #endif
182
+
183
+ // Handle endianness in *word* granularity (keep individual array element untouched)
184
+ namespace little_endian {
185
+
186
+ namespace detail {
187
+
188
+ // Read a native endian array as little endian
189
+ template <typename T, size_t N>
190
+ struct Reader {
191
+ const std::array<T, N>& native_array;
192
+
193
+ explicit Reader(const std::array<T, N>& native_array) : native_array(native_array) {}
194
+
195
+ const T& operator[](size_t i) const {
196
+ return native_array[ARROW_LITTLE_ENDIAN ? i : N - 1 - i];
197
+ }
198
+ };
199
+
200
+ // Read/write a native endian array as little endian
201
+ template <typename T, size_t N>
202
+ struct Writer {
203
+ std::array<T, N>* native_array;
204
+
205
+ explicit Writer(std::array<T, N>* native_array) : native_array(native_array) {}
206
+
207
+ const T& operator[](size_t i) const {
208
+ return (*native_array)[ARROW_LITTLE_ENDIAN ? i : N - 1 - i];
209
+ }
210
+ T& operator[](size_t i) { return (*native_array)[ARROW_LITTLE_ENDIAN ? i : N - 1 - i]; }
211
+ };
212
+
213
+ } // namespace detail
214
+
215
+ // Construct array reader and try to deduce template arguments
216
+ template <typename T, size_t N>
217
+ static inline detail::Reader<T, N> Make(const std::array<T, N>& native_array) {
218
+ return detail::Reader<T, N>(native_array);
219
+ }
220
+
221
+ // Construct array writer and try to deduce template arguments
222
+ template <typename T, size_t N>
223
+ static inline detail::Writer<T, N> Make(std::array<T, N>* native_array) {
224
+ return detail::Writer<T, N>(native_array);
225
+ }
226
+
227
+ // Convert little endian array to native endian
228
+ template <typename T, size_t N>
229
+ static inline std::array<T, N> ToNative(std::array<T, N> array) {
230
+ if (!ARROW_LITTLE_ENDIAN) {
231
+ std::reverse(array.begin(), array.end());
232
+ }
233
+ return array;
234
+ }
235
+
236
+ // Convert native endian array to little endian
237
+ template <typename T, size_t N>
238
+ static inline std::array<T, N> FromNative(std::array<T, N> array) {
239
+ return ToNative(array);
240
+ }
241
+
242
+ } // namespace little_endian
243
+
244
+ } // namespace bit_util
245
+ } // namespace arrow
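
A small sketch of the conversion helpers above, e.g. for a fixed little-endian wire format: on little-endian machines both calls are no-ops, on big-endian machines they byte-swap. EncodeDecode is an illustrative name.

  #include <cstdint>

  #include "arrow/util/endian.h"

  uint32_t EncodeDecode(uint32_t native_value) {
    // Native -> little-endian representation (as it would be stored on disk).
    const uint32_t on_disk = arrow::bit_util::ToLittleEndian(native_value);
    // Little-endian representation -> native, recovering the original value.
    return arrow::bit_util::FromLittleEndian(on_disk);
  }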
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/functional.h ADDED
@@ -0,0 +1,160 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <memory>
21
+ #include <tuple>
22
+ #include <type_traits>
23
+
24
+ #include "arrow/result.h"
25
+ #include "arrow/util/macros.h"
26
+
27
+ namespace arrow {
28
+ namespace internal {
29
+
30
+ struct Empty {
31
+ static Result<Empty> ToResult(Status s) {
32
+ if (ARROW_PREDICT_TRUE(s.ok())) {
33
+ return Empty{};
34
+ }
35
+ return s;
36
+ }
37
+ };
38
+
39
+ /// Helper struct for examining lambdas and other callables.
40
+ /// TODO(ARROW-12655) support function pointers
41
+ struct call_traits {
42
+ public:
43
+ template <typename R, typename... A>
44
+ static std::false_type is_overloaded_impl(R(A...));
45
+
46
+ template <typename F>
47
+ static std::false_type is_overloaded_impl(decltype(&F::operator())*);
48
+
49
+ template <typename F>
50
+ static std::true_type is_overloaded_impl(...);
51
+
52
+ template <typename F, typename R, typename... A>
53
+ static R return_type_impl(R (F::*)(A...));
54
+
55
+ template <typename F, typename R, typename... A>
56
+ static R return_type_impl(R (F::*)(A...) const);
57
+
58
+ template <std::size_t I, typename F, typename R, typename... A>
59
+ static typename std::tuple_element<I, std::tuple<A...>>::type argument_type_impl(
60
+ R (F::*)(A...));
61
+
62
+ template <std::size_t I, typename F, typename R, typename... A>
63
+ static typename std::tuple_element<I, std::tuple<A...>>::type argument_type_impl(
64
+ R (F::*)(A...) const);
65
+
66
+ template <std::size_t I, typename F, typename R, typename... A>
67
+ static typename std::tuple_element<I, std::tuple<A...>>::type argument_type_impl(
68
+ R (F::*)(A...) &&);
69
+
70
+ template <typename F, typename R, typename... A>
71
+ static std::integral_constant<int, sizeof...(A)> argument_count_impl(R (F::*)(A...));
72
+
73
+ template <typename F, typename R, typename... A>
74
+ static std::integral_constant<int, sizeof...(A)> argument_count_impl(R (F::*)(A...)
75
+ const);
76
+
77
+ template <typename F, typename R, typename... A>
78
+ static std::integral_constant<int, sizeof...(A)> argument_count_impl(R (F::*)(A...) &&);
79
+
80
+ /// bool constant indicating whether F is a callable with more than one possible
81
+ /// signature. Will be true_type for objects which define multiple operator() or which
82
+ /// define a template operator()
83
+ template <typename F>
84
+ using is_overloaded =
85
+ decltype(is_overloaded_impl<typename std::decay<F>::type>(NULLPTR));
86
+
87
+ template <typename F, typename T = void>
88
+ using enable_if_overloaded = typename std::enable_if<is_overloaded<F>::value, T>::type;
89
+
90
+ template <typename F, typename T = void>
91
+ using disable_if_overloaded =
92
+ typename std::enable_if<!is_overloaded<F>::value, T>::type;
93
+
94
+ /// If F is not overloaded, the argument types of its call operator can be
95
+ /// extracted via call_traits::argument_type<Index, F>
96
+ template <std::size_t I, typename F>
97
+ using argument_type = decltype(argument_type_impl<I>(&std::decay<F>::type::operator()));
98
+
99
+ template <typename F>
100
+ using argument_count = decltype(argument_count_impl(&std::decay<F>::type::operator()));
101
+
102
+ template <typename F>
103
+ using return_type = decltype(return_type_impl(&std::decay<F>::type::operator()));
104
+
105
+ template <typename F, typename T, typename RT = T>
106
+ using enable_if_return =
107
+ typename std::enable_if<std::is_same<return_type<F>, T>::value, RT>;
108
+
109
+ template <typename T, typename R = void>
110
+ using enable_if_empty = typename std::enable_if<std::is_same<T, Empty>::value, R>::type;
111
+
112
+ template <typename T, typename R = void>
113
+ using enable_if_not_empty =
114
+ typename std::enable_if<!std::is_same<T, Empty>::value, R>::type;
115
+ };
116
+
117
+ /// A type erased callable object which may only be invoked once.
118
+ /// It can be constructed from any lambda which matches the provided call signature.
119
+ /// Invoking it results in destruction of the lambda, freeing any state/references
120
+ /// immediately. Invoking a default constructed FnOnce or one which has already been
121
+ /// invoked will segfault.
122
+ template <typename Signature>
123
+ class FnOnce;
124
+
125
+ template <typename R, typename... A>
126
+ class FnOnce<R(A...)> {
127
+ public:
128
+ FnOnce() = default;
129
+
130
+ template <typename Fn,
131
+ typename = typename std::enable_if<std::is_convertible<
132
+ decltype(std::declval<Fn&&>()(std::declval<A>()...)), R>::value>::type>
133
+ FnOnce(Fn fn) : impl_(new FnImpl<Fn>(std::move(fn))) { // NOLINT runtime/explicit
134
+ }
135
+
136
+ explicit operator bool() const { return impl_ != NULLPTR; }
137
+
138
+ R operator()(A... a) && {
139
+ auto bye = std::move(impl_);
140
+ return bye->invoke(std::forward<A&&>(a)...);
141
+ }
142
+
143
+ private:
144
+ struct Impl {
145
+ virtual ~Impl() = default;
146
+ virtual R invoke(A&&... a) = 0;
147
+ };
148
+
149
+ template <typename Fn>
150
+ struct FnImpl : Impl {
151
+ explicit FnImpl(Fn fn) : fn_(std::move(fn)) {}
152
+ R invoke(A&&... a) override { return std::move(fn_)(std::forward<A&&>(a)...); }
153
+ Fn fn_;
154
+ };
155
+
156
+ std::unique_ptr<Impl> impl_;
157
+ };
158
+
159
+ } // namespace internal
160
+ } // namespace arrow
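
A brief sketch of FnOnce as declared above: the wrapper stores a move-only callable, must be invoked as an rvalue, and releases the captured state right after the call. RunOnceExample and its payload are illustrative.

  #include <iostream>
  #include <string>
  #include <utility>

  #include "arrow/util/functional.h"

  void RunOnceExample() {
    std::string payload = "consumed exactly once";
    arrow::internal::FnOnce<void(int)> task =
        [p = std::move(payload)](int code) { std::cout << code << ": " << p << "\n"; };

    // Invocation requires an rvalue; the captured state is destroyed afterwards.
    std::move(task)(42);
    // `task` must not be invoked again past this point.
  }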
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/hashing.h ADDED
@@ -0,0 +1,944 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ // Private header, not to be exported
19
+
20
+ #pragma once
21
+
22
+ #include <algorithm>
23
+ #include <cassert>
24
+ #include <cmath>
25
+ #include <cstdint>
26
+ #include <cstring>
27
+ #include <limits>
28
+ #include <memory>
29
+ #include <string>
30
+ #include <type_traits>
31
+ #include <utility>
32
+ #include <vector>
33
+
34
+ #include "arrow/array/builder_binary.h"
35
+ #include "arrow/buffer_builder.h"
36
+ #include "arrow/result.h"
37
+ #include "arrow/status.h"
38
+ #include "arrow/type_fwd.h"
39
+ #include "arrow/type_traits.h"
40
+ #include "arrow/util/bit_util.h"
41
+ #include "arrow/util/bitmap_builders.h"
42
+ #include "arrow/util/endian.h"
43
+ #include "arrow/util/logging.h"
44
+ #include "arrow/util/macros.h"
45
+ #include "arrow/util/ubsan.h"
46
+
47
+ #define XXH_INLINE_ALL
48
+
49
+ #include "arrow/vendored/xxhash.h" // IWYU pragma: keep
50
+
51
+ namespace arrow {
52
+ namespace internal {
53
+
54
+ // XXX would it help to have a 32-bit hash value on large datasets?
55
+ typedef uint64_t hash_t;
56
+
57
+ // Notes about the choice of a hash function.
58
+ // - XXH3 is extremely fast on most data sizes, from small to huge;
59
+ // faster even than HW CRC-based hashing schemes
60
+ // - our custom hash function for tiny values (< 16 bytes) is still
61
+ // significantly faster (~30%), at least on this machine and compiler
62
+
63
+ template <uint64_t AlgNum>
64
+ inline hash_t ComputeStringHash(const void* data, int64_t length);
65
+
66
+ /// \brief A hash function for bitmaps that can handle offsets and lengths in
67
+ /// terms of number of bits. The hash only depends on the bits actually hashed.
68
+ ///
69
+ /// It's the caller's responsibility to ensure that bits_offset + num_bits are
70
+ /// readable from the bitmap.
71
+ ///
72
+ /// \pre bits_offset >= 0
73
+ /// \pre num_bits >= 0
74
+ /// \pre (bits_offset + num_bits + 7) / 8 <= readable length in bytes from bitmap
75
+ ///
76
+ /// \param bitmap The pointer to the bitmap.
77
+ /// \param seed The seed for the hash function (useful when chaining hash functions).
78
+ /// \param bits_offset The offset in bits relative to the start of the bitmap.
79
+ /// \param num_bits The number of bits after the offset to be hashed.
80
+ ARROW_EXPORT hash_t ComputeBitmapHash(const uint8_t* bitmap, hash_t seed,
81
+ int64_t bits_offset, int64_t num_bits);
82
+
83
+ template <typename Scalar, uint64_t AlgNum>
84
+ struct ScalarHelperBase {
85
+ static bool CompareScalars(Scalar u, Scalar v) { return u == v; }
86
+
87
+ static hash_t ComputeHash(const Scalar& value) {
88
+ // Generic hash computation for scalars. Simply apply the string hash
89
+ // to the bit representation of the value.
90
+
91
+ // XXX in the case of FP values, we'd like equal values to have the same hash,
92
+ // even if they have different bit representations...
93
+ return ComputeStringHash<AlgNum>(&value, sizeof(value));
94
+ }
95
+ };
96
+
97
+ template <typename Scalar, uint64_t AlgNum = 0, typename Enable = void>
98
+ struct ScalarHelper : public ScalarHelperBase<Scalar, AlgNum> {};
99
+
100
+ template <typename Scalar, uint64_t AlgNum>
101
+ struct ScalarHelper<Scalar, AlgNum, enable_if_t<std::is_integral<Scalar>::value>>
102
+ : public ScalarHelperBase<Scalar, AlgNum> {
103
+ // ScalarHelper specialization for integers
104
+
105
+ static hash_t ComputeHash(const Scalar& value) {
106
+ // Faster hash computation for integers.
107
+
108
+ // Two of xxhash's prime multipliers (which are chosen for their
109
+ // bit dispersion properties)
110
+ static constexpr uint64_t multipliers[] = {11400714785074694791ULL,
111
+ 14029467366897019727ULL};
112
+
113
+ // Multiplying by the prime number mixes the low bits into the high bits,
114
+ // then byte-swapping (which is a single CPU instruction) allows the
115
+ // combined high and low bits to participate in the initial hash table index.
116
+ auto h = static_cast<hash_t>(value);
117
+ return bit_util::ByteSwap(multipliers[AlgNum] * h);
118
+ }
119
+ };
120
+
121
+ template <typename Scalar, uint64_t AlgNum>
122
+ struct ScalarHelper<Scalar, AlgNum,
123
+ enable_if_t<std::is_same<std::string_view, Scalar>::value>>
124
+ : public ScalarHelperBase<Scalar, AlgNum> {
125
+ // ScalarHelper specialization for std::string_view
126
+
127
+ static hash_t ComputeHash(std::string_view value) {
128
+ return ComputeStringHash<AlgNum>(value.data(), static_cast<int64_t>(value.size()));
129
+ }
130
+ };
131
+
132
+ template <typename Scalar, uint64_t AlgNum>
133
+ struct ScalarHelper<Scalar, AlgNum, enable_if_t<std::is_floating_point<Scalar>::value>>
134
+ : public ScalarHelperBase<Scalar, AlgNum> {
135
+ // ScalarHelper specialization for reals
136
+
137
+ static bool CompareScalars(Scalar u, Scalar v) {
138
+ if (std::isnan(u)) {
139
+ // XXX should we do a bit-precise comparison?
140
+ return std::isnan(v);
141
+ }
142
+ return u == v;
143
+ }
144
+ };
145
+
146
+ template <uint64_t AlgNum = 0>
147
+ hash_t ComputeStringHash(const void* data, int64_t length) {
148
+ if (ARROW_PREDICT_TRUE(length <= 16)) {
149
+ // Specialize for small hash strings, as they are quite common as
150
+ // hash table keys. Even XXH3 isn't quite as fast.
151
+ auto p = reinterpret_cast<const uint8_t*>(data);
152
+ auto n = static_cast<uint32_t>(length);
153
+ if (n <= 8) {
154
+ if (n <= 3) {
155
+ if (n == 0) {
156
+ return 1U;
157
+ }
158
+ uint32_t x = (n << 24) ^ (p[0] << 16) ^ (p[n / 2] << 8) ^ p[n - 1];
159
+ return ScalarHelper<uint32_t, AlgNum>::ComputeHash(x);
160
+ }
161
+ // 4 <= length <= 8
162
+ // We can read the string as two overlapping 32-bit ints, apply
163
+ // different hash functions to each of them in parallel, then XOR
164
+ // the results
165
+ uint32_t x, y;
166
+ hash_t hx, hy;
167
+ x = util::SafeLoadAs<uint32_t>(p + n - 4);
168
+ y = util::SafeLoadAs<uint32_t>(p);
169
+ hx = ScalarHelper<uint32_t, AlgNum>::ComputeHash(x);
170
+ hy = ScalarHelper<uint32_t, AlgNum ^ 1>::ComputeHash(y);
171
+ return n ^ hx ^ hy;
172
+ }
173
+ // 9 <= length <= 16
174
+ // Apply the same principle as above
175
+ uint64_t x, y;
176
+ hash_t hx, hy;
177
+ x = util::SafeLoadAs<uint64_t>(p + n - 8);
178
+ y = util::SafeLoadAs<uint64_t>(p);
179
+ hx = ScalarHelper<uint64_t, AlgNum>::ComputeHash(x);
180
+ hy = ScalarHelper<uint64_t, AlgNum ^ 1>::ComputeHash(y);
181
+ return n ^ hx ^ hy;
182
+ }
183
+
184
+ #if XXH3_SECRET_SIZE_MIN != 136
185
+ #error XXH3_SECRET_SIZE_MIN changed, please fix kXxh3Secrets
186
+ #endif
187
+
188
+ // XXH3_64bits_withSeed generates a secret based on the seed, which is too slow.
189
+ // Instead, we use hard-coded random secrets. To maximize cache efficiency,
190
+ // they reuse the same memory area.
191
+ static constexpr unsigned char kXxh3Secrets[XXH3_SECRET_SIZE_MIN + 1] = {
192
+ 0xe7, 0x8b, 0x13, 0xf9, 0xfc, 0xb5, 0x8e, 0xef, 0x81, 0x48, 0x2c, 0xbf, 0xf9, 0x9f,
193
+ 0xc1, 0x1e, 0x43, 0x6d, 0xbf, 0xa6, 0x6d, 0xb5, 0x72, 0xbc, 0x97, 0xd8, 0x61, 0x24,
194
+ 0x0f, 0x12, 0xe3, 0x05, 0x21, 0xf7, 0x5c, 0x66, 0x67, 0xa5, 0x65, 0x03, 0x96, 0x26,
195
+ 0x69, 0xd8, 0x29, 0x20, 0xf8, 0xc7, 0xb0, 0x3d, 0xdd, 0x7d, 0x18, 0xa0, 0x60, 0x75,
196
+ 0x92, 0xa4, 0xce, 0xba, 0xc0, 0x77, 0xf4, 0xac, 0xb7, 0x03, 0x53, 0xf0, 0x98, 0xce,
197
+ 0xe6, 0x2b, 0x20, 0xc7, 0x82, 0x91, 0xab, 0xbf, 0x68, 0x5c, 0x62, 0x4d, 0x33, 0xa3,
198
+ 0xe1, 0xb3, 0xff, 0x97, 0x54, 0x4c, 0x44, 0x34, 0xb5, 0xb9, 0x32, 0x4c, 0x75, 0x42,
199
+ 0x89, 0x53, 0x94, 0xd4, 0x9f, 0x2b, 0x76, 0x4d, 0x4e, 0xe6, 0xfa, 0x15, 0x3e, 0xc1,
200
+ 0xdb, 0x71, 0x4b, 0x2c, 0x94, 0xf5, 0xfc, 0x8c, 0x89, 0x4b, 0xfb, 0xc1, 0x82, 0xa5,
201
+ 0x6a, 0x53, 0xf9, 0x4a, 0xba, 0xce, 0x1f, 0xc0, 0x97, 0x1a, 0x87};
202
+
203
+ static_assert(AlgNum < 2, "AlgNum too large");
204
+ static constexpr auto secret = kXxh3Secrets + AlgNum;
205
+ return XXH3_64bits_withSecret(data, static_cast<size_t>(length), secret,
206
+ XXH3_SECRET_SIZE_MIN);
207
+ }
208
+
209
+ // XXX add a HashEq<ArrowType> struct with both hash and compare functions?
210
+
211
+ // ----------------------------------------------------------------------
212
+ // An open-addressing insert-only hash table (no deletes)
213
+
214
+ template <typename Payload>
215
+ class HashTable {
216
+ public:
217
+ static constexpr hash_t kSentinel = 0ULL;
218
+ static constexpr int64_t kLoadFactor = 2UL;
219
+
220
+ struct Entry {
221
+ hash_t h;
222
+ Payload payload;
223
+
224
+ // An entry is valid if the hash is different from the sentinel value
225
+ operator bool() const { return h != kSentinel; }
226
+ };
227
+
228
+ HashTable(MemoryPool* pool, uint64_t capacity) : entries_builder_(pool) {
229
+ DCHECK_NE(pool, nullptr);
230
+ // Minimum of 32 elements
231
+ capacity = std::max<uint64_t>(capacity, 32UL);
232
+ capacity_ = bit_util::NextPower2(capacity);
233
+ capacity_mask_ = capacity_ - 1;
234
+ size_ = 0;
235
+
236
+ DCHECK_OK(UpsizeBuffer(capacity_));
237
+ }
238
+
239
+ // Lookup with non-linear probing
240
+ // cmp_func should have signature bool(const Payload*).
241
+ // Return an (Entry*, found) pair.
242
+ template <typename CmpFunc>
243
+ std::pair<Entry*, bool> Lookup(hash_t h, CmpFunc&& cmp_func) {
244
+ auto p = Lookup<DoCompare, CmpFunc>(h, entries_, capacity_mask_,
245
+ std::forward<CmpFunc>(cmp_func));
246
+ return {&entries_[p.first], p.second};
247
+ }
248
+
249
+ template <typename CmpFunc>
250
+ std::pair<const Entry*, bool> Lookup(hash_t h, CmpFunc&& cmp_func) const {
251
+ auto p = Lookup<DoCompare, CmpFunc>(h, entries_, capacity_mask_,
252
+ std::forward<CmpFunc>(cmp_func));
253
+ return {&entries_[p.first], p.second};
254
+ }
255
+
256
+ Status Insert(Entry* entry, hash_t h, const Payload& payload) {
257
+ // Ensure entry is empty before inserting
258
+ assert(!*entry);
259
+ entry->h = FixHash(h);
260
+ entry->payload = payload;
261
+ ++size_;
262
+
263
+ if (ARROW_PREDICT_FALSE(NeedUpsizing())) {
264
+ // Resize less frequently since it is expensive
265
+ return Upsize(capacity_ * kLoadFactor * 2);
266
+ }
267
+ return Status::OK();
268
+ }
269
+
270
+ uint64_t size() const { return size_; }
271
+
272
+ // Visit all non-empty entries in the table
273
+ // The visit_func should have signature void(const Entry*)
274
+ template <typename VisitFunc>
275
+ void VisitEntries(VisitFunc&& visit_func) const {
276
+ for (uint64_t i = 0; i < capacity_; i++) {
277
+ const auto& entry = entries_[i];
278
+ if (entry) {
279
+ visit_func(&entry);
280
+ }
281
+ }
282
+ }
283
+
284
+ protected:
285
+ // NoCompare is for when the value is known not to exist in the table
286
+ enum CompareKind { DoCompare, NoCompare };
287
+
288
+ // The workhorse lookup function
289
+ template <CompareKind CKind, typename CmpFunc>
290
+ std::pair<uint64_t, bool> Lookup(hash_t h, const Entry* entries, uint64_t size_mask,
291
+ CmpFunc&& cmp_func) const {
292
+ static constexpr uint8_t perturb_shift = 5;
293
+
294
+ uint64_t index, perturb;
295
+ const Entry* entry;
296
+
297
+ h = FixHash(h);
298
+ index = h & size_mask;
299
+ perturb = (h >> perturb_shift) + 1U;
300
+
301
+ while (true) {
302
+ entry = &entries[index];
303
+ if (CompareEntry<CKind, CmpFunc>(h, entry, std::forward<CmpFunc>(cmp_func))) {
304
+ // Found
305
+ return {index, true};
306
+ }
307
+ if (entry->h == kSentinel) {
308
+ // Empty slot
309
+ return {index, false};
310
+ }
311
+
312
+ // Perturbation logic inspired by CPython's set / dict object.
313
+ // The goal is that all 64 bits of the unmasked hash value eventually
314
+ // participate in the probing sequence, to minimize clustering.
315
+ index = (index + perturb) & size_mask;
316
+ perturb = (perturb >> perturb_shift) + 1U;
317
+ }
318
+ }
319
+
320
+ template <CompareKind CKind, typename CmpFunc>
321
+ bool CompareEntry(hash_t h, const Entry* entry, CmpFunc&& cmp_func) const {
322
+ if (CKind == NoCompare) {
323
+ return false;
324
+ } else {
325
+ return entry->h == h && cmp_func(&entry->payload);
326
+ }
327
+ }
328
+
329
+ bool NeedUpsizing() const {
330
+ // Keep the load factor <= 1/2
331
+ return size_ * kLoadFactor >= capacity_;
332
+ }
333
+
334
+ Status UpsizeBuffer(uint64_t capacity) {
335
+ RETURN_NOT_OK(entries_builder_.Resize(capacity));
336
+ entries_ = entries_builder_.mutable_data();
337
+ memset(static_cast<void*>(entries_), 0, capacity * sizeof(Entry));
338
+
339
+ return Status::OK();
340
+ }
341
+
342
+ Status Upsize(uint64_t new_capacity) {
343
+ assert(new_capacity > capacity_);
344
+ uint64_t new_mask = new_capacity - 1;
345
+ assert((new_capacity & new_mask) == 0); // it's a power of two
346
+
347
+ // Stash old entries and seal builder, effectively resetting the Buffer
348
+ const Entry* old_entries = entries_;
349
+ ARROW_ASSIGN_OR_RAISE(auto previous, entries_builder_.FinishWithLength(capacity_));
350
+ // Allocate new buffer
351
+ RETURN_NOT_OK(UpsizeBuffer(new_capacity));
352
+
353
+ for (uint64_t i = 0; i < capacity_; i++) {
354
+ const auto& entry = old_entries[i];
355
+ if (entry) {
356
+ // Dummy compare function will not be called
357
+ auto p = Lookup<NoCompare>(entry.h, entries_, new_mask,
358
+ [](const Payload*) { return false; });
359
+ // Lookup<NoCompare> (and CompareEntry<NoCompare>) ensure that an
360
+ // empty slot is always returned
361
+ assert(!p.second);
362
+ entries_[p.first] = entry;
363
+ }
364
+ }
365
+ capacity_ = new_capacity;
366
+ capacity_mask_ = new_mask;
367
+
368
+ return Status::OK();
369
+ }
370
+
371
+ hash_t FixHash(hash_t h) const { return (h == kSentinel) ? 42U : h; }
372
+
373
+ // The number of slots available in the hash table array.
374
+ uint64_t capacity_;
375
+ uint64_t capacity_mask_;
376
+ // The number of used slots in the hash table array.
377
+ uint64_t size_;
378
+
379
+ Entry* entries_;
380
+ TypedBufferBuilder<Entry> entries_builder_;
381
+ };
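A rough usage sketch of the table above (not part of the header; the ExamplePayload struct, the capacity of 32, and the arrow/util/hashing.h include path are assumptions): look up a hash with a comparator, then insert into the returned slot when the key is absent, which is the same pattern the memo tables below follow.

#include "arrow/memory_pool.h"
#include "arrow/status.h"
#include "arrow/util/hashing.h"  // assumed install path of the header above

struct ExamplePayload { int32_t memo_index; };  // hypothetical payload type

arrow::Status HashTableSketch() {
  using arrow::internal::HashTable;
  HashTable<ExamplePayload> table(arrow::default_memory_pool(), /*capacity=*/32);
  arrow::internal::hash_t h = arrow::internal::ComputeStringHash<0>("key", 3);
  // The comparator receives candidate payloads; nothing can match in a fresh table.
  auto p = table.Lookup(h, [](const ExamplePayload*) { return false; });
  if (!p.second) {  // not found: p.first points at an empty slot
    ARROW_RETURN_NOT_OK(table.Insert(p.first, h, ExamplePayload{0}));
  }
  return arrow::Status::OK();
}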
382
+
383
+ // XXX typedef memo_index_t int32_t ?
384
+
385
+ constexpr int32_t kKeyNotFound = -1;
386
+
387
+ // ----------------------------------------------------------------------
388
+ // A base class for memoization table.
389
+
390
+ class MemoTable {
391
+ public:
392
+ virtual ~MemoTable() = default;
393
+
394
+ virtual int32_t size() const = 0;
395
+ };
396
+
397
+ // ----------------------------------------------------------------------
398
+ // A memoization table for memory-cheap scalar values.
399
+
400
+ // The memoization table remembers and allows looking up the insertion
401
+ // index for each key.
402
+
403
+ template <typename Scalar, template <class> class HashTableTemplateType = HashTable>
404
+ class ScalarMemoTable : public MemoTable {
405
+ public:
406
+ explicit ScalarMemoTable(MemoryPool* pool, int64_t entries = 0)
407
+ : hash_table_(pool, static_cast<uint64_t>(entries)) {}
408
+
409
+ int32_t Get(const Scalar& value) const {
410
+ auto cmp_func = [value](const Payload* payload) -> bool {
411
+ return ScalarHelper<Scalar, 0>::CompareScalars(payload->value, value);
412
+ };
413
+ hash_t h = ComputeHash(value);
414
+ auto p = hash_table_.Lookup(h, cmp_func);
415
+ if (p.second) {
416
+ return p.first->payload.memo_index;
417
+ } else {
418
+ return kKeyNotFound;
419
+ }
420
+ }
421
+
422
+ template <typename Func1, typename Func2>
423
+ Status GetOrInsert(const Scalar& value, Func1&& on_found, Func2&& on_not_found,
424
+ int32_t* out_memo_index) {
425
+ auto cmp_func = [value](const Payload* payload) -> bool {
426
+ return ScalarHelper<Scalar, 0>::CompareScalars(value, payload->value);
427
+ };
428
+ hash_t h = ComputeHash(value);
429
+ auto p = hash_table_.Lookup(h, cmp_func);
430
+ int32_t memo_index;
431
+ if (p.second) {
432
+ memo_index = p.first->payload.memo_index;
433
+ on_found(memo_index);
434
+ } else {
435
+ memo_index = size();
436
+ RETURN_NOT_OK(hash_table_.Insert(p.first, h, {value, memo_index}));
437
+ on_not_found(memo_index);
438
+ }
439
+ *out_memo_index = memo_index;
440
+ return Status::OK();
441
+ }
442
+
443
+ Status GetOrInsert(const Scalar& value, int32_t* out_memo_index) {
444
+ return GetOrInsert(
445
+ value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
446
+ }
447
+
448
+ int32_t GetNull() const { return null_index_; }
449
+
450
+ template <typename Func1, typename Func2>
451
+ int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
452
+ int32_t memo_index = GetNull();
453
+ if (memo_index != kKeyNotFound) {
454
+ on_found(memo_index);
455
+ } else {
456
+ null_index_ = memo_index = size();
457
+ on_not_found(memo_index);
458
+ }
459
+ return memo_index;
460
+ }
461
+
462
+ int32_t GetOrInsertNull() {
463
+ return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
464
+ }
465
+
466
+ // The number of entries in the memo table, plus 1 if null was added.
467
+ // (which is also 1 + the largest memo index)
468
+ int32_t size() const override {
469
+ return static_cast<int32_t>(hash_table_.size()) + (GetNull() != kKeyNotFound);
470
+ }
471
+
472
+ // Copy values starting from index `start` into `out_data`
473
+ void CopyValues(int32_t start, Scalar* out_data) const {
474
+ hash_table_.VisitEntries([=](const HashTableEntry* entry) {
475
+ int32_t index = entry->payload.memo_index - start;
476
+ if (index >= 0) {
477
+ out_data[index] = entry->payload.value;
478
+ }
479
+ });
480
+ // Zero-initialize the null entry
481
+ if (null_index_ != kKeyNotFound) {
482
+ int32_t index = null_index_ - start;
483
+ if (index >= 0) {
484
+ out_data[index] = Scalar{};
485
+ }
486
+ }
487
+ }
488
+
489
+ void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); }
490
+
491
+ protected:
492
+ struct Payload {
493
+ Scalar value;
494
+ int32_t memo_index;
495
+ };
496
+
497
+ using HashTableType = HashTableTemplateType<Payload>;
498
+ using HashTableEntry = typename HashTableType::Entry;
499
+ HashTableType hash_table_;
500
+ int32_t null_index_ = kKeyNotFound;
501
+
502
+ hash_t ComputeHash(const Scalar& value) const {
503
+ return ScalarHelper<Scalar, 0>::ComputeHash(value);
504
+ }
505
+
506
+ public:
507
+ // defined here so that `HashTableType` is visible
508
+ // Merge entries from `other_table` into `this->hash_table_`.
509
+ Status MergeTable(const ScalarMemoTable& other_table) {
510
+ const HashTableType& other_hashtable = other_table.hash_table_;
511
+
512
+ other_hashtable.VisitEntries([this](const HashTableEntry* other_entry) {
513
+ int32_t unused;
514
+ DCHECK_OK(this->GetOrInsert(other_entry->payload.value, &unused));
515
+ });
516
+ // TODO: ARROW-17074 - implement proper error handling
517
+ return Status::OK();
518
+ }
519
+ };
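As a hedged illustration of how this class is driven (the wrapper function, the input vector, and the include path are assumptions, not part of the header), the sketch below dictionary-encodes a batch of int64 values: equal values map to the same memo index, and CopyValues() recovers the dictionary in insertion order.

#include <cstdint>
#include <vector>
#include "arrow/memory_pool.h"
#include "arrow/status.h"
#include "arrow/util/hashing.h"  // assumed install path of the header above

arrow::Status MemoizeInt64s(const std::vector<int64_t>& values) {
  arrow::internal::ScalarMemoTable<int64_t> memo(arrow::default_memory_pool());
  std::vector<int32_t> indices;
  for (int64_t v : values) {
    int32_t index;
    ARROW_RETURN_NOT_OK(memo.GetOrInsert(v, &index));  // equal values share an index
    indices.push_back(index);
  }
  std::vector<int64_t> dictionary(static_cast<size_t>(memo.size()));
  memo.CopyValues(dictionary.data());  // dictionary[i] is the value behind memo index i
  return arrow::Status::OK();
}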
520
+
521
+ // ----------------------------------------------------------------------
522
+ // A memoization table for small scalar values, using direct indexing
523
+
524
+ template <typename Scalar, typename Enable = void>
525
+ struct SmallScalarTraits {};
526
+
527
+ template <>
528
+ struct SmallScalarTraits<bool> {
529
+ static constexpr int32_t cardinality = 2;
530
+
531
+ static uint32_t AsIndex(bool value) { return value ? 1 : 0; }
532
+ };
533
+
534
+ template <typename Scalar>
535
+ struct SmallScalarTraits<Scalar, enable_if_t<std::is_integral<Scalar>::value>> {
536
+ using Unsigned = typename std::make_unsigned<Scalar>::type;
537
+
538
+ static constexpr int32_t cardinality = 1U + std::numeric_limits<Unsigned>::max();
539
+
540
+ static uint32_t AsIndex(Scalar value) { return static_cast<Unsigned>(value); }
541
+ };
542
+
543
+ template <typename Scalar, template <class> class HashTableTemplateType = HashTable>
544
+ class SmallScalarMemoTable : public MemoTable {
545
+ public:
546
+ explicit SmallScalarMemoTable(MemoryPool* pool, int64_t entries = 0) {
547
+ std::fill(value_to_index_, value_to_index_ + cardinality + 1, kKeyNotFound);
548
+ index_to_value_.reserve(cardinality);
549
+ }
550
+
551
+ int32_t Get(const Scalar value) const {
552
+ auto value_index = AsIndex(value);
553
+ return value_to_index_[value_index];
554
+ }
555
+
556
+ template <typename Func1, typename Func2>
557
+ Status GetOrInsert(const Scalar value, Func1&& on_found, Func2&& on_not_found,
558
+ int32_t* out_memo_index) {
559
+ auto value_index = AsIndex(value);
560
+ auto memo_index = value_to_index_[value_index];
561
+ if (memo_index == kKeyNotFound) {
562
+ memo_index = static_cast<int32_t>(index_to_value_.size());
563
+ index_to_value_.push_back(value);
564
+ value_to_index_[value_index] = memo_index;
565
+ DCHECK_LT(memo_index, cardinality + 1);
566
+ on_not_found(memo_index);
567
+ } else {
568
+ on_found(memo_index);
569
+ }
570
+ *out_memo_index = memo_index;
571
+ return Status::OK();
572
+ }
573
+
574
+ Status GetOrInsert(const Scalar value, int32_t* out_memo_index) {
575
+ return GetOrInsert(
576
+ value, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
577
+ }
578
+
579
+ int32_t GetNull() const { return value_to_index_[cardinality]; }
580
+
581
+ template <typename Func1, typename Func2>
582
+ int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
583
+ auto memo_index = GetNull();
584
+ if (memo_index == kKeyNotFound) {
585
+ memo_index = value_to_index_[cardinality] = size();
586
+ index_to_value_.push_back(0);
587
+ on_not_found(memo_index);
588
+ } else {
589
+ on_found(memo_index);
590
+ }
591
+ return memo_index;
592
+ }
593
+
594
+ int32_t GetOrInsertNull() {
595
+ return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
596
+ }
597
+
598
+ // The number of entries in the memo table
599
+ // (which is also 1 + the largest memo index)
600
+ int32_t size() const override { return static_cast<int32_t>(index_to_value_.size()); }
601
+
602
+ // Merge entries from `other_table` into `this`.
603
+ Status MergeTable(const SmallScalarMemoTable& other_table) {
604
+ for (const Scalar& other_val : other_table.index_to_value_) {
605
+ int32_t unused;
606
+ RETURN_NOT_OK(this->GetOrInsert(other_val, &unused));
607
+ }
608
+ return Status::OK();
609
+ }
610
+
611
+ // Copy values starting from index `start` into `out_data`
612
+ void CopyValues(int32_t start, Scalar* out_data) const {
613
+ DCHECK_GE(start, 0);
614
+ DCHECK_LE(static_cast<size_t>(start), index_to_value_.size());
615
+ int64_t offset = start * static_cast<int32_t>(sizeof(Scalar));
616
+ memcpy(out_data, index_to_value_.data() + offset, (size() - start) * sizeof(Scalar));
617
+ }
618
+
619
+ void CopyValues(Scalar* out_data) const { CopyValues(0, out_data); }
620
+
621
+ const std::vector<Scalar>& values() const { return index_to_value_; }
622
+
623
+ protected:
624
+ static constexpr auto cardinality = SmallScalarTraits<Scalar>::cardinality;
625
+ static_assert(cardinality <= 256, "cardinality too large for direct-addressed table");
626
+
627
+ uint32_t AsIndex(Scalar value) const {
628
+ return SmallScalarTraits<Scalar>::AsIndex(value);
629
+ }
630
+
631
+ // The last index is reserved for the null element.
632
+ int32_t value_to_index_[cardinality + 1];
633
+ std::vector<Scalar> index_to_value_;
634
+ };
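A small sketch under the same caveats (the wrapper function is an assumption): for a uint8_t key the table is a 257-slot direct-lookup array (256 possible values plus the reserved null slot), so no hashing is involved at all.

#include <cstdint>
#include "arrow/memory_pool.h"
#include "arrow/status.h"
#include "arrow/util/hashing.h"  // assumed install path of the header above

arrow::Status SmallMemoSketch() {
  arrow::internal::SmallScalarMemoTable<uint8_t> memo(arrow::default_memory_pool());
  int32_t index;
  ARROW_RETURN_NOT_OK(memo.GetOrInsert(static_cast<uint8_t>(7), &index));  // index == 0
  ARROW_RETURN_NOT_OK(memo.GetOrInsert(static_cast<uint8_t>(7), &index));  // still 0
  int32_t null_index = memo.GetOrInsertNull();  // null gets the next free index
  (void)null_index;
  return arrow::Status::OK();
}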
635
+
636
+ // ----------------------------------------------------------------------
637
+ // A memoization table for variable-sized binary data.
638
+
639
+ template <typename BinaryBuilderT>
640
+ class BinaryMemoTable : public MemoTable {
641
+ public:
642
+ using builder_offset_type = typename BinaryBuilderT::offset_type;
643
+ explicit BinaryMemoTable(MemoryPool* pool, int64_t entries = 0,
644
+ int64_t values_size = -1)
645
+ : hash_table_(pool, static_cast<uint64_t>(entries)), binary_builder_(pool) {
646
+ const int64_t data_size = (values_size < 0) ? entries * 4 : values_size;
647
+ DCHECK_OK(binary_builder_.Resize(entries));
648
+ DCHECK_OK(binary_builder_.ReserveData(data_size));
649
+ }
650
+
651
+ int32_t Get(const void* data, builder_offset_type length) const {
652
+ hash_t h = ComputeStringHash<0>(data, length);
653
+ auto p = Lookup(h, data, length);
654
+ if (p.second) {
655
+ return p.first->payload.memo_index;
656
+ } else {
657
+ return kKeyNotFound;
658
+ }
659
+ }
660
+
661
+ int32_t Get(std::string_view value) const {
662
+ return Get(value.data(), static_cast<builder_offset_type>(value.length()));
663
+ }
664
+
665
+ template <typename Func1, typename Func2>
666
+ Status GetOrInsert(const void* data, builder_offset_type length, Func1&& on_found,
667
+ Func2&& on_not_found, int32_t* out_memo_index) {
668
+ hash_t h = ComputeStringHash<0>(data, length);
669
+ auto p = Lookup(h, data, length);
670
+ int32_t memo_index;
671
+ if (p.second) {
672
+ memo_index = p.first->payload.memo_index;
673
+ on_found(memo_index);
674
+ } else {
675
+ memo_index = size();
676
+ // Insert string value
677
+ RETURN_NOT_OK(binary_builder_.Append(static_cast<const char*>(data), length));
678
+ // Insert hash entry
679
+ RETURN_NOT_OK(
680
+ hash_table_.Insert(const_cast<HashTableEntry*>(p.first), h, {memo_index}));
681
+
682
+ on_not_found(memo_index);
683
+ }
684
+ *out_memo_index = memo_index;
685
+ return Status::OK();
686
+ }
687
+
688
+ template <typename Func1, typename Func2>
689
+ Status GetOrInsert(std::string_view value, Func1&& on_found, Func2&& on_not_found,
690
+ int32_t* out_memo_index) {
691
+ return GetOrInsert(value.data(), static_cast<builder_offset_type>(value.length()),
692
+ std::forward<Func1>(on_found), std::forward<Func2>(on_not_found),
693
+ out_memo_index);
694
+ }
695
+
696
+ Status GetOrInsert(const void* data, builder_offset_type length,
697
+ int32_t* out_memo_index) {
698
+ return GetOrInsert(
699
+ data, length, [](int32_t i) {}, [](int32_t i) {}, out_memo_index);
700
+ }
701
+
702
+ Status GetOrInsert(std::string_view value, int32_t* out_memo_index) {
703
+ return GetOrInsert(value.data(), static_cast<builder_offset_type>(value.length()),
704
+ out_memo_index);
705
+ }
706
+
707
+ int32_t GetNull() const { return null_index_; }
708
+
709
+ template <typename Func1, typename Func2>
710
+ int32_t GetOrInsertNull(Func1&& on_found, Func2&& on_not_found) {
711
+ int32_t memo_index = GetNull();
712
+ if (memo_index == kKeyNotFound) {
713
+ memo_index = null_index_ = size();
714
+ DCHECK_OK(binary_builder_.AppendNull());
715
+ on_not_found(memo_index);
716
+ } else {
717
+ on_found(memo_index);
718
+ }
719
+ return memo_index;
720
+ }
721
+
722
+ int32_t GetOrInsertNull() {
723
+ return GetOrInsertNull([](int32_t i) {}, [](int32_t i) {});
724
+ }
725
+
726
+ // The number of entries in the memo table
727
+ // (which is also 1 + the largest memo index)
728
+ int32_t size() const override {
729
+ return static_cast<int32_t>(hash_table_.size() + (GetNull() != kKeyNotFound));
730
+ }
731
+
732
+ int64_t values_size() const { return binary_builder_.value_data_length(); }
733
+
734
+ // Copy (n + 1) offsets starting from index `start` into `out_data`
735
+ template <class Offset>
736
+ void CopyOffsets(int32_t start, Offset* out_data) const {
737
+ DCHECK_LE(start, size());
738
+
739
+ const builder_offset_type* offsets = binary_builder_.offsets_data();
740
+ const builder_offset_type delta =
741
+ start < binary_builder_.length() ? offsets[start] : 0;
742
+ for (int32_t i = start; i < size(); ++i) {
743
+ const builder_offset_type adjusted_offset = offsets[i] - delta;
744
+ Offset cast_offset = static_cast<Offset>(adjusted_offset);
745
+ assert(static_cast<builder_offset_type>(cast_offset) ==
746
+ adjusted_offset); // avoid truncation
747
+ *out_data++ = cast_offset;
748
+ }
749
+
750
+ // Copy the last value since BinaryBuilder only materializes it in Finish()
751
+ *out_data = static_cast<Offset>(binary_builder_.value_data_length() - delta);
752
+ }
753
+
754
+ template <class Offset>
755
+ void CopyOffsets(Offset* out_data) const {
756
+ CopyOffsets(0, out_data);
757
+ }
758
+
759
+ // Copy values starting from index `start` into `out_data`
760
+ void CopyValues(int32_t start, uint8_t* out_data) const {
761
+ CopyValues(start, -1, out_data);
762
+ }
763
+
764
+ // Same as above, but check output size in debug mode
765
+ void CopyValues(int32_t start, int64_t out_size, uint8_t* out_data) const {
766
+ DCHECK_LE(start, size());
767
+
768
+ // The absolute byte offset of `start` value in the binary buffer.
769
+ const builder_offset_type offset = binary_builder_.offset(start);
770
+ const auto length = binary_builder_.value_data_length() - static_cast<size_t>(offset);
771
+
772
+ if (out_size != -1) {
773
+ assert(static_cast<int64_t>(length) <= out_size);
774
+ }
775
+
776
+ auto view = binary_builder_.GetView(start);
777
+ memcpy(out_data, view.data(), length);
778
+ }
779
+
780
+ void CopyValues(uint8_t* out_data) const { CopyValues(0, -1, out_data); }
781
+
782
+ void CopyValues(int64_t out_size, uint8_t* out_data) const {
783
+ CopyValues(0, out_size, out_data);
784
+ }
785
+
786
+ void CopyFixedWidthValues(int32_t start, int32_t width_size, int64_t out_size,
787
+ uint8_t* out_data) const {
788
+ // This method exists to cope with the fact that the BinaryMemoTable does
789
+ // not know the fixed width when inserting the null value. The data
790
+ // buffer holds a zero-length string for the null value (if found).
791
+ //
792
+ // Thus, the method will properly inject an empty value of the proper width
793
+ // in the output buffer.
794
+ //
795
+ if (start >= size()) {
796
+ return;
797
+ }
798
+
799
+ int32_t null_index = GetNull();
800
+ if (null_index < start) {
801
+ // Nothing to skip, proceed as usual.
802
+ CopyValues(start, out_size, out_data);
803
+ return;
804
+ }
805
+
806
+ builder_offset_type left_offset = binary_builder_.offset(start);
807
+
808
+ // Ensure that the data length is exactly width_size bytes short of fitting
809
+ // in the expected output (n_values * width_size).
810
+ #ifndef NDEBUG
811
+ int64_t data_length = values_size() - static_cast<size_t>(left_offset);
812
+ assert(data_length + width_size == out_size);
813
+ ARROW_UNUSED(data_length);
814
+ #endif
815
+
816
+ auto in_data = binary_builder_.value_data() + left_offset;
817
+ // The null uses a 0-length entry in the data; slice the data in two and skip by
818
+ // width_size in out_data. [part_1][width_size][part_2]
819
+ auto null_data_offset = binary_builder_.offset(null_index);
820
+ auto left_size = null_data_offset - left_offset;
821
+ if (left_size > 0) {
822
+ memcpy(out_data, in_data + left_offset, left_size);
823
+ }
824
+ // Zero-initialize the null entry
825
+ memset(out_data + left_size, 0, width_size);
826
+
827
+ auto right_size = values_size() - static_cast<size_t>(null_data_offset);
828
+ if (right_size > 0) {
829
+ // skip the null fixed size value.
830
+ auto out_offset = left_size + width_size;
831
+ assert(out_data + out_offset + right_size == out_data + out_size);
832
+ memcpy(out_data + out_offset, in_data + null_data_offset, right_size);
833
+ }
834
+ }
835
+
836
+ // Visit the stored values in insertion order.
837
+ // The visitor function should have the signature `void(std::string_view)`
838
+ // or `void(const std::string_view&)`.
839
+ template <typename VisitFunc>
840
+ void VisitValues(int32_t start, VisitFunc&& visit) const {
841
+ for (int32_t i = start; i < size(); ++i) {
842
+ visit(binary_builder_.GetView(i));
843
+ }
844
+ }
845
+
846
+ protected:
847
+ struct Payload {
848
+ int32_t memo_index;
849
+ };
850
+
851
+ using HashTableType = HashTable<Payload>;
852
+ using HashTableEntry = typename HashTable<Payload>::Entry;
853
+ HashTableType hash_table_;
854
+ BinaryBuilderT binary_builder_;
855
+
856
+ int32_t null_index_ = kKeyNotFound;
857
+
858
+ std::pair<const HashTableEntry*, bool> Lookup(hash_t h, const void* data,
859
+ builder_offset_type length) const {
860
+ auto cmp_func = [&](const Payload* payload) {
861
+ std::string_view lhs = binary_builder_.GetView(payload->memo_index);
862
+ std::string_view rhs(static_cast<const char*>(data), length);
863
+ return lhs == rhs;
864
+ };
865
+ return hash_table_.Lookup(h, cmp_func);
866
+ }
867
+
868
+ public:
869
+ Status MergeTable(const BinaryMemoTable& other_table) {
870
+ other_table.VisitValues(0, [this](std::string_view other_value) {
871
+ int32_t unused;
872
+ DCHECK_OK(this->GetOrInsert(other_value, &unused));
873
+ });
874
+ return Status::OK();
875
+ }
876
+ };
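To show how the offsets/values pair is typically reassembled (a sketch only; the wrapper function and buffer handling are assumptions), a caller can memoize strings and then export dictionary-style buffers:

#include <cstdint>
#include <string_view>
#include <vector>
#include "arrow/array/builder_binary.h"
#include "arrow/memory_pool.h"
#include "arrow/status.h"
#include "arrow/util/hashing.h"  // assumed install path of the header above

arrow::Status MemoizeStrings() {
  arrow::internal::BinaryMemoTable<arrow::BinaryBuilder> memo(arrow::default_memory_pool());
  int32_t index;
  ARROW_RETURN_NOT_OK(memo.GetOrInsert(std::string_view("foo"), &index));  // index == 0
  ARROW_RETURN_NOT_OK(memo.GetOrInsert(std::string_view("bar"), &index));  // index == 1
  ARROW_RETURN_NOT_OK(memo.GetOrInsert(std::string_view("foo"), &index));  // index == 0 again

  // Rebuild dictionary-style offsets and a contiguous value buffer.
  std::vector<int32_t> offsets(static_cast<size_t>(memo.size()) + 1);
  memo.CopyOffsets(offsets.data());
  std::vector<uint8_t> data(static_cast<size_t>(memo.values_size()));
  memo.CopyValues(data.data());
  return arrow::Status::OK();
}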
877
+
878
+ template <typename T, typename Enable = void>
879
+ struct HashTraits {};
880
+
881
+ template <>
882
+ struct HashTraits<BooleanType> {
883
+ using MemoTableType = SmallScalarMemoTable<bool>;
884
+ };
885
+
886
+ template <typename T>
887
+ struct HashTraits<T, enable_if_8bit_int<T>> {
888
+ using c_type = typename T::c_type;
889
+ using MemoTableType = SmallScalarMemoTable<typename T::c_type>;
890
+ };
891
+
892
+ template <typename T>
893
+ struct HashTraits<T, enable_if_t<has_c_type<T>::value && !is_8bit_int<T>::value>> {
894
+ using c_type = typename T::c_type;
895
+ using MemoTableType = ScalarMemoTable<c_type, HashTable>;
896
+ };
897
+
898
+ template <typename T>
899
+ struct HashTraits<T, enable_if_t<has_string_view<T>::value &&
900
+ !std::is_base_of<LargeBinaryType, T>::value>> {
901
+ using MemoTableType = BinaryMemoTable<BinaryBuilder>;
902
+ };
903
+
904
+ template <typename T>
905
+ struct HashTraits<T, enable_if_decimal<T>> {
906
+ using MemoTableType = BinaryMemoTable<BinaryBuilder>;
907
+ };
908
+
909
+ template <typename T>
910
+ struct HashTraits<T, enable_if_t<std::is_base_of<LargeBinaryType, T>::value>> {
911
+ using MemoTableType = BinaryMemoTable<LargeBinaryBuilder>;
912
+ };
913
+
914
+ template <typename MemoTableType>
915
+ static inline Status ComputeNullBitmap(MemoryPool* pool, const MemoTableType& memo_table,
916
+ int64_t start_offset, int64_t* null_count,
917
+ std::shared_ptr<Buffer>* null_bitmap) {
918
+ int64_t dict_length = static_cast<int64_t>(memo_table.size()) - start_offset;
919
+ int64_t null_index = memo_table.GetNull();
920
+
921
+ *null_count = 0;
922
+ *null_bitmap = nullptr;
923
+
924
+ if (null_index != kKeyNotFound && null_index >= start_offset) {
925
+ null_index -= start_offset;
926
+ *null_count = 1;
927
+ ARROW_ASSIGN_OR_RAISE(*null_bitmap,
928
+ internal::BitmapAllButOne(pool, dict_length, null_index));
929
+ }
930
+
931
+ return Status::OK();
932
+ }
933
+
934
+ struct StringViewHash {
935
+ // std::hash compatible hasher for use with std::unordered_*
936
+ // (the std::hash specialization provided by nonstd constructs std::string
937
+ // temporaries then invokes std::hash<std::string> against those)
938
+ hash_t operator()(std::string_view value) const {
939
+ return ComputeStringHash<0>(value.data(), static_cast<int64_t>(value.size()));
940
+ }
941
+ };
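This is the drop-in use the comment above describes; a minimal assumed example with a standard container:

#include <string_view>
#include <unordered_set>
#include "arrow/util/hashing.h"  // assumed install path of the header above

bool ContainsAlpha() {
  // Deduplicate string views without materializing std::string temporaries.
  std::unordered_set<std::string_view, arrow::internal::StringViewHash> seen;
  seen.insert("alpha");
  seen.insert("alpha");  // no effect: already present
  return seen.count("alpha") > 0;
}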
942
+
943
+ } // namespace internal
944
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/int_util.h ADDED
@@ -0,0 +1,137 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #include <cstdint>
21
+ #include <type_traits>
22
+
23
+ #include "arrow/status.h"
24
+
25
+ #include "arrow/util/visibility.h"
26
+
27
+ namespace arrow {
28
+
29
+ class DataType;
30
+ struct ArraySpan;
31
+ struct Scalar;
32
+
33
+ namespace internal {
34
+
35
+ ARROW_EXPORT
36
+ uint8_t DetectUIntWidth(const uint64_t* values, int64_t length, uint8_t min_width = 1);
37
+
38
+ ARROW_EXPORT
39
+ uint8_t DetectUIntWidth(const uint64_t* values, const uint8_t* valid_bytes,
40
+ int64_t length, uint8_t min_width = 1);
41
+
42
+ ARROW_EXPORT
43
+ uint8_t DetectIntWidth(const int64_t* values, int64_t length, uint8_t min_width = 1);
44
+
45
+ ARROW_EXPORT
46
+ uint8_t DetectIntWidth(const int64_t* values, const uint8_t* valid_bytes, int64_t length,
47
+ uint8_t min_width = 1);
48
+
49
+ ARROW_EXPORT
50
+ void DowncastInts(const int64_t* source, int8_t* dest, int64_t length);
51
+
52
+ ARROW_EXPORT
53
+ void DowncastInts(const int64_t* source, int16_t* dest, int64_t length);
54
+
55
+ ARROW_EXPORT
56
+ void DowncastInts(const int64_t* source, int32_t* dest, int64_t length);
57
+
58
+ ARROW_EXPORT
59
+ void DowncastInts(const int64_t* source, int64_t* dest, int64_t length);
60
+
61
+ ARROW_EXPORT
62
+ void DowncastUInts(const uint64_t* source, uint8_t* dest, int64_t length);
63
+
64
+ ARROW_EXPORT
65
+ void DowncastUInts(const uint64_t* source, uint16_t* dest, int64_t length);
66
+
67
+ ARROW_EXPORT
68
+ void DowncastUInts(const uint64_t* source, uint32_t* dest, int64_t length);
69
+
70
+ ARROW_EXPORT
71
+ void DowncastUInts(const uint64_t* source, uint64_t* dest, int64_t length);
72
+
73
+ ARROW_EXPORT
74
+ void UpcastInts(const int32_t* source, int64_t* dest, int64_t length);
75
+
76
+ template <typename InputInt, typename OutputInt>
77
+ inline typename std::enable_if<(sizeof(InputInt) >= sizeof(OutputInt))>::type CastInts(
78
+ const InputInt* source, OutputInt* dest, int64_t length) {
79
+ DowncastInts(source, dest, length);
80
+ }
81
+
82
+ template <typename InputInt, typename OutputInt>
83
+ inline typename std::enable_if<(sizeof(InputInt) < sizeof(OutputInt))>::type CastInts(
84
+ const InputInt* source, OutputInt* dest, int64_t length) {
85
+ UpcastInts(source, dest, length);
86
+ }
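A short sketch (function name assumed) of how the two enable_if overloads dispatch: a wider-to-narrower cast resolves to DowncastInts and a narrower-to-wider cast to UpcastInts; note that neither performs bounds checking, which is what IntegersCanFit below is for.

#include <cstdint>
#include <vector>
#include "arrow/util/int_util.h"

void CastIntsSketch() {
  std::vector<int64_t> wide = {1, 2, 3};
  std::vector<int32_t> narrow(wide.size());
  // sizeof(int64_t) >= sizeof(int32_t): the first overload is chosen, DowncastInts runs.
  arrow::internal::CastInts(wide.data(), narrow.data(), static_cast<int64_t>(wide.size()));

  std::vector<int64_t> widened(narrow.size());
  // sizeof(int32_t) < sizeof(int64_t): the second overload is chosen, UpcastInts runs.
  arrow::internal::CastInts(narrow.data(), widened.data(), static_cast<int64_t>(narrow.size()));
}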
87
+
88
+ template <typename InputInt, typename OutputInt>
89
+ ARROW_EXPORT void TransposeInts(const InputInt* source, OutputInt* dest, int64_t length,
90
+ const int32_t* transpose_map);
91
+
92
+ ARROW_EXPORT
93
+ Status TransposeInts(const DataType& src_type, const DataType& dest_type,
94
+ const uint8_t* src, uint8_t* dest, int64_t src_offset,
95
+ int64_t dest_offset, int64_t length, const int32_t* transpose_map);
96
+
97
+ /// \brief Do vectorized boundschecking of integer-type array indices. The
98
+ /// indices must be nonnegative and strictly less than the passed upper
99
+ /// limit (which is usually the length of an array that is being indexed-into).
100
+ ARROW_EXPORT
101
+ Status CheckIndexBounds(const ArraySpan& values, uint64_t upper_limit);
102
+
103
+ /// \brief Boundscheck integer values to determine if they are all between the
104
+ /// passed upper and lower limits (inclusive). Upper and lower bounds must be
105
+ /// the same type as the data and are not currently casted.
106
+ ARROW_EXPORT
107
+ Status CheckIntegersInRange(const ArraySpan& values, const Scalar& bound_lower,
108
+ const Scalar& bound_upper);
109
+
110
+ /// \brief Use CheckIntegersInRange to determine whether the passed integers
111
+ /// can fit safely in the passed integer type. This helps quickly determine if
112
+ /// integer narrowing (e.g. int64->int32) is safe to do.
113
+ ARROW_EXPORT
114
+ Status IntegersCanFit(const ArraySpan& values, const DataType& target_type);
115
+
116
+ /// \brief Convenience for boundschecking a single Scalar value
117
+ ARROW_EXPORT
118
+ Status IntegersCanFit(const Scalar& value, const DataType& target_type);
119
+
120
+ /// Upcast an integer to the largest possible width (currently 64 bits)
121
+
122
+ template <typename Integer>
123
+ typename std::enable_if<
124
+ std::is_integral<Integer>::value && std::is_signed<Integer>::value, int64_t>::type
125
+ UpcastInt(Integer v) {
126
+ return v;
127
+ }
128
+
129
+ template <typename Integer>
130
+ typename std::enable_if<
131
+ std::is_integral<Integer>::value && std::is_unsigned<Integer>::value, uint64_t>::type
132
+ UpcastInt(Integer v) {
133
+ return v;
134
+ }
135
+
136
+ } // namespace internal
137
+ } // namespace arrow
env-llmeval/lib/python3.10/site-packages/pyarrow/include/arrow/util/io_util.h ADDED
@@ -0,0 +1,420 @@
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // http://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
17
+
18
+ #pragma once
19
+
20
+ #ifndef _WIN32
21
+ #define ARROW_HAVE_SIGACTION 1
22
+ #endif
23
+
24
+ #include <atomic>
25
+ #include <memory>
26
+ #include <string>
27
+ #include <utility>
28
+ #include <vector>
29
+
30
+ #if ARROW_HAVE_SIGACTION
31
+ #include <signal.h> // Needed for struct sigaction
32
+ #endif
33
+
34
+ #include "arrow/status.h"
35
+ #include "arrow/type_fwd.h"
36
+ #include "arrow/util/macros.h"
37
+ #include "arrow/util/windows_fixup.h"
38
+
39
+ namespace arrow {
40
+ namespace internal {
41
+
42
+ // NOTE: 8-bit path strings on Windows are encoded using UTF-8.
43
+ // Using MBCS would fail encoding some paths.
44
+
45
+ #if defined(_WIN32)
46
+ using NativePathString = std::wstring;
47
+ #else
48
+ using NativePathString = std::string;
49
+ #endif
50
+
51
+ class ARROW_EXPORT PlatformFilename {
52
+ public:
53
+ struct Impl;
54
+
55
+ ~PlatformFilename();
56
+ PlatformFilename();
57
+ PlatformFilename(const PlatformFilename&);
58
+ PlatformFilename(PlatformFilename&&);
59
+ PlatformFilename& operator=(const PlatformFilename&);
60
+ PlatformFilename& operator=(PlatformFilename&&);
61
+ explicit PlatformFilename(NativePathString path);
62
+ explicit PlatformFilename(const NativePathString::value_type* path);
63
+
64
+ const NativePathString& ToNative() const;
65
+ std::string ToString() const;
66
+
67
+ PlatformFilename Parent() const;
68
+ Result<PlatformFilename> Real() const;
69
+
70
+ // These functions can fail for character encoding reasons.
71
+ static Result<PlatformFilename> FromString(std::string_view file_name);
72
+ Result<PlatformFilename> Join(std::string_view child_name) const;
73
+
74
+ PlatformFilename Join(const PlatformFilename& child_name) const;
75
+
76
+ bool operator==(const PlatformFilename& other) const;
77
+ bool operator!=(const PlatformFilename& other) const;
78
+
79
+ // Made public to avoid the proliferation of friend declarations.
80
+ const Impl* impl() const { return impl_.get(); }
81
+
82
+ private:
83
+ std::unique_ptr<Impl> impl_;
84
+
85
+ explicit PlatformFilename(Impl impl);
86
+ };
87
+
88
+ /// Create a directory if it doesn't exist.
89
+ ///
90
+ /// Return whether the directory was created.
91
+ ARROW_EXPORT
92
+ Result<bool> CreateDir(const PlatformFilename& dir_path);
93
+
94
+ /// Create a directory and its parents if it doesn't exist.
95
+ ///
96
+ /// Return whether the directory was created.
97
+ ARROW_EXPORT
98
+ Result<bool> CreateDirTree(const PlatformFilename& dir_path);
99
+
100
+ /// Delete a directory's contents (but not the directory itself) if it exists.
101
+ ///
102
+ /// Return whether the directory existed.
103
+ ARROW_EXPORT
104
+ Result<bool> DeleteDirContents(const PlatformFilename& dir_path,
105
+ bool allow_not_found = true);
106
+
107
+ /// Delete a directory tree if it exists.
108
+ ///
109
+ /// Return whether the directory existed.
110
+ ARROW_EXPORT
111
+ Result<bool> DeleteDirTree(const PlatformFilename& dir_path, bool allow_not_found = true);
112
+
113
+ // Non-recursively list the contents of the given directory.
114
+ // The returned names are the children's base names, not including dir_path.
115
+ ARROW_EXPORT
116
+ Result<std::vector<PlatformFilename>> ListDir(const PlatformFilename& dir_path);
117
+
118
+ /// Delete a file if it exists.
119
+ ///
120
+ /// Return whether the file existed.
121
+ ARROW_EXPORT
122
+ Result<bool> DeleteFile(const PlatformFilename& file_path, bool allow_not_found = true);
123
+
124
+ /// Return whether a file exists.
125
+ ARROW_EXPORT
126
+ Result<bool> FileExists(const PlatformFilename& path);
127
+
128
+ // TODO expose this more publicly to make it available from io/file.h?
129
+ /// A RAII wrapper for a file descriptor.
130
+ ///
131
+ /// The underlying file descriptor is automatically closed on destruction.
132
+ /// Moving is supported with well-defined semantics.
133
+ /// Furthermore, closing is idempotent.
134
+ class ARROW_EXPORT FileDescriptor {
135
+ public:
136
+ FileDescriptor() = default;
137
+ explicit FileDescriptor(int fd) : fd_(fd) {}
138
+ FileDescriptor(FileDescriptor&&);
139
+ FileDescriptor& operator=(FileDescriptor&&);
140
+
141
+ ~FileDescriptor();
142
+
143
+ Status Close();
144
+
145
+ /// May return -1 if closed or default-initialized
146
+ int fd() const { return fd_.load(); }
147
+
148
+ /// Detach and return the underlying file descriptor
149
+ int Detach();
150
+
151
+ bool closed() const { return fd_.load() == -1; }
152
+
153
+ protected:
154
+ static void CloseFromDestructor(int fd);
155
+
156
+ std::atomic<int> fd_{-1};
157
+ };
158
+
159
+ /// Open a file for reading and return a file descriptor.
160
+ ARROW_EXPORT
161
+ Result<FileDescriptor> FileOpenReadable(const PlatformFilename& file_name);
162
+
163
+ /// Open a file for writing and return a file descriptor.
164
+ ARROW_EXPORT
165
+ Result<FileDescriptor> FileOpenWritable(const PlatformFilename& file_name,
166
+ bool write_only = true, bool truncate = true,
167
+ bool append = false);
168
+
169
+ /// Read from current file position. Return number of bytes read.
170
+ ARROW_EXPORT
171
+ Result<int64_t> FileRead(int fd, uint8_t* buffer, int64_t nbytes);
172
+ /// Read from given file position. Return number of bytes read.
173
+ ARROW_EXPORT
174
+ Result<int64_t> FileReadAt(int fd, uint8_t* buffer, int64_t position, int64_t nbytes);
175
+
176
+ ARROW_EXPORT
177
+ Status FileWrite(int fd, const uint8_t* buffer, const int64_t nbytes);
178
+ ARROW_EXPORT
179
+ Status FileTruncate(int fd, const int64_t size);
180
+
181
+ ARROW_EXPORT
182
+ Status FileSeek(int fd, int64_t pos);
183
+ ARROW_EXPORT
184
+ Status FileSeek(int fd, int64_t pos, int whence);
185
+ ARROW_EXPORT
186
+ Result<int64_t> FileTell(int fd);
187
+ ARROW_EXPORT
188
+ Result<int64_t> FileGetSize(int fd);
189
+
190
+ ARROW_EXPORT
191
+ Status FileClose(int fd);
192
+
193
+ struct Pipe {
194
+ FileDescriptor rfd;
195
+ FileDescriptor wfd;
196
+
197
+ Status Close() { return rfd.Close() & wfd.Close(); }
198
+ };
199
+
200
+ ARROW_EXPORT
201
+ Result<Pipe> CreatePipe();
202
+
203
+ ARROW_EXPORT
204
+ Status SetPipeFileDescriptorNonBlocking(int fd);
205
+
206
+ class ARROW_EXPORT SelfPipe {
207
+ public:
208
+ static Result<std::shared_ptr<SelfPipe>> Make(bool signal_safe);
209
+ virtual ~SelfPipe();
210
+
211
+ /// \brief Wait for a wakeup.
212
+ ///
213
+ /// Status::Invalid is returned if the pipe has been shutdown.
214
+ /// Otherwise the next sent payload is returned.
215
+ virtual Result<uint64_t> Wait() = 0;
216
+
217
+ /// \brief Wake up the pipe by sending a payload.
218
+ ///
219
+ /// This method is async-signal-safe if `signal_safe` was set to true.
220
+ virtual void Send(uint64_t payload) = 0;
221
+
222
+ /// \brief Wake up the pipe and shut it down.
223
+ virtual Status Shutdown() = 0;
224
+ };
225
+
226
+ ARROW_EXPORT
227
+ int64_t GetPageSize();
228
+
229
+ struct MemoryRegion {
230
+ void* addr;
231
+ size_t size;
232
+ };
233
+
234
+ ARROW_EXPORT
235
+ Status MemoryMapRemap(void* addr, size_t old_size, size_t new_size, int fildes,
236
+ void** new_addr);
237
+ ARROW_EXPORT
238
+ Status MemoryAdviseWillNeed(const std::vector<MemoryRegion>& regions);
239
+
240
+ ARROW_EXPORT
241
+ Result<std::string> GetEnvVar(const char* name);
242
+ ARROW_EXPORT
243
+ Result<std::string> GetEnvVar(const std::string& name);
244
+ ARROW_EXPORT
245
+ Result<NativePathString> GetEnvVarNative(const char* name);
246
+ ARROW_EXPORT
247
+ Result<NativePathString> GetEnvVarNative(const std::string& name);
248
+
249
+ ARROW_EXPORT
250
+ Status SetEnvVar(const char* name, const char* value);
251
+ ARROW_EXPORT
252
+ Status SetEnvVar(const std::string& name, const std::string& value);
253
+ ARROW_EXPORT
254
+ Status DelEnvVar(const char* name);
255
+ ARROW_EXPORT
256
+ Status DelEnvVar(const std::string& name);
257
+
258
+ ARROW_EXPORT
259
+ std::string ErrnoMessage(int errnum);
260
+ #if _WIN32
261
+ ARROW_EXPORT
262
+ std::string WinErrorMessage(int errnum);
263
+ #endif
264
+
265
+ ARROW_EXPORT
266
+ std::shared_ptr<StatusDetail> StatusDetailFromErrno(int errnum);
267
+ #if _WIN32
268
+ ARROW_EXPORT
269
+ std::shared_ptr<StatusDetail> StatusDetailFromWinError(int errnum);
270
+ #endif
271
+ ARROW_EXPORT
272
+ std::shared_ptr<StatusDetail> StatusDetailFromSignal(int signum);
273
+
274
+ template <typename... Args>
275
+ Status StatusFromErrno(int errnum, StatusCode code, Args&&... args) {
276
+ return Status::FromDetailAndArgs(code, StatusDetailFromErrno(errnum),
277
+ std::forward<Args>(args)...);
278
+ }
279
+
280
+ template <typename... Args>
281
+ Status IOErrorFromErrno(int errnum, Args&&... args) {
282
+ return StatusFromErrno(errnum, StatusCode::IOError, std::forward<Args>(args)...);
283
+ }
284
+
285
+ #if _WIN32
286
+ template <typename... Args>
287
+ Status StatusFromWinError(int errnum, StatusCode code, Args&&... args) {
288
+ return Status::FromDetailAndArgs(code, StatusDetailFromWinError(errnum),
289
+ std::forward<Args>(args)...);
290
+ }
291
+
292
+ template <typename... Args>
293
+ Status IOErrorFromWinError(int errnum, Args&&... args) {
294
+ return StatusFromWinError(errnum, StatusCode::IOError, std::forward<Args>(args)...);
295
+ }
296
+ #endif
297
+
298
+ template <typename... Args>
299
+ Status StatusFromSignal(int signum, StatusCode code, Args&&... args) {
300
+ return Status::FromDetailAndArgs(code, StatusDetailFromSignal(signum),
301
+ std::forward<Args>(args)...);
302
+ }
303
+
304
+ template <typename... Args>
305
+ Status CancelledFromSignal(int signum, Args&&... args) {
306
+ return StatusFromSignal(signum, StatusCode::Cancelled, std::forward<Args>(args)...);
307
+ }
308
+
309
+ ARROW_EXPORT
310
+ int ErrnoFromStatus(const Status&);
311
+
312
+ // Always returns 0 on non-Windows platforms (for Python).
313
+ ARROW_EXPORT
314
+ int WinErrorFromStatus(const Status&);
315
+
316
+ ARROW_EXPORT
317
+ int SignalFromStatus(const Status&);
318
+
319
+ class ARROW_EXPORT TemporaryDir {
320
+ public:
321
+ ~TemporaryDir();
322
+
323
+ /// '/'-terminated path to the temporary dir
324
+ const PlatformFilename& path() { return path_; }
325
+
326
+ /// Create a temporary subdirectory in the system temporary dir,
327
+ /// named starting with `prefix`.
328
+ static Result<std::unique_ptr<TemporaryDir>> Make(const std::string& prefix);
329
+
330
+ private:
331
+ PlatformFilename path_;
332
+
333
+ explicit TemporaryDir(PlatformFilename&&);
334
+ };
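A brief assumed example; the prefix string and file name are placeholders, and the directory is expected to be cleaned up in ~TemporaryDir():

#include <memory>
#include <string>
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/util/io_util.h"

arrow::Status UseTemporaryDir() {
  ARROW_ASSIGN_OR_RAISE(std::unique_ptr<arrow::internal::TemporaryDir> tmp,
                        arrow::internal::TemporaryDir::Make("my-prefix-"));
  ARROW_ASSIGN_OR_RAISE(auto file_path, tmp->path().Join("data.bin"));
  (void)file_path;  // a real caller would create and write this file
  return arrow::Status::OK();  // cleanup is expected when `tmp` is destroyed
}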
335
+
336
+ class ARROW_EXPORT SignalHandler {
337
+ public:
338
+ typedef void (*Callback)(int);
339
+
340
+ SignalHandler();
341
+ explicit SignalHandler(Callback cb);
342
+ #if ARROW_HAVE_SIGACTION
343
+ explicit SignalHandler(const struct sigaction& sa);
344
+ #endif
345
+
346
+ Callback callback() const;
347
+ #if ARROW_HAVE_SIGACTION
348
+ const struct sigaction& action() const;
349
+ #endif
350
+
351
+ protected:
352
+ #if ARROW_HAVE_SIGACTION
353
+ // Storing the full sigaction allows to restore the entire signal handling
354
+ // configuration.
355
+ struct sigaction sa_;
356
+ #else
357
+ Callback cb_;
358
+ #endif
359
+ };
360
+
361
+ /// \brief Return the current handler for the given signal number.
362
+ ARROW_EXPORT
363
+ Result<SignalHandler> GetSignalHandler(int signum);
364
+
365
+ /// \brief Set a new handler for the given signal number.
366
+ ///
367
+ /// The old signal handler is returned.
368
+ ARROW_EXPORT
369
+ Result<SignalHandler> SetSignalHandler(int signum, const SignalHandler& handler);
370
+
371
+ /// \brief Reinstate the signal handler
372
+ ///
373
+ /// For use in signal handlers. This is needed on platforms without sigaction()
374
+ /// such as Windows, as the default signal handler is restored there as
375
+ /// soon as a signal is raised.
376
+ ARROW_EXPORT
377
+ void ReinstateSignalHandler(int signum, SignalHandler::Callback handler);
378
+
379
+ /// \brief Send a signal to the current process
380
+ ///
381
+ /// The thread which will receive the signal is unspecified.
382
+ ARROW_EXPORT
383
+ Status SendSignal(int signum);
384
+
385
+ /// \brief Send a signal to the given thread
386
+ ///
387
+ /// This function isn't supported on Windows.
388
+ ARROW_EXPORT
389
+ Status SendSignalToThread(int signum, uint64_t thread_id);
390
+
391
+ /// \brief Get an unpredictable random seed
392
+ ///
393
+ /// This function may be slightly costly, so should only be used to initialize
394
+ /// a PRNG, not to generate a large amount of random numbers.
395
+ /// It is better to use this function rather than std::random_device, unless
396
+ /// absolutely necessary (e.g. to generate a cryptographic secret).
397
+ ARROW_EXPORT
398
+ int64_t GetRandomSeed();
399
+
400
+ /// \brief Get the current thread id
401
+ ///
402
+ /// In addition to having the same properties as std::thread, the returned value
403
+ /// is a regular integer value, which is more convenient than an opaque type.
404
+ ARROW_EXPORT
405
+ uint64_t GetThreadId();
406
+
407
+ /// \brief Get the current memory used by the current process in bytes
408
+ ///
409
+ /// This function supports Windows, Linux, and Mac and will return 0 otherwise
410
+ ARROW_EXPORT
411
+ int64_t GetCurrentRSS();
412
+
413
+ /// \brief Get the total memory available to the system in bytes
414
+ ///
415
+ /// This function supports Windows, Linux, and Mac and will return 0 otherwise
416
+ ARROW_EXPORT
417
+ int64_t GetTotalMemoryBytes();
418
+
419
+ } // namespace internal
420
+ } // namespace arrow