Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so +3 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/__init__.pxd +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/common.pxd +175 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd +111 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd +107 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd +405 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd +105 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd +50 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd +622 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd +349 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd +311 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd +77 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd +298 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd +130 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/parquet/__init__.py +20 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/parquet/core.py +2355 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/arrow_16597.py +37 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/arrow_39313.py +47 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/arrow_7980.py +30 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/bound_function_visit_strings.pyx +67 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/conftest.py +281 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/__init__.py +16 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/test_conversion.py +522 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/pandas_threaded_import.py +44 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/pyarrow_cython_example.pyx +55 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/strategies.py +451 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_acero.py +378 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_adhoc_memory_leak.py +43 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_array.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_builder.py +67 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cffi.py +569 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_compute.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_convert_builtin.py +2507 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_csv.py +1993 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cuda.py +794 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cuda_numba_interop.py +235 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_dataset.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_deprecations.py +23 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_dlpack.py +142 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_exec_plan.py +337 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_extension_type.py +1496 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_feather.py +863 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_filesystem.py +75 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_flight.py +2367 -0
- env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_flight_async.py +93 -0
.gitattributes
CHANGED
@@ -207,3 +207,4 @@ env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_python.so filter=lfs
 env-llmeval/lib/python3.10/site-packages/torch/lib/libcusparseLt-f8b4a9fb.so.0 filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b1fc7eec7614f71e395c087e2a55a60919eaf0376277c7591ad635d353c9026
+size 1098408
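The three added lines are a standard Git LFS pointer: a spec version, the SHA-256 digest of the real binary, and its size in bytes. A minimal sketch of parsing this format (the helper name is hypothetical, not part of Git LFS):

import io

# Hypothetical helper: parse the three-line Git LFS pointer format shown above.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)  # e.g. "sha256", "3b1f..."
    return {"version": fields["version"], "oid_algo": algo,
            "oid": digest, "size": int(fields["size"])}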
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/__init__.pxd
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/common.pxd
ADDED
@@ -0,0 +1,175 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language = c++
+
+from libc.stdint cimport *
+from libcpp cimport bool as c_bool, nullptr
+from libcpp.functional cimport function
+from libcpp.memory cimport shared_ptr, unique_ptr, make_shared
+from libcpp.string cimport string as c_string
+from libcpp.utility cimport pair
+from libcpp.vector cimport vector
+from libcpp.unordered_map cimport unordered_map
+from libcpp.unordered_set cimport unordered_set
+
+from cpython cimport PyObject
+from cpython.datetime cimport PyDateTime_DateTime
+cimport cpython
+
+
+cdef extern from * namespace "std" nogil:
+    cdef shared_ptr[T] static_pointer_cast[T, U](shared_ptr[U])
+
+
+cdef extern from "<optional>" namespace "std" nogil:
+    cdef cppclass optional[T]:
+        ctypedef T value_type
+        optional()
+        optional(nullopt_t)
+        optional(optional&) except +
+        optional(T&) except +
+        c_bool has_value()
+        T& value()
+        T& value_or[U](U& default_value)
+        void swap(optional&)
+        void reset()
+        T& emplace(...)
+        T& operator*()
+        # T* operator->() # Not Supported
+        optional& operator=(optional&)
+        optional& operator=[U](U&)
+
+
+# vendored from the cymove project https://github.com/ozars/cymove
+cdef extern from * namespace "cymove" nogil:
+    """
+    #include <type_traits>
+    #include <utility>
+    namespace cymove {
+    template <typename T>
+    inline typename std::remove_reference<T>::type&& cymove(T& t) {
+        return std::move(t);
+    }
+    template <typename T>
+    inline typename std::remove_reference<T>::type&& cymove(T&& t) {
+        return std::move(t);
+    }
+    }  // namespace cymove
+    """
+    cdef T move" cymove::cymove"[T](T)
+
+cdef extern from * namespace "arrow::py" nogil:
+    """
+    #include <memory>
+    #include <utility>
+
+    namespace arrow {
+    namespace py {
+    template <typename T>
+    std::shared_ptr<T> to_shared(std::unique_ptr<T>& t) {
+        return std::move(t);
+    }
+    template <typename T>
+    std::shared_ptr<T> to_shared(std::unique_ptr<T>&& t) {
+        return std::move(t);
+    }
+    }  // namespace py
+    }  // namespace arrow
+    """
+    cdef shared_ptr[T] to_shared" arrow::py::to_shared"[T](unique_ptr[T])
+
+cdef extern from "arrow/python/platform.h":
+    pass
+
+cdef extern from "<Python.h>":
+    void Py_XDECREF(PyObject* o)
+    Py_ssize_t Py_REFCNT(PyObject* o)
+
+cdef extern from "numpy/halffloat.h":
+    ctypedef uint16_t npy_half
+
+cdef extern from "arrow/api.h" namespace "arrow" nogil:
+    # We can later add more of the common status factory methods as needed
+    cdef CStatus CStatus_OK "arrow::Status::OK"()
+
+    cdef CStatus CStatus_Invalid "arrow::Status::Invalid"()
+    cdef CStatus CStatus_NotImplemented \
+        "arrow::Status::NotImplemented"(const c_string& msg)
+    cdef CStatus CStatus_UnknownError \
+        "arrow::Status::UnknownError"(const c_string& msg)
+
+    cdef cppclass CStatus "arrow::Status":
+        CStatus()
+
+        c_string ToString()
+        c_string message()
+        shared_ptr[CStatusDetail] detail()
+
+        c_bool ok()
+        c_bool IsIOError()
+        c_bool IsOutOfMemory()
+        c_bool IsInvalid()
+        c_bool IsKeyError()
+        c_bool IsNotImplemented()
+        c_bool IsTypeError()
+        c_bool IsCapacityError()
+        c_bool IsIndexError()
+        c_bool IsSerializationError()
+        c_bool IsCancelled()
+
+        void Warn()
+
+    cdef cppclass CStatusDetail "arrow::StatusDetail":
+        c_string ToString()
+
+
+cdef extern from "arrow/result.h" namespace "arrow" nogil:
+    cdef cppclass CResult "arrow::Result"[T]:
+        CResult()
+        CResult(CStatus)
+        CResult(T)
+        c_bool ok()
+        CStatus status()
+        CStatus Value(T*)
+        T operator*()
+
+
+cdef extern from "arrow/util/future.h" namespace "arrow" nogil:
+    cdef cppclass CFuture "arrow::Future"[T]:
+        CFuture()
+
+
+cdef extern from "arrow/python/async.h" namespace "arrow::py" nogil:
+    # BindFuture's third argument is really a C++ callable with
+    # the signature `object(T*)`, but Cython does not allow declaring that.
+    # We use an ellipsis as a workaround.
+    # Another possibility is to type-erase the argument by making it
+    # `object(void*)`, but it would lose compile-time C++ type safety.
+    void BindFuture[T](CFuture[T], object cb, ...)
+
+
+cdef extern from "arrow/python/common.h" namespace "arrow::py" nogil:
+    T GetResultValue[T](CResult[T]) except *
+    cdef function[F] BindFunction[F](void* unbound, object bound, ...)
+
+
+cdef inline object PyObject_to_object(PyObject* o):
+    # Cast to "object" increments reference count
+    cdef object result = <object> o
+    cpython.Py_DECREF(result)
+    return result
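The CStatus/CResult declarations above are the error-handling backbone of the bindings: wrapper code calls GetResultValue, which raises a Python exception whenever the wrapped C++ Status is not OK. From pure Python those status categories surface as pyarrow's exception hierarchy; a minimal sketch using only public pyarrow APIs:

import pyarrow as pa

# Status::Invalid in C++ surfaces as pa.ArrowInvalid in Python,
# Status::NotImplemented as pa.ArrowNotImplementedError, and so on.
try:
    pa.array(["a"]).cast(pa.int64())  # non-numeric string -> invalid cast
except pa.ArrowInvalid as exc:
    print("caught:", exc)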
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow.pxd
ADDED
The diff for this file is too large to render.
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_acero.pxd
ADDED
@@ -0,0 +1,111 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language = c++
+
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport *
+
+
+cdef extern from "arrow/acero/options.h" namespace "arrow::acero" nogil:
+    cdef enum CJoinType "arrow::acero::JoinType":
+        CJoinType_LEFT_SEMI "arrow::acero::JoinType::LEFT_SEMI"
+        CJoinType_RIGHT_SEMI "arrow::acero::JoinType::RIGHT_SEMI"
+        CJoinType_LEFT_ANTI "arrow::acero::JoinType::LEFT_ANTI"
+        CJoinType_RIGHT_ANTI "arrow::acero::JoinType::RIGHT_ANTI"
+        CJoinType_INNER "arrow::acero::JoinType::INNER"
+        CJoinType_LEFT_OUTER "arrow::acero::JoinType::LEFT_OUTER"
+        CJoinType_RIGHT_OUTER "arrow::acero::JoinType::RIGHT_OUTER"
+        CJoinType_FULL_OUTER "arrow::acero::JoinType::FULL_OUTER"
+
+    cdef cppclass CExecNodeOptions "arrow::acero::ExecNodeOptions":
+        pass
+
+    cdef cppclass CSourceNodeOptions "arrow::acero::SourceNodeOptions"(CExecNodeOptions):
+        pass
+
+    cdef cppclass CTableSourceNodeOptions "arrow::acero::TableSourceNodeOptions"(CExecNodeOptions):
+        CTableSourceNodeOptions(shared_ptr[CTable] table)
+        CTableSourceNodeOptions(shared_ptr[CTable] table, int64_t max_batch_size)
+
+    cdef cppclass CSinkNodeOptions "arrow::acero::SinkNodeOptions"(CExecNodeOptions):
+        pass
+
+    cdef cppclass CFilterNodeOptions "arrow::acero::FilterNodeOptions"(CExecNodeOptions):
+        CFilterNodeOptions(CExpression)
+
+    cdef cppclass CProjectNodeOptions "arrow::acero::ProjectNodeOptions"(CExecNodeOptions):
+        CProjectNodeOptions(vector[CExpression] expressions)
+        CProjectNodeOptions(vector[CExpression] expressions,
+                            vector[c_string] names)
+
+    cdef cppclass CAggregateNodeOptions "arrow::acero::AggregateNodeOptions"(CExecNodeOptions):
+        CAggregateNodeOptions(vector[CAggregate] aggregates, vector[CFieldRef] names)
+
+    cdef cppclass COrderByNodeOptions "arrow::acero::OrderByNodeOptions"(CExecNodeOptions):
+        COrderByNodeOptions(COrdering ordering)
+
+    cdef cppclass CHashJoinNodeOptions "arrow::acero::HashJoinNodeOptions"(CExecNodeOptions):
+        CHashJoinNodeOptions(CJoinType, vector[CFieldRef] in_left_keys,
+                             vector[CFieldRef] in_right_keys)
+        CHashJoinNodeOptions(CJoinType, vector[CFieldRef] in_left_keys,
+                             vector[CFieldRef] in_right_keys,
+                             CExpression filter,
+                             c_string output_suffix_for_left,
+                             c_string output_suffix_for_right)
+        CHashJoinNodeOptions(CJoinType join_type,
+                             vector[CFieldRef] left_keys,
+                             vector[CFieldRef] right_keys,
+                             vector[CFieldRef] left_output,
+                             vector[CFieldRef] right_output,
+                             CExpression filter,
+                             c_string output_suffix_for_left,
+                             c_string output_suffix_for_right)
+
+
+cdef extern from "arrow/acero/exec_plan.h" namespace "arrow::acero" nogil:
+    cdef cppclass CDeclaration "arrow::acero::Declaration":
+        cppclass Input:
+            Input(CExecNode*)
+            Input(CDeclaration)
+
+        c_string label
+        vector[Input] inputs
+
+        CDeclaration()
+        CDeclaration(c_string factory_name, CExecNodeOptions options)
+        CDeclaration(c_string factory_name, vector[Input] inputs, shared_ptr[CExecNodeOptions] options)
+
+        @staticmethod
+        CDeclaration Sequence(vector[CDeclaration] decls)
+
+    cdef cppclass CExecNode "arrow::acero::ExecNode":
+        const vector[CExecNode*]& inputs() const
+        const shared_ptr[CSchema]& output_schema() const
+
+    CResult[shared_ptr[CTable]] DeclarationToTable(
+        CDeclaration declaration, c_bool use_threads
+    )
+    CResult[shared_ptr[CTable]] DeclarationToTable(
+        CDeclaration declaration, c_bool use_threads,
+        CMemoryPool* memory_pool, CFunctionRegistry* function_registry
+    )
+    CResult[unique_ptr[CRecordBatchReader]] DeclarationToReader(
+        CDeclaration declaration, c_bool use_threads
+    )
+
+    CResult[c_string] DeclarationToString(const CDeclaration& declaration)
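These declarations back the pyarrow.acero module: a Declaration names a node factory ("table_source", "filter", "project", ...) plus its options, and DeclarationToTable runs the assembled plan. A minimal sketch of the corresponding public Python API (assuming a pyarrow build with Acero enabled):

import pyarrow as pa
import pyarrow.compute as pc
from pyarrow import acero

table = pa.table({"a": [1, 2, 3, 4], "b": ["x", "y", "x", "y"]})

# Build source -> filter -> project; this mirrors Declaration::Sequence
# followed by DeclarationToTable in the declarations above.
decl = acero.Declaration.from_sequence([
    acero.Declaration("table_source", acero.TableSourceNodeOptions(table)),
    acero.Declaration("filter", acero.FilterNodeOptions(pc.field("a") > 1)),
    acero.Declaration("project",
                      acero.ProjectNodeOptions([pc.field("a")], ["a_kept"])),
])
print(decl.to_table())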
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_cuda.pxd
ADDED
@@ -0,0 +1,107 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language = c++
+
+from pyarrow.includes.libarrow cimport *
+
+cdef extern from "arrow/gpu/cuda_api.h" namespace "arrow::cuda" nogil:
+
+    cdef cppclass CCudaDeviceManager" arrow::cuda::CudaDeviceManager":
+        @staticmethod
+        CResult[CCudaDeviceManager*] Instance()
+        CResult[shared_ptr[CCudaContext]] GetContext(int gpu_number)
+        CResult[shared_ptr[CCudaContext]] GetSharedContext(int gpu_number,
+                                                           void* handle)
+        CStatus AllocateHost(int device_number, int64_t nbytes,
+                             shared_ptr[CCudaHostBuffer]* buffer)
+        int num_devices() const
+
+    cdef cppclass CCudaContext" arrow::cuda::CudaContext":
+        CResult[shared_ptr[CCudaBuffer]] Allocate(int64_t nbytes)
+        CResult[shared_ptr[CCudaBuffer]] View(uint8_t* data, int64_t nbytes)
+        CResult[shared_ptr[CCudaBuffer]] OpenIpcBuffer(
+            const CCudaIpcMemHandle& ipc_handle)
+        CStatus Synchronize()
+        int64_t bytes_allocated() const
+        const void* handle() const
+        int device_number() const
+        CResult[uintptr_t] GetDeviceAddress(uintptr_t addr)
+
+    cdef cppclass CCudaIpcMemHandle" arrow::cuda::CudaIpcMemHandle":
+        @staticmethod
+        CResult[shared_ptr[CCudaIpcMemHandle]] FromBuffer(
+            const void* opaque_handle)
+        CResult[shared_ptr[CBuffer]] Serialize(CMemoryPool* pool) const
+
+    cdef cppclass CCudaBuffer" arrow::cuda::CudaBuffer"(CBuffer):
+        CCudaBuffer(uint8_t* data, int64_t size,
+                    const shared_ptr[CCudaContext]& context,
+                    c_bool own_data=false, c_bool is_ipc=false)
+        CCudaBuffer(const shared_ptr[CCudaBuffer]& parent,
+                    const int64_t offset, const int64_t size)
+
+        @staticmethod
+        CResult[shared_ptr[CCudaBuffer]] FromBuffer(shared_ptr[CBuffer] buf)
+
+        CStatus CopyToHost(const int64_t position, const int64_t nbytes,
+                           void* out) const
+        CStatus CopyFromHost(const int64_t position, const void* data,
+                             int64_t nbytes)
+        CStatus CopyFromDevice(const int64_t position, const void* data,
+                               int64_t nbytes)
+        CStatus CopyFromAnotherDevice(const shared_ptr[CCudaContext]& src_ctx,
+                                      const int64_t position, const void* data,
+                                      int64_t nbytes)
+        CResult[shared_ptr[CCudaIpcMemHandle]] ExportForIpc()
+        shared_ptr[CCudaContext] context() const
+
+    cdef cppclass \
+            CCudaHostBuffer" arrow::cuda::CudaHostBuffer"(CMutableBuffer):
+        pass
+
+    cdef cppclass \
+            CCudaBufferReader" arrow::cuda::CudaBufferReader"(CBufferReader):
+        CCudaBufferReader(const shared_ptr[CBuffer]& buffer)
+        CResult[int64_t] Read(int64_t nbytes, void* buffer)
+        CResult[shared_ptr[CBuffer]] Read(int64_t nbytes)
+
+    cdef cppclass \
+            CCudaBufferWriter" arrow::cuda::CudaBufferWriter"(WritableFile):
+        CCudaBufferWriter(const shared_ptr[CCudaBuffer]& buffer)
+        CStatus Close()
+        CStatus Write(const void* data, int64_t nbytes)
+        CStatus WriteAt(int64_t position, const void* data, int64_t nbytes)
+        CStatus SetBufferSize(const int64_t buffer_size)
+        int64_t buffer_size()
+        int64_t num_bytes_buffered() const
+
+    CResult[shared_ptr[CCudaHostBuffer]] AllocateCudaHostBuffer(
+        int device_number, const int64_t size)
+
+    # Cuda prefix is added to avoid picking up arrow::cuda functions
+    # from arrow namespace.
+    CResult[shared_ptr[CCudaBuffer]] \
+        CudaSerializeRecordBatch" arrow::cuda::SerializeRecordBatch"\
+        (const CRecordBatch& batch,
+         CCudaContext* ctx)
+    CResult[shared_ptr[CRecordBatch]] \
+        CudaReadRecordBatch" arrow::cuda::ReadRecordBatch"\
+        (const shared_ptr[CSchema]& schema,
+         CDictionaryMemo* dictionary_memo,
+         const shared_ptr[CCudaBuffer]& buffer,
+         CMemoryPool* pool)
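These map onto the pyarrow.cuda Python module. A minimal sketch of the host/device round-trip they enable (assuming a CUDA-enabled pyarrow build and at least one visible GPU):

import pyarrow as pa
from pyarrow import cuda

ctx = cuda.Context(0)                 # CudaDeviceManager::GetContext(0)
host = pa.py_buffer(b"hello, device")
cbuf = ctx.buffer_from_data(host)     # device allocation + CopyFromHost
assert cbuf.size == host.size
back = cbuf.copy_to_host()            # CudaBuffer::CopyToHost
assert back.to_pybytes() == b"hello, device"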
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset.pxd
ADDED
@@ -0,0 +1,405 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language = c++
+
+from libcpp.unordered_map cimport unordered_map
+from libcpp cimport bool as c_bool
+
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport *
+from pyarrow.includes.libarrow_acero cimport *
+from pyarrow.includes.libarrow_fs cimport *
+
+
+cdef extern from "arrow/dataset/plan.h" namespace "arrow::dataset::internal" nogil:
+
+    cdef void Initialize()
+
+
+ctypedef CStatus cb_writer_finish_internal(CFileWriter*)
+ctypedef void cb_writer_finish(dict, CFileWriter*)
+
+cdef extern from "arrow/dataset/api.h" namespace "arrow::dataset" nogil:
+
+    cdef enum ExistingDataBehavior" arrow::dataset::ExistingDataBehavior":
+        ExistingDataBehavior_DELETE_MATCHING" \
+            arrow::dataset::ExistingDataBehavior::kDeleteMatchingPartitions"
+        ExistingDataBehavior_OVERWRITE_OR_IGNORE" \
+            arrow::dataset::ExistingDataBehavior::kOverwriteOrIgnore"
+        ExistingDataBehavior_ERROR" \
+            arrow::dataset::ExistingDataBehavior::kError"
+
+    cdef cppclass CScanOptions "arrow::dataset::ScanOptions":
+        shared_ptr[CSchema] dataset_schema
+        shared_ptr[CSchema] projected_schema
+        c_bool use_threads
+        CExpression filter
+
+    cdef cppclass CScanNodeOptions "arrow::dataset::ScanNodeOptions"(CExecNodeOptions):
+        CScanNodeOptions(shared_ptr[CDataset] dataset, shared_ptr[CScanOptions] scan_options)
+
+        shared_ptr[CScanOptions] scan_options
+
+    cdef cppclass CFragmentScanOptions "arrow::dataset::FragmentScanOptions":
+        c_string type_name() const
+
+    ctypedef CIterator[shared_ptr[CScanTask]] CScanTaskIterator \
+        "arrow::dataset::ScanTaskIterator"
+
+    cdef cppclass CScanTask" arrow::dataset::ScanTask":
+        CResult[CRecordBatchIterator] Execute()
+
+    cdef cppclass CFragment "arrow::dataset::Fragment":
+        CResult[shared_ptr[CSchema]] ReadPhysicalSchema()
+        CResult[CScanTaskIterator] Scan(shared_ptr[CScanOptions] options)
+        c_bool splittable() const
+        c_string type_name() const
+        const CExpression& partition_expression() const
+
+    ctypedef vector[shared_ptr[CFragment]] CFragmentVector \
+        "arrow::dataset::FragmentVector"
+
+    ctypedef CIterator[shared_ptr[CFragment]] CFragmentIterator \
+        "arrow::dataset::FragmentIterator"
+
+    cdef cppclass CInMemoryFragment "arrow::dataset::InMemoryFragment"(
+            CFragment):
+        CInMemoryFragment(vector[shared_ptr[CRecordBatch]] record_batches,
+                          CExpression partition_expression)
+
+    cdef cppclass CTaggedRecordBatch "arrow::dataset::TaggedRecordBatch":
+        shared_ptr[CRecordBatch] record_batch
+        shared_ptr[CFragment] fragment
+
+    ctypedef CIterator[CTaggedRecordBatch] CTaggedRecordBatchIterator \
+        "arrow::dataset::TaggedRecordBatchIterator"
+
+    cdef cppclass CScanner "arrow::dataset::Scanner":
+        CScanner(shared_ptr[CDataset], shared_ptr[CScanOptions])
+        CScanner(shared_ptr[CFragment], shared_ptr[CScanOptions])
+        CResult[CScanTaskIterator] Scan()
+        CResult[CTaggedRecordBatchIterator] ScanBatches()
+        CResult[shared_ptr[CTable]] ToTable()
+        CResult[shared_ptr[CTable]] TakeRows(const CArray& indices)
+        CResult[shared_ptr[CTable]] Head(int64_t num_rows)
+        CResult[int64_t] CountRows()
+        CResult[CFragmentIterator] GetFragments()
+        CResult[shared_ptr[CRecordBatchReader]] ToRecordBatchReader()
+        const shared_ptr[CScanOptions]& options()
+
+    cdef cppclass CScannerBuilder "arrow::dataset::ScannerBuilder":
+        CScannerBuilder(shared_ptr[CDataset],
+                        shared_ptr[CScanOptions] scan_options)
+        CScannerBuilder(shared_ptr[CSchema], shared_ptr[CFragment],
+                        shared_ptr[CScanOptions] scan_options)
+
+        @staticmethod
+        shared_ptr[CScannerBuilder] FromRecordBatchReader(
+            shared_ptr[CRecordBatchReader] reader)
+        CStatus ProjectColumns "Project"(const vector[c_string]& columns)
+        CStatus Project(vector[CExpression]& exprs, vector[c_string]& columns)
+        CStatus Filter(CExpression filter)
+        CStatus UseThreads(c_bool use_threads)
+        CStatus Pool(CMemoryPool* pool)
+        CStatus BatchSize(int64_t batch_size)
+        CStatus BatchReadahead(int32_t batch_readahead)
+        CStatus FragmentReadahead(int32_t fragment_readahead)
+        CStatus FragmentScanOptions(
+            shared_ptr[CFragmentScanOptions] fragment_scan_options)
+        CResult[shared_ptr[CScanOptions]] GetScanOptions()
+        CResult[shared_ptr[CScanner]] Finish()
+        shared_ptr[CSchema] schema() const
+
+    ctypedef vector[shared_ptr[CDataset]] CDatasetVector \
+        "arrow::dataset::DatasetVector"
+
+    cdef cppclass CDataset "arrow::dataset::Dataset":
+        const shared_ptr[CSchema] & schema()
+        CResult[CFragmentIterator] GetFragments()
+        CResult[CFragmentIterator] GetFragments(CExpression predicate)
+        const CExpression & partition_expression()
+        c_string type_name()
+
+        CResult[shared_ptr[CDataset]] ReplaceSchema(shared_ptr[CSchema])
+
+        CResult[shared_ptr[CScannerBuilder]] NewScan()
+
+    cdef cppclass CInMemoryDataset "arrow::dataset::InMemoryDataset"(
+            CDataset):
+        CInMemoryDataset(shared_ptr[CRecordBatchReader])
+        CInMemoryDataset(shared_ptr[CTable])
+
+    cdef cppclass CUnionDataset "arrow::dataset::UnionDataset"(
+            CDataset):
+        @staticmethod
+        CResult[shared_ptr[CUnionDataset]] Make(shared_ptr[CSchema] schema,
+                                                CDatasetVector children)
+
+        const CDatasetVector& children() const
+
+    cdef cppclass CInspectOptions "arrow::dataset::InspectOptions":
+        int fragments
+
+    cdef cppclass CFinishOptions "arrow::dataset::FinishOptions":
+        shared_ptr[CSchema] schema
+        CInspectOptions inspect_options
+        c_bool validate_fragments
+
+    cdef cppclass CDatasetFactory "arrow::dataset::DatasetFactory":
+        CResult[vector[shared_ptr[CSchema]]] InspectSchemas(CInspectOptions)
+        CResult[shared_ptr[CSchema]] Inspect(CInspectOptions)
+        CResult[shared_ptr[CDataset]] FinishWithSchema "Finish"(
+            const shared_ptr[CSchema]& schema)
+        CResult[shared_ptr[CDataset]] Finish()
+        const CExpression& root_partition()
+        CStatus SetRootPartition(CExpression partition)
+
+    cdef cppclass CUnionDatasetFactory "arrow::dataset::UnionDatasetFactory":
+        @staticmethod
+        CResult[shared_ptr[CDatasetFactory]] Make(
+            vector[shared_ptr[CDatasetFactory]] factories)
+
+    cdef cppclass CFileSource "arrow::dataset::FileSource":
+        const c_string& path() const
+        const shared_ptr[CFileSystem]& filesystem() const
+        const shared_ptr[CBuffer]& buffer() const
+        const int64_t size() const
+        # HACK: Cython can't handle all the overloads so don't declare them.
+        # This means invalid construction of CFileSource won't be caught in
+        # the C++ generation phase (though it will still be caught when
+        # the generated C++ is compiled).
+        CFileSource(...)
+
+    cdef cppclass CFileWriteOptions \
+            "arrow::dataset::FileWriteOptions":
+        const shared_ptr[CFileFormat]& format() const
+        c_string type_name() const
+
+    cdef cppclass CFileWriter \
+            "arrow::dataset::FileWriter":
+        const shared_ptr[CFileFormat]& format() const
+        const shared_ptr[CSchema]& schema() const
+        const shared_ptr[CFileWriteOptions]& options() const
+        const CFileLocator& destination() const
+        CResult[int64_t] GetBytesWritten()
+
+    cdef cppclass CFileFormat "arrow::dataset::FileFormat":
+        shared_ptr[CFragmentScanOptions] default_fragment_scan_options
+        c_string type_name() const
+        CResult[shared_ptr[CSchema]] Inspect(const CFileSource&) const
+        CResult[shared_ptr[CFileFragment]] MakeFragment(
+            CFileSource source,
+            CExpression partition_expression,
+            shared_ptr[CSchema] physical_schema)
+        shared_ptr[CFileWriteOptions] DefaultWriteOptions()
+
+    cdef cppclass CFileFragment "arrow::dataset::FileFragment"(
+            CFragment):
+        const CFileSource& source() const
+        const shared_ptr[CFileFormat]& format() const
+
+    cdef cppclass CFileSystemDatasetWriteOptions \
+            "arrow::dataset::FileSystemDatasetWriteOptions":
+        shared_ptr[CFileWriteOptions] file_write_options
+        shared_ptr[CFileSystem] filesystem
+        c_string base_dir
+        shared_ptr[CPartitioning] partitioning
+        int max_partitions
+        c_string basename_template
+        function[cb_writer_finish_internal] writer_pre_finish
+        function[cb_writer_finish_internal] writer_post_finish
+        ExistingDataBehavior existing_data_behavior
+        c_bool create_dir
+        uint32_t max_open_files
+        uint64_t max_rows_per_file
+        uint64_t min_rows_per_group
+        uint64_t max_rows_per_group
+
+    cdef cppclass CFileSystemDataset \
+            "arrow::dataset::FileSystemDataset"(CDataset):
+        @staticmethod
+        CResult[shared_ptr[CDataset]] Make(
+            shared_ptr[CSchema] schema,
+            CExpression source_partition,
+            shared_ptr[CFileFormat] format,
+            shared_ptr[CFileSystem] filesystem,
+            vector[shared_ptr[CFileFragment]] fragments)
+
+        @staticmethod
+        CStatus Write(
+            const CFileSystemDatasetWriteOptions& write_options,
+            shared_ptr[CScanner] scanner)
+
+        c_string type()
+        vector[c_string] files()
+        const shared_ptr[CFileFormat]& format() const
+        const shared_ptr[CFileSystem]& filesystem() const
+        const shared_ptr[CPartitioning]& partitioning() const
+
+    cdef cppclass CIpcFileWriteOptions \
+            "arrow::dataset::IpcFileWriteOptions"(CFileWriteOptions):
+        shared_ptr[CIpcWriteOptions] options
+
+    cdef cppclass CIpcFileFormat "arrow::dataset::IpcFileFormat"(
+            CFileFormat):
+        pass
+
+    cdef cppclass COrcFileFormat "arrow::dataset::OrcFileFormat"(
+            CFileFormat):
+        pass
+
+    cdef cppclass CCsvFileWriteOptions \
+            "arrow::dataset::CsvFileWriteOptions"(CFileWriteOptions):
+        shared_ptr[CCSVWriteOptions] write_options
+        CMemoryPool* pool
+
+    cdef cppclass CCsvFileFormat "arrow::dataset::CsvFileFormat"(
+            CFileFormat):
+        CCSVParseOptions parse_options
+
+    cdef cppclass CCsvFragmentScanOptions \
+            "arrow::dataset::CsvFragmentScanOptions"(CFragmentScanOptions):
+        CCSVConvertOptions convert_options
+        CCSVReadOptions read_options
+        function[StreamWrapFunc] stream_transform_func
+
+    cdef cppclass CJsonFileFormat "arrow::dataset::JsonFileFormat"(CFileFormat):
+        pass
+
+    cdef cppclass CJsonFragmentScanOptions "arrow::dataset::JsonFragmentScanOptions"(CFragmentScanOptions):
+        CJSONParseOptions parse_options
+        CJSONReadOptions read_options
+
+    cdef cppclass CPartitioning "arrow::dataset::Partitioning":
+        c_string type_name() const
+        CResult[CExpression] Parse(const c_string & path) const
+        const shared_ptr[CSchema] & schema()
+        c_bool Equals(const CPartitioning& other) const
+
+    cdef cppclass CSegmentEncoding" arrow::dataset::SegmentEncoding":
+        bint operator==(CSegmentEncoding)
+
+    CSegmentEncoding CSegmentEncoding_None\
+        " arrow::dataset::SegmentEncoding::None"
+    CSegmentEncoding CSegmentEncoding_Uri\
+        " arrow::dataset::SegmentEncoding::Uri"
+
+    cdef cppclass CKeyValuePartitioningOptions \
+            "arrow::dataset::KeyValuePartitioningOptions":
+        CSegmentEncoding segment_encoding
+
+    cdef cppclass CHivePartitioningOptions \
+            "arrow::dataset::HivePartitioningOptions":
+        CSegmentEncoding segment_encoding
+        c_string null_fallback
+
+    cdef cppclass CPartitioningFactoryOptions \
+            "arrow::dataset::PartitioningFactoryOptions":
+        c_bool infer_dictionary
+        shared_ptr[CSchema] schema
+        CSegmentEncoding segment_encoding
+
+    cdef cppclass CHivePartitioningFactoryOptions \
+            "arrow::dataset::HivePartitioningFactoryOptions":
+        c_bool infer_dictionary
+        c_string null_fallback
+        shared_ptr[CSchema] schema
+        CSegmentEncoding segment_encoding
+
+    cdef cppclass CPartitioningFactory "arrow::dataset::PartitioningFactory":
+        c_string type_name() const
+
+    cdef cppclass CKeyValuePartitioning \
+            "arrow::dataset::KeyValuePartitioning"(CPartitioning):
+        CKeyValuePartitioning(shared_ptr[CSchema] schema,
+                              vector[shared_ptr[CArray]] dictionaries,
+                              CKeyValuePartitioningOptions options)
+
+        vector[shared_ptr[CArray]] dictionaries() const
+        CSegmentEncoding segment_encoding()
+
+    cdef cppclass CDirectoryPartitioning \
+            "arrow::dataset::DirectoryPartitioning"(CPartitioning):
+        CDirectoryPartitioning(shared_ptr[CSchema] schema,
+                               vector[shared_ptr[CArray]] dictionaries)
+
+        @staticmethod
+        shared_ptr[CPartitioningFactory] MakeFactory(
+            vector[c_string] field_names, CPartitioningFactoryOptions)
+
+        vector[shared_ptr[CArray]] dictionaries() const
+
+    cdef cppclass CHivePartitioning \
+            "arrow::dataset::HivePartitioning"(CPartitioning):
+        CHivePartitioning(shared_ptr[CSchema] schema,
+                          vector[shared_ptr[CArray]] dictionaries,
+                          CHivePartitioningOptions options)
+
+        @staticmethod
+        shared_ptr[CPartitioningFactory] MakeFactory(
+            CHivePartitioningFactoryOptions)
+
+        vector[shared_ptr[CArray]] dictionaries() const
+        c_string null_fallback() const
+
+    cdef cppclass CFilenamePartitioning \
+            "arrow::dataset::FilenamePartitioning"(CPartitioning):
+        CFilenamePartitioning(shared_ptr[CSchema] schema,
+                              vector[shared_ptr[CArray]] dictionaries)
+
+        @staticmethod
+        shared_ptr[CPartitioningFactory] MakeFactory(
+            vector[c_string] field_names, CPartitioningFactoryOptions)
+
+        vector[shared_ptr[CArray]] dictionaries() const
+
+    cdef cppclass CPartitioningOrFactory \
+            "arrow::dataset::PartitioningOrFactory":
+        CPartitioningOrFactory(shared_ptr[CPartitioning])
+        CPartitioningOrFactory(shared_ptr[CPartitioningFactory])
+        CPartitioningOrFactory & operator = (shared_ptr[CPartitioning])
+        CPartitioningOrFactory & operator = (
+            shared_ptr[CPartitioningFactory])
+        shared_ptr[CPartitioning] partitioning() const
+        shared_ptr[CPartitioningFactory] factory() const
+
+    cdef cppclass CFileSystemFactoryOptions \
+            "arrow::dataset::FileSystemFactoryOptions":
+        CPartitioningOrFactory partitioning
+        c_string partition_base_dir
+        c_bool exclude_invalid_files
+        vector[c_string] selector_ignore_prefixes
+
+    cdef cppclass CFileSystemDatasetFactory \
+            "arrow::dataset::FileSystemDatasetFactory"(
+                CDatasetFactory):
+        @staticmethod
+        CResult[shared_ptr[CDatasetFactory]] MakeFromPaths "Make"(
+            shared_ptr[CFileSystem] filesystem,
+            vector[c_string] paths,
+            shared_ptr[CFileFormat] format,
+            CFileSystemFactoryOptions options
+        )
+
+        @staticmethod
+        CResult[shared_ptr[CDatasetFactory]] MakeFromSelector "Make"(
+            shared_ptr[CFileSystem] filesystem,
+            CFileSelector,
+            shared_ptr[CFileFormat] format,
+            CFileSystemFactoryOptions options
+        )
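These declarations underpin the pyarrow.dataset Python API: datasets are discovered by a factory, scanned through a ScannerBuilder, and written back via FileSystemDataset::Write. A minimal sketch of the corresponding public API (the /tmp path is illustrative):

import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.dataset as ds

table = pa.table({"year": [2022, 2022, 2023], "value": [1.0, 2.0, 3.0]})

# write_dataset maps onto FileSystemDatasetWriteOptions above
# (partitioning, existing_data_behavior, max_rows_per_file, ...).
ds.write_dataset(
    table, "/tmp/demo_dataset", format="parquet",
    partitioning=ds.partitioning(pa.schema([("year", pa.int64())]),
                                 flavor="hive"),
    existing_data_behavior="overwrite_or_ignore")

# dataset() runs a FileSystemDatasetFactory; to_table() builds a Scanner
# with a filter expression and a column projection.
dataset = ds.dataset("/tmp/demo_dataset", format="parquet",
                     partitioning="hive")
print(dataset.to_table(filter=pc.field("year") == 2022, columns=["value"]))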
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_dataset_parquet.pxd
ADDED
@@ -0,0 +1,105 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language = c++
+
+from pyarrow.includes.libarrow_dataset cimport *
+from pyarrow.includes.libparquet_encryption cimport *
+
+from pyarrow._parquet cimport *
+
+
+cdef extern from "arrow/dataset/parquet_encryption_config.h" namespace "arrow::dataset" nogil:
+    cdef cppclass CParquetEncryptionConfig "arrow::dataset::ParquetEncryptionConfig":
+        shared_ptr[CCryptoFactory] crypto_factory
+        shared_ptr[CKmsConnectionConfig] kms_connection_config
+        shared_ptr[CEncryptionConfiguration] encryption_config
+
+    cdef cppclass CParquetDecryptionConfig "arrow::dataset::ParquetDecryptionConfig":
+        shared_ptr[CCryptoFactory] crypto_factory
+        shared_ptr[CKmsConnectionConfig] kms_connection_config
+        shared_ptr[CDecryptionConfiguration] decryption_config
+
+
+cdef extern from "arrow/dataset/api.h" namespace "arrow::dataset" nogil:
+
+    cdef cppclass CParquetFileWriter \
+            "arrow::dataset::ParquetFileWriter"(CFileWriter):
+        const shared_ptr[FileWriter]& parquet_writer() const
+
+    cdef cppclass CParquetFileWriteOptions \
+            "arrow::dataset::ParquetFileWriteOptions"(CFileWriteOptions):
+        shared_ptr[WriterProperties] writer_properties
+        shared_ptr[ArrowWriterProperties] arrow_writer_properties
+        shared_ptr[CParquetEncryptionConfig] parquet_encryption_config
+
+    cdef cppclass CParquetFileFragment "arrow::dataset::ParquetFileFragment"(
+            CFileFragment):
+        const vector[int]& row_groups() const
+        shared_ptr[CFileMetaData] metadata() const
+        CResult[vector[shared_ptr[CFragment]]] SplitByRowGroup(
+            CExpression predicate)
+        CResult[shared_ptr[CFragment]] SubsetWithFilter "Subset"(
+            CExpression predicate)
+        CResult[shared_ptr[CFragment]] SubsetWithIds "Subset"(
+            vector[int] row_group_ids)
+        CStatus EnsureCompleteMetadata()
+
+    cdef cppclass CParquetFileFormatReaderOptions \
+            "arrow::dataset::ParquetFileFormat::ReaderOptions":
+        unordered_set[c_string] dict_columns
+        TimeUnit coerce_int96_timestamp_unit
+
+    cdef cppclass CParquetFileFormat "arrow::dataset::ParquetFileFormat"(
+            CFileFormat):
+        CParquetFileFormatReaderOptions reader_options
+        CResult[shared_ptr[CFileFragment]] MakeFragment(
+            CFileSource source,
+            CExpression partition_expression,
+            shared_ptr[CSchema] physical_schema,
+            vector[int] row_groups)
+
+    cdef cppclass CParquetFragmentScanOptions \
+            "arrow::dataset::ParquetFragmentScanOptions"(CFragmentScanOptions):
+        shared_ptr[CReaderProperties] reader_properties
+        shared_ptr[ArrowReaderProperties] arrow_reader_properties
+        shared_ptr[CParquetDecryptionConfig] parquet_decryption_config
+
+    cdef cppclass CParquetFactoryOptions \
+            "arrow::dataset::ParquetFactoryOptions":
+        CPartitioningOrFactory partitioning
+        c_string partition_base_dir
+        c_bool validate_column_chunk_paths
+
+    cdef cppclass CParquetDatasetFactory \
+            "arrow::dataset::ParquetDatasetFactory"(CDatasetFactory):
+        @staticmethod
+        CResult[shared_ptr[CDatasetFactory]] MakeFromMetaDataPath "Make"(
+            const c_string& metadata_path,
+            shared_ptr[CFileSystem] filesystem,
+            shared_ptr[CParquetFileFormat] format,
+            CParquetFactoryOptions options
+        )
+
+        @staticmethod
+        CResult[shared_ptr[CDatasetFactory]] MakeFromMetaDataSource "Make"(
+            const CFileSource& metadata_path,
+            const c_string& base_path,
+            shared_ptr[CFileSystem] filesystem,
+            shared_ptr[CParquetFileFormat] format,
+            CParquetFactoryOptions options
+        )
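On the Python side these surface as the Parquet-specific pieces of pyarrow.dataset, such as per-fragment row groups and fragment scan options. A minimal sketch (the directory path is illustrative and assumed to hold Parquet files):

import pyarrow.dataset as ds

# ParquetFileFormat + ParquetFragmentScanOptions declared above.
fmt = ds.ParquetFileFormat(
    default_fragment_scan_options=ds.ParquetFragmentScanOptions(
        pre_buffer=True))
dataset = ds.dataset("/tmp/demo_dataset", format=fmt)

for fragment in dataset.get_fragments():
    # ParquetFileFragment exposes row_groups and split_by_row_group().
    print(fragment.path, [rg.id for rg in fragment.row_groups])
    for piece in fragment.split_by_row_group():
        pass  # one sub-fragment per row group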
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_feather.pxd
ADDED
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language = c++
+
+from pyarrow.includes.libarrow cimport (CCompressionType, CStatus, CTable,
+                                        COutputStream, CResult, shared_ptr,
+                                        vector, CRandomAccessFile, CSchema,
+                                        c_string, CIpcReadOptions)
+
+
+cdef extern from "arrow/ipc/api.h" namespace "arrow::ipc" nogil:
+    int kFeatherV1Version" arrow::ipc::feather::kFeatherV1Version"
+    int kFeatherV2Version" arrow::ipc::feather::kFeatherV2Version"
+
+    cdef cppclass CFeatherProperties" arrow::ipc::feather::WriteProperties":
+        int version
+        int chunksize
+        CCompressionType compression
+        int compression_level
+
+    CStatus WriteFeather" arrow::ipc::feather::WriteTable" \
+        (const CTable& table, COutputStream* out,
+         CFeatherProperties properties)
+
+    cdef cppclass CFeatherReader" arrow::ipc::feather::Reader":
+        @staticmethod
+        CResult[shared_ptr[CFeatherReader]] Open(
+            const shared_ptr[CRandomAccessFile]& file,
+            const CIpcReadOptions& options)
+        int version()
+        shared_ptr[CSchema] schema()
+
+        CStatus Read(shared_ptr[CTable]* out)
+        CStatus Read(const vector[int] indices, shared_ptr[CTable]* out)
+        CStatus Read(const vector[c_string] names, shared_ptr[CTable]* out)
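These declarations back pyarrow.feather, where WriteProperties shows up as keyword arguments and the name-based Read overload as the columns parameter. A minimal sketch (the file path is illustrative):

import pyarrow as pa
import pyarrow.feather as feather

table = pa.table({"a": [1, 2, 3], "b": ["x", "y", "z"]})

# version=2 and compression map onto WriteProperties above.
feather.write_feather(table, "/tmp/demo.feather",
                      compression="zstd", version=2)

# The names-based Read overload backs the columns argument.
subset = feather.read_table("/tmp/demo.feather", columns=["a"])
print(subset)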
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_flight.pxd
ADDED
@@ -0,0 +1,622 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# distutils: language = c++

from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *


cdef extern from "arrow/flight/api.h" namespace "arrow" nogil:
    cdef char* CTracingServerMiddlewareName\
        " arrow::flight::TracingServerMiddleware::kMiddlewareName"

    cdef cppclass CActionType" arrow::flight::ActionType":
        c_string type
        c_string description
        bint operator==(CActionType)
        CResult[c_string] SerializeToString()

        @staticmethod
        CResult[CActionType] Deserialize(const c_string& serialized)

    cdef cppclass CAction" arrow::flight::Action":
        c_string type
        shared_ptr[CBuffer] body
        bint operator==(CAction)
        CResult[c_string] SerializeToString()
        c_string ToString()

        @staticmethod
        CResult[CAction] Deserialize(const c_string& serialized)

    cdef cppclass CFlightResult" arrow::flight::Result":
        CFlightResult()
        CFlightResult(CFlightResult)
        shared_ptr[CBuffer] body
        bint operator==(CFlightResult)
        CResult[c_string] SerializeToString()
        c_string ToString()

        @staticmethod
        CResult[CFlightResult] Deserialize(const c_string& serialized)

    cdef cppclass CBasicAuth" arrow::flight::BasicAuth":
        CBasicAuth()
        CBasicAuth(CBuffer)
        CBasicAuth(CBasicAuth)
        c_string username
        c_string password
        bint operator==(CBasicAuth)
        CResult[c_string] SerializeToString()
        c_string ToString()

        @staticmethod
        CResult[CBasicAuth] Deserialize(const c_string& serialized)

    cdef cppclass CResultStream" arrow::flight::ResultStream":
        CResult[unique_ptr[CFlightResult]] Next()

    cdef cppclass CDescriptorType \
            " arrow::flight::FlightDescriptor::DescriptorType":
        bint operator==(CDescriptorType)

    CDescriptorType CDescriptorTypeUnknown\
        " arrow::flight::FlightDescriptor::UNKNOWN"
    CDescriptorType CDescriptorTypePath\
        " arrow::flight::FlightDescriptor::PATH"
    CDescriptorType CDescriptorTypeCmd\
        " arrow::flight::FlightDescriptor::CMD"

    cdef cppclass CFlightDescriptor" arrow::flight::FlightDescriptor":
        CDescriptorType type
        c_string cmd
        vector[c_string] path
        bint operator==(CFlightDescriptor)
        CResult[c_string] SerializeToString()
        c_string ToString()

        @staticmethod
        CResult[CFlightDescriptor] Deserialize(const c_string& serialized)

    cdef cppclass CTicket" arrow::flight::Ticket":
        CTicket()
        c_string ticket
        bint operator==(CTicket)
        CResult[c_string] SerializeToString()
        c_string ToString()

        @staticmethod
        CResult[CTicket] Deserialize(const c_string& serialized)

    cdef cppclass CCriteria" arrow::flight::Criteria":
        CCriteria()
        c_string expression
        bint operator==(CCriteria)
        CResult[c_string] SerializeToString()

        @staticmethod
        CResult[CCriteria] Deserialize(const c_string& serialized)

    cdef cppclass CLocation" arrow::flight::Location":
        CLocation()
        c_string ToString()
        c_bool Equals(const CLocation& other)

        @staticmethod
        CResult[CLocation] Parse(const c_string& uri_string)

        @staticmethod
        CResult[CLocation] ForGrpcTcp(const c_string& host, int port)

        @staticmethod
        CResult[CLocation] ForGrpcTls(const c_string& host, int port)

        @staticmethod
        CResult[CLocation] ForGrpcUnix(const c_string& path)

    cdef cppclass CFlightEndpoint" arrow::flight::FlightEndpoint":
        CFlightEndpoint()

        CTicket ticket
        vector[CLocation] locations

        bint operator==(CFlightEndpoint)
        CResult[c_string] SerializeToString()
        c_string ToString()

        @staticmethod
        CResult[CFlightEndpoint] Deserialize(const c_string& serialized)

    cdef cppclass CFlightInfo" arrow::flight::FlightInfo":
        CFlightInfo(CFlightInfo info)
        int64_t total_records()
        int64_t total_bytes()
        CResult[shared_ptr[CSchema]] GetSchema(CDictionaryMemo* memo)
        CFlightDescriptor& descriptor()
        const vector[CFlightEndpoint]& endpoints()
        CResult[c_string] SerializeToString()
        c_string ToString()
        bint operator==(CFlightInfo)

        @staticmethod
        CResult[unique_ptr[CFlightInfo]] Deserialize(
            const c_string& serialized)

    cdef cppclass CSchemaResult" arrow::flight::SchemaResult":
        CSchemaResult()
        CSchemaResult(CSchemaResult result)
        CResult[shared_ptr[CSchema]] GetSchema(CDictionaryMemo* memo)
        bint operator==(CSchemaResult)
        CResult[c_string] SerializeToString()
        c_string ToString()

        @staticmethod
        CResult[CSchemaResult] Deserialize(const c_string& serialized)

    cdef cppclass CFlightListing" arrow::flight::FlightListing":
        CResult[unique_ptr[CFlightInfo]] Next()

    cdef cppclass CSimpleFlightListing" arrow::flight::SimpleFlightListing":
        # This doesn't work with Cython >= 3
        # CSimpleFlightListing(vector[CFlightInfo]&& info)
        CSimpleFlightListing(const vector[CFlightInfo]& info)

    cdef cppclass CFlightPayload" arrow::flight::FlightPayload":
        shared_ptr[CBuffer] descriptor
        shared_ptr[CBuffer] app_metadata
        CIpcPayload ipc_message

    cdef cppclass CFlightDataStream" arrow::flight::FlightDataStream":
        shared_ptr[CSchema] schema()
        CResult[CFlightPayload] Next()

    cdef cppclass CFlightStreamChunk" arrow::flight::FlightStreamChunk":
        CFlightStreamChunk()
        shared_ptr[CRecordBatch] data
        shared_ptr[CBuffer] app_metadata

    cdef cppclass CMetadataRecordBatchReader \
            " arrow::flight::MetadataRecordBatchReader":
        CResult[shared_ptr[CSchema]] GetSchema()
        CResult[CFlightStreamChunk] Next()
        CResult[shared_ptr[CTable]] ToTable()

    CResult[shared_ptr[CRecordBatchReader]] MakeRecordBatchReader\
        " arrow::flight::MakeRecordBatchReader"(
            shared_ptr[CMetadataRecordBatchReader])

    cdef cppclass CMetadataRecordBatchWriter \
            " arrow::flight::MetadataRecordBatchWriter"(CRecordBatchWriter):
        CStatus Begin(shared_ptr[CSchema] schema,
                      const CIpcWriteOptions& options)
        CStatus WriteMetadata(shared_ptr[CBuffer] app_metadata)
        CStatus WriteWithMetadata(const CRecordBatch& batch,
                                  shared_ptr[CBuffer] app_metadata)

    cdef cppclass CFlightStreamReader \
            " arrow::flight::FlightStreamReader"(CMetadataRecordBatchReader):
        void Cancel()
        CResult[shared_ptr[CTable]] ToTableWithStopToken" ToTable"\
            (const CStopToken& stop_token)

    cdef cppclass CFlightMessageReader \
            " arrow::flight::FlightMessageReader"(CMetadataRecordBatchReader):
        CFlightDescriptor& descriptor()

    cdef cppclass CFlightMessageWriter \
            " arrow::flight::FlightMessageWriter"(CMetadataRecordBatchWriter):
        pass

    cdef cppclass CFlightStreamWriter \
            " arrow::flight::FlightStreamWriter"(CMetadataRecordBatchWriter):
        CStatus DoneWriting()

    cdef cppclass CRecordBatchStream \
            " arrow::flight::RecordBatchStream"(CFlightDataStream):
        CRecordBatchStream(shared_ptr[CRecordBatchReader]& reader,
                           const CIpcWriteOptions& options)

    cdef cppclass CFlightMetadataReader" arrow::flight::FlightMetadataReader":
        CStatus ReadMetadata(shared_ptr[CBuffer]* out)

    cdef cppclass CFlightMetadataWriter" arrow::flight::FlightMetadataWriter":
        CStatus WriteMetadata(const CBuffer& message)

    cdef cppclass CServerAuthReader" arrow::flight::ServerAuthReader":
        CStatus Read(c_string* token)

    cdef cppclass CServerAuthSender" arrow::flight::ServerAuthSender":
        CStatus Write(c_string& token)

    cdef cppclass CClientAuthReader" arrow::flight::ClientAuthReader":
        CStatus Read(c_string* token)

    cdef cppclass CClientAuthSender" arrow::flight::ClientAuthSender":
        CStatus Write(c_string& token)

    cdef cppclass CServerAuthHandler" arrow::flight::ServerAuthHandler":
        pass

    cdef cppclass CClientAuthHandler" arrow::flight::ClientAuthHandler":
        pass

    cdef cppclass CServerCallContext" arrow::flight::ServerCallContext":
        c_string& peer_identity()
        c_string& peer()
        c_bool is_cancelled()
        void AddHeader(const c_string& key, const c_string& value)
        void AddTrailer(const c_string& key, const c_string& value)
        CServerMiddleware* GetMiddleware(const c_string& key)

    cdef cppclass CTimeoutDuration" arrow::flight::TimeoutDuration":
        CTimeoutDuration(double)

    cdef cppclass CFlightCallOptions" arrow::flight::FlightCallOptions":
        CFlightCallOptions()
        CTimeoutDuration timeout
        CIpcWriteOptions write_options
        CIpcReadOptions read_options
        vector[pair[c_string, c_string]] headers
        CStopToken stop_token

    cdef cppclass CCertKeyPair" arrow::flight::CertKeyPair":
        CCertKeyPair()
        c_string pem_cert
        c_string pem_key

    cdef cppclass CFlightMethod" arrow::flight::FlightMethod":
        bint operator==(CFlightMethod)

    CFlightMethod CFlightMethodInvalid\
        " arrow::flight::FlightMethod::Invalid"
    CFlightMethod CFlightMethodHandshake\
        " arrow::flight::FlightMethod::Handshake"
    CFlightMethod CFlightMethodListFlights\
        " arrow::flight::FlightMethod::ListFlights"
    CFlightMethod CFlightMethodGetFlightInfo\
        " arrow::flight::FlightMethod::GetFlightInfo"
    CFlightMethod CFlightMethodGetSchema\
        " arrow::flight::FlightMethod::GetSchema"
    CFlightMethod CFlightMethodDoGet\
        " arrow::flight::FlightMethod::DoGet"
    CFlightMethod CFlightMethodDoPut\
        " arrow::flight::FlightMethod::DoPut"
    CFlightMethod CFlightMethodDoAction\
        " arrow::flight::FlightMethod::DoAction"
    CFlightMethod CFlightMethodListActions\
        " arrow::flight::FlightMethod::ListActions"
    CFlightMethod CFlightMethodDoExchange\
        " arrow::flight::FlightMethod::DoExchange"

    cdef cppclass CCallInfo" arrow::flight::CallInfo":
        CFlightMethod method

    # This is really std::unordered_multimap, but Cython has no
    # bindings for it, so treat it as an opaque class and bind the
    # methods we need
    cdef cppclass CCallHeaders" arrow::flight::CallHeaders":
        cppclass const_iterator:
            pair[c_string, c_string] operator*()
            # For Cython < 3
            const_iterator operator++()
            # For Cython >= 3
            const_iterator operator++(int)
            bint operator==(const_iterator)
            bint operator!=(const_iterator)
        const_iterator cbegin()
        const_iterator cend()

    cdef cppclass CAddCallHeaders" arrow::flight::AddCallHeaders":
        void AddHeader(const c_string& key, const c_string& value)

    cdef cppclass CServerMiddleware" arrow::flight::ServerMiddleware":
        c_string name()

    cdef cppclass CServerMiddlewareFactory\
            " arrow::flight::ServerMiddlewareFactory":
        pass

    cdef cppclass CClientMiddleware" arrow::flight::ClientMiddleware":
        pass

    cdef cppclass CClientMiddlewareFactory\
            " arrow::flight::ClientMiddlewareFactory":
        pass

    cpdef cppclass CTracingServerMiddlewareTraceKey\
            " arrow::flight::TracingServerMiddleware::TraceKey":
        CTracingServerMiddlewareTraceKey()
        c_string key
        c_string value

    cdef cppclass CTracingServerMiddleware\
            " arrow::flight::TracingServerMiddleware"(CServerMiddleware):
        vector[CTracingServerMiddlewareTraceKey] GetTraceContext()

    cdef shared_ptr[CServerMiddlewareFactory] \
        MakeTracingServerMiddlewareFactory\
        " arrow::flight::MakeTracingServerMiddlewareFactory"()

    cdef cppclass CFlightServerOptions" arrow::flight::FlightServerOptions":
        CFlightServerOptions(const CLocation& location)
        CLocation location
        unique_ptr[CServerAuthHandler] auth_handler
        vector[CCertKeyPair] tls_certificates
        c_bool verify_client
        c_string root_certificates
        vector[pair[c_string, shared_ptr[CServerMiddlewareFactory]]] middleware

    cdef cppclass CFlightClientOptions" arrow::flight::FlightClientOptions":
        c_string tls_root_certs
        c_string cert_chain
        c_string private_key
        c_string override_hostname
        vector[shared_ptr[CClientMiddlewareFactory]] middleware
        int64_t write_size_limit_bytes
        vector[pair[c_string, CIntStringVariant]] generic_options
        c_bool disable_server_verification

        @staticmethod
        CFlightClientOptions Defaults()

    cdef cppclass CDoPutResult" arrow::flight::FlightClient::DoPutResult":
        unique_ptr[CFlightStreamWriter] writer
        unique_ptr[CFlightMetadataReader] reader

    cdef cppclass CDoExchangeResult" arrow::flight::FlightClient::DoExchangeResult":
        unique_ptr[CFlightStreamWriter] writer
        unique_ptr[CFlightStreamReader] reader

    cdef cppclass CFlightClient" arrow::flight::FlightClient":
        @staticmethod
        CResult[unique_ptr[CFlightClient]] Connect(const CLocation& location,
                                                   const CFlightClientOptions& options)

        c_bool supports_async()
        CStatus CheckAsyncSupport()

        CStatus Authenticate(CFlightCallOptions& options,
                             unique_ptr[CClientAuthHandler] auth_handler)

        CResult[pair[c_string, c_string]] AuthenticateBasicToken(
            CFlightCallOptions& options,
            const c_string& username,
            const c_string& password)

        CResult[unique_ptr[CResultStream]] DoAction(CFlightCallOptions& options, CAction& action)
        CResult[vector[CActionType]] ListActions(CFlightCallOptions& options)

        CResult[unique_ptr[CFlightListing]] ListFlights(CFlightCallOptions& options, CCriteria criteria)
        CResult[unique_ptr[CFlightInfo]] GetFlightInfo(CFlightCallOptions& options,
                                                       CFlightDescriptor& descriptor)
        CFuture[CFlightInfo] GetFlightInfoAsync(CFlightCallOptions& options,
                                                CFlightDescriptor& descriptor)
        CResult[unique_ptr[CSchemaResult]] GetSchema(CFlightCallOptions& options,
                                                     CFlightDescriptor& descriptor)
        CResult[unique_ptr[CFlightStreamReader]] DoGet(CFlightCallOptions& options, CTicket& ticket)
        CResult[CDoPutResult] DoPut(CFlightCallOptions& options,
                                    CFlightDescriptor& descriptor,
                                    shared_ptr[CSchema]& schema)
        CResult[CDoExchangeResult] DoExchange(CFlightCallOptions& options,
                                              CFlightDescriptor& descriptor)
        CStatus Close()

    cdef cppclass CFlightStatusCode" arrow::flight::FlightStatusCode":
        bint operator==(CFlightStatusCode)

    CFlightStatusCode CFlightStatusInternal \
        " arrow::flight::FlightStatusCode::Internal"
    CFlightStatusCode CFlightStatusTimedOut \
        " arrow::flight::FlightStatusCode::TimedOut"
    CFlightStatusCode CFlightStatusCancelled \
        " arrow::flight::FlightStatusCode::Cancelled"
    CFlightStatusCode CFlightStatusUnauthenticated \
        " arrow::flight::FlightStatusCode::Unauthenticated"
    CFlightStatusCode CFlightStatusUnauthorized \
        " arrow::flight::FlightStatusCode::Unauthorized"
    CFlightStatusCode CFlightStatusUnavailable \
        " arrow::flight::FlightStatusCode::Unavailable"
    CFlightStatusCode CFlightStatusFailed \
        " arrow::flight::FlightStatusCode::Failed"

    cdef cppclass FlightStatusDetail" arrow::flight::FlightStatusDetail":
        CFlightStatusCode code()
        c_string extra_info()

        @staticmethod
        shared_ptr[FlightStatusDetail] UnwrapStatus(const CStatus& status)

    cdef cppclass FlightWriteSizeStatusDetail\
            " arrow::flight::FlightWriteSizeStatusDetail":
        int64_t limit()
        int64_t actual()

        @staticmethod
        shared_ptr[FlightWriteSizeStatusDetail] UnwrapStatus(
            const CStatus& status)

    cdef CStatus MakeFlightError" arrow::flight::MakeFlightError" \
        (CFlightStatusCode code, const c_string& message)

    cdef CStatus MakeFlightError" arrow::flight::MakeFlightError" \
        (CFlightStatusCode code,
         const c_string& message,
         const c_string& extra_info)

# Callbacks for implementing Flight servers
# Use typedef to emulate syntax for std::function<void(..)>
ctypedef CStatus cb_list_flights(object, const CServerCallContext&,
                                 const CCriteria*,
                                 unique_ptr[CFlightListing]*)
ctypedef CStatus cb_get_flight_info(object, const CServerCallContext&,
                                    const CFlightDescriptor&,
                                    unique_ptr[CFlightInfo]*)
ctypedef CStatus cb_get_schema(object, const CServerCallContext&,
                               const CFlightDescriptor&,
                               unique_ptr[CSchemaResult]*)
ctypedef CStatus cb_do_put(object, const CServerCallContext&,
                           unique_ptr[CFlightMessageReader],
                           unique_ptr[CFlightMetadataWriter])
ctypedef CStatus cb_do_get(object, const CServerCallContext&,
                           const CTicket&,
                           unique_ptr[CFlightDataStream]*)
ctypedef CStatus cb_do_exchange(object, const CServerCallContext&,
                                unique_ptr[CFlightMessageReader],
                                unique_ptr[CFlightMessageWriter])
ctypedef CStatus cb_do_action(object, const CServerCallContext&,
                              const CAction&,
                              unique_ptr[CResultStream]*)
ctypedef CStatus cb_list_actions(object, const CServerCallContext&,
                                 vector[CActionType]*)
ctypedef CStatus cb_result_next(object, unique_ptr[CFlightResult]*)
ctypedef CStatus cb_data_stream_next(object, CFlightPayload*)
ctypedef CStatus cb_server_authenticate(object, CServerAuthSender*,
                                        CServerAuthReader*)
ctypedef CStatus cb_is_valid(object, const c_string&, c_string*)
ctypedef CStatus cb_client_authenticate(object, CClientAuthSender*,
                                        CClientAuthReader*)
ctypedef CStatus cb_get_token(object, c_string*)

ctypedef CStatus cb_middleware_sending_headers(object, CAddCallHeaders*)
ctypedef CStatus cb_middleware_call_completed(object, const CStatus&)
ctypedef CStatus cb_client_middleware_received_headers(
    object, const CCallHeaders&)
ctypedef CStatus cb_server_middleware_start_call(
    object,
    const CCallInfo&,
    const CCallHeaders&,
    shared_ptr[CServerMiddleware]*)
ctypedef CStatus cb_client_middleware_start_call(
    object,
    const CCallInfo&,
    unique_ptr[CClientMiddleware]*)

cdef extern from "arrow/python/flight.h" namespace "arrow::py::flight" nogil:
    cdef char* CPyServerMiddlewareName\
        " arrow::py::flight::kPyServerMiddlewareName"

    cdef cppclass PyFlightServerVtable:
        PyFlightServerVtable()
        function[cb_list_flights] list_flights
        function[cb_get_flight_info] get_flight_info
        function[cb_get_schema] get_schema
        function[cb_do_put] do_put
        function[cb_do_get] do_get
        function[cb_do_exchange] do_exchange
        function[cb_do_action] do_action
        function[cb_list_actions] list_actions

    cdef cppclass PyServerAuthHandlerVtable:
        PyServerAuthHandlerVtable()
        function[cb_server_authenticate] authenticate
        function[cb_is_valid] is_valid

    cdef cppclass PyClientAuthHandlerVtable:
        PyClientAuthHandlerVtable()
        function[cb_client_authenticate] authenticate
        function[cb_get_token] get_token

    cdef cppclass PyFlightServer:
        PyFlightServer(object server, PyFlightServerVtable vtable)

        CStatus Init(CFlightServerOptions& options)
        int port()
        CStatus ServeWithSignals() except *
        CStatus Shutdown()
        CStatus Wait()

    cdef cppclass PyServerAuthHandler\
            " arrow::py::flight::PyServerAuthHandler"(CServerAuthHandler):
        PyServerAuthHandler(object handler, PyServerAuthHandlerVtable vtable)

    cdef cppclass PyClientAuthHandler\
            " arrow::py::flight::PyClientAuthHandler"(CClientAuthHandler):
        PyClientAuthHandler(object handler, PyClientAuthHandlerVtable vtable)

    cdef cppclass CPyFlightResultStream\
            " arrow::py::flight::PyFlightResultStream"(CResultStream):
        CPyFlightResultStream(object generator,
                              function[cb_result_next] callback)

    cdef cppclass CPyFlightDataStream\
            " arrow::py::flight::PyFlightDataStream"(CFlightDataStream):
        CPyFlightDataStream(object data_source,
                            unique_ptr[CFlightDataStream] stream)

    cdef cppclass CPyGeneratorFlightDataStream\
            " arrow::py::flight::PyGeneratorFlightDataStream"\
            (CFlightDataStream):
        CPyGeneratorFlightDataStream(object generator,
                                     shared_ptr[CSchema] schema,
                                     function[cb_data_stream_next] callback,
                                     const CIpcWriteOptions& options)

    cdef cppclass PyServerMiddlewareVtable\
            " arrow::py::flight::PyServerMiddleware::Vtable":
        PyServerMiddlewareVtable()
        function[cb_middleware_sending_headers] sending_headers
        function[cb_middleware_call_completed] call_completed

    cdef cppclass PyClientMiddlewareVtable\
            " arrow::py::flight::PyClientMiddleware::Vtable":
        PyClientMiddlewareVtable()
        function[cb_middleware_sending_headers] sending_headers
        function[cb_client_middleware_received_headers] received_headers
        function[cb_middleware_call_completed] call_completed

    cdef cppclass CPyServerMiddleware\
            " arrow::py::flight::PyServerMiddleware"(CServerMiddleware):
        CPyServerMiddleware(object middleware, PyServerMiddlewareVtable vtable)
        void* py_object()

    cdef cppclass CPyServerMiddlewareFactory\
            " arrow::py::flight::PyServerMiddlewareFactory"\
            (CServerMiddlewareFactory):
        CPyServerMiddlewareFactory(
            object factory,
            function[cb_server_middleware_start_call] start_call)

    cdef cppclass CPyClientMiddleware\
            " arrow::py::flight::PyClientMiddleware"(CClientMiddleware):
        CPyClientMiddleware(object middleware, PyClientMiddlewareVtable vtable)

    cdef cppclass CPyClientMiddlewareFactory\
            " arrow::py::flight::PyClientMiddlewareFactory"\
            (CClientMiddlewareFactory):
        CPyClientMiddlewareFactory(
            object factory,
            function[cb_client_middleware_start_call] start_call)

    cdef CStatus CreateFlightInfo" arrow::py::flight::CreateFlightInfo"(
        shared_ptr[CSchema] schema,
        CFlightDescriptor& descriptor,
        vector[CFlightEndpoint] endpoints,
        int64_t total_records,
        int64_t total_bytes,
        unique_ptr[CFlightInfo]* out)

    cdef CStatus CreateSchemaResult" arrow::py::flight::CreateSchemaResult"(
        shared_ptr[CSchema] schema,
        unique_ptr[CSchemaResult]* out)


cdef extern from "<variant>" namespace "std" nogil:
    cdef cppclass CIntStringVariant" std::variant<int, std::string>":
        CIntStringVariant()
        CIntStringVariant(int)
        CIntStringVariant(c_string)
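The declarations above are the raw Cython surface that the public `pyarrow.flight` module builds on (FlightClient, FlightServerBase and friends). For orientation, a minimal client round-trip over this surface might look like the sketch below; the grpc://localhost:8815 address is an assumed placeholder, not something this file defines.

import pyarrow.flight as flight

# Connect to a running Flight server; the address is an assumed example.
client = flight.FlightClient("grpc://localhost:8815")

# ListFlights yields FlightInfo (CFlightInfo above); each endpoint carries
# a Ticket that DoGet exchanges for a stream of record batches.
for info in client.list_flights():
    for endpoint in info.endpoints:
        reader = client.do_get(endpoint.ticket)  # wraps CFlightClient.DoGet
        table = reader.read_all()                # wraps FlightStreamReader ToTable
        print(info.descriptor, table.num_rows)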
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_fs.pxd
ADDED
@@ -0,0 +1,349 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# distutils: language = c++

from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *
from pyarrow.includes.libarrow_python cimport CTimePoint

cdef extern from "arrow/filesystem/api.h" namespace "arrow::fs" nogil:

    ctypedef enum CFileType "arrow::fs::FileType":
        CFileType_NotFound "arrow::fs::FileType::NotFound"
        CFileType_Unknown "arrow::fs::FileType::Unknown"
        CFileType_File "arrow::fs::FileType::File"
        CFileType_Directory "arrow::fs::FileType::Directory"

    cdef cppclass CFileInfo "arrow::fs::FileInfo":
        CFileInfo()
        CFileInfo(CFileInfo)
        CFileInfo& operator=(CFileInfo)
        CFileInfo(const CFileInfo&)
        CFileInfo& operator=(const CFileInfo&)

        CFileType type()
        void set_type(CFileType type)
        c_string path()
        void set_path(const c_string& path)
        c_string base_name()
        int64_t size()
        void set_size(int64_t size)
        c_string extension()
        CTimePoint mtime()
        void set_mtime(CTimePoint mtime)

    cdef cppclass CFileSelector "arrow::fs::FileSelector":
        CFileSelector()
        c_string base_dir
        c_bool allow_not_found
        c_bool recursive

    cdef cppclass CFileLocator "arrow::fs::FileLocator":
        shared_ptr[CFileSystem] filesystem
        c_string path

    cdef cppclass CFileSystem "arrow::fs::FileSystem":
        shared_ptr[CFileSystem] shared_from_this()
        c_string type_name() const
        CResult[c_string] NormalizePath(c_string path)
        CResult[CFileInfo] GetFileInfo(const c_string& path)
        CResult[vector[CFileInfo]] GetFileInfo(
            const vector[c_string]& paths)
        CResult[vector[CFileInfo]] GetFileInfo(const CFileSelector& select)
        CStatus CreateDir(const c_string& path, c_bool recursive)
        CStatus DeleteDir(const c_string& path)
        CStatus DeleteDirContents(const c_string& path, c_bool missing_dir_ok)
        CStatus DeleteRootDirContents()
        CStatus DeleteFile(const c_string& path)
        CStatus DeleteFiles(const vector[c_string]& paths)
        CStatus Move(const c_string& src, const c_string& dest)
        CStatus CopyFile(const c_string& src, const c_string& dest)
        CResult[shared_ptr[CInputStream]] OpenInputStream(
            const c_string& path)
        CResult[shared_ptr[CRandomAccessFile]] OpenInputFile(
            const c_string& path)
        CResult[shared_ptr[COutputStream]] OpenOutputStream(
            const c_string& path, const shared_ptr[const CKeyValueMetadata]&)
        CResult[shared_ptr[COutputStream]] OpenAppendStream(
            const c_string& path, const shared_ptr[const CKeyValueMetadata]&)
        c_bool Equals(const CFileSystem& other)
        c_bool Equals(shared_ptr[CFileSystem] other)

    CResult[shared_ptr[CFileSystem]] CFileSystemFromUri \
        "arrow::fs::FileSystemFromUri"(const c_string& uri, c_string* out_path)
    CResult[shared_ptr[CFileSystem]] CFileSystemFromUriOrPath \
        "arrow::fs::FileSystemFromUriOrPath"(const c_string& uri,
                                             c_string* out_path)

    cdef cppclass CFileSystemGlobalOptions \
            "arrow::fs::FileSystemGlobalOptions":
        c_string tls_ca_file_path
        c_string tls_ca_dir_path

    CStatus CFileSystemsInitialize "arrow::fs::Initialize" \
        (const CFileSystemGlobalOptions& options)

    cdef cppclass CLocalFileSystemOptions "arrow::fs::LocalFileSystemOptions":
        c_bool use_mmap

        @staticmethod
        CLocalFileSystemOptions Defaults()

        c_bool Equals(const CLocalFileSystemOptions& other)

    cdef cppclass CLocalFileSystem "arrow::fs::LocalFileSystem"(CFileSystem):
        CLocalFileSystem()
        CLocalFileSystem(CLocalFileSystemOptions)
        CLocalFileSystemOptions options()

    cdef cppclass CSubTreeFileSystem \
            "arrow::fs::SubTreeFileSystem"(CFileSystem):
        CSubTreeFileSystem(const c_string& base_path,
                           shared_ptr[CFileSystem] base_fs)
        c_string base_path()
        shared_ptr[CFileSystem] base_fs()

    ctypedef enum CS3LogLevel "arrow::fs::S3LogLevel":
        CS3LogLevel_Off "arrow::fs::S3LogLevel::Off"
        CS3LogLevel_Fatal "arrow::fs::S3LogLevel::Fatal"
        CS3LogLevel_Error "arrow::fs::S3LogLevel::Error"
        CS3LogLevel_Warn "arrow::fs::S3LogLevel::Warn"
        CS3LogLevel_Info "arrow::fs::S3LogLevel::Info"
        CS3LogLevel_Debug "arrow::fs::S3LogLevel::Debug"
        CS3LogLevel_Trace "arrow::fs::S3LogLevel::Trace"

    cdef struct CS3GlobalOptions "arrow::fs::S3GlobalOptions":
        CS3LogLevel log_level
        int num_event_loop_threads

    cdef cppclass CS3ProxyOptions "arrow::fs::S3ProxyOptions":
        c_string scheme
        c_string host
        int port
        c_string username
        c_string password
        c_bool Equals(const CS3ProxyOptions& other)

        @staticmethod
        CResult[CS3ProxyOptions] FromUriString "FromUri"(
            const c_string& uri_string)

    ctypedef enum CS3CredentialsKind "arrow::fs::S3CredentialsKind":
        CS3CredentialsKind_Anonymous "arrow::fs::S3CredentialsKind::Anonymous"
        CS3CredentialsKind_Default "arrow::fs::S3CredentialsKind::Default"
        CS3CredentialsKind_Explicit "arrow::fs::S3CredentialsKind::Explicit"
        CS3CredentialsKind_Role "arrow::fs::S3CredentialsKind::Role"
        CS3CredentialsKind_WebIdentity \
            "arrow::fs::S3CredentialsKind::WebIdentity"

    cdef cppclass CS3RetryStrategy "arrow::fs::S3RetryStrategy":
        @staticmethod
        shared_ptr[CS3RetryStrategy] GetAwsDefaultRetryStrategy(int64_t max_attempts)

        @staticmethod
        shared_ptr[CS3RetryStrategy] GetAwsStandardRetryStrategy(int64_t max_attempts)

    cdef cppclass CS3Options "arrow::fs::S3Options":
        c_string region
        double connect_timeout
        double request_timeout
        c_string endpoint_override
        c_string scheme
        c_bool background_writes
        c_bool allow_bucket_creation
        c_bool allow_bucket_deletion
        shared_ptr[const CKeyValueMetadata] default_metadata
        c_string role_arn
        c_string session_name
        c_string external_id
        int load_frequency
        CS3ProxyOptions proxy_options
        CS3CredentialsKind credentials_kind
        shared_ptr[CS3RetryStrategy] retry_strategy
        void ConfigureDefaultCredentials()
        void ConfigureAccessKey(const c_string& access_key,
                                const c_string& secret_key,
                                const c_string& session_token)
        c_string GetAccessKey()
        c_string GetSecretKey()
        c_string GetSessionToken()
        c_bool Equals(const CS3Options& other)

        @staticmethod
        CS3Options Defaults()

        @staticmethod
        CS3Options Anonymous()

        @staticmethod
        CS3Options FromAccessKey(const c_string& access_key,
                                 const c_string& secret_key,
                                 const c_string& session_token)

        @staticmethod
        CS3Options FromAssumeRole(const c_string& role_arn,
                                  const c_string& session_name,
                                  const c_string& external_id,
                                  const int load_frequency)

    cdef cppclass CS3FileSystem "arrow::fs::S3FileSystem"(CFileSystem):
        @staticmethod
        CResult[shared_ptr[CS3FileSystem]] Make(const CS3Options& options)
        CS3Options options()
        c_string region()

    cdef CStatus CInitializeS3 "arrow::fs::InitializeS3"(
        const CS3GlobalOptions& options)
    cdef CStatus CEnsureS3Initialized "arrow::fs::EnsureS3Initialized"()
    cdef CStatus CFinalizeS3 "arrow::fs::FinalizeS3"()
    cdef CStatus CEnsureS3Finalized "arrow::fs::EnsureS3Finalized"()

    cdef CResult[c_string] ResolveS3BucketRegion(const c_string& bucket)

    cdef cppclass CGcsCredentials "arrow::fs::GcsCredentials":
        c_bool anonymous()
        CTimePoint expiration()
        c_string access_token()
        c_string target_service_account()

    cdef cppclass CGcsOptions "arrow::fs::GcsOptions":
        CGcsCredentials credentials
        c_string endpoint_override
        c_string scheme
        c_string default_bucket_location
        optional[c_string] project_id
        optional[double] retry_limit_seconds
        shared_ptr[const CKeyValueMetadata] default_metadata
        c_bool Equals(const CS3Options& other)

        @staticmethod
        CGcsOptions Defaults()

        @staticmethod
        CGcsOptions Anonymous()

        @staticmethod
        CGcsOptions FromAccessToken(const c_string& access_token,
                                    CTimePoint expiration)

        @staticmethod
        CGcsOptions FromImpersonatedServiceAccount(const CGcsCredentials& base_credentials,
                                                   c_string& target_service_account)

    cdef cppclass CGcsFileSystem "arrow::fs::GcsFileSystem":
        @staticmethod
        CResult[shared_ptr[CGcsFileSystem]] Make(const CGcsOptions& options)
        CGcsOptions options()

    cdef cppclass CHdfsOptions "arrow::fs::HdfsOptions":
        HdfsConnectionConfig connection_config
        int32_t buffer_size
        int16_t replication
        int64_t default_block_size

        @staticmethod
        CResult[CHdfsOptions] FromUriString "FromUri"(
            const c_string& uri_string)
        void ConfigureEndPoint(c_string host, int port)
        void ConfigureDriver(c_bool use_hdfs3)
        void ConfigureReplication(int16_t replication)
        void ConfigureUser(c_string user_name)
        void ConfigureBufferSize(int32_t buffer_size)
        void ConfigureBlockSize(int64_t default_block_size)
        void ConfigureKerberosTicketCachePath(c_string path)
        void ConfigureExtraConf(c_string key, c_string value)

    cdef cppclass CHadoopFileSystem "arrow::fs::HadoopFileSystem"(CFileSystem):
        @staticmethod
        CResult[shared_ptr[CHadoopFileSystem]] Make(
            const CHdfsOptions& options)
        CHdfsOptions options()

    cdef cppclass CMockFileSystem "arrow::fs::internal::MockFileSystem"(
            CFileSystem):
        CMockFileSystem(CTimePoint current_time)

    CStatus CCopyFiles "arrow::fs::CopyFiles"(
        const vector[CFileLocator]& sources,
        const vector[CFileLocator]& destinations,
        const CIOContext& io_context,
        int64_t chunk_size, c_bool use_threads)
    CStatus CCopyFilesWithSelector "arrow::fs::CopyFiles"(
        const shared_ptr[CFileSystem]& source_fs,
        const CFileSelector& source_sel,
        const shared_ptr[CFileSystem]& destination_fs,
        const c_string& destination_base_dir,
        const CIOContext& io_context,
        int64_t chunk_size, c_bool use_threads)


# Callbacks for implementing Python filesystems
# Use typedef to emulate syntax for std::function<void(..)>
ctypedef void CallbackGetTypeName(object, c_string*)
ctypedef c_bool CallbackEquals(object, const CFileSystem&)

ctypedef void CallbackGetFileInfo(object, const c_string&, CFileInfo*)
ctypedef void CallbackGetFileInfoVector(object, const vector[c_string]&,
                                        vector[CFileInfo]*)
ctypedef void CallbackGetFileInfoSelector(object, const CFileSelector&,
                                          vector[CFileInfo]*)
ctypedef void CallbackCreateDir(object, const c_string&, c_bool)
ctypedef void CallbackDeleteDir(object, const c_string&)
ctypedef void CallbackDeleteDirContents(object, const c_string&, c_bool)
ctypedef void CallbackDeleteRootDirContents(object)
ctypedef void CallbackDeleteFile(object, const c_string&)
ctypedef void CallbackMove(object, const c_string&, const c_string&)
ctypedef void CallbackCopyFile(object, const c_string&, const c_string&)

ctypedef void CallbackOpenInputStream(object, const c_string&,
                                      shared_ptr[CInputStream]*)
ctypedef void CallbackOpenInputFile(object, const c_string&,
                                    shared_ptr[CRandomAccessFile]*)
ctypedef void CallbackOpenOutputStream(
    object, const c_string&, const shared_ptr[const CKeyValueMetadata]&,
    shared_ptr[COutputStream]*)
ctypedef void CallbackNormalizePath(object, const c_string&, c_string*)

cdef extern from "arrow/python/filesystem.h" namespace "arrow::py::fs" nogil:

    cdef cppclass CPyFileSystemVtable "arrow::py::fs::PyFileSystemVtable":
        PyFileSystemVtable()
        function[CallbackGetTypeName] get_type_name
        function[CallbackEquals] equals
        function[CallbackGetFileInfo] get_file_info
        function[CallbackGetFileInfoVector] get_file_info_vector
        function[CallbackGetFileInfoSelector] get_file_info_selector
        function[CallbackCreateDir] create_dir
        function[CallbackDeleteDir] delete_dir
        function[CallbackDeleteDirContents] delete_dir_contents
        function[CallbackDeleteRootDirContents] delete_root_dir_contents
        function[CallbackDeleteFile] delete_file
        function[CallbackMove] move
        function[CallbackCopyFile] copy_file
        function[CallbackOpenInputStream] open_input_stream
        function[CallbackOpenInputFile] open_input_file
        function[CallbackOpenOutputStream] open_output_stream
        function[CallbackOpenOutputStream] open_append_stream
        function[CallbackNormalizePath] normalize_path

    cdef cppclass CPyFileSystem "arrow::py::fs::PyFileSystem":
        @staticmethod
        shared_ptr[CPyFileSystem] Make(object handler,
                                       CPyFileSystemVtable vtable)

        PyObject* handler()
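These filesystem declarations are surfaced nearly one-to-one by the public `pyarrow.fs` module. A rough sketch of the mapping follows; the S3 URI is an assumed placeholder and resolving it requires an S3-enabled pyarrow build.

import pyarrow.fs as fs

# LocalFileSystem wraps arrow::fs::LocalFileSystem; use_mmap corresponds
# to the CLocalFileSystemOptions.use_mmap field above.
local = fs.LocalFileSystem(use_mmap=False)
infos = local.get_file_info(["/tmp"])   # CFileSystem.GetFileInfo(paths)
print(infos[0].type, infos[0].path)

# FileSystem.from_uri mirrors arrow::fs::FileSystemFromUri and returns
# the filesystem together with the path inside it.
s3, path = fs.FileSystem.from_uri("s3://example-bucket/data")  # assumed URI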
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_python.pxd
ADDED
@@ -0,0 +1,311 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# distutils: language = c++

from pyarrow.includes.common cimport *
from pyarrow.includes.libarrow cimport *


ctypedef CInvalidRowResult PyInvalidRowCallback(object,
                                                const CCSVInvalidRow&)


cdef extern from "arrow/python/csv.h" namespace "arrow::py::csv":

    function[CInvalidRowHandler] MakeInvalidRowHandler(
        function[PyInvalidRowCallback], object handler)


cdef extern from "arrow/python/api.h" namespace "arrow::py":
    # Requires GIL
    CResult[shared_ptr[CDataType]] InferArrowType(
        object obj, object mask, c_bool pandas_null_sentinels)


cdef extern from "arrow/python/api.h" namespace "arrow::py::internal":
    object NewMonthDayNanoTupleType()
    CResult[PyObject*] MonthDayNanoIntervalArrayToPyList(
        const CMonthDayNanoIntervalArray& array)
    CResult[PyObject*] MonthDayNanoIntervalScalarToPyObject(
        const CMonthDayNanoIntervalScalar& scalar)


cdef extern from "arrow/python/arrow_to_pandas.h" namespace "arrow::py::MapConversionType":
    cdef enum MapConversionType "arrow::py::MapConversionType":
        DEFAULT,
        LOSSY,
        STRICT_


cdef extern from "arrow/python/api.h" namespace "arrow::py" nogil:
    shared_ptr[CDataType] GetPrimitiveType(Type type)

    object PyHalf_FromHalf(npy_half value)

    cdef cppclass PyConversionOptions:
        PyConversionOptions()

        shared_ptr[CDataType] type
        int64_t size
        CMemoryPool* pool
        c_bool from_pandas
        c_bool ignore_timezone
        c_bool strict

    # TODO Some functions below are not actually "nogil"

    CResult[shared_ptr[CChunkedArray]] ConvertPySequence(
        object obj, object mask, const PyConversionOptions& options,
        CMemoryPool* pool)

    CResult[shared_ptr[CDataType]] NumPyDtypeToArrow(object dtype)

    CStatus NdarrayToArrow(CMemoryPool* pool, object ao, object mo,
                           c_bool from_pandas,
                           const shared_ptr[CDataType]& type,
                           shared_ptr[CChunkedArray]* out)

    CStatus NdarrayToArrow(CMemoryPool* pool, object ao, object mo,
                           c_bool from_pandas,
                           const shared_ptr[CDataType]& type,
                           const CCastOptions& cast_options,
                           shared_ptr[CChunkedArray]* out)

    CStatus NdarrayToTensor(CMemoryPool* pool, object ao,
                            const vector[c_string]& dim_names,
                            shared_ptr[CTensor]* out)

    CStatus TensorToNdarray(const shared_ptr[CTensor]& tensor, object base,
                            PyObject** out)

    CStatus SparseCOOTensorToNdarray(
        const shared_ptr[CSparseCOOTensor]& sparse_tensor, object base,
        PyObject** out_data, PyObject** out_coords)

    CStatus SparseCSRMatrixToNdarray(
        const shared_ptr[CSparseCSRMatrix]& sparse_tensor, object base,
        PyObject** out_data, PyObject** out_indptr, PyObject** out_indices)

    CStatus SparseCSCMatrixToNdarray(
        const shared_ptr[CSparseCSCMatrix]& sparse_tensor, object base,
        PyObject** out_data, PyObject** out_indptr, PyObject** out_indices)

    CStatus SparseCSFTensorToNdarray(
        const shared_ptr[CSparseCSFTensor]& sparse_tensor, object base,
        PyObject** out_data, PyObject** out_indptr, PyObject** out_indices)

    CStatus NdarraysToSparseCOOTensor(CMemoryPool* pool, object data_ao,
                                      object coords_ao,
                                      const vector[int64_t]& shape,
                                      const vector[c_string]& dim_names,
                                      shared_ptr[CSparseCOOTensor]* out)

    CStatus NdarraysToSparseCSRMatrix(CMemoryPool* pool, object data_ao,
                                      object indptr_ao, object indices_ao,
                                      const vector[int64_t]& shape,
                                      const vector[c_string]& dim_names,
                                      shared_ptr[CSparseCSRMatrix]* out)

    CStatus NdarraysToSparseCSCMatrix(CMemoryPool* pool, object data_ao,
                                      object indptr_ao, object indices_ao,
                                      const vector[int64_t]& shape,
                                      const vector[c_string]& dim_names,
                                      shared_ptr[CSparseCSCMatrix]* out)

    CStatus NdarraysToSparseCSFTensor(CMemoryPool* pool, object data_ao,
                                      object indptr_ao, object indices_ao,
                                      const vector[int64_t]& shape,
                                      const vector[int64_t]& axis_order,
                                      const vector[c_string]& dim_names,
                                      shared_ptr[CSparseCSFTensor]* out)

    CStatus TensorToSparseCOOTensor(shared_ptr[CTensor],
                                    shared_ptr[CSparseCOOTensor]* out)

    CStatus TensorToSparseCSRMatrix(shared_ptr[CTensor],
                                    shared_ptr[CSparseCSRMatrix]* out)

    CStatus TensorToSparseCSCMatrix(shared_ptr[CTensor],
                                    shared_ptr[CSparseCSCMatrix]* out)

    CStatus TensorToSparseCSFTensor(shared_ptr[CTensor],
                                    shared_ptr[CSparseCSFTensor]* out)

    CStatus ConvertArrayToPandas(const PandasOptions& options,
                                 shared_ptr[CArray] arr,
                                 object py_ref, PyObject** out)

    CStatus ConvertChunkedArrayToPandas(const PandasOptions& options,
                                        shared_ptr[CChunkedArray] arr,
                                        object py_ref, PyObject** out)

    CStatus ConvertTableToPandas(const PandasOptions& options,
                                 shared_ptr[CTable] table,
                                 PyObject** out)

    void c_set_default_memory_pool \
        " arrow::py::set_default_memory_pool"(CMemoryPool* pool)\

    CMemoryPool* c_get_memory_pool \
        " arrow::py::get_memory_pool"()

    cdef cppclass PyBuffer(CBuffer):
        @staticmethod
        CResult[shared_ptr[CBuffer]] FromPyObject(object obj)

    cdef cppclass PyForeignBuffer(CBuffer):
        @staticmethod
        CStatus Make(const uint8_t* data, int64_t size, object base,
                     shared_ptr[CBuffer]* out)

    cdef cppclass PyReadableFile(CRandomAccessFile):
        PyReadableFile(object fo)

    cdef cppclass PyOutputStream(COutputStream):
        PyOutputStream(object fo)

    cdef cppclass PandasOptions:
        CMemoryPool* pool
        c_bool strings_to_categorical
        c_bool zero_copy_only
        c_bool integer_object_nulls
        c_bool date_as_object
        c_bool timestamp_as_object
        c_bool use_threads
        c_bool coerce_temporal_nanoseconds
        c_bool ignore_timezone
        c_bool deduplicate_objects
        c_bool safe_cast
        c_bool split_blocks
        c_bool self_destruct
        MapConversionType maps_as_pydicts
        c_bool decode_dictionaries
        unordered_set[c_string] categorical_columns
        unordered_set[c_string] extension_columns
        c_bool to_numpy

    cdef cppclass CSerializedPyObject" arrow::py::SerializedPyObject":
        shared_ptr[CRecordBatch] batch
        vector[shared_ptr[CTensor]] tensors

        CStatus WriteTo(COutputStream* dst)
        CStatus GetComponents(CMemoryPool* pool, PyObject** dst)

    CStatus SerializeObject(object context, object sequence,
                            CSerializedPyObject* out)

    CStatus DeserializeObject(object context,
                              const CSerializedPyObject& obj,
                              PyObject* base, PyObject** out)

    CStatus ReadSerializedObject(CRandomAccessFile* src,
                                 CSerializedPyObject* out)

    cdef cppclass SparseTensorCounts:
        SparseTensorCounts()
        int coo
        int csr
        int csc
        int csf
        int ndim_csf
        int num_total_tensors() const
        int num_total_buffers() const

    CStatus GetSerializedFromComponents(
        int num_tensors,
        const SparseTensorCounts& num_sparse_tensors,
        int num_ndarrays,
        int num_buffers,
        object buffers,
        CSerializedPyObject* out)


cdef extern from "arrow/python/api.h" namespace "arrow::py::internal" nogil:
    cdef cppclass CTimePoint "arrow::py::internal::TimePoint":
        pass

    CTimePoint PyDateTime_to_TimePoint(PyDateTime_DateTime* pydatetime)
    int64_t TimePoint_to_ns(CTimePoint val)
    CTimePoint TimePoint_from_s(double val)
    CTimePoint TimePoint_from_ns(int64_t val)

    CResult[c_string] TzinfoToString(PyObject* pytzinfo)
    CResult[PyObject*] StringToTzinfo(c_string)


cdef extern from "arrow/python/init.h":
    int arrow_init_numpy() except -1


cdef extern from "arrow/python/pyarrow.h" namespace "arrow::py":
    int import_pyarrow() except -1


cdef extern from "arrow/python/common.h" namespace "arrow::py":
    c_bool IsPyError(const CStatus& status)
    void RestorePyError(const CStatus& status) except *


cdef extern from "arrow/python/common.h" namespace "arrow::py" nogil:
    cdef cppclass SharedPtrNoGIL[T](shared_ptr[T]):
        # This looks like the only way to satisfy both Cython 2 and Cython 3
        SharedPtrNoGIL& operator=(...)
    cdef cppclass UniquePtrNoGIL[T, DELETER=*](unique_ptr[T, DELETER]):
        UniquePtrNoGIL& operator=(...)


cdef extern from "arrow/python/inference.h" namespace "arrow::py":
    c_bool IsPyBool(object o)
    c_bool IsPyInt(object o)
    c_bool IsPyFloat(object o)


cdef extern from "arrow/python/ipc.h" namespace "arrow::py":
    cdef cppclass CPyRecordBatchReader" arrow::py::PyRecordBatchReader" \
            (CRecordBatchReader):
        @staticmethod
        CResult[shared_ptr[CRecordBatchReader]] Make(shared_ptr[CSchema],
                                                     object)


cdef extern from "arrow/python/extension_type.h" namespace "arrow::py":
    cdef cppclass CPyExtensionType \
            " arrow::py::PyExtensionType"(CExtensionType):
        @staticmethod
        CStatus FromClass(const shared_ptr[CDataType] storage_type,
                          const c_string extension_name, object typ,
                          shared_ptr[CExtensionType]* out)

        @staticmethod
        CStatus FromInstance(shared_ptr[CDataType] storage_type,
                             object inst, shared_ptr[CExtensionType]* out)

        object GetInstance()
        CStatus SetInstance(object)

    c_string PyExtensionName()
    CStatus RegisterPyExtensionType(shared_ptr[CDataType])
    CStatus UnregisterPyExtensionType(c_string type_name)


cdef extern from "arrow/python/benchmark.h" namespace "arrow::py::benchmark":
    void Benchmark_PandasObjectIsNull(object lst) except *


cdef extern from "arrow/python/gdb.h" namespace "arrow::gdb" nogil:
    void GdbTestSession "arrow::gdb::TestSession"()
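Of the declarations above, PandasOptions is the one most visible from Python: its fields correspond to keyword arguments of `Table.to_pandas` and its Array/ChunkedArray counterparts, which are routed through ConvertTableToPandas and friends. A small sketch (requires pandas to be installed alongside pyarrow):

import pyarrow as pa

table = pa.table({"a": [1, 2, None]})
# deduplicate_objects and split_blocks set the PandasOptions fields of
# the same name before the conversion runs.
df = table.to_pandas(deduplicate_objects=True, split_blocks=True)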
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libarrow_substrait.pxd
ADDED
@@ -0,0 +1,77 @@
1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
# or more contributor license agreements. See the NOTICE file
|
3 |
+
# distributed with this work for additional information
|
4 |
+
# regarding copyright ownership. The ASF licenses this file
|
5 |
+
# to you under the Apache License, Version 2.0 (the
|
6 |
+
# "License"); you may not use this file except in compliance
|
7 |
+
# with the License. You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing,
|
12 |
+
# software distributed under the License is distributed on an
|
13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
# KIND, either express or implied. See the License for the
|
15 |
+
# specific language governing permissions and limitations
|
16 |
+
# under the License.
|
17 |
+
|
18 |
+
# distutils: language = c++
|
19 |
+
|
20 |
+
from libcpp.vector cimport vector as std_vector
|
21 |
+
|
22 |
+
from pyarrow.includes.common cimport *
|
23 |
+
from pyarrow.includes.libarrow cimport *
|
24 |
+
from pyarrow.includes.libarrow_acero cimport *
|
25 |
+
|
26 |
+
ctypedef CResult[CDeclaration] CNamedTableProvider(const std_vector[c_string]&, const CSchema&)
|
27 |
+
|
28 |
+
cdef extern from "arrow/engine/substrait/options.h" namespace "arrow::engine" nogil:
|
29 |
+
cdef enum ConversionStrictness \
|
30 |
+
"arrow::engine::ConversionStrictness":
|
31 |
+
EXACT_ROUNDTRIP \
|
32 |
+
"arrow::engine::ConversionStrictness::EXACT_ROUNDTRIP"
|
33 |
+
PRESERVE_STRUCTURE \
|
34 |
+
"arrow::engine::ConversionStrictness::PRESERVE_STRUCTURE"
|
35 |
+
BEST_EFFORT \
|
36 |
+
"arrow::engine::ConversionStrictness::BEST_EFFORT"
|
37 |
+
|
38 |
+
cdef cppclass CConversionOptions \
|
39 |
+
"arrow::engine::ConversionOptions":
|
40 |
+
CConversionOptions()
|
41 |
+
ConversionStrictness strictness
|
42 |
+
function[CNamedTableProvider] named_table_provider
|
43 |
+
c_bool allow_arrow_extensions
|
44 |
+
|
45 |
+
cdef extern from "arrow/engine/substrait/extension_set.h" \
|
46 |
+
namespace "arrow::engine" nogil:
|
47 |
+
|
48 |
+
cdef cppclass ExtensionIdRegistry:
|
49 |
+
std_vector[c_string] GetSupportedSubstraitFunctions()
|
50 |
+
|
51 |
+
ExtensionIdRegistry* default_extension_id_registry()
|
52 |
+
|
53 |
+
cdef extern from "arrow/engine/substrait/relation.h" namespace "arrow::engine" nogil:
|
54 |
+
|
55 |
+
cdef cppclass CNamedExpression "arrow::engine::NamedExpression":
|
56 |
+
CExpression expression
|
57 |
+
c_string name
|
58 |
+
|
59 |
+
cdef cppclass CBoundExpressions "arrow::engine::BoundExpressions":
|
60 |
+
std_vector[CNamedExpression] named_expressions
|
61 |
+
shared_ptr[CSchema] schema
|
62 |
+
|
63 |
+
cdef extern from "arrow/engine/substrait/serde.h" namespace "arrow::engine" nogil:
|
64 |
+
|
65 |
+
CResult[shared_ptr[CBuffer]] SerializeExpressions(
|
66 |
+
const CBoundExpressions& bound_expressions, const CConversionOptions& conversion_options)
|
67 |
+
|
68 |
+
CResult[CBoundExpressions] DeserializeExpressions(
|
69 |
+
const CBuffer& serialized_expressions)
|
70 |
+
|
71 |
+
cdef extern from "arrow/engine/substrait/util.h" namespace "arrow::engine" nogil:
|
72 |
+
CResult[shared_ptr[CRecordBatchReader]] ExecuteSerializedPlan(
|
73 |
+
const CBuffer& substrait_buffer, const ExtensionIdRegistry* registry,
|
74 |
+
CFunctionRegistry* func_registry, const CConversionOptions& conversion_options,
|
75 |
+
c_bool use_threads)
|
76 |
+
|
77 |
+
CResult[shared_ptr[CBuffer]] SerializeJsonPlan(const c_string& substrait_json)
|
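A short usage sketch of the Python layer these declarations support, assuming this build was compiled with the optional Substrait engine so that `pyarrow.substrait` imports; the table-provider callback mirrors the `CNamedTableProvider` typedef above:

import pyarrow as pa
import pyarrow.substrait as substrait

def table_provider(names, schema):
    # Invoked once per named table referenced by a plan.
    return pa.table({"x": [1, 2, 3]})

# Wraps GetSupportedSubstraitFunctions() from extension_set.h.
print(len(substrait.get_supported_functions()))

# A serialized plan (bytes or Buffer) would be executed roughly like this:
# reader = substrait.run_query(plan_buffer, table_provider=table_provider)
# table = reader.read_all()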
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libgandiva.pxd
ADDED
@@ -0,0 +1,298 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language = c++
+
+from libcpp.string cimport string as c_string
+from libcpp.unordered_set cimport unordered_set as c_unordered_set
+from libc.stdint cimport int64_t, int32_t, uint8_t, uintptr_t
+
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport *
+
+cdef extern from "gandiva/node.h" namespace "gandiva" nogil:
+
+    cdef cppclass CNode" gandiva::Node":
+        c_string ToString()
+        shared_ptr[CDataType] return_type()
+
+    cdef cppclass CGandivaExpression" gandiva::Expression":
+        c_string ToString()
+        shared_ptr[CNode] root()
+        shared_ptr[CField] result()
+
+    ctypedef vector[shared_ptr[CNode]] CNodeVector" gandiva::NodeVector"
+
+    ctypedef vector[shared_ptr[CGandivaExpression]] \
+        CExpressionVector" gandiva::ExpressionVector"
+
+cdef extern from "gandiva/selection_vector.h" namespace "gandiva" nogil:
+
+    cdef cppclass CSelectionVector" gandiva::SelectionVector":
+
+        shared_ptr[CArray] ToArray()
+
+    enum CSelectionVector_Mode" gandiva::SelectionVector::Mode":
+        CSelectionVector_Mode_NONE" gandiva::SelectionVector::Mode::MODE_NONE"
+        CSelectionVector_Mode_UINT16" \
+            gandiva::SelectionVector::Mode::MODE_UINT16"
+        CSelectionVector_Mode_UINT32" \
+            gandiva::SelectionVector::Mode::MODE_UINT32"
+        CSelectionVector_Mode_UINT64" \
+            gandiva::SelectionVector::Mode::MODE_UINT64"
+
+    cdef CStatus SelectionVector_MakeInt16\
+        "gandiva::SelectionVector::MakeInt16"(
+            int64_t max_slots, CMemoryPool* pool,
+            shared_ptr[CSelectionVector]* selection_vector)
+
+    cdef CStatus SelectionVector_MakeInt32\
+        "gandiva::SelectionVector::MakeInt32"(
+            int64_t max_slots, CMemoryPool* pool,
+            shared_ptr[CSelectionVector]* selection_vector)
+
+    cdef CStatus SelectionVector_MakeInt64\
+        "gandiva::SelectionVector::MakeInt64"(
+            int64_t max_slots, CMemoryPool* pool,
+            shared_ptr[CSelectionVector]* selection_vector)
+
+cdef inline CSelectionVector_Mode _ensure_selection_mode(str name) except *:
+    uppercase = name.upper()
+    if uppercase == 'NONE':
+        return CSelectionVector_Mode_NONE
+    elif uppercase == 'UINT16':
+        return CSelectionVector_Mode_UINT16
+    elif uppercase == 'UINT32':
+        return CSelectionVector_Mode_UINT32
+    elif uppercase == 'UINT64':
+        return CSelectionVector_Mode_UINT64
+    else:
+        raise ValueError('Invalid value for Selection Mode: {!r}'.format(name))
+
+cdef inline str _selection_mode_name(CSelectionVector_Mode ctype):
+    if ctype == CSelectionVector_Mode_NONE:
+        return 'NONE'
+    elif ctype == CSelectionVector_Mode_UINT16:
+        return 'UINT16'
+    elif ctype == CSelectionVector_Mode_UINT32:
+        return 'UINT32'
+    elif ctype == CSelectionVector_Mode_UINT64:
+        return 'UINT64'
+    else:
+        raise RuntimeError('Unexpected CSelectionVector_Mode value')
+
+cdef extern from "gandiva/condition.h" namespace "gandiva" nogil:
+
+    cdef cppclass CCondition" gandiva::Condition":
+        c_string ToString()
+        shared_ptr[CNode] root()
+        shared_ptr[CField] result()
+
+cdef extern from "gandiva/arrow.h" namespace "gandiva" nogil:
+
+    ctypedef vector[shared_ptr[CArray]] CArrayVector" gandiva::ArrayVector"
+
+
+cdef extern from "gandiva/tree_expr_builder.h" namespace "gandiva" nogil:
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeBoolLiteral \
+        "gandiva::TreeExprBuilder::MakeLiteral"(c_bool value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt8Literal \
+        "gandiva::TreeExprBuilder::MakeLiteral"(uint8_t value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt16Literal \
+        "gandiva::TreeExprBuilder::MakeLiteral"(uint16_t value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt32Literal \
+        "gandiva::TreeExprBuilder::MakeLiteral"(uint32_t value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeUInt64Literal \
+        "gandiva::TreeExprBuilder::MakeLiteral"(uint64_t value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInt8Literal \
+        "gandiva::TreeExprBuilder::MakeLiteral"(int8_t value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInt16Literal \
+        "gandiva::TreeExprBuilder::MakeLiteral"(int16_t value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInt32Literal \
+        "gandiva::TreeExprBuilder::MakeLiteral"(int32_t value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInt64Literal \
+        "gandiva::TreeExprBuilder::MakeLiteral"(int64_t value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeFloatLiteral \
+        "gandiva::TreeExprBuilder::MakeLiteral"(float value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeDoubleLiteral \
+        "gandiva::TreeExprBuilder::MakeLiteral"(double value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeStringLiteral \
+        "gandiva::TreeExprBuilder::MakeStringLiteral"(const c_string& value)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeBinaryLiteral \
+        "gandiva::TreeExprBuilder::MakeBinaryLiteral"(const c_string& value)
+
+    cdef shared_ptr[CGandivaExpression] TreeExprBuilder_MakeExpression\
+        "gandiva::TreeExprBuilder::MakeExpression"(
+            shared_ptr[CNode] root_node, shared_ptr[CField] result_field)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeFunction \
+        "gandiva::TreeExprBuilder::MakeFunction"(
+            const c_string& name, const CNodeVector& children,
+            shared_ptr[CDataType] return_type)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeField \
+        "gandiva::TreeExprBuilder::MakeField"(shared_ptr[CField] field)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeIf \
+        "gandiva::TreeExprBuilder::MakeIf"(
+            shared_ptr[CNode] condition, shared_ptr[CNode] this_node,
+            shared_ptr[CNode] else_node, shared_ptr[CDataType] return_type)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeAnd \
+        "gandiva::TreeExprBuilder::MakeAnd"(const CNodeVector& children)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeOr \
+        "gandiva::TreeExprBuilder::MakeOr"(const CNodeVector& children)
+
+    cdef shared_ptr[CCondition] TreeExprBuilder_MakeCondition \
+        "gandiva::TreeExprBuilder::MakeCondition"(
+            shared_ptr[CNode] condition)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionInt32 \
+        "gandiva::TreeExprBuilder::MakeInExpressionInt32"(
+            shared_ptr[CNode] node, const c_unordered_set[int32_t]& values)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionInt64 \
+        "gandiva::TreeExprBuilder::MakeInExpressionInt64"(
+            shared_ptr[CNode] node, const c_unordered_set[int64_t]& values)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTime32 \
+        "gandiva::TreeExprBuilder::MakeInExpressionTime32"(
+            shared_ptr[CNode] node, const c_unordered_set[int32_t]& values)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTime64 \
+        "gandiva::TreeExprBuilder::MakeInExpressionTime64"(
+            shared_ptr[CNode] node, const c_unordered_set[int64_t]& values)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionDate32 \
+        "gandiva::TreeExprBuilder::MakeInExpressionDate32"(
+            shared_ptr[CNode] node, const c_unordered_set[int32_t]& values)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionDate64 \
+        "gandiva::TreeExprBuilder::MakeInExpressionDate64"(
+            shared_ptr[CNode] node, const c_unordered_set[int64_t]& values)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionTimeStamp \
+        "gandiva::TreeExprBuilder::MakeInExpressionTimeStamp"(
+            shared_ptr[CNode] node, const c_unordered_set[int64_t]& values)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionString \
+        "gandiva::TreeExprBuilder::MakeInExpressionString"(
+            shared_ptr[CNode] node, const c_unordered_set[c_string]& values)
+
+    cdef shared_ptr[CNode] TreeExprBuilder_MakeInExpressionBinary \
+        "gandiva::TreeExprBuilder::MakeInExpressionBinary"(
+            shared_ptr[CNode] node, const c_unordered_set[c_string]& values)
+
+cdef extern from "gandiva/projector.h" namespace "gandiva" nogil:
+
+    cdef cppclass CProjector" gandiva::Projector":
+
+        CStatus Evaluate(
+            const CRecordBatch& batch, CMemoryPool* pool,
+            const CArrayVector* output)
+
+        CStatus Evaluate(
+            const CRecordBatch& batch,
+            const CSelectionVector* selection,
+            CMemoryPool* pool,
+            const CArrayVector* output)
+
+        c_string DumpIR()
+
+    cdef CStatus Projector_Make \
+        "gandiva::Projector::Make"(
+            shared_ptr[CSchema] schema, const CExpressionVector& children,
+            shared_ptr[CProjector]* projector)
+
+    cdef CStatus Projector_Make \
+        "gandiva::Projector::Make"(
+            shared_ptr[CSchema] schema, const CExpressionVector& children,
+            CSelectionVector_Mode mode,
+            shared_ptr[CConfiguration] configuration,
+            shared_ptr[CProjector]* projector)
+
+cdef extern from "gandiva/filter.h" namespace "gandiva" nogil:
+
+    cdef cppclass CFilter" gandiva::Filter":
+
+        CStatus Evaluate(
+            const CRecordBatch& batch,
+            shared_ptr[CSelectionVector] out_selection)
+
+        c_string DumpIR()
+
+    cdef CStatus Filter_Make \
+        "gandiva::Filter::Make"(
+            shared_ptr[CSchema] schema, shared_ptr[CCondition] condition,
+            shared_ptr[CConfiguration] configuration,
+            shared_ptr[CFilter]* filter)
+
+cdef extern from "gandiva/function_signature.h" namespace "gandiva" nogil:
+
+    cdef cppclass CFunctionSignature" gandiva::FunctionSignature":
+
+        CFunctionSignature(const c_string& base_name,
+                           vector[shared_ptr[CDataType]] param_types,
+                           shared_ptr[CDataType] ret_type)
+
+        shared_ptr[CDataType] ret_type() const
+
+        const c_string& base_name() const
+
+        vector[shared_ptr[CDataType]] param_types() const
+
+        c_string ToString() const
+
+cdef extern from "gandiva/expression_registry.h" namespace "gandiva" nogil:
+
+    cdef vector[shared_ptr[CFunctionSignature]] \
+        GetRegisteredFunctionSignatures()
+
+cdef extern from "gandiva/configuration.h" namespace "gandiva" nogil:
+
+    cdef cppclass CConfiguration" gandiva::Configuration":
+
+        CConfiguration()
+
+        CConfiguration(bint optimize, bint dump_ir)
+
+        void set_optimize(bint optimize)
+
+        void set_dump_ir(bint dump_ir)
+
+    cdef cppclass CConfigurationBuilder \
+            " gandiva::ConfigurationBuilder":
+        @staticmethod
+        shared_ptr[CConfiguration] DefaultConfiguration()
+
+        CConfigurationBuilder()
+
+        shared_ptr[CConfiguration] build()
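These declarations underpin the optional `pyarrow.gandiva` module, which builds LLVM-compiled expression trees over record batches. A minimal sketch, assuming Gandiva was enabled in this build; the column names are illustrative:

import pyarrow as pa
import pyarrow.gandiva as gandiva

table = pa.table({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})

builder = gandiva.TreeExprBuilder()
# Wraps TreeExprBuilder_MakeField / MakeFunction / MakeExpression above.
node_a = builder.make_field(table.schema.field("a"))
node_b = builder.make_field(table.schema.field("b"))
sum_node = builder.make_function("add", [node_a, node_b], pa.float64())
expr = builder.make_expression(sum_node, pa.field("a_plus_b", pa.float64()))

# Projector_Make compiles the expression; evaluate() runs it per batch.
projector = gandiva.make_projector(table.schema, [expr],
                                   pa.default_memory_pool())
for batch in table.to_batches():
    print(projector.evaluate(batch))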
env-llmeval/lib/python3.10/site-packages/pyarrow/includes/libparquet_encryption.pxd
ADDED
@@ -0,0 +1,130 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language = c++
+
+from pyarrow.includes.common cimport *
+from pyarrow._parquet cimport (ParquetCipher,
+                               CFileEncryptionProperties,
+                               CFileDecryptionProperties,
+                               ParquetCipher_AES_GCM_V1,
+                               ParquetCipher_AES_GCM_CTR_V1)
+
+
+cdef extern from "parquet/encryption/kms_client.h" \
+        namespace "parquet::encryption" nogil:
+    cdef cppclass CKmsClient" parquet::encryption::KmsClient":
+        c_string WrapKey(const c_string& key_bytes,
+                         const c_string& master_key_identifier) except +
+        c_string UnwrapKey(const c_string& wrapped_key,
+                           const c_string& master_key_identifier) except +
+
+    cdef cppclass CKeyAccessToken" parquet::encryption::KeyAccessToken":
+        CKeyAccessToken(const c_string value)
+        void Refresh(const c_string& new_value)
+        const c_string& value() const
+
+    cdef cppclass CKmsConnectionConfig \
+            " parquet::encryption::KmsConnectionConfig":
+        CKmsConnectionConfig()
+        c_string kms_instance_id
+        c_string kms_instance_url
+        shared_ptr[CKeyAccessToken] refreshable_key_access_token
+        unordered_map[c_string, c_string] custom_kms_conf
+
+# Callbacks for implementing Python kms clients
+# Use typedef to emulate syntax for std::function<void(..)>
+ctypedef void CallbackWrapKey(
+    object, const c_string&, const c_string&, c_string*)
+ctypedef void CallbackUnwrapKey(
+    object, const c_string&, const c_string&, c_string*)
+
+cdef extern from "parquet/encryption/kms_client_factory.h" \
+        namespace "parquet::encryption" nogil:
+    cdef cppclass CKmsClientFactory" parquet::encryption::KmsClientFactory":
+        shared_ptr[CKmsClient] CreateKmsClient(
+            const CKmsConnectionConfig& kms_connection_config) except +
+
+# Callbacks for implementing Python kms client factories
+# Use typedef to emulate syntax for std::function<void(..)>
+ctypedef void CallbackCreateKmsClient(
+    object,
+    const CKmsConnectionConfig&, shared_ptr[CKmsClient]*)
+
+cdef extern from "parquet/encryption/crypto_factory.h" \
+        namespace "parquet::encryption" nogil:
+    cdef cppclass CEncryptionConfiguration\
+            " parquet::encryption::EncryptionConfiguration":
+        CEncryptionConfiguration(const c_string& footer_key) except +
+        c_string footer_key
+        c_string column_keys
+        ParquetCipher encryption_algorithm
+        c_bool plaintext_footer
+        c_bool double_wrapping
+        double cache_lifetime_seconds
+        c_bool internal_key_material
+        int32_t data_key_length_bits
+
+    cdef cppclass CDecryptionConfiguration\
+            " parquet::encryption::DecryptionConfiguration":
+        CDecryptionConfiguration() except +
+        double cache_lifetime_seconds
+
+    cdef cppclass CCryptoFactory" parquet::encryption::CryptoFactory":
+        void RegisterKmsClientFactory(
+            shared_ptr[CKmsClientFactory] kms_client_factory) except +
+        shared_ptr[CFileEncryptionProperties] GetFileEncryptionProperties(
+            const CKmsConnectionConfig& kms_connection_config,
+            const CEncryptionConfiguration& encryption_config) except +*
+        shared_ptr[CFileDecryptionProperties] GetFileDecryptionProperties(
+            const CKmsConnectionConfig& kms_connection_config,
+            const CDecryptionConfiguration& decryption_config) except +*
+        void RemoveCacheEntriesForToken(const c_string& access_token) except +
+        void RemoveCacheEntriesForAllTokens() except +
+
+cdef extern from "arrow/python/parquet_encryption.h" \
+        namespace "arrow::py::parquet::encryption" nogil:
+    cdef cppclass CPyKmsClientVtable \
+            " arrow::py::parquet::encryption::PyKmsClientVtable":
+        CPyKmsClientVtable()
+        function[CallbackWrapKey] wrap_key
+        function[CallbackUnwrapKey] unwrap_key
+
+    cdef cppclass CPyKmsClient\
+            " arrow::py::parquet::encryption::PyKmsClient"(CKmsClient):
+        CPyKmsClient(object handler, CPyKmsClientVtable vtable)
+
+    cdef cppclass CPyKmsClientFactoryVtable\
+            " arrow::py::parquet::encryption::PyKmsClientFactoryVtable":
+        CPyKmsClientFactoryVtable()
+        function[CallbackCreateKmsClient] create_kms_client
+
+    cdef cppclass CPyKmsClientFactory\
+            " arrow::py::parquet::encryption::PyKmsClientFactory"(
+                CKmsClientFactory):
+        CPyKmsClientFactory(object handler, CPyKmsClientFactoryVtable vtable)
+
+    cdef cppclass CPyCryptoFactory\
+            " arrow::py::parquet::encryption::PyCryptoFactory"(CCryptoFactory):
+        CResult[shared_ptr[CFileEncryptionProperties]] \
+            SafeGetFileEncryptionProperties(
+                const CKmsConnectionConfig& kms_connection_config,
+                const CEncryptionConfiguration& encryption_config)
+        CResult[shared_ptr[CFileDecryptionProperties]] \
+            SafeGetFileDecryptionProperties(
+                const CKmsConnectionConfig& kms_connection_config,
+                const CDecryptionConfiguration& decryption_config)
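These vtable shims are what let `pyarrow.parquet.encryption` accept KMS clients written in Python. A hedged sketch of that Python-side API; the dummy "KMS" below is purely illustrative and provides no real security:

import pyarrow.parquet.encryption as pe

class DummyKmsClient(pe.KmsClient):
    """Illustrative only: 'wraps' keys by concatenation."""

    def __init__(self, kms_connection_config):
        pe.KmsClient.__init__(self)

    def wrap_key(self, key_bytes, master_key_identifier):
        # Routed through the CallbackWrapKey vtable slot above.
        return master_key_identifier.encode() + b":" + key_bytes

    def unwrap_key(self, wrapped_key, master_key_identifier):
        return wrapped_key.split(b":", 1)[1]

crypto_factory = pe.CryptoFactory(DummyKmsClient)
kms_config = pe.KmsConnectionConfig()
encryption_config = pe.EncryptionConfiguration(
    footer_key="footer_key_name",
    column_keys={"col_key_name": ["n_legs"]})
file_encryption_properties = crypto_factory.file_encryption_properties(
    kms_config, encryption_config)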
env-llmeval/lib/python3.10/site-packages/pyarrow/parquet/__init__.py
ADDED
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# flake8: noqa
+
+from .core import *
env-llmeval/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (201 Bytes).
env-llmeval/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc
ADDED
Binary file (74.1 kB).
env-llmeval/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc
ADDED
Binary file (365 Bytes).
env-llmeval/lib/python3.10/site-packages/pyarrow/parquet/core.py
ADDED
@@ -0,0 +1,2355 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+from collections import defaultdict
+from contextlib import nullcontext
+from functools import reduce
+
+import inspect
+import json
+import os
+import re
+import operator
+import warnings
+
+import pyarrow as pa
+
+try:
+    import pyarrow._parquet as _parquet
+except ImportError as exc:
+    raise ImportError(
+        "The pyarrow installation is not built with support "
+        f"for the Parquet file format ({str(exc)})"
+    ) from None
+
+from pyarrow._parquet import (ParquetReader, Statistics,  # noqa
+                              FileMetaData, RowGroupMetaData,
+                              ColumnChunkMetaData,
+                              ParquetSchema, ColumnSchema,
+                              ParquetLogicalType,
+                              FileEncryptionProperties,
+                              FileDecryptionProperties,
+                              SortingColumn)
+from pyarrow.fs import (LocalFileSystem, FileSystem, FileType,
+                        _resolve_filesystem_and_path, _ensure_filesystem)
+from pyarrow import filesystem as legacyfs
+from pyarrow.util import guid, _is_path_like, _stringify_path, _deprecate_api
+
+
+def _check_contains_null(val):
+    if isinstance(val, bytes):
+        for byte in val:
+            if isinstance(byte, bytes):
+                compare_to = chr(0)
+            else:
+                compare_to = 0
+            if byte == compare_to:
+                return True
+    elif isinstance(val, str):
+        return '\x00' in val
+    return False
+
+
+def _check_filters(filters, check_null_strings=True):
+    """
+    Check if filters are well-formed.
+    """
+    if filters is not None:
+        if len(filters) == 0 or any(len(f) == 0 for f in filters):
+            raise ValueError("Malformed filters")
+        if isinstance(filters[0][0], str):
+            # We have encountered the situation where we have one nesting level
+            # too few:
+            #   We have [(,,), ..] instead of [[(,,), ..]]
+            filters = [filters]
+        if check_null_strings:
+            for conjunction in filters:
+                for col, op, val in conjunction:
+                    if (
+                        isinstance(val, list) and
+                        all(_check_contains_null(v) for v in val) or
+                        _check_contains_null(val)
+                    ):
+                        raise NotImplementedError(
+                            "Null-terminated binary strings are not supported "
+                            "as filter values."
+                        )
+    return filters
+
+
+_DNF_filter_doc = """Predicates are expressed using an ``Expression`` or using
+the disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``.
+DNF allows arbitrary boolean logical combinations of single column predicates.
+The innermost tuples each describe a single column predicate. The list of inner
+predicates is interpreted as a conjunction (AND), forming a more selective and
+multiple column predicate. Finally, the most outer list combines these filters
+as a disjunction (OR).
+
+Predicates may also be passed as List[Tuple]. This form is interpreted
+as a single conjunction. To express OR in predicates, one must
+use the (preferred) List[List[Tuple]] notation.
+
+Each tuple has format: (``key``, ``op``, ``value``) and compares the
+``key`` with the ``value``.
+The supported ``op`` are:  ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
+``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
+``value`` must be a collection such as a ``list``, a ``set`` or a
+``tuple``.
+
+Examples:
+
+Using the ``Expression`` API:
+
+.. code-block:: python
+
+    import pyarrow.compute as pc
+    pc.field('x') == 0
+    pc.field('y').isin(['a', 'b', 'c'])
+    ~pc.field('y').isin({'a', 'b'})
+
+Using the DNF format:
+
+.. code-block:: python
+
+    ('x', '=', 0)
+    ('y', 'in', ['a', 'b', 'c'])
+    ('z', 'not in', {'a','b'})
+
+"""
+
+
+def filters_to_expression(filters):
+    """
+    Check if filters are well-formed and convert to an ``Expression``.
+
+    Parameters
+    ----------
+    filters : List[Tuple] or List[List[Tuple]]
+
+    Notes
+    -----
+    See internal ``pyarrow._DNF_filter_doc`` attribute for more details.
+
+    Examples
+    --------
+
+    >>> filters_to_expression([('foo', '==', 'bar')])
+    <pyarrow.compute.Expression (foo == "bar")>
+
+    Returns
+    -------
+    pyarrow.compute.Expression
+        An Expression representing the filters
+    """
+    import pyarrow.dataset as ds
+
+    if isinstance(filters, ds.Expression):
+        return filters
+
+    filters = _check_filters(filters, check_null_strings=False)
+
+    def convert_single_predicate(col, op, val):
+        field = ds.field(col)
+
+        if op == "=" or op == "==":
+            return field == val
+        elif op == "!=":
+            return field != val
+        elif op == '<':
+            return field < val
+        elif op == '>':
+            return field > val
+        elif op == '<=':
+            return field <= val
+        elif op == '>=':
+            return field >= val
+        elif op == 'in':
+            return field.isin(val)
+        elif op == 'not in':
+            return ~field.isin(val)
+        else:
+            raise ValueError(
+                '"{0}" is not a valid operator in predicates.'.format(
+                    (col, op, val)))
+
+    disjunction_members = []
+
+    for conjunction in filters:
+        conjunction_members = [
+            convert_single_predicate(col, op, val)
+            for col, op, val in conjunction
+        ]
+
+        disjunction_members.append(reduce(operator.and_, conjunction_members))
+
+    return reduce(operator.or_, disjunction_members)
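A short usage sketch of the DNF-to-Expression conversion above; `filters_to_expression` is re-exported as `pyarrow.parquet.filters_to_expression`, and the column names here are illustrative:

import pyarrow.parquet as pq

# (x == 0 AND y in {'a', 'b'}) OR (z > 5), per the DNF rules documented above.
expr = pq.filters_to_expression([
    [('x', '=', 0), ('y', 'in', ['a', 'b'])],
    [('z', '>', 5)],
])
print(expr)  # a pyarrow.compute.Expression combining the predicates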
+
+
+_filters_to_expression = _deprecate_api(
+    "_filters_to_expression", "filters_to_expression",
+    filters_to_expression, "10.0.0", DeprecationWarning)
+
+
+# ----------------------------------------------------------------------
+# Reading a single Parquet file
+
+
+class ParquetFile:
+    """
+    Reader interface for a single Parquet file.
+
+    Parameters
+    ----------
+    source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
+        Readable source. For passing bytes or buffer-like file containing a
+        Parquet file, use pyarrow.BufferReader.
+    metadata : FileMetaData, default None
+        Use existing metadata object, rather than reading from file.
+    common_metadata : FileMetaData, default None
+        Will be used in reads for pandas schema metadata if not found in the
+        main file's metadata, no other uses at the moment.
+    read_dictionary : list
+        List of column names to read directly as DictionaryArray.
+    memory_map : bool, default False
+        If the source is a file path, use a memory map to read file, which can
+        improve performance in some environments.
+    buffer_size : int, default 0
+        If positive, perform read buffering when deserializing individual
+        column chunks. Otherwise IO calls are unbuffered.
+    pre_buffer : bool, default False
+        Coalesce and issue file reads in parallel to improve performance on
+        high-latency filesystems (e.g. S3). If True, Arrow will use a
+        background I/O thread pool.
+    coerce_int96_timestamp_unit : str, default None
+        Cast timestamps that are stored in INT96 format to a particular
+        resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
+        and therefore INT96 timestamps will be inferred as timestamps
+        in nanoseconds.
+    decryption_properties : FileDecryptionProperties, default None
+        File decryption properties for Parquet Modular Encryption.
+    thrift_string_size_limit : int, default None
+        If not None, override the maximum total string size allocated
+        when decoding Thrift structures. The default limit should be
+        sufficient for most Parquet files.
+    thrift_container_size_limit : int, default None
+        If not None, override the maximum total size of containers allocated
+        when decoding Thrift structures. The default limit should be
+        sufficient for most Parquet files.
+    filesystem : FileSystem, default None
+        If nothing passed, will be inferred based on path.
+        Path will try to be found in the local on-disk filesystem otherwise
+        it will be parsed as an URI to determine the filesystem.
+    page_checksum_verification : bool, default False
+        If True, verify the checksum for each page read from the file.
+
+    Examples
+    --------
+
+    Generate an example PyArrow Table and write it to Parquet file:
+
+    >>> import pyarrow as pa
+    >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
+    ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
+    ...                              "Brittle stars", "Centipede"]})
+
+    >>> import pyarrow.parquet as pq
+    >>> pq.write_table(table, 'example.parquet')
+
+    Create a ``ParquetFile`` object from the Parquet file:
+
+    >>> parquet_file = pq.ParquetFile('example.parquet')
+
+    Read the data:
+
+    >>> parquet_file.read()
+    pyarrow.Table
+    n_legs: int64
+    animal: string
+    ----
+    n_legs: [[2,2,4,4,5,100]]
+    animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]]
+
+    Create a ParquetFile object with "animal" column as DictionaryArray:
+
+    >>> parquet_file = pq.ParquetFile('example.parquet',
+    ...                               read_dictionary=["animal"])
+    >>> parquet_file.read()
+    pyarrow.Table
+    n_legs: int64
+    animal: dictionary<values=string, indices=int32, ordered=0>
+    ----
+    n_legs: [[2,2,4,4,5,100]]
+    animal: [  -- dictionary:
+    ["Flamingo","Parrot",...,"Brittle stars","Centipede"]  -- indices:
+    [0,1,2,3,4,5]]
+    """
+
+    def __init__(self, source, *, metadata=None, common_metadata=None,
+                 read_dictionary=None, memory_map=False, buffer_size=0,
+                 pre_buffer=False, coerce_int96_timestamp_unit=None,
+                 decryption_properties=None, thrift_string_size_limit=None,
+                 thrift_container_size_limit=None, filesystem=None,
+                 page_checksum_verification=False):
+
+        self._close_source = getattr(source, 'closed', True)
+
+        filesystem, source = _resolve_filesystem_and_path(
+            source, filesystem, memory_map)
+        if filesystem is not None:
+            source = filesystem.open_input_file(source)
+            self._close_source = True  # We opened it here, ensure we close it.
+
+        self.reader = ParquetReader()
+        self.reader.open(
+            source, use_memory_map=memory_map,
+            buffer_size=buffer_size, pre_buffer=pre_buffer,
+            read_dictionary=read_dictionary, metadata=metadata,
+            coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
+            decryption_properties=decryption_properties,
+            thrift_string_size_limit=thrift_string_size_limit,
+            thrift_container_size_limit=thrift_container_size_limit,
+            page_checksum_verification=page_checksum_verification,
+        )
+        self.common_metadata = common_metadata
+        self._nested_paths_by_prefix = self._build_nested_paths()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args, **kwargs):
+        self.close()
+
+    def _build_nested_paths(self):
+        paths = self.reader.column_paths
+
+        result = defaultdict(list)
+
+        for i, path in enumerate(paths):
+            key = path[0]
+            rest = path[1:]
+            while True:
+                result[key].append(i)
+
+                if not rest:
+                    break
+
+                key = '.'.join((key, rest[0]))
+                rest = rest[1:]
+
+        return result
+
+    @property
+    def metadata(self):
+        """
+        Return the Parquet metadata.
+        """
+        return self.reader.metadata
+
+    @property
+    def schema(self):
+        """
+        Return the Parquet schema, unconverted to Arrow types
+        """
+        return self.metadata.schema
+
+    @property
+    def schema_arrow(self):
+        """
+        Return the inferred Arrow schema, converted from the whole Parquet
+        file's schema
+
+        Examples
+        --------
+        Generate an example Parquet file:
+
+        >>> import pyarrow as pa
+        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
+        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
+        ...                              "Brittle stars", "Centipede"]})
+        >>> import pyarrow.parquet as pq
+        >>> pq.write_table(table, 'example.parquet')
+        >>> parquet_file = pq.ParquetFile('example.parquet')
+
+        Read the Arrow schema:
+
+        >>> parquet_file.schema_arrow
+        n_legs: int64
+        animal: string
+        """
+        return self.reader.schema_arrow
+
+    @property
+    def num_row_groups(self):
+        """
+        Return the number of row groups of the Parquet file.
+
+        Examples
+        --------
+        >>> import pyarrow as pa
+        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
+        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
+        ...                              "Brittle stars", "Centipede"]})
+        >>> import pyarrow.parquet as pq
+        >>> pq.write_table(table, 'example.parquet')
+        >>> parquet_file = pq.ParquetFile('example.parquet')
+
+        >>> parquet_file.num_row_groups
+        1
+        """
+        return self.reader.num_row_groups
+
+    def close(self, force: bool = False):
+        if self._close_source or force:
+            self.reader.close()
+
+    @property
+    def closed(self) -> bool:
+        return self.reader.closed
+
+    def read_row_group(self, i, columns=None, use_threads=True,
+                       use_pandas_metadata=False):
+        """
+        Read a single row group from a Parquet file.
+
+        Parameters
+        ----------
+        i : int
+            Index of the individual row group that we want to read.
+        columns : list
+            If not None, only these columns will be read from the row group. A
+            column name may be a prefix of a nested field, e.g. 'a' will select
+            'a.b', 'a.c', and 'a.d.e'.
+        use_threads : bool, default True
+            Perform multi-threaded column reads.
+        use_pandas_metadata : bool, default False
+            If True and file has custom pandas schema metadata, ensure that
+            index columns are also loaded.
+
+        Returns
+        -------
+        pyarrow.table.Table
+            Content of the row group as a table (of columns)
+
+        Examples
+        --------
+        >>> import pyarrow as pa
+        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
+        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
+        ...                              "Brittle stars", "Centipede"]})
+        >>> import pyarrow.parquet as pq
+        >>> pq.write_table(table, 'example.parquet')
+        >>> parquet_file = pq.ParquetFile('example.parquet')
+
+        >>> parquet_file.read_row_group(0)
+        pyarrow.Table
+        n_legs: int64
+        animal: string
+        ----
+        n_legs: [[2,2,4,4,5,100]]
+        animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]]
+        """
+        column_indices = self._get_column_indices(
+            columns, use_pandas_metadata=use_pandas_metadata)
+        return self.reader.read_row_group(i, column_indices=column_indices,
+                                          use_threads=use_threads)
+
+    def read_row_groups(self, row_groups, columns=None, use_threads=True,
+                        use_pandas_metadata=False):
+        """
+        Read a multiple row groups from a Parquet file.
+
+        Parameters
+        ----------
+        row_groups : list
+            Only these row groups will be read from the file.
+        columns : list
+            If not None, only these columns will be read from the row group. A
+            column name may be a prefix of a nested field, e.g. 'a' will select
+            'a.b', 'a.c', and 'a.d.e'.
+        use_threads : bool, default True
+            Perform multi-threaded column reads.
+        use_pandas_metadata : bool, default False
+            If True and file has custom pandas schema metadata, ensure that
+            index columns are also loaded.
+
+        Returns
+        -------
+        pyarrow.table.Table
+            Content of the row groups as a table (of columns).
+
+        Examples
+        --------
+        >>> import pyarrow as pa
+        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
+        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
+        ...                              "Brittle stars", "Centipede"]})
+        >>> import pyarrow.parquet as pq
+        >>> pq.write_table(table, 'example.parquet')
+        >>> parquet_file = pq.ParquetFile('example.parquet')
+
+        >>> parquet_file.read_row_groups([0,0])
+        pyarrow.Table
+        n_legs: int64
+        animal: string
+        ----
+        n_legs: [[2,2,4,4,5,...,2,4,4,5,100]]
+        animal: [["Flamingo","Parrot","Dog",...,"Brittle stars","Centipede"]]
+        """
+        column_indices = self._get_column_indices(
+            columns, use_pandas_metadata=use_pandas_metadata)
+        return self.reader.read_row_groups(row_groups,
+                                           column_indices=column_indices,
+                                           use_threads=use_threads)
+
+    def iter_batches(self, batch_size=65536, row_groups=None, columns=None,
+                     use_threads=True, use_pandas_metadata=False):
+        """
+        Read streaming batches from a Parquet file.
+
+        Parameters
+        ----------
+        batch_size : int, default 64K
+            Maximum number of records to yield per batch. Batches may be
+            smaller if there aren't enough rows in the file.
+        row_groups : list
+            Only these row groups will be read from the file.
+        columns : list
+            If not None, only these columns will be read from the file. A
+            column name may be a prefix of a nested field, e.g. 'a' will select
+            'a.b', 'a.c', and 'a.d.e'.
+        use_threads : boolean, default True
+            Perform multi-threaded column reads.
+        use_pandas_metadata : boolean, default False
+            If True and file has custom pandas schema metadata, ensure that
+            index columns are also loaded.
+
+        Yields
+        ------
+        pyarrow.RecordBatch
+            Contents of each batch as a record batch
+
+        Examples
+        --------
+        Generate an example Parquet file:
+
+        >>> import pyarrow as pa
+        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
+        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
+        ...                              "Brittle stars", "Centipede"]})
+        >>> import pyarrow.parquet as pq
+        >>> pq.write_table(table, 'example.parquet')
+        >>> parquet_file = pq.ParquetFile('example.parquet')
+        >>> for i in parquet_file.iter_batches():
+        ...     print("RecordBatch")
+        ...     print(i.to_pandas())
+        ...
+        RecordBatch
+           n_legs         animal
+        0       2       Flamingo
+        1       2         Parrot
+        2       4            Dog
+        3       4          Horse
+        4       5  Brittle stars
+        5     100      Centipede
+        """
+        if row_groups is None:
+            row_groups = range(0, self.metadata.num_row_groups)
+        column_indices = self._get_column_indices(
+            columns, use_pandas_metadata=use_pandas_metadata)
+
+        batches = self.reader.iter_batches(batch_size,
+                                           row_groups=row_groups,
+                                           column_indices=column_indices,
+                                           use_threads=use_threads)
+        return batches
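Tying the reader methods above together, a minimal sketch of streaming a file batch-by-batch under a context manager; the file name is illustrative:

import pyarrow.parquet as pq

# ParquetFile defines __enter__/__exit__ above, so the underlying reader
# is closed even if iteration stops early.
with pq.ParquetFile('example.parquet') as f:
    total = 0
    for batch in f.iter_batches(batch_size=1024, columns=['n_legs']):
        total += batch.num_rows
    print(total, f.num_row_groups)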
580 |
+
|
581 |
+
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
|
582 |
+
"""
|
583 |
+
Read a Table from Parquet format.
|
584 |
+
|
585 |
+
Parameters
|
586 |
+
----------
|
587 |
+
columns : list
|
588 |
+
If not None, only these columns will be read from the file. A
|
589 |
+
column name may be a prefix of a nested field, e.g. 'a' will select
|
590 |
+
'a.b', 'a.c', and 'a.d.e'.
|
591 |
+
use_threads : bool, default True
|
592 |
+
Perform multi-threaded column reads.
|
593 |
+
use_pandas_metadata : bool, default False
|
594 |
+
If True and file has custom pandas schema metadata, ensure that
|
595 |
+
index columns are also loaded.
|
596 |
+
|
597 |
+
Returns
|
598 |
+
-------
|
599 |
+
pyarrow.table.Table
|
600 |
+
Content of the file as a table (of columns).
|
601 |
+
|
602 |
+
Examples
|
603 |
+
--------
|
604 |
+
Generate an example Parquet file:
|
605 |
+
|
606 |
+
>>> import pyarrow as pa
|
607 |
+
>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
|
608 |
+
... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
|
609 |
+
... "Brittle stars", "Centipede"]})
|
610 |
+
>>> import pyarrow.parquet as pq
|
611 |
+
>>> pq.write_table(table, 'example.parquet')
|
612 |
+
>>> parquet_file = pq.ParquetFile('example.parquet')
|
613 |
+
|
614 |
+
Read a Table:
|
615 |
+
|
616 |
+
>>> parquet_file.read(columns=["animal"])
|
617 |
+
pyarrow.Table
|
618 |
+
animal: string
|
619 |
+
----
|
620 |
+
animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]]
|
621 |
+
"""
|
622 |
+
column_indices = self._get_column_indices(
|
623 |
+
columns, use_pandas_metadata=use_pandas_metadata)
|
624 |
+
return self.reader.read_all(column_indices=column_indices,
|
625 |
+
use_threads=use_threads)
|
626 |
+
|
627 |
+
def scan_contents(self, columns=None, batch_size=65536):
|
628 |
+
"""
|
629 |
+
Read contents of file for the given columns and batch size.
|
630 |
+
|
631 |
+
Notes
|
632 |
+
-----
|
633 |
+
This function's primary purpose is benchmarking.
|
634 |
+
The scan is executed on a single thread.
|
635 |
+
|
636 |
+
Parameters
|
637 |
+
----------
|
638 |
+
columns : list of integers, default None
|
639 |
+
Select columns to read, if None scan all columns.
|
640 |
+
batch_size : int, default 64K
|
641 |
+
Number of rows to read at a time internally.
|
642 |
+
|
643 |
+
Returns
|
644 |
+
-------
|
645 |
+
num_rows : int
|
646 |
+
Number of rows in file
|
647 |
+
|
648 |
+
Examples
|
649 |
+
--------
|
650 |
+
>>> import pyarrow as pa
|
651 |
+
>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
|
652 |
+
... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
|
653 |
+
... "Brittle stars", "Centipede"]})
|
654 |
+
>>> import pyarrow.parquet as pq
|
655 |
+
>>> pq.write_table(table, 'example.parquet')
|
656 |
+
>>> parquet_file = pq.ParquetFile('example.parquet')
|
657 |
+
|
658 |
+
>>> parquet_file.scan_contents()
|
659 |
+
6
|
660 |
+
"""
|
661 |
+
column_indices = self._get_column_indices(columns)
|
662 |
+
return self.reader.scan_contents(column_indices,
|
663 |
+
batch_size=batch_size)
|
664 |
+
|
665 |
+
def _get_column_indices(self, column_names, use_pandas_metadata=False):
|
666 |
+
if column_names is None:
|
667 |
+
return None
|
668 |
+
|
669 |
+
indices = []
|
670 |
+
|
671 |
+
for name in column_names:
|
672 |
+
if name in self._nested_paths_by_prefix:
|
673 |
+
indices.extend(self._nested_paths_by_prefix[name])
|
674 |
+
|
675 |
+
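        # pandas serializes a RangeIndex as a dict (it has no physical
        # column), which is why dict entries are skipped below when mapping
        # index columns back to column indices.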
        if use_pandas_metadata:
            file_keyvalues = self.metadata.metadata
            common_keyvalues = (self.common_metadata.metadata
                                if self.common_metadata is not None
                                else None)

            if file_keyvalues and b'pandas' in file_keyvalues:
                index_columns = _get_pandas_index_columns(file_keyvalues)
            elif common_keyvalues and b'pandas' in common_keyvalues:
                index_columns = _get_pandas_index_columns(common_keyvalues)
            else:
                index_columns = []

            if indices is not None and index_columns:
                indices += [self.reader.column_name_idx(descr)
                            for descr in index_columns
                            if not isinstance(descr, dict)]

        return indices


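# Spark rejects Parquet field names containing whitespace or the characters
# ",;{}()=", so they are replaced with underscores when flavor='spark'.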
_SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]')


def _sanitized_spark_field_name(name):
    return _SPARK_DISALLOWED_CHARS.sub('_', name)


def _sanitize_schema(schema, flavor):
    if 'spark' in flavor:
        sanitized_fields = []

        schema_changed = False

        for field in schema:
            name = field.name
            sanitized_name = _sanitized_spark_field_name(name)

            if sanitized_name != name:
                schema_changed = True
                sanitized_field = pa.field(sanitized_name, field.type,
                                           field.nullable, field.metadata)
                sanitized_fields.append(sanitized_field)
            else:
                sanitized_fields.append(field)

        new_schema = pa.schema(sanitized_fields, metadata=schema.metadata)
        return new_schema, schema_changed
    else:
        return schema, False


def _sanitize_table(table, new_schema, flavor):
    # TODO: This will not handle prohibited characters in nested field names
    if 'spark' in flavor:
        column_data = [table[i] for i in range(table.num_columns)]
        return pa.Table.from_arrays(column_data, schema=new_schema)
    else:
        return table


_parquet_writer_arg_docs = """version : {"1.0", "2.4", "2.6"}, default "2.6"
    Determine which Parquet logical types are available for use, whether the
    reduced set from the Parquet 1.x.x format or the expanded logical types
    added in later format versions.
    Files written with version='2.4' or '2.6' may not be readable in all
    Parquet implementations, so version='1.0' is likely the choice that
    maximizes file compatibility.
    UINT32 and some logical types are only available with version '2.4'.
    Nanosecond timestamps are only available with version '2.6'.
    Other features such as compression algorithms or the new serialized
    data page format must be enabled separately (see 'compression' and
    'data_page_version').
use_dictionary : bool or list, default True
    Specify if we should use dictionary encoding in general or only for
    some columns.
    When encoding the column, if the dictionary size is too large, the
    column will fall back to ``PLAIN`` encoding. In particular, the
    ``BOOLEAN`` type does not support dictionary encoding.
compression : str or dict, default 'snappy'
    Specify the compression codec, either on a general basis or per-column.
    Valid values: {'NONE', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD'}.
write_statistics : bool or list, default True
    Specify if we should write statistics in general (default is True) or only
    for some columns.
use_deprecated_int96_timestamps : bool, default None
    Write timestamps to INT96 Parquet format. Defaults to False unless enabled
    by flavor argument. This takes priority over the coerce_timestamps option.
coerce_timestamps : str, default None
    Cast timestamps to a particular resolution. If omitted, defaults are
    chosen depending on `version`. For ``version='1.0'`` and ``version='2.4'``,
    nanoseconds are cast to microseconds ('us'), while for other `version`
    values, they are written natively without loss of resolution. Seconds
    are always cast to milliseconds ('ms') by default, as Parquet does not
    have any temporal type with seconds resolution.
    If the casting results in loss of data, an exception is raised
    unless ``allow_truncated_timestamps=True`` is given.
    Valid values: {None, 'ms', 'us'}
allow_truncated_timestamps : bool, default False
    Allow loss of data when coercing timestamps to a particular
    resolution. E.g. if microsecond or nanosecond data is lost when coercing to
    'ms', do not raise an exception. Passing ``allow_truncated_timestamps=True``
    will NOT result in the truncation exception being ignored unless
    ``coerce_timestamps`` is not None.
data_page_size : int, default None
    Set a target threshold for the approximate encoded size of data
    pages within a column chunk (in bytes). If None, use the default data page
    size of 1MByte.
flavor : {'spark'}, default None
    Sanitize schema or set other compatibility options to work with
    various target systems.
filesystem : FileSystem, default None
    If nothing passed, will be inferred from `where` if path-like, else
    `where` is already a file-like object so no filesystem is needed.
compression_level : int or dict, default None
    Specify the compression level for a codec, either on a general basis or
    per-column. If None is passed, arrow selects the compression level for
    the compression codec in use. The compression level has a different
    meaning for each codec, so you have to read the documentation of the
    codec you are using.
    An exception is thrown if the compression codec does not allow specifying
    a compression level.
use_byte_stream_split : bool or list, default False
    Specify if the byte_stream_split encoding should be used in general or
    only for some columns. If both dictionary and byte_stream_split are
    enabled, then dictionary is preferred.
    The byte_stream_split encoding is valid only for floating-point data types
    and should be combined with a compression codec.
column_encoding : string or dict, default None
    Specify the encoding scheme on a per column basis.
    Can only be used when ``use_dictionary`` is set to False, and
    cannot be used in combination with ``use_byte_stream_split``.
    Currently supported values: {'PLAIN', 'BYTE_STREAM_SPLIT',
    'DELTA_BINARY_PACKED', 'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'}.
    Certain encodings are only compatible with certain data types.
    Please refer to the encodings section of `Reading and writing Parquet
    files <https://arrow.apache.org/docs/cpp/parquet.html#encodings>`_.
data_page_version : {"1.0", "2.0"}, default "1.0"
    The serialized Parquet data page format version to write, defaults to
    1.0. This does not impact the file schema logical types and Arrow to
    Parquet type casting behavior; for that use the "version" option.
use_compliant_nested_type : bool, default True
    Whether to write compliant Parquet nested type (lists) as defined
    `here <https://github.com/apache/parquet-format/blob/master/
    LogicalTypes.md#nested-types>`_, defaults to ``True``.
    For ``use_compliant_nested_type=True``, this will write into a list
    with 3-level structure where the middle level, named ``list``,
    is a repeated group with a single field named ``element``::

        <list-repetition> group <name> (LIST) {
            repeated group list {
                <element-repetition> <element-type> element;
            }
        }

    For ``use_compliant_nested_type=False``, this will also write into a list
    with 3-level structure, where the name of the single field of the middle
    level ``list`` is taken from the element name for nested columns in Arrow,
    which defaults to ``item``::

        <list-repetition> group <name> (LIST) {
            repeated group list {
                <element-repetition> <element-type> item;
            }
        }
encryption_properties : FileEncryptionProperties, default None
    File encryption properties for Parquet Modular Encryption.
    If None, no encryption will be done.
    The encryption properties can be created using:
    ``CryptoFactory.file_encryption_properties()``.
write_batch_size : int, default None
    Number of values to write to a page at a time. If None, use the default of
    1024. ``write_batch_size`` is complementary to ``data_page_size``. If pages
    are exceeding the ``data_page_size`` due to large column values, lowering
    the batch size can help keep page sizes closer to the intended size.
dictionary_pagesize_limit : int, default None
    Specify the dictionary page size limit per row group. If None, use the
    default 1MB.
store_schema : bool, default True
    By default, the Arrow schema is serialized and stored in the Parquet
    file metadata (in the "ARROW:schema" key). When reading the file,
    if this key is available, it will be used to more faithfully recreate
    the original Arrow data. For example, for tz-aware timestamp columns
    it will restore the timezone (Parquet only stores the UTC values without
    timezone), or columns with duration type will be restored from the int64
    Parquet column.
write_page_index : bool, default False
    Whether to write a page index in general for all columns.
    Writing statistics to the page index disables the old method of writing
    statistics to each data page header. The page index makes statistics-based
    filtering more efficient than the page header, as it gathers all the
    statistics for a Parquet file in a single place, avoiding scattered I/O.
    Note that the page index is not yet used on the read side by PyArrow.
write_page_checksum : bool, default False
    Whether to write page checksums in general for all columns.
    Page checksums enable detection of data corruption, which might occur
    during transmission or in storage.
sorting_columns : Sequence of SortingColumn, default None
    Specify the sort order of the data being written. The writer does not sort
    the data nor does it verify that the data is sorted. The sort order is
    written to the row group metadata, which can then be used by readers.
"""

_parquet_writer_example_doc = """\
Generate an example PyArrow Table and RecordBatch:

>>> import pyarrow as pa
>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})
>>> batch = pa.record_batch([[2, 2, 4, 4, 5, 100],
...                         ["Flamingo", "Parrot", "Dog", "Horse",
...                          "Brittle stars", "Centipede"]],
...                         names=['n_legs', 'animal'])

create a ParquetWriter object:

>>> import pyarrow.parquet as pq
>>> writer = pq.ParquetWriter('example.parquet', table.schema)

and write the Table into the Parquet file:

>>> writer.write_table(table)
>>> writer.close()

>>> pq.read_table('example.parquet').to_pandas()
   n_legs         animal
0       2       Flamingo
1       2         Parrot
2       4            Dog
3       4          Horse
4       5  Brittle stars
5     100      Centipede

create a ParquetWriter object for the RecordBatch:

>>> writer2 = pq.ParquetWriter('example2.parquet', batch.schema)

and write the RecordBatch into the Parquet file:

>>> writer2.write_batch(batch)
>>> writer2.close()

>>> pq.read_table('example2.parquet').to_pandas()
   n_legs         animal
0       2       Flamingo
1       2         Parrot
2       4            Dog
3       4          Horse
4       5  Brittle stars
5     100      Centipede
"""


class ParquetWriter:

    __doc__ = """
Class for incrementally building a Parquet file for Arrow tables.

Parameters
----------
where : path or file-like object
schema : pyarrow.Schema
{}
writer_engine_version : unused
**options : dict
    If options contains a key `metadata_collector` then the
    corresponding value is assumed to be a list (or any object with
    `.append` method) that will be filled with the file metadata instance
    of the written file.

Examples
--------
{}
""".format(_parquet_writer_arg_docs, _parquet_writer_example_doc)

    def __init__(self, where, schema, filesystem=None,
                 flavor=None,
                 version='2.6',
                 use_dictionary=True,
                 compression='snappy',
                 write_statistics=True,
                 use_deprecated_int96_timestamps=None,
                 compression_level=None,
                 use_byte_stream_split=False,
                 column_encoding=None,
                 writer_engine_version=None,
                 data_page_version='1.0',
                 use_compliant_nested_type=True,
                 encryption_properties=None,
                 write_batch_size=None,
                 dictionary_pagesize_limit=None,
                 store_schema=True,
                 write_page_index=False,
                 write_page_checksum=False,
                 sorting_columns=None,
                 **options):
        if use_deprecated_int96_timestamps is None:
            # Use int96 timestamps for Spark
            if flavor is not None and 'spark' in flavor:
                use_deprecated_int96_timestamps = True
            else:
                use_deprecated_int96_timestamps = False

        self.flavor = flavor
        if flavor is not None:
            schema, self.schema_changed = _sanitize_schema(schema, flavor)
        else:
            self.schema_changed = False

        self.schema = schema
        self.where = where

        # If we open a file using a filesystem, store file handle so we can be
        # sure to close it when `self.close` is called.
        self.file_handle = None

        filesystem, path = _resolve_filesystem_and_path(
            where, filesystem, allow_legacy_filesystem=True
        )
        if filesystem is not None:
            if isinstance(filesystem, legacyfs.FileSystem):
                # legacy filesystem (eg custom subclass)
                # TODO deprecate
                sink = self.file_handle = filesystem.open(path, 'wb')
            else:
                # ARROW-10480: do not auto-detect compression. While
                # a filename like foo.parquet.gz is nonconforming, it
                # shouldn't implicitly apply compression.
                sink = self.file_handle = filesystem.open_output_stream(
                    path, compression=None)
        else:
            sink = where
        self._metadata_collector = options.pop('metadata_collector', None)
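        # Only the V2 writer engine exists nowadays; the
        # 'writer_engine_version' parameter is documented above as unused.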
        engine_version = 'V2'
        self.writer = _parquet.ParquetWriter(
            sink, schema,
            version=version,
            compression=compression,
            use_dictionary=use_dictionary,
            write_statistics=write_statistics,
            use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
            compression_level=compression_level,
            use_byte_stream_split=use_byte_stream_split,
            column_encoding=column_encoding,
            writer_engine_version=engine_version,
            data_page_version=data_page_version,
            use_compliant_nested_type=use_compliant_nested_type,
            encryption_properties=encryption_properties,
            write_batch_size=write_batch_size,
            dictionary_pagesize_limit=dictionary_pagesize_limit,
            store_schema=store_schema,
            write_page_index=write_page_index,
            write_page_checksum=write_page_checksum,
            sorting_columns=sorting_columns,
            **options)
        self.is_open = True

    def __del__(self):
        if getattr(self, 'is_open', False):
            self.close()

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()
        # return false since we want to propagate exceptions
        return False

    def write(self, table_or_batch, row_group_size=None):
        """
        Write RecordBatch or Table to the Parquet file.

        Parameters
        ----------
        table_or_batch : {RecordBatch, Table}
        row_group_size : int, default None
            Maximum number of rows in each written row group. If None,
            the row group size will be the minimum of the input
            table or batch length and 1024 * 1024.
        """
        if isinstance(table_or_batch, pa.RecordBatch):
            self.write_batch(table_or_batch, row_group_size)
        elif isinstance(table_or_batch, pa.Table):
            self.write_table(table_or_batch, row_group_size)
        else:
            raise TypeError(type(table_or_batch))

    def write_batch(self, batch, row_group_size=None):
        """
        Write RecordBatch to the Parquet file.

        Parameters
        ----------
        batch : RecordBatch
        row_group_size : int, default None
            Maximum number of rows in written row group. If None, the
            row group size will be the minimum of the RecordBatch
            size and 1024 * 1024. If set larger than 64Mi then 64Mi
            will be used instead.
        """
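        # A single batch is wrapped in a Table so the row-group splitting
        # logic in write_table can be reused.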
        table = pa.Table.from_batches([batch], batch.schema)
        self.write_table(table, row_group_size)

    def write_table(self, table, row_group_size=None):
        """
        Write Table to the Parquet file.

        Parameters
        ----------
        table : Table
        row_group_size : int, default None
            Maximum number of rows in each written row group. If None,
            the row group size will be the minimum of the Table size
            and 1024 * 1024. If set larger than 64Mi then 64Mi will
            be used instead.

        """
        if self.schema_changed:
            table = _sanitize_table(table, self.schema, self.flavor)
        assert self.is_open

        if not table.schema.equals(self.schema, check_metadata=False):
            msg = ('Table schema does not match schema used to create file: '
                   '\ntable:\n{!s} vs. \nfile:\n{!s}'
                   .format(table.schema, self.schema))
            raise ValueError(msg)

        self.writer.write_table(table, row_group_size=row_group_size)

    def close(self):
        """
        Close the connection to the Parquet file.
        """
        if self.is_open:
            self.writer.close()
            self.is_open = False
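            # Hand the written FileMetaData to the caller-supplied
            # collector, e.g. to assemble a _metadata sidecar file later.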
            if self._metadata_collector is not None:
                self._metadata_collector.append(self.writer.metadata)
        if self.file_handle is not None:
            self.file_handle.close()


def _get_pandas_index_columns(keyvalues):
    return (json.loads(keyvalues[b'pandas'].decode('utf8'))
            ['index_columns'])


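# '_SUCCESS' is the zero-byte marker written by Hadoop/Spark job committers,
# not a data file, so it is excluded from dataset discovery.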
EXCLUDED_PARQUET_PATHS = {'_SUCCESS'}


def _is_local_file_system(fs):
    return isinstance(fs, LocalFileSystem) or isinstance(
        fs, legacyfs.LocalFileSystem
    )


_read_docstring_common = """\
read_dictionary : list, default None
    List of names or column paths (for nested types) to read directly
    as DictionaryArray. Only supported for BYTE_ARRAY storage. To read
    a flat column as dictionary-encoded pass the column name. For
    nested types, you must pass the full column "path", which could be
    something like level1.level2.list.item. Refer to the Parquet
    file's schema to obtain the paths.
memory_map : bool, default False
    If the source is a file path, use a memory map to read file, which can
    improve performance in some environments.
buffer_size : int, default 0
    If positive, perform read buffering when deserializing individual
    column chunks. Otherwise IO calls are unbuffered.
partitioning : pyarrow.dataset.Partitioning or str or list of str, \
default "hive"
    The partitioning scheme for a partitioned dataset. The default of "hive"
    assumes directory names with key=value pairs like "/year=2009/month=11".
    In addition, a scheme like "/2009/11" is also supported, in which case
    you need to specify the field names or a full schema. See the
    ``pyarrow.dataset.partitioning()`` function for more details."""


_parquet_dataset_example = """\
Generate an example PyArrow Table and write it to a partitioned dataset:

>>> import pyarrow as pa
>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
...                   'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})
>>> import pyarrow.parquet as pq
>>> pq.write_to_dataset(table, root_path='dataset_v2',
...                     partition_cols=['year'])

create a ParquetDataset object from the dataset source:

>>> dataset = pq.ParquetDataset('dataset_v2/')

and read the data:

>>> dataset.read().to_pandas()
   n_legs         animal  year
0       5  Brittle stars  2019
1       2       Flamingo  2020
2       4            Dog  2021
3     100      Centipede  2021
4       2         Parrot  2022
5       4          Horse  2022

create a ParquetDataset object with filter:

>>> dataset = pq.ParquetDataset('dataset_v2/',
...                             filters=[('n_legs','=',4)])
>>> dataset.read().to_pandas()
   n_legs animal  year
0       4    Dog  2021
1       4  Horse  2022
"""


class ParquetDataset:
    __doc__ = """
Encapsulates details of reading a complete Parquet dataset possibly
consisting of multiple files and partitions in subdirectories.

Parameters
----------
path_or_paths : str or List[str]
    A directory name, single file name, or list of file names.
filesystem : FileSystem, default None
    If nothing passed, will be inferred based on path.
    The path will first be looked up in the local on-disk filesystem;
    otherwise it is parsed as a URI to determine the filesystem.
schema : pyarrow.parquet.Schema
    Optionally provide the Schema for the Dataset, in which case it will
    not be inferred from the source.
filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None
    Rows which do not match the filter predicate will be removed from scanned
    data. Partition keys embedded in a nested directory structure will be
    exploited to avoid loading files at all if they contain no matching rows.
    Within-file level filtering and different partitioning schemes are supported.

{1}
{0}
ignore_prefixes : list, optional
    Files matching any of these prefixes will be ignored by the
    discovery process.
    This is matched to the basename of a path.
    By default this is ['.', '_'].
    Note that discovery happens only if a directory is passed as source.
pre_buffer : bool, default True
    Coalesce and issue file reads in parallel to improve performance on
    high-latency filesystems (e.g. S3, GCS). If True, Arrow will use a
    background I/O thread pool. If using a filesystem layer that itself
    performs readahead (e.g. fsspec's S3FS), disable readahead for best
    results. Set to False if you want to prioritize minimal memory usage
    over maximum speed.
coerce_int96_timestamp_unit : str, default None
    Cast timestamps that are stored in INT96 format to a particular resolution
    (e.g. 'ms'). Setting to None is equivalent to 'ns' and therefore INT96
    timestamps will be inferred as timestamps in nanoseconds.
decryption_properties : FileDecryptionProperties or None
    File-level decryption properties.
    The decryption properties can be created using
    ``CryptoFactory.file_decryption_properties()``.
thrift_string_size_limit : int, default None
    If not None, override the maximum total string size allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.
thrift_container_size_limit : int, default None
    If not None, override the maximum total size of containers allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.
page_checksum_verification : bool, default False
    If True, verify the page checksum for each page read from the file.
use_legacy_dataset : bool, optional
    Deprecated and has no effect from PyArrow version 15.0.0.

Examples
--------
{2}
""".format(_read_docstring_common, _DNF_filter_doc, _parquet_dataset_example)

    def __init__(self, path_or_paths, filesystem=None, schema=None, *, filters=None,
                 read_dictionary=None, memory_map=False, buffer_size=None,
                 partitioning="hive", ignore_prefixes=None, pre_buffer=True,
                 coerce_int96_timestamp_unit=None,
                 decryption_properties=None, thrift_string_size_limit=None,
                 thrift_container_size_limit=None,
                 page_checksum_verification=False,
                 use_legacy_dataset=None):

        if use_legacy_dataset is not None:
            warnings.warn(
                "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 "
                "and will be removed in a future version.",
                FutureWarning, stacklevel=2)

        import pyarrow.dataset as ds

        # map format arguments
        read_options = {
            "pre_buffer": pre_buffer,
            "coerce_int96_timestamp_unit": coerce_int96_timestamp_unit,
            "thrift_string_size_limit": thrift_string_size_limit,
            "thrift_container_size_limit": thrift_container_size_limit,
            "page_checksum_verification": page_checksum_verification,
        }
        if buffer_size:
            read_options.update(use_buffered_stream=True,
                                buffer_size=buffer_size)
        if read_dictionary is not None:
            read_options.update(dictionary_columns=read_dictionary)

        if decryption_properties is not None:
            read_options.update(decryption_properties=decryption_properties)

        self._filter_expression = None
        if filters is not None:
            self._filter_expression = filters_to_expression(filters)

        # map old filesystems to new one
        if filesystem is not None:
            filesystem = _ensure_filesystem(
                filesystem, use_mmap=memory_map)
        elif filesystem is None and memory_map:
            # if memory_map is specified, assume local file system (string
            # path can in principle be URI for any filesystem)
            filesystem = LocalFileSystem(use_mmap=memory_map)

        # This needs to be checked after _ensure_filesystem, because that
        # handles the case of an fsspec LocalFileSystem
        if (
            hasattr(path_or_paths, "__fspath__") and
            filesystem is not None and
            not _is_local_file_system(filesystem)
        ):
            raise TypeError(
                "Path-like objects with __fspath__ must only be used with "
                f"local file systems, not {type(filesystem)}"
            )

        # check for single fragment dataset
        single_file = None
        self._base_dir = None
        if not isinstance(path_or_paths, list):
            if _is_path_like(path_or_paths):
                path_or_paths = _stringify_path(path_or_paths)
                if filesystem is None:
                    # path might be a URI describing the FileSystem as well
                    try:
                        filesystem, path_or_paths = FileSystem.from_uri(
                            path_or_paths)
                    except ValueError:
                        filesystem = LocalFileSystem(use_mmap=memory_map)
                finfo = filesystem.get_file_info(path_or_paths)
                if finfo.is_file:
                    single_file = path_or_paths
                if finfo.type == FileType.Directory:
                    self._base_dir = path_or_paths
            else:
                single_file = path_or_paths

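        # All reader-level options collected above are passed through the
        # dataset API as ParquetFileFormat read options.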
        parquet_format = ds.ParquetFileFormat(**read_options)

        if single_file is not None:
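            # A single file is wrapped directly in a FileSystemDataset,
            # skipping directory discovery entirely.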
            fragment = parquet_format.make_fragment(single_file, filesystem)

            self._dataset = ds.FileSystemDataset(
                [fragment], schema=schema or fragment.physical_schema,
                format=parquet_format,
                filesystem=fragment.filesystem
            )
            return

        # check partitioning to enable dictionary encoding
        if partitioning == "hive":
            partitioning = ds.HivePartitioning.discover(
                infer_dictionary=True)

        self._dataset = ds.dataset(path_or_paths, filesystem=filesystem,
                                   schema=schema, format=parquet_format,
                                   partitioning=partitioning,
                                   ignore_prefixes=ignore_prefixes)

    def equals(self, other):
        if not isinstance(other, ParquetDataset):
            raise TypeError('`other` must be an instance of ParquetDataset')

        return (self.schema == other.schema and
                self._dataset.format == other._dataset.format and
                self.filesystem == other.filesystem and
                # self.fragments == other.fragments and
                self.files == other.files)

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    @property
    def schema(self):
        """
        Schema of the Dataset.

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_v2_schema',
        ...                     partition_cols=['year'])
        >>> dataset = pq.ParquetDataset('dataset_v2_schema/')

        Read the schema:

        >>> dataset.schema
        n_legs: int64
        animal: string
        year: dictionary<values=int32, indices=int32, ordered=0>
        """
        return self._dataset.schema

    def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
        """
        Read (multiple) Parquet files as a single pyarrow.Table.

        Parameters
        ----------
        columns : List[str]
            Names of columns to read from the dataset. The partition fields
            are not automatically included.
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Returns
        -------
        pyarrow.Table
            Content of the file as a table (of columns).

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_v2_read',
        ...                     partition_cols=['year'])
        >>> dataset = pq.ParquetDataset('dataset_v2_read/')

        Read the dataset:

        >>> dataset.read(columns=["n_legs"])
        pyarrow.Table
        n_legs: int64
        ----
        n_legs: [[5],[2],[4,100],[2,4]]
        """
        # if use_pandas_metadata, we need to include index columns in the
        # column selection, to be able to restore those in the pandas DataFrame
        metadata = self.schema.metadata or {}

        if use_pandas_metadata:
            # if the dataset schema metadata itself doesn't have pandas
            # then try to get this from common file (for backwards compat)
            if b"pandas" not in metadata:
                common_metadata = self._get_common_pandas_metadata()
                if common_metadata:
                    metadata = common_metadata

        if columns is not None and use_pandas_metadata:
            if metadata and b'pandas' in metadata:
                # RangeIndex can be represented as dict instead of column name
                index_columns = [
                    col for col in _get_pandas_index_columns(metadata)
                    if not isinstance(col, dict)
                ]
                columns = (
                    list(columns) + list(set(index_columns) - set(columns))
                )

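        # The filter expression (if any) is pushed down into the dataset
        # scan rather than applied after reading.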
        table = self._dataset.to_table(
            columns=columns, filter=self._filter_expression,
            use_threads=use_threads
        )

        # if use_pandas_metadata, restore the pandas metadata (which gets
        # lost if doing a specific `columns` selection in to_table)
        if use_pandas_metadata:
            if metadata and b"pandas" in metadata:
                new_metadata = table.schema.metadata or {}
                new_metadata.update({b"pandas": metadata[b"pandas"]})
                table = table.replace_schema_metadata(new_metadata)

        return table

    def _get_common_pandas_metadata(self):

        if not self._base_dir:
            return None

        metadata = None
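        # Prefer _common_metadata but fall back to _metadata, stopping at
        # the first sidecar file whose key-value metadata has a pandas entry.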
        for name in ["_common_metadata", "_metadata"]:
            metadata_path = os.path.join(str(self._base_dir), name)
            finfo = self.filesystem.get_file_info(metadata_path)
            if finfo.is_file:
                pq_meta = read_metadata(
                    metadata_path, filesystem=self.filesystem)
                metadata = pq_meta.metadata
                if metadata and b'pandas' in metadata:
                    break

        return metadata

    def read_pandas(self, **kwargs):
        """
        Read dataset including pandas metadata, if any. Other arguments passed
        through to :func:`read`, see docstring for further details.

        Parameters
        ----------
        **kwargs : optional
            Additional options for :func:`read`

        Examples
        --------
        Generate an example parquet file:

        >>> import pyarrow as pa
        >>> import pandas as pd
        >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                    'n_legs': [2, 2, 4, 4, 5, 100],
        ...                    'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                               "Brittle stars", "Centipede"]})
        >>> table = pa.Table.from_pandas(df)
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'table_V2.parquet')
        >>> dataset = pq.ParquetDataset('table_V2.parquet')

        Read the dataset with pandas metadata:

        >>> dataset.read_pandas(columns=["n_legs"])
        pyarrow.Table
        n_legs: int64
        ----
        n_legs: [[2,2,4,4,5,100]]

        >>> dataset.read_pandas(columns=["n_legs"]).schema.pandas_metadata
        {'index_columns': [{'kind': 'range', 'name': None, 'start': 0, ...}
        """
        return self.read(use_pandas_metadata=True, **kwargs)

    @property
    def fragments(self):
        """
        A list of the Dataset source fragments or pieces with absolute
        file paths.

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_v2_fragments',
        ...                     partition_cols=['year'])
        >>> dataset = pq.ParquetDataset('dataset_v2_fragments/')

        List the fragments:

        >>> dataset.fragments
        [<pyarrow.dataset.ParquetFileFragment path=dataset_v2_fragments/...
        """
        return list(self._dataset.get_fragments())

    @property
    def files(self):
        """
        A list of absolute Parquet file paths in the Dataset source.

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_v2_files',
        ...                     partition_cols=['year'])
        >>> dataset = pq.ParquetDataset('dataset_v2_files/')

        List the files:

        >>> dataset.files
        ['dataset_v2_files/year=2019/...-0.parquet', ...
        """
        return self._dataset.files

    @property
    def filesystem(self):
        """
        The filesystem type of the Dataset source.
        """
        return self._dataset.filesystem

    @property
    def partitioning(self):
        """
        The partitioning of the Dataset source, if discovered.
        """
        return self._dataset.partitioning


_read_table_docstring = """
{0}

Parameters
----------
source : str, pyarrow.NativeFile, or file-like object
    If a string is passed, it can be a single file name or directory name.
    For file-like objects, only read a single file. Use pyarrow.BufferReader
    to read a file contained in a bytes or buffer-like object.
columns : list
    If not None, only these columns will be read from the file. A column
    name may be a prefix of a nested field, e.g. 'a' will select 'a.b',
    'a.c', and 'a.d.e'. If empty, no columns will be read. Note
    that the table will still have the correct num_rows set despite having
    no columns.
use_threads : bool, default True
    Perform multi-threaded column reads.
schema : Schema, optional
    Optionally provide the Schema for the parquet dataset, in which case it
    will not be inferred from the source.
{1}
filesystem : FileSystem, default None
    If nothing passed, will be inferred based on path.
    The path will first be looked up in the local on-disk filesystem;
    otherwise it is parsed as a URI to determine the filesystem.
filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None
    Rows which do not match the filter predicate will be removed from scanned
    data. Partition keys embedded in a nested directory structure will be
    exploited to avoid loading files at all if they contain no matching rows.
    Within-file level filtering and different partitioning schemes are supported.

    {3}
use_legacy_dataset : bool, optional
    Deprecated and has no effect from PyArrow version 15.0.0.
ignore_prefixes : list, optional
    Files matching any of these prefixes will be ignored by the
    discovery process.
    This is matched to the basename of a path.
    By default this is ['.', '_'].
    Note that discovery happens only if a directory is passed as source.
pre_buffer : bool, default True
    Coalesce and issue file reads in parallel to improve performance on
    high-latency filesystems (e.g. S3). If True, Arrow will use a
    background I/O thread pool. If using a filesystem layer that itself
    performs readahead (e.g. fsspec's S3FS), disable readahead for best
    results.
coerce_int96_timestamp_unit : str, default None
    Cast timestamps that are stored in INT96 format to a particular
    resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
    and therefore INT96 timestamps will be inferred as timestamps
    in nanoseconds.
decryption_properties : FileDecryptionProperties or None
    File-level decryption properties.
    The decryption properties can be created using
    ``CryptoFactory.file_decryption_properties()``.
thrift_string_size_limit : int, default None
    If not None, override the maximum total string size allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.
thrift_container_size_limit : int, default None
    If not None, override the maximum total size of containers allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.
page_checksum_verification : bool, default False
    If True, verify the checksum for each page read from the file.

Returns
-------
{2}

{4}
"""

_read_table_example = """\

Examples
--------

Generate an example PyArrow Table and write it to a partitioned dataset:

>>> import pyarrow as pa
>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
...                   'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})
>>> import pyarrow.parquet as pq
>>> pq.write_to_dataset(table, root_path='dataset_name_2',
...                     partition_cols=['year'])

Read the data:

>>> pq.read_table('dataset_name_2').to_pandas()
   n_legs         animal  year
0       5  Brittle stars  2019
1       2       Flamingo  2020
2       4            Dog  2021
3     100      Centipede  2021
4       2         Parrot  2022
5       4          Horse  2022


Read only a subset of columns:

>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"])
pyarrow.Table
n_legs: int64
animal: string
----
n_legs: [[5],[2],[4,100],[2,4]]
animal: [["Brittle stars"],["Flamingo"],["Dog","Centipede"],["Parrot","Horse"]]

Read a subset of columns and read one column as DictionaryArray:

>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"],
...               read_dictionary=["animal"])
pyarrow.Table
n_legs: int64
animal: dictionary<values=string, indices=int32, ordered=0>
----
n_legs: [[5],[2],[4,100],[2,4]]
animal: [  -- dictionary:
["Brittle stars"]  -- indices:
[0],  -- dictionary:
["Flamingo"]  -- indices:
[0],  -- dictionary:
["Dog","Centipede"]  -- indices:
[0,1],  -- dictionary:
["Parrot","Horse"]  -- indices:
[0,1]]

Read the table with filter:

>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"],
...               filters=[('n_legs','<',4)]).to_pandas()
   n_legs    animal
0       2  Flamingo
1       2    Parrot

Read data from a single Parquet file:

>>> pq.write_table(table, 'example.parquet')
>>> pq.read_table('dataset_name_2').to_pandas()
   n_legs         animal  year
0       5  Brittle stars  2019
1       2       Flamingo  2020
2       4            Dog  2021
3     100      Centipede  2021
4       2         Parrot  2022
5       4          Horse  2022
"""


def read_table(source, *, columns=None, use_threads=True,
               schema=None, use_pandas_metadata=False, read_dictionary=None,
               memory_map=False, buffer_size=0, partitioning="hive",
               filesystem=None, filters=None, use_legacy_dataset=None,
               ignore_prefixes=None, pre_buffer=True,
               coerce_int96_timestamp_unit=None,
               decryption_properties=None, thrift_string_size_limit=None,
               thrift_container_size_limit=None,
               page_checksum_verification=False):

    if use_legacy_dataset is not None:
        warnings.warn(
            "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 "
            "and will be removed in a future version.",
            FutureWarning, stacklevel=2)

    try:
        dataset = ParquetDataset(
            source,
            schema=schema,
            filesystem=filesystem,
            partitioning=partitioning,
            memory_map=memory_map,
            read_dictionary=read_dictionary,
            buffer_size=buffer_size,
            filters=filters,
            ignore_prefixes=ignore_prefixes,
            pre_buffer=pre_buffer,
            coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
            thrift_string_size_limit=thrift_string_size_limit,
            thrift_container_size_limit=thrift_container_size_limit,
            page_checksum_verification=page_checksum_verification,
        )
    except ImportError:
        # fall back on ParquetFile for simple cases when pyarrow.dataset
        # module is not available
        if filters is not None:
            raise ValueError(
                "the 'filters' keyword is not supported when the "
                "pyarrow.dataset module is not available"
            )
        if partitioning != "hive":
            raise ValueError(
                "the 'partitioning' keyword is not supported when the "
                "pyarrow.dataset module is not available"
            )
        if schema is not None:
            raise ValueError(
                "the 'schema' argument is not supported when the "
                "pyarrow.dataset module is not available"
            )
        filesystem, path = _resolve_filesystem_and_path(source, filesystem)
        if filesystem is not None:
            source = filesystem.open_input_file(path)
        # TODO test that source is not a directory or a list
        dataset = ParquetFile(
            source, read_dictionary=read_dictionary,
            memory_map=memory_map, buffer_size=buffer_size,
            pre_buffer=pre_buffer,
            coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
            decryption_properties=decryption_properties,
            thrift_string_size_limit=thrift_string_size_limit,
            thrift_container_size_limit=thrift_container_size_limit,
            page_checksum_verification=page_checksum_verification,
        )

    return dataset.read(columns=columns, use_threads=use_threads,
                        use_pandas_metadata=use_pandas_metadata)


read_table.__doc__ = _read_table_docstring.format(
    """Read a Table from Parquet format""",
    "\n".join(("""use_pandas_metadata : bool, default False
    If True and file has custom pandas schema metadata, ensure that
    index columns are also loaded.""", _read_docstring_common)),
    """pyarrow.Table
    Content of the file as a table (of columns)""",
    _DNF_filter_doc, _read_table_example)


def read_pandas(source, columns=None, **kwargs):
    return read_table(
        source, columns=columns, use_pandas_metadata=True, **kwargs
    )


read_pandas.__doc__ = _read_table_docstring.format(
    'Read a Table from Parquet format, also reading DataFrame\n'
    'index values if known in the file metadata',
    "\n".join((_read_docstring_common,
               """**kwargs
    additional options for :func:`read_table`""")),
    """pyarrow.Table
    Content of the file as a Table of Columns, including DataFrame
    indexes as columns""",
    _DNF_filter_doc, "")


def write_table(table, where, row_group_size=None, version='2.6',
                use_dictionary=True, compression='snappy',
                write_statistics=True,
                use_deprecated_int96_timestamps=None,
                coerce_timestamps=None,
                allow_truncated_timestamps=False,
                data_page_size=None, flavor=None,
                filesystem=None,
                compression_level=None,
                use_byte_stream_split=False,
                column_encoding=None,
                data_page_version='1.0',
                use_compliant_nested_type=True,
                encryption_properties=None,
                write_batch_size=None,
                dictionary_pagesize_limit=None,
                store_schema=True,
                write_page_index=False,
                write_page_checksum=False,
                sorting_columns=None,
                **kwargs):
    # Implementor's note: when adding keywords here / updating defaults, also
    # update it in write_to_dataset and _dataset_parquet.pyx ParquetFileWriteOptions
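    # 'chunk_size' is accepted as a legacy alias for row_group_size.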
    row_group_size = kwargs.pop('chunk_size', row_group_size)
    use_int96 = use_deprecated_int96_timestamps
    try:
        with ParquetWriter(
                where, table.schema,
                filesystem=filesystem,
                version=version,
                flavor=flavor,
                use_dictionary=use_dictionary,
                write_statistics=write_statistics,
                coerce_timestamps=coerce_timestamps,
                data_page_size=data_page_size,
                allow_truncated_timestamps=allow_truncated_timestamps,
                compression=compression,
                use_deprecated_int96_timestamps=use_int96,
                compression_level=compression_level,
                use_byte_stream_split=use_byte_stream_split,
                column_encoding=column_encoding,
                data_page_version=data_page_version,
                use_compliant_nested_type=use_compliant_nested_type,
                encryption_properties=encryption_properties,
                write_batch_size=write_batch_size,
                dictionary_pagesize_limit=dictionary_pagesize_limit,
                store_schema=store_schema,
                write_page_index=write_page_index,
                write_page_checksum=write_page_checksum,
                sorting_columns=sorting_columns,
                **kwargs) as writer:
            writer.write_table(table, row_group_size=row_group_size)
    except Exception:
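        # Best-effort cleanup: remove the partially written file so a
        # corrupt Parquet file is not left behind, then re-raise.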
        if _is_path_like(where):
            try:
                os.remove(_stringify_path(where))
            except os.error:
                pass
        raise


_write_table_example = """\
Generate an example PyArrow Table:

>>> import pyarrow as pa
>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})

and write the Table into Parquet file:

>>> import pyarrow.parquet as pq
>>> pq.write_table(table, 'example.parquet')

Defining row group size for the Parquet file:

>>> pq.write_table(table, 'example.parquet', row_group_size=3)

Defining row group compression (default is Snappy):

>>> pq.write_table(table, 'example.parquet', compression='none')

Defining row group compression and encoding per-column:

>>> pq.write_table(table, 'example.parquet',
...                compression={'n_legs': 'snappy', 'animal': 'gzip'},
...                use_dictionary=['n_legs', 'animal'])

Defining column encoding per-column:

>>> pq.write_table(table, 'example.parquet',
...                column_encoding={'animal':'PLAIN'},
...                use_dictionary=False)
"""

write_table.__doc__ = """
Write a Table to Parquet format.

Parameters
----------
table : pyarrow.Table
where : string or pyarrow.NativeFile
row_group_size : int
    Maximum number of rows in each written row group. If None, the
    row group size will be the minimum of the Table size and
    1024 * 1024.
{}
**kwargs : optional
    Additional options for ParquetWriter

Examples
--------
{}
""".format(_parquet_writer_arg_docs, _write_table_example)


def write_to_dataset(table, root_path, partition_cols=None,
                     filesystem=None, use_legacy_dataset=None,
                     schema=None, partitioning=None,
                     basename_template=None, use_threads=None,
                     file_visitor=None, existing_data_behavior=None,
                     **kwargs):
    """Wrapper around dataset.write_dataset for writing a Table to
    Parquet format by partitions.
    For each combination of partition columns and values,
    subdirectories are created in the following
    manner:

    root_dir/
      group1=value1
        group2=value1
          <uuid>.parquet
        group2=value2
          <uuid>.parquet
      group1=valueN
        group2=value1
          <uuid>.parquet
        group2=valueN
          <uuid>.parquet

    Parameters
    ----------
    table : pyarrow.Table
    root_path : str, pathlib.Path
        The root directory of the dataset.
    partition_cols : list
        Column names by which to partition the dataset.
        Columns are partitioned in the order they are given.
    filesystem : FileSystem, default None
        If nothing passed, will be inferred based on path.
        The path will first be looked up in the local on-disk filesystem;
        otherwise it is parsed as a URI to determine the filesystem.
    use_legacy_dataset : bool, optional
        Deprecated and has no effect from PyArrow version 15.0.0.
    schema : Schema, optional
        The schema of the dataset.
    partitioning : Partitioning or list[str], optional
        The partitioning scheme specified with the
        ``pyarrow.dataset.partitioning()`` function or a list of field names.
        When providing a list of field names, you can use
        ``partitioning_flavor`` to drive which partitioning type should be
        used.
    basename_template : str, optional
        A template string used to generate basenames of written data files.
        The token '{i}' will be replaced with an automatically incremented
        integer. If not specified, it defaults to "guid-{i}.parquet".
    use_threads : bool, default True
        Write files in parallel. If enabled, maximum parallelism, as
        determined by the number of available CPU cores, will be used.
    file_visitor : function
|
2027 |
+
If set, this function will be called with a WrittenFile instance
|
2028 |
+
for each file created during the call. This object will have both
|
2029 |
+
a path attribute and a metadata attribute.
|
2030 |
+
|
2031 |
+
The path attribute will be a string containing the path to
|
2032 |
+
the created file.
|
2033 |
+
|
2034 |
+
The metadata attribute will be the parquet metadata of the file.
|
2035 |
+
This metadata will have the file path attribute set and can be used
|
2036 |
+
to build a _metadata file. The metadata attribute will be None if
|
2037 |
+
the format is not parquet.
|
2038 |
+
|
2039 |
+
Example visitor which simple collects the filenames created::
|
2040 |
+
|
2041 |
+
visited_paths = []
|
2042 |
+
|
2043 |
+
def file_visitor(written_file):
|
2044 |
+
visited_paths.append(written_file.path)
|
2045 |
+
|
2046 |
+
existing_data_behavior : 'overwrite_or_ignore' | 'error' | \
|
2047 |
+
'delete_matching'
|
2048 |
+
Controls how the dataset will handle data that already exists in
|
2049 |
+
the destination. The default behaviour is 'overwrite_or_ignore'.
|
2050 |
+
|
2051 |
+
'overwrite_or_ignore' will ignore any existing data and will
|
2052 |
+
overwrite files with the same name as an output file. Other
|
2053 |
+
existing files will be ignored. This behavior, in combination
|
2054 |
+
with a unique basename_template for each write, will allow for
|
2055 |
+
an append workflow.
|
2056 |
+
|
2057 |
+
'error' will raise an error if any data exists in the destination.
|
2058 |
+
|
2059 |
+
'delete_matching' is useful when you are writing a partitioned
|
2060 |
+
dataset. The first time each partition directory is encountered
|
2061 |
+
the entire directory will be deleted. This allows you to overwrite
|
2062 |
+
old partitions completely.
|
2063 |
+
**kwargs : dict,
|
2064 |
+
Used as additional kwargs for :func:`pyarrow.dataset.write_dataset`
|
2065 |
+
function for matching kwargs, and remainder to
|
2066 |
+
:func:`pyarrow.dataset.ParquetFileFormat.make_write_options`.
|
2067 |
+
See the docstring of :func:`write_table` and
|
2068 |
+
:func:`pyarrow.dataset.write_dataset` for the available options.
|
2069 |
+
Using `metadata_collector` in kwargs allows one to collect the
|
2070 |
+
file metadata instances of dataset pieces. The file paths in the
|
2071 |
+
ColumnChunkMetaData will be set relative to `root_path`.
|
2072 |
+
|
2073 |
+
Examples
|
2074 |
+
--------
|
2075 |
+
Generate an example PyArrow Table:
|
2076 |
+
|
2077 |
+
>>> import pyarrow as pa
|
2078 |
+
>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
|
2079 |
+
... 'n_legs': [2, 2, 4, 4, 5, 100],
|
2080 |
+
... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
|
2081 |
+
... "Brittle stars", "Centipede"]})
|
2082 |
+
|
2083 |
+
and write it to a partitioned dataset:
|
2084 |
+
|
2085 |
+
>>> import pyarrow.parquet as pq
|
2086 |
+
>>> pq.write_to_dataset(table, root_path='dataset_name_3',
|
2087 |
+
... partition_cols=['year'])
|
2088 |
+
>>> pq.ParquetDataset('dataset_name_3').files
|
2089 |
+
['dataset_name_3/year=2019/...-0.parquet', ...
|
2090 |
+
|
2091 |
+
Write a single Parquet file into the root folder:
|
2092 |
+
|
2093 |
+
>>> pq.write_to_dataset(table, root_path='dataset_name_4')
|
2094 |
+
>>> pq.ParquetDataset('dataset_name_4/').files
|
2095 |
+
['dataset_name_4/...-0.parquet']
|
2096 |
+
"""
|
2097 |
+
if use_legacy_dataset is not None:
|
2098 |
+
warnings.warn(
|
2099 |
+
"Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 "
|
2100 |
+
"and will be removed in a future version.",
|
2101 |
+
FutureWarning, stacklevel=2)
|
2102 |
+
|
2103 |
+
metadata_collector = kwargs.pop('metadata_collector', None)
|
2104 |
+
|
2105 |
+
# Check for conflicting keywords
|
2106 |
+
msg_confl = (
|
2107 |
+
"The '{1}' argument is not supported. "
|
2108 |
+
"Use only '{0}' instead."
|
2109 |
+
)
|
2110 |
+
if partition_cols is not None and partitioning is not None:
|
2111 |
+
raise ValueError(msg_confl.format("partitioning",
|
2112 |
+
"partition_cols"))
|
2113 |
+
|
2114 |
+
if metadata_collector is not None and file_visitor is not None:
|
2115 |
+
raise ValueError(msg_confl.format("file_visitor",
|
2116 |
+
"metadata_collector"))
|
2117 |
+
|
2118 |
+
import pyarrow.dataset as ds
|
2119 |
+
|
2120 |
+
# extract write_dataset specific options
|
2121 |
+
# reset assumed to go to make_write_options
|
2122 |
+
write_dataset_kwargs = dict()
|
2123 |
+
for key in inspect.signature(ds.write_dataset).parameters:
|
2124 |
+
if key in kwargs:
|
2125 |
+
write_dataset_kwargs[key] = kwargs.pop(key)
|
2126 |
+
write_dataset_kwargs['max_rows_per_group'] = kwargs.pop(
|
2127 |
+
'row_group_size', kwargs.pop("chunk_size", None)
|
2128 |
+
)
|
2129 |
+
|
2130 |
+
if metadata_collector is not None:
|
2131 |
+
def file_visitor(written_file):
|
2132 |
+
metadata_collector.append(written_file.metadata)
|
2133 |
+
|
2134 |
+
# map format arguments
|
2135 |
+
parquet_format = ds.ParquetFileFormat()
|
2136 |
+
write_options = parquet_format.make_write_options(**kwargs)
|
2137 |
+
|
2138 |
+
# map old filesystems to new one
|
2139 |
+
if filesystem is not None:
|
2140 |
+
filesystem = _ensure_filesystem(filesystem)
|
2141 |
+
|
2142 |
+
if partition_cols:
|
2143 |
+
part_schema = table.select(partition_cols).schema
|
2144 |
+
partitioning = ds.partitioning(part_schema, flavor="hive")
|
2145 |
+
|
2146 |
+
if basename_template is None:
|
2147 |
+
basename_template = guid() + '-{i}.parquet'
|
2148 |
+
|
2149 |
+
if existing_data_behavior is None:
|
2150 |
+
existing_data_behavior = 'overwrite_or_ignore'
|
2151 |
+
|
2152 |
+
ds.write_dataset(
|
2153 |
+
table, root_path, filesystem=filesystem,
|
2154 |
+
format=parquet_format, file_options=write_options, schema=schema,
|
2155 |
+
partitioning=partitioning, use_threads=use_threads,
|
2156 |
+
file_visitor=file_visitor,
|
2157 |
+
basename_template=basename_template,
|
2158 |
+
existing_data_behavior=existing_data_behavior,
|
2159 |
+
**write_dataset_kwargs)
|
2160 |
+
return
|
2161 |
+
|
2162 |
+
|
2163 |
+
def write_metadata(schema, where, metadata_collector=None, filesystem=None,
|
2164 |
+
**kwargs):
|
2165 |
+
"""
|
2166 |
+
Write metadata-only Parquet file from schema. This can be used with
|
2167 |
+
`write_to_dataset` to generate `_common_metadata` and `_metadata` sidecar
|
2168 |
+
files.
|
2169 |
+
|
2170 |
+
Parameters
|
2171 |
+
----------
|
2172 |
+
schema : pyarrow.Schema
|
2173 |
+
where : string or pyarrow.NativeFile
|
2174 |
+
metadata_collector : list
|
2175 |
+
where to collect metadata information.
|
2176 |
+
filesystem : FileSystem, default None
|
2177 |
+
If nothing passed, will be inferred from `where` if path-like, else
|
2178 |
+
`where` is already a file-like object so no filesystem is needed.
|
2179 |
+
**kwargs : dict,
|
2180 |
+
Additional kwargs for ParquetWriter class. See docstring for
|
2181 |
+
`ParquetWriter` for more information.
|
2182 |
+
|
2183 |
+
Examples
|
2184 |
+
--------
|
2185 |
+
Generate example data:
|
2186 |
+
|
2187 |
+
>>> import pyarrow as pa
|
2188 |
+
>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
|
2189 |
+
... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
|
2190 |
+
... "Brittle stars", "Centipede"]})
|
2191 |
+
|
2192 |
+
Write a dataset and collect metadata information.
|
2193 |
+
|
2194 |
+
>>> metadata_collector = []
|
2195 |
+
>>> import pyarrow.parquet as pq
|
2196 |
+
>>> pq.write_to_dataset(
|
2197 |
+
... table, 'dataset_metadata',
|
2198 |
+
... metadata_collector=metadata_collector)
|
2199 |
+
|
2200 |
+
Write the `_common_metadata` parquet file without row groups statistics.
|
2201 |
+
|
2202 |
+
>>> pq.write_metadata(
|
2203 |
+
... table.schema, 'dataset_metadata/_common_metadata')
|
2204 |
+
|
2205 |
+
Write the `_metadata` parquet file with row groups statistics.
|
2206 |
+
|
2207 |
+
>>> pq.write_metadata(
|
2208 |
+
... table.schema, 'dataset_metadata/_metadata',
|
2209 |
+
... metadata_collector=metadata_collector)
|
2210 |
+
"""
|
2211 |
+
filesystem, where = _resolve_filesystem_and_path(where, filesystem)
|
2212 |
+
|
2213 |
+
if hasattr(where, "seek"): # file-like
|
2214 |
+
cursor_position = where.tell()
|
2215 |
+
|
2216 |
+
writer = ParquetWriter(where, schema, filesystem, **kwargs)
|
2217 |
+
writer.close()
|
2218 |
+
|
2219 |
+
if metadata_collector is not None:
|
2220 |
+
# ParquetWriter doesn't expose the metadata until it's written. Write
|
2221 |
+
# it and read it again.
|
2222 |
+
metadata = read_metadata(where, filesystem=filesystem)
|
2223 |
+
if hasattr(where, "seek"):
|
2224 |
+
where.seek(cursor_position) # file-like, set cursor back.
|
2225 |
+
|
2226 |
+
for m in metadata_collector:
|
2227 |
+
metadata.append_row_groups(m)
|
2228 |
+
if filesystem is not None:
|
2229 |
+
with filesystem.open_output_stream(where) as f:
|
2230 |
+
metadata.write_metadata_file(f)
|
2231 |
+
else:
|
2232 |
+
metadata.write_metadata_file(where)
|
2233 |
+
|
2234 |
+
|
2235 |
+
def read_metadata(where, memory_map=False, decryption_properties=None,
|
2236 |
+
filesystem=None):
|
2237 |
+
"""
|
2238 |
+
Read FileMetaData from footer of a single Parquet file.
|
2239 |
+
|
2240 |
+
Parameters
|
2241 |
+
----------
|
2242 |
+
where : str (file path) or file-like object
|
2243 |
+
memory_map : bool, default False
|
2244 |
+
Create memory map when the source is a file path.
|
2245 |
+
decryption_properties : FileDecryptionProperties, default None
|
2246 |
+
Decryption properties for reading encrypted Parquet files.
|
2247 |
+
filesystem : FileSystem, default None
|
2248 |
+
If nothing passed, will be inferred based on path.
|
2249 |
+
Path will try to be found in the local on-disk filesystem otherwise
|
2250 |
+
it will be parsed as an URI to determine the filesystem.
|
2251 |
+
|
2252 |
+
Returns
|
2253 |
+
-------
|
2254 |
+
metadata : FileMetaData
|
2255 |
+
The metadata of the Parquet file
|
2256 |
+
|
2257 |
+
Examples
|
2258 |
+
--------
|
2259 |
+
>>> import pyarrow as pa
|
2260 |
+
>>> import pyarrow.parquet as pq
|
2261 |
+
>>> table = pa.table({'n_legs': [4, 5, 100],
|
2262 |
+
... 'animal': ["Dog", "Brittle stars", "Centipede"]})
|
2263 |
+
>>> pq.write_table(table, 'example.parquet')
|
2264 |
+
|
2265 |
+
>>> pq.read_metadata('example.parquet')
|
2266 |
+
<pyarrow._parquet.FileMetaData object at ...>
|
2267 |
+
created_by: parquet-cpp-arrow version ...
|
2268 |
+
num_columns: 2
|
2269 |
+
num_rows: 3
|
2270 |
+
num_row_groups: 1
|
2271 |
+
format_version: 2.6
|
2272 |
+
serialized_size: ...
|
2273 |
+
"""
|
2274 |
+
filesystem, where = _resolve_filesystem_and_path(where, filesystem)
|
2275 |
+
file_ctx = nullcontext()
|
2276 |
+
if filesystem is not None:
|
2277 |
+
file_ctx = where = filesystem.open_input_file(where)
|
2278 |
+
|
2279 |
+
with file_ctx:
|
2280 |
+
file = ParquetFile(where, memory_map=memory_map,
|
2281 |
+
decryption_properties=decryption_properties)
|
2282 |
+
return file.metadata
|
2283 |
+
|
2284 |
+
|
2285 |
+
def read_schema(where, memory_map=False, decryption_properties=None,
|
2286 |
+
filesystem=None):
|
2287 |
+
"""
|
2288 |
+
Read effective Arrow schema from Parquet file metadata.
|
2289 |
+
|
2290 |
+
Parameters
|
2291 |
+
----------
|
2292 |
+
where : str (file path) or file-like object
|
2293 |
+
memory_map : bool, default False
|
2294 |
+
Create memory map when the source is a file path.
|
2295 |
+
decryption_properties : FileDecryptionProperties, default None
|
2296 |
+
Decryption properties for reading encrypted Parquet files.
|
2297 |
+
filesystem : FileSystem, default None
|
2298 |
+
If nothing passed, will be inferred based on path.
|
2299 |
+
Path will try to be found in the local on-disk filesystem otherwise
|
2300 |
+
it will be parsed as an URI to determine the filesystem.
|
2301 |
+
|
2302 |
+
Returns
|
2303 |
+
-------
|
2304 |
+
schema : pyarrow.Schema
|
2305 |
+
The schema of the Parquet file
|
2306 |
+
|
2307 |
+
Examples
|
2308 |
+
--------
|
2309 |
+
>>> import pyarrow as pa
|
2310 |
+
>>> import pyarrow.parquet as pq
|
2311 |
+
>>> table = pa.table({'n_legs': [4, 5, 100],
|
2312 |
+
... 'animal': ["Dog", "Brittle stars", "Centipede"]})
|
2313 |
+
>>> pq.write_table(table, 'example.parquet')
|
2314 |
+
|
2315 |
+
>>> pq.read_schema('example.parquet')
|
2316 |
+
n_legs: int64
|
2317 |
+
animal: string
|
2318 |
+
"""
|
2319 |
+
filesystem, where = _resolve_filesystem_and_path(where, filesystem)
|
2320 |
+
file_ctx = nullcontext()
|
2321 |
+
if filesystem is not None:
|
2322 |
+
file_ctx = where = filesystem.open_input_file(where)
|
2323 |
+
|
2324 |
+
with file_ctx:
|
2325 |
+
file = ParquetFile(
|
2326 |
+
where, memory_map=memory_map,
|
2327 |
+
decryption_properties=decryption_properties)
|
2328 |
+
return file.schema.to_arrow_schema()
|
2329 |
+
|
2330 |
+
|
2331 |
+
__all__ = (
|
2332 |
+
"ColumnChunkMetaData",
|
2333 |
+
"ColumnSchema",
|
2334 |
+
"FileDecryptionProperties",
|
2335 |
+
"FileEncryptionProperties",
|
2336 |
+
"FileMetaData",
|
2337 |
+
"ParquetDataset",
|
2338 |
+
"ParquetFile",
|
2339 |
+
"ParquetLogicalType",
|
2340 |
+
"ParquetReader",
|
2341 |
+
"ParquetSchema",
|
2342 |
+
"ParquetWriter",
|
2343 |
+
"RowGroupMetaData",
|
2344 |
+
"SortingColumn",
|
2345 |
+
"Statistics",
|
2346 |
+
"read_metadata",
|
2347 |
+
"read_pandas",
|
2348 |
+
"read_schema",
|
2349 |
+
"read_table",
|
2350 |
+
"write_metadata",
|
2351 |
+
"write_table",
|
2352 |
+
"write_to_dataset",
|
2353 |
+
"_filters_to_expression",
|
2354 |
+
"filters_to_expression",
|
2355 |
+
)
|
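Taken together, write_to_dataset and write_metadata above support the usual `_metadata` sidecar workflow: collect per-file FileMetaData while writing, then aggregate it into a sidecar. A minimal sketch, assuming a local, hypothetical `dataset_root` directory and using only the calls documented above:

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({'n_legs': [2, 4], 'animal': ['Flamingo', 'Dog']})

# Collect FileMetaData for each piece written; per the docstring, the
# file paths recorded in the metadata are set relative to the root path.
metadata_collector = []
pq.write_to_dataset(table, 'dataset_root',
                    metadata_collector=metadata_collector)

# Aggregate the collected row groups into the _metadata sidecar file.
pq.write_metadata(table.schema, 'dataset_root/_metadata',
                  metadata_collector=metadata_collector)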
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/arrow_16597.py
ADDED
@@ -0,0 +1,37 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# This file is called from a test in test_flight.py.
import time

import pyarrow as pa
import pyarrow.flight as flight


class Server(flight.FlightServerBase):
    def do_put(self, context, descriptor, reader, writer):
        time.sleep(1)
        raise flight.FlightCancelledError("")


if __name__ == "__main__":
    server = Server("grpc://localhost:0")
    client = flight.connect(f"grpc://localhost:{server.port}")
    schema = pa.schema([])
    writer, reader = client.do_put(
        flight.FlightDescriptor.for_command(b""), schema)
    writer.done_writing()
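The reproducer above is driven from test_flight.py; the sketch below is only an illustration of how such a script is typically launched in a separate process, with a hypothetical timeout, not the actual test code:

import subprocess
import sys

# Run the reproducer in a fresh interpreter. If the FlightCancelledError
# raised in do_put were to deadlock the client, the subprocess would hang
# and the timeout would fail the calling test instead.
subprocess.run([sys.executable, "arrow_16597.py"], check=True, timeout=30)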
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/arrow_39313.py
ADDED
@@ -0,0 +1,47 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# This file is called from a test in test_pandas.py.

from threading import Thread

import pandas as pd
from pyarrow.pandas_compat import _pandas_api

if __name__ == "__main__":
    wait = True
    num_threads = 10
    df = pd.DataFrame()
    results = []

    def rc():
        while wait:
            pass
        results.append(_pandas_api.is_data_frame(df))

    threads = [Thread(target=rc) for _ in range(num_threads)]

    for t in threads:
        t.start()

    wait = False

    for t in threads:
        t.join()

    assert len(results) == num_threads
    assert all(results), "`is_data_frame` returned False when given a DataFrame"
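The busy-wait above releases all ten threads at once so they hit `_pandas_api.is_data_frame` nearly simultaneously, stressing its lazy initialization under concurrency. A minimal sketch of the general pattern being exercised, lock-guarded lazy initialization with a double check (an illustration, not pyarrow's actual implementation):

import threading

class LazyPandas:
    def __init__(self):
        self._lock = threading.Lock()
        self._pd = None

    def _ensure_loaded(self):
        # Double-checked locking: skip the lock on the fast path once
        # the module has been imported.
        if self._pd is None:
            with self._lock:
                if self._pd is None:
                    import pandas
                    self._pd = pandas

    def is_data_frame(self, obj):
        self._ensure_loaded()
        return isinstance(obj, self._pd.DataFrame)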
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/arrow_7980.py
ADDED
@@ -0,0 +1,30 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# This file is called from a test in test_schema.py.

import pyarrow as pa


# the types where to_pandas_dtype returns a non-numpy dtype
cases = [
    (pa.timestamp('ns', tz='UTC'), "datetime64[ns, UTC]"),
]


for arrow_type, pandas_type in cases:
    assert str(arrow_type.to_pandas_dtype()) == pandas_type
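For contrast with the non-NumPy case listed above, primitive Arrow types map to plain NumPy dtypes; a small hedged check, not part of the file:

import numpy as np
import pyarrow as pa

# Primitive types come back as ordinary NumPy scalar types, which
# np.dtype normalizes for comparison.
assert np.dtype(pa.int64().to_pandas_dtype()) == np.dtype('int64')
assert np.dtype(pa.float32().to_pandas_dtype()) == np.dtype('float32')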
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/bound_function_visit_strings.pyx
ADDED
@@ -0,0 +1,67 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# distutils: language=c++
# cython: language_level = 3

from pyarrow.lib cimport *
from pyarrow.lib import frombytes, tobytes

# basic test to roundtrip through a BoundFunction

ctypedef CStatus visit_string_cb(const c_string&)

cdef extern from * namespace "arrow::py" nogil:
    """
    #include <functional>
    #include <string>
    #include <vector>

    #include "arrow/status.h"

    namespace arrow {
    namespace py {

    Status VisitStrings(const std::vector<std::string>& strs,
                        std::function<Status(const std::string&)> cb) {
      for (const std::string& str : strs) {
        RETURN_NOT_OK(cb(str));
      }
      return Status::OK();
    }

    }  // namespace py
    }  // namespace arrow
    """
    cdef CStatus CVisitStrings" arrow::py::VisitStrings"(
        vector[c_string], function[visit_string_cb])


cdef void _visit_strings_impl(py_cb, const c_string& s) except *:
    py_cb(frombytes(s))


def _visit_strings(strings, cb):
    cdef:
        function[visit_string_cb] c_cb
        vector[c_string] c_strings

    c_cb = BindFunction[visit_string_cb](&_visit_strings_impl, cb)
    for s in strings:
        c_strings.push_back(tobytes(s))

    check_status(CVisitStrings(c_strings, c_cb))
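Once the extension above is compiled, the roundtrip can be exercised from Python. A minimal sketch, assuming the module builds under the name `bound_function_visit_strings`:

from bound_function_visit_strings import _visit_strings

seen = []
# The callback fires once per string; an exception raised inside it is
# carried back through the C++ Status machinery and re-raised by
# check_status.
_visit_strings(["a", "b", "c"], seen.append)
assert seen == ["a", "b", "c"]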
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/conftest.py
ADDED
@@ -0,0 +1,281 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import functools
import os
import pathlib
import subprocess
import sys
import time
import urllib.request

import pytest
import hypothesis as h
from ..conftest import groups, defaults

from pyarrow import set_timezone_db_path
from pyarrow.util import find_free_port


# setup hypothesis profiles
h.settings.register_profile('ci', max_examples=1000)
h.settings.register_profile('dev', max_examples=50)
h.settings.register_profile('debug', max_examples=10,
                            verbosity=h.Verbosity.verbose)

# load default hypothesis profile, either set HYPOTHESIS_PROFILE environment
# variable or pass --hypothesis-profile option to pytest, to see the generated
# examples try:
# pytest pyarrow -sv --enable-hypothesis --hypothesis-profile=debug
h.settings.load_profile(os.environ.get('HYPOTHESIS_PROFILE', 'dev'))

# Set this at the beginning before the AWS SDK is loaded to avoid reading in
# user configuration values.
os.environ['AWS_CONFIG_FILE'] = "/dev/null"


if sys.platform == 'win32':
    tzdata_set_path = os.environ.get('PYARROW_TZDATA_PATH', None)
    if tzdata_set_path:
        set_timezone_db_path(tzdata_set_path)


def pytest_addoption(parser):
    # Create options to selectively enable test groups
    def bool_env(name, default=None):
        value = os.environ.get(name.upper())
        if not value:  # missing or empty
            return default
        value = value.lower()
        if value in {'1', 'true', 'on', 'yes', 'y'}:
            return True
        elif value in {'0', 'false', 'off', 'no', 'n'}:
            return False
        else:
            raise ValueError('{}={} is not parsable as boolean'
                             .format(name.upper(), value))

    for group in groups:
        default = bool_env('PYARROW_TEST_{}'.format(group), defaults[group])
        parser.addoption('--enable-{}'.format(group),
                         action='store_true', default=default,
                         help=('Enable the {} test group'.format(group)))
        parser.addoption('--disable-{}'.format(group),
                         action='store_true', default=False,
                         help=('Disable the {} test group'.format(group)))


class PyArrowConfig:
    def __init__(self):
        self.is_enabled = {}

    def apply_mark(self, mark):
        group = mark.name
        if group in groups:
            self.requires(group)

    def requires(self, group):
        if not self.is_enabled[group]:
            pytest.skip('{} NOT enabled'.format(group))


def pytest_configure(config):
    # Apply command-line options to initialize PyArrow-specific config object
    config.pyarrow = PyArrowConfig()

    for mark in groups:
        config.addinivalue_line(
            "markers", mark,
        )

        enable_flag = '--enable-{}'.format(mark)
        disable_flag = '--disable-{}'.format(mark)

        is_enabled = (config.getoption(enable_flag) and not
                      config.getoption(disable_flag))
        config.pyarrow.is_enabled[mark] = is_enabled


def pytest_runtest_setup(item):
    # Apply test markers to skip tests selectively
    for mark in item.iter_markers():
        item.config.pyarrow.apply_mark(mark)


@pytest.fixture
def tempdir(tmpdir):
    # convert pytest's LocalPath to pathlib.Path
    return pathlib.Path(tmpdir.strpath)


@pytest.fixture(scope='session')
def base_datadir():
    return pathlib.Path(__file__).parent / 'data'


@pytest.fixture(autouse=True)
def disable_aws_metadata(monkeypatch):
    """Stop the AWS SDK from trying to contact the EC2 metadata server.

    Otherwise, this causes a 5 second delay in tests that exercise the
    S3 filesystem.
    """
    monkeypatch.setenv("AWS_EC2_METADATA_DISABLED", "true")


# TODO(kszucs): move the following fixtures to test_fs.py once the previous
# parquet dataset implementation and hdfs implementation are removed.

@pytest.fixture(scope='session')
def hdfs_connection():
    host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
    port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
    user = os.environ.get('ARROW_HDFS_TEST_USER', 'hdfs')
    return host, port, user


@pytest.fixture(scope='session')
def s3_connection():
    host, port = 'localhost', find_free_port()
    access_key, secret_key = 'arrow', 'apachearrow'
    return host, port, access_key, secret_key


def retry(attempts=3, delay=1.0, max_delay=None, backoff=1):
    """
    Retry decorator

    Parameters
    ----------
    attempts : int, default 3
        The number of attempts.
    delay : float, default 1
        Initial delay in seconds.
    max_delay : float, optional
        The max delay between attempts.
    backoff : float, default 1
        The multiplier to delay after each attempt.
    """
    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            remaining_attempts = attempts
            curr_delay = delay
            while remaining_attempts > 0:
                try:
                    return func(*args, **kwargs)
                except Exception as err:
                    remaining_attempts -= 1
                    last_exception = err
                    curr_delay *= backoff
                    if max_delay:
                        curr_delay = min(curr_delay, max_delay)
                    time.sleep(curr_delay)
            raise last_exception
        return wrapper
    return decorate


@pytest.fixture(scope='session')
def s3_server(s3_connection, tmpdir_factory):
    @retry(attempts=5, delay=0.1, backoff=2)
    def minio_server_health_check(address):
        resp = urllib.request.urlopen(f"http://{address}/minio/health/cluster")
        assert resp.getcode() == 200

    tmpdir = tmpdir_factory.getbasetemp()
    host, port, access_key, secret_key = s3_connection

    address = '{}:{}'.format(host, port)
    env = os.environ.copy()
    env.update({
        'MINIO_ACCESS_KEY': access_key,
        'MINIO_SECRET_KEY': secret_key
    })

    args = ['minio', '--compat', 'server', '--quiet', '--address',
            address, tmpdir]
    proc = None
    try:
        proc = subprocess.Popen(args, env=env)
    except OSError:
        pytest.skip('`minio` command cannot be located')
    else:
        # Wait for the server to start up before yielding
        minio_server_health_check(address)

        yield {
            'connection': s3_connection,
            'process': proc,
            'tempdir': tmpdir
        }
    finally:
        if proc is not None:
            proc.kill()
            proc.wait()


@pytest.fixture(scope='session')
def gcs_server():
    port = find_free_port()
    env = os.environ.copy()
    args = [sys.executable, '-m', 'testbench', '--port', str(port)]
    proc = None
    try:
        # check first if testbench module is available
        import testbench  # noqa:F401
        # start server
        proc = subprocess.Popen(args, env=env)
        # Make sure the server is alive.
        if proc.poll() is not None:
            pytest.skip(f"Command {args} did not start server successfully!")
    except (ModuleNotFoundError, OSError) as e:
        pytest.skip(f"Command {args} failed to execute: {e}")
    else:
        yield {
            'connection': ('localhost', port),
            'process': proc,
        }
    finally:
        if proc is not None:
            proc.kill()
            proc.wait()


@pytest.fixture(
    params=[
        'builtin_pickle',
        'cloudpickle'
    ],
    scope='session'
)
def pickle_module(request):
    return request.getfixturevalue(request.param)


@pytest.fixture(scope='session')
def builtin_pickle():
    import pickle
    return pickle


@pytest.fixture(scope='session')
def cloudpickle():
    cp = pytest.importorskip('cloudpickle')
    if 'HIGHEST_PROTOCOL' not in cp.__dict__:
        cp.HIGHEST_PROTOCOL = cp.DEFAULT_PROTOCOL
    return cp
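The `retry` decorator defined above is what the `s3_server` fixture uses for its MinIO health check. A standalone sketch of applying it, with an illustrative endpoint:

import urllib.request

# Each failure multiplies the delay by `backoff`, capped at `max_delay`;
# the last exception is re-raised once the attempts are exhausted.
@retry(attempts=5, delay=0.1, backoff=2, max_delay=1.0)
def health_check(url):
    assert urllib.request.urlopen(url).getcode() == 200

# health_check("http://localhost:9000/minio/health/cluster")  # illustrative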
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/__init__.py
ADDED
@@ -0,0 +1,16 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
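The conversion tests in the next file exercise the dataframe interchange protocol; as a minimal sketch of the protocol surface they rely on, using only calls that appear in the tests themselves:

import pyarrow as pa

table = pa.table({"a": [1, 2, None]})
df = table.__dataframe__()           # protocol-level DataFrame wrapper

col = df.get_column_by_name("a")
assert col.size() == 3               # logical length, including nulls
assert col.null_count == 1

# Columns are backed by raw buffers: the data buffer plus an optional
# validity bitmap, present here because one value is null.
buffers = col.get_buffers()
assert buffers["validity"] is not None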
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/test_conversion.py
ADDED
@@ -0,0 +1,522 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from datetime import datetime as dt
import numpy as np
import pyarrow as pa
from pyarrow.vendored.version import Version
import pytest

import pyarrow.interchange as pi
from pyarrow.interchange.column import (
    _PyArrowColumn,
    ColumnNullType,
    DtypeKind,
)
from pyarrow.interchange.from_dataframe import _from_dataframe

try:
    import pandas as pd
    # import pandas.testing as tm
except ImportError:
    pass


@pytest.mark.parametrize("unit", ['s', 'ms', 'us', 'ns'])
@pytest.mark.parametrize("tz", ['', 'America/New_York', '+07:30', '-04:30'])
def test_datetime(unit, tz):
    dt_arr = [dt(2007, 7, 13), dt(2007, 7, 14), None]
    table = pa.table({"A": pa.array(dt_arr, type=pa.timestamp(unit, tz=tz))})
    col = table.__dataframe__().get_column_by_name("A")

    assert col.size() == 3
    assert col.offset == 0
    assert col.null_count == 1
    assert col.dtype[0] == DtypeKind.DATETIME
    assert col.describe_null == (ColumnNullType.USE_BITMASK, 0)


@pytest.mark.parametrize(
    ["test_data", "kind"],
    [
        (["foo", "bar"], 21),
        ([1.5, 2.5, 3.5], 2),
        ([1, 2, 3, 4], 0),
    ],
)
def test_array_to_pyarrowcolumn(test_data, kind):
    arr = pa.array(test_data)
    arr_column = _PyArrowColumn(arr)

    assert arr_column._col == arr
    assert arr_column.size() == len(test_data)
    assert arr_column.dtype[0] == kind
    assert arr_column.num_chunks() == 1
    assert arr_column.null_count == 0
    assert arr_column.get_buffers()["validity"] is None
    assert len(list(arr_column.get_chunks())) == 1

    for chunk in arr_column.get_chunks():
        assert chunk == arr_column


def test_offset_of_sliced_array():
    arr = pa.array([1, 2, 3, 4])
    arr_sliced = arr.slice(2, 2)

    table = pa.table([arr], names=["arr"])
    table_sliced = pa.table([arr_sliced], names=["arr_sliced"])

    col = table_sliced.__dataframe__().get_column(0)
    assert col.offset == 2

    result = _from_dataframe(table_sliced.__dataframe__())
    assert table_sliced.equals(result)
    assert not table.equals(result)

    # pandas hardcodes offset to 0:
    # https://github.com/pandas-dev/pandas/blob/5c66e65d7b9fef47ccb585ce2fd0b3ea18dc82ea/pandas/core/interchange/from_dataframe.py#L247
    # so conversion to pandas can't be tested currently

    # df = pandas_from_dataframe(table)
    # df_sliced = pandas_from_dataframe(table_sliced)

    # tm.assert_series_equal(df["arr"][2:4], df_sliced["arr_sliced"],
    #                        check_index=False, check_names=False)


@pytest.mark.pandas
@pytest.mark.parametrize(
    "uint", [pa.uint8(), pa.uint16(), pa.uint32()]
)
@pytest.mark.parametrize(
    "int", [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
)
@pytest.mark.parametrize(
    "float, np_float", [
        # (pa.float16(), np.float16),  # not supported by pandas
        (pa.float32(), np.float32),
        (pa.float64(), np.float64)
    ]
)
def test_pandas_roundtrip(uint, int, float, np_float):
    if Version(pd.__version__) < Version("1.5.0"):
        pytest.skip("__dataframe__ added to pandas in 1.5.0")

    arr = [1, 2, 3]
    table = pa.table(
        {
            "a": pa.array(arr, type=uint),
            "b": pa.array(arr, type=int),
            "c": pa.array(np.array(arr, dtype=np_float), type=float),
            "d": [True, False, True],
        }
    )
    from pandas.api.interchange import (
        from_dataframe as pandas_from_dataframe
    )
    pandas_df = pandas_from_dataframe(table)
    result = pi.from_dataframe(pandas_df)
    assert table.equals(result)

    table_protocol = table.__dataframe__()
    result_protocol = result.__dataframe__()

    assert table_protocol.num_columns() == result_protocol.num_columns()
    assert table_protocol.num_rows() == result_protocol.num_rows()
    assert table_protocol.num_chunks() == result_protocol.num_chunks()
    assert table_protocol.column_names() == result_protocol.column_names()


@pytest.mark.pandas
def test_pandas_roundtrip_string():
    # See https://github.com/pandas-dev/pandas/issues/50554
    if Version(pd.__version__) < Version("1.6"):
        pytest.skip("Column.size() bug in pandas")

    arr = ["a", "", "c"]
    table = pa.table({"a": pa.array(arr)})

    from pandas.api.interchange import (
        from_dataframe as pandas_from_dataframe
    )

    pandas_df = pandas_from_dataframe(table)
    result = pi.from_dataframe(pandas_df)

    assert result["a"].to_pylist() == table["a"].to_pylist()
    assert pa.types.is_string(table["a"].type)
    assert pa.types.is_large_string(result["a"].type)

    table_protocol = table.__dataframe__()
    result_protocol = result.__dataframe__()

    assert table_protocol.num_columns() == result_protocol.num_columns()
    assert table_protocol.num_rows() == result_protocol.num_rows()
    assert table_protocol.num_chunks() == result_protocol.num_chunks()
    assert table_protocol.column_names() == result_protocol.column_names()


@pytest.mark.pandas
def test_pandas_roundtrip_large_string():
    # See https://github.com/pandas-dev/pandas/issues/50554
    if Version(pd.__version__) < Version("1.6"):
        pytest.skip("Column.size() bug in pandas")

    arr = ["a", "", "c"]
    table = pa.table({"a_large": pa.array(arr, type=pa.large_string())})

    from pandas.api.interchange import (
        from_dataframe as pandas_from_dataframe
    )

    if Version(pd.__version__) >= Version("2.0.1"):
        pandas_df = pandas_from_dataframe(table)
        result = pi.from_dataframe(pandas_df)

        assert result["a_large"].to_pylist() == table["a_large"].to_pylist()
        assert pa.types.is_large_string(table["a_large"].type)
        assert pa.types.is_large_string(result["a_large"].type)

        table_protocol = table.__dataframe__()
        result_protocol = result.__dataframe__()

        assert table_protocol.num_columns() == result_protocol.num_columns()
        assert table_protocol.num_rows() == result_protocol.num_rows()
        assert table_protocol.num_chunks() == result_protocol.num_chunks()
        assert table_protocol.column_names() == result_protocol.column_names()

    else:
        # large string not supported by pandas implementation for
        # older versions of pandas
        # https://github.com/pandas-dev/pandas/issues/52795
        with pytest.raises(AssertionError):
            pandas_from_dataframe(table)


@pytest.mark.pandas
def test_pandas_roundtrip_string_with_missing():
    # See https://github.com/pandas-dev/pandas/issues/50554
    if Version(pd.__version__) < Version("1.6"):
        pytest.skip("Column.size() bug in pandas")

    arr = ["a", "", "c", None]
    table = pa.table({"a": pa.array(arr),
                      "a_large": pa.array(arr, type=pa.large_string())})

    from pandas.api.interchange import (
        from_dataframe as pandas_from_dataframe
    )

    if Version(pd.__version__) >= Version("2.0.2"):
        pandas_df = pandas_from_dataframe(table)
        result = pi.from_dataframe(pandas_df)

        assert result["a"].to_pylist() == table["a"].to_pylist()
        assert pa.types.is_string(table["a"].type)
        assert pa.types.is_large_string(result["a"].type)

        assert result["a_large"].to_pylist() == table["a_large"].to_pylist()
        assert pa.types.is_large_string(table["a_large"].type)
        assert pa.types.is_large_string(result["a_large"].type)
    else:
        # older versions of pandas do not have bitmask support
        # https://github.com/pandas-dev/pandas/issues/49888
        with pytest.raises(NotImplementedError):
            pandas_from_dataframe(table)


@pytest.mark.pandas
def test_pandas_roundtrip_categorical():
    if Version(pd.__version__) < Version("2.0.2"):
        pytest.skip("Bitmasks not supported in pandas interchange implementation")

    arr = ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", None]
    table = pa.table(
        {"weekday": pa.array(arr).dictionary_encode()}
    )

    from pandas.api.interchange import (
        from_dataframe as pandas_from_dataframe
    )
    pandas_df = pandas_from_dataframe(table)
    result = pi.from_dataframe(pandas_df)

    assert result["weekday"].to_pylist() == table["weekday"].to_pylist()
    assert pa.types.is_dictionary(table["weekday"].type)
    assert pa.types.is_dictionary(result["weekday"].type)
    assert pa.types.is_string(table["weekday"].chunk(0).dictionary.type)
    assert pa.types.is_large_string(result["weekday"].chunk(0).dictionary.type)
    assert pa.types.is_int32(table["weekday"].chunk(0).indices.type)
    assert pa.types.is_int8(result["weekday"].chunk(0).indices.type)

    table_protocol = table.__dataframe__()
    result_protocol = result.__dataframe__()

    assert table_protocol.num_columns() == result_protocol.num_columns()
    assert table_protocol.num_rows() == result_protocol.num_rows()
    assert table_protocol.num_chunks() == result_protocol.num_chunks()
    assert table_protocol.column_names() == result_protocol.column_names()

    col_table = table_protocol.get_column(0)
    col_result = result_protocol.get_column(0)

    assert col_result.dtype[0] == DtypeKind.CATEGORICAL
    assert col_result.dtype[0] == col_table.dtype[0]
    assert col_result.size() == col_table.size()
    assert col_result.offset == col_table.offset

    desc_cat_table = col_table.describe_categorical
    desc_cat_result = col_result.describe_categorical

    assert desc_cat_table["is_ordered"] == desc_cat_result["is_ordered"]
    assert desc_cat_table["is_dictionary"] == desc_cat_result["is_dictionary"]
    assert isinstance(desc_cat_result["categories"]._col, pa.Array)


@pytest.mark.pandas
@pytest.mark.parametrize("unit", ['s', 'ms', 'us', 'ns'])
def test_pandas_roundtrip_datetime(unit):
    if Version(pd.__version__) < Version("1.5.0"):
        pytest.skip("__dataframe__ added to pandas in 1.5.0")
    from datetime import datetime as dt

    # timezones not included as they are not yet supported in
    # the pandas implementation
    dt_arr = [dt(2007, 7, 13), dt(2007, 7, 14), dt(2007, 7, 15)]
    table = pa.table({"a": pa.array(dt_arr, type=pa.timestamp(unit))})

    if Version(pd.__version__) < Version("1.6"):
        # pandas < 2.0 always creates datetime64 in "ns"
        # resolution
        expected = pa.table({"a": pa.array(dt_arr, type=pa.timestamp('ns'))})
    else:
        expected = table

    from pandas.api.interchange import (
        from_dataframe as pandas_from_dataframe
    )
    pandas_df = pandas_from_dataframe(table)
    result = pi.from_dataframe(pandas_df)

    assert expected.equals(result)

    expected_protocol = expected.__dataframe__()
    result_protocol = result.__dataframe__()

    assert expected_protocol.num_columns() == result_protocol.num_columns()
    assert expected_protocol.num_rows() == result_protocol.num_rows()
    assert expected_protocol.num_chunks() == result_protocol.num_chunks()
    assert expected_protocol.column_names() == result_protocol.column_names()


@pytest.mark.pandas
@pytest.mark.parametrize(
    "np_float", [np.float32, np.float64]
)
def test_pandas_to_pyarrow_with_missing(np_float):
    if Version(pd.__version__) < Version("1.5.0"):
        pytest.skip("__dataframe__ added to pandas in 1.5.0")

    np_array = np.array([0, np.nan, 2], dtype=np_float)
    datetime_array = [None, dt(2007, 7, 14), dt(2007, 7, 15)]
    df = pd.DataFrame({
        "a": np_array,  # float, ColumnNullType.USE_NAN
        "dt": datetime_array  # ColumnNullType.USE_SENTINEL
    })
    expected = pa.table({
        "a": pa.array(np_array, from_pandas=True),
        "dt": pa.array(datetime_array, type=pa.timestamp("ns"))
    })
    result = pi.from_dataframe(df)

    assert result.equals(expected)


@pytest.mark.pandas
def test_pandas_to_pyarrow_float16_with_missing():
    if Version(pd.__version__) < Version("1.5.0"):
        pytest.skip("__dataframe__ added to pandas in 1.5.0")

    # np.float16 errors if pc.is_nan is used
    # pyarrow.lib.ArrowNotImplementedError: Function 'is_nan' has no kernel
    # matching input types (halffloat)
    np_array = np.array([0, np.nan, 2], dtype=np.float16)
    df = pd.DataFrame({"a": np_array})

    with pytest.raises(NotImplementedError):
        pi.from_dataframe(df)


@pytest.mark.parametrize(
    "uint", [pa.uint8(), pa.uint16(), pa.uint32()]
)
@pytest.mark.parametrize(
    "int", [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
)
@pytest.mark.parametrize(
    "float, np_float", [
        (pa.float16(), np.float16),
        (pa.float32(), np.float32),
        (pa.float64(), np.float64)
    ]
)
@pytest.mark.parametrize("unit", ['s', 'ms', 'us', 'ns'])
@pytest.mark.parametrize("tz", ['America/New_York', '+07:30', '-04:30'])
@pytest.mark.parametrize("offset, length", [(0, 3), (0, 2), (1, 2), (2, 1)])
def test_pyarrow_roundtrip(uint, int, float, np_float,
                           unit, tz, offset, length):

    from datetime import datetime as dt
    arr = [1, 2, None]
    dt_arr = [dt(2007, 7, 13), None, dt(2007, 7, 15)]

    table = pa.table(
        {
            "a": pa.array(arr, type=uint),
            "b": pa.array(arr, type=int),
            "c": pa.array(np.array(arr, dtype=np_float),
                          type=float, from_pandas=True),
            "d": [True, False, True],
            "e": [True, False, None],
            "f": ["a", None, "c"],
            "g": pa.array(dt_arr, type=pa.timestamp(unit, tz=tz))
        }
    )
    table = table.slice(offset, length)
    result = _from_dataframe(table.__dataframe__())

    assert table.equals(result)

    table_protocol = table.__dataframe__()
    result_protocol = result.__dataframe__()

    assert table_protocol.num_columns() == result_protocol.num_columns()
    assert table_protocol.num_rows() == result_protocol.num_rows()
    assert table_protocol.num_chunks() == result_protocol.num_chunks()
    assert table_protocol.column_names() == result_protocol.column_names()


@pytest.mark.parametrize("offset, length", [(0, 10), (0, 2), (7, 3), (2, 1)])
def test_pyarrow_roundtrip_categorical(offset, length):
    arr = ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", None, "Sun"]
    table = pa.table(
        {"weekday": pa.array(arr).dictionary_encode()}
    )
    table = table.slice(offset, length)
    result = _from_dataframe(table.__dataframe__())

    assert table.equals(result)

    table_protocol = table.__dataframe__()
    result_protocol = result.__dataframe__()

    assert table_protocol.num_columns() == result_protocol.num_columns()
    assert table_protocol.num_rows() == result_protocol.num_rows()
    assert table_protocol.num_chunks() == result_protocol.num_chunks()
    assert table_protocol.column_names() == result_protocol.column_names()

    col_table = table_protocol.get_column(0)
    col_result = result_protocol.get_column(0)

    assert col_result.dtype[0] == DtypeKind.CATEGORICAL
    assert col_result.dtype[0] == col_table.dtype[0]
    assert col_result.size() == col_table.size()
    assert col_result.offset == col_table.offset

    desc_cat_table = col_table.describe_categorical
    desc_cat_result = col_result.describe_categorical

    assert desc_cat_table["is_ordered"] == desc_cat_result["is_ordered"]
    assert desc_cat_table["is_dictionary"] == desc_cat_result["is_dictionary"]
    assert isinstance(desc_cat_result["categories"]._col, pa.Array)


@pytest.mark.large_memory
def test_pyarrow_roundtrip_large_string():

    data = np.array([b'x'*1024]*(3*1024**2), dtype='object')  # 3GB bytes data
    arr = pa.array(data, type=pa.large_string())
    table = pa.table([arr], names=["large_string"])

    result = _from_dataframe(table.__dataframe__())
    col = result.__dataframe__().get_column(0)

    assert col.size() == 3*1024**2
    assert pa.types.is_large_string(table[0].type)
    assert pa.types.is_large_string(result[0].type)

    assert table.equals(result)


def test_nan_as_null():
    table = pa.table({"a": [1, 2, 3, 4]})
    with pytest.raises(RuntimeError):
        table.__dataframe__(nan_as_null=True)


@pytest.mark.pandas
def test_allow_copy_false():
    if Version(pd.__version__) < Version("1.5.0"):
        pytest.skip("__dataframe__ added to pandas in 1.5.0")

    # Test that an error is raised when a copy is needed
    # to create a bitmask

    df = pd.DataFrame({"a": [0, 1.0, 2.0]})
    with pytest.raises(RuntimeError):
        pi.from_dataframe(df, allow_copy=False)

    df = pd.DataFrame({
        "dt": [None, dt(2007, 7, 14), dt(2007, 7, 15)]
    })
    with pytest.raises(RuntimeError):
        pi.from_dataframe(df, allow_copy=False)


@pytest.mark.pandas
def test_allow_copy_false_bool_categorical():
    if Version(pd.__version__) < Version("1.5.0"):
        pytest.skip("__dataframe__ added to pandas in 1.5.0")

    # Test that an error is raised for boolean
    # and categorical dtype (copy is always made)

    df = pd.DataFrame({"a": [None, False, True]})
|
500 |
+
with pytest.raises(RuntimeError):
|
501 |
+
pi.from_dataframe(df, allow_copy=False)
|
502 |
+
|
503 |
+
df = pd.DataFrame({"a": [True, False, True]})
|
504 |
+
with pytest.raises(RuntimeError):
|
505 |
+
pi.from_dataframe(df, allow_copy=False)
|
506 |
+
|
507 |
+
df = pd.DataFrame({"weekday": ["a", "b", None]})
|
508 |
+
df = df.astype("category")
|
509 |
+
with pytest.raises(RuntimeError):
|
510 |
+
pi.from_dataframe(df, allow_copy=False)
|
511 |
+
|
512 |
+
df = pd.DataFrame({"weekday": ["a", "b", "c"]})
|
513 |
+
df = df.astype("category")
|
514 |
+
with pytest.raises(RuntimeError):
|
515 |
+
pi.from_dataframe(df, allow_copy=False)
|
516 |
+
|
517 |
+
|
518 |
+
def test_empty_dataframe():
|
519 |
+
schema = pa.schema([('col1', pa.int8())])
|
520 |
+
df = pa.table([[]], schema=schema)
|
521 |
+
dfi = df.__dataframe__()
|
522 |
+
assert pi.from_dataframe(dfi) == df
|
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/pandas_threaded_import.py
ADDED
@@ -0,0 +1,44 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# This file is called from a test in test_pandas.py.

from concurrent.futures import ThreadPoolExecutor
import faulthandler
import sys

import pyarrow as pa

num_threads = 60
timeout = 10  # seconds


def thread_func(i):
    pa.array([i]).to_pandas()


def main():
    # In case of import deadlock, crash after a finite timeout
    faulthandler.dump_traceback_later(timeout, exit=True)
    with ThreadPoolExecutor(num_threads) as pool:
        assert "pandas" not in sys.modules  # pandas is imported lazily
        list(pool.map(thread_func, range(num_threads)))
    assert "pandas" in sys.modules


if __name__ == "__main__":
    main()
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/pyarrow_cython_example.pyx
ADDED
@@ -0,0 +1,55 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# distutils: language=c++
# cython: language_level = 3

from pyarrow.lib cimport *


def get_array_length(obj):
    # An example function accessing both the pyarrow Cython API
    # and the Arrow C++ API
    cdef shared_ptr[CArray] arr = pyarrow_unwrap_array(obj)
    if arr.get() == NULL:
        raise TypeError("not an array")
    return arr.get().length()


def make_null_array(length):
    # An example function that returns a PyArrow object without PyArrow
    # being imported explicitly at the Python level.
    cdef shared_ptr[CArray] null_array
    null_array.reset(new CNullArray(length))
    return pyarrow_wrap_array(null_array)


def cast_scalar(scalar, to_type):
    cdef:
        shared_ptr[CScalar] c_scalar
        shared_ptr[CDataType] c_type
        CResult[shared_ptr[CScalar]] c_result

    c_scalar = pyarrow_unwrap_scalar(scalar)
    if c_scalar.get() == NULL:
        raise TypeError("not a scalar")
    c_type = pyarrow_unwrap_data_type(to_type)
    if c_type.get() == NULL:
        raise TypeError("not a type")
    c_result = c_scalar.get().CastTo(c_type)
    c_scalar = GetResultValue(c_result)
    return pyarrow_wrap_scalar(c_scalar)
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/strategies.py
ADDED
@@ -0,0 +1,451 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import datetime
import sys

import pytest
import hypothesis as h
import hypothesis.strategies as st
import hypothesis.extra.numpy as npst
try:
    import hypothesis.extra.pytz as tzst
except ImportError:
    tzst = None
try:
    import zoneinfo
except ImportError:
    zoneinfo = None
if sys.platform == 'win32':
    try:
        import tzdata  # noqa:F401
    except ImportError:
        zoneinfo = None
import numpy as np

import pyarrow as pa


# TODO(kszucs): alphanum_text, surrogate_text
custom_text = st.text(
    alphabet=st.characters(
        min_codepoint=0x41,
        max_codepoint=0x7E
    )
)

null_type = st.just(pa.null())
bool_type = st.just(pa.bool_())

binary_type = st.just(pa.binary())
string_type = st.just(pa.string())
large_binary_type = st.just(pa.large_binary())
large_string_type = st.just(pa.large_string())
fixed_size_binary_type = st.builds(
    pa.binary,
    st.integers(min_value=0, max_value=16)
)
binary_like_types = st.one_of(
    binary_type,
    string_type,
    large_binary_type,
    large_string_type,
    fixed_size_binary_type
)

signed_integer_types = st.sampled_from([
    pa.int8(),
    pa.int16(),
    pa.int32(),
    pa.int64()
])
unsigned_integer_types = st.sampled_from([
    pa.uint8(),
    pa.uint16(),
    pa.uint32(),
    pa.uint64()
])
integer_types = st.one_of(signed_integer_types, unsigned_integer_types)

floating_types = st.sampled_from([
    pa.float16(),
    pa.float32(),
    pa.float64()
])
decimal128_type = st.builds(
    pa.decimal128,
    precision=st.integers(min_value=1, max_value=38),
    scale=st.integers(min_value=1, max_value=38)
)
decimal256_type = st.builds(
    pa.decimal256,
    precision=st.integers(min_value=1, max_value=76),
    scale=st.integers(min_value=1, max_value=76)
)
numeric_types = st.one_of(integer_types, floating_types,
                          decimal128_type, decimal256_type)

date_types = st.sampled_from([
    pa.date32(),
    pa.date64()
])
time_types = st.sampled_from([
    pa.time32('s'),
    pa.time32('ms'),
    pa.time64('us'),
    pa.time64('ns')
])

if tzst and zoneinfo:
    timezones = st.one_of(st.none(), tzst.timezones(), st.timezones())
elif tzst:
    timezones = st.one_of(st.none(), tzst.timezones())
elif zoneinfo:
    timezones = st.one_of(st.none(), st.timezones())
else:
    timezones = st.none()
timestamp_types = st.builds(
    pa.timestamp,
    unit=st.sampled_from(['s', 'ms', 'us', 'ns']),
    tz=timezones
)
duration_types = st.builds(
    pa.duration,
    st.sampled_from(['s', 'ms', 'us', 'ns'])
)
interval_types = st.just(pa.month_day_nano_interval())
temporal_types = st.one_of(
    date_types,
    time_types,
    timestamp_types,
    duration_types,
    interval_types
)

primitive_types = st.one_of(
    null_type,
    bool_type,
    numeric_types,
    temporal_types,
    binary_like_types
)

metadata = st.dictionaries(st.text(), st.text())


@st.composite
def fields(draw, type_strategy=primitive_types):
    name = draw(custom_text)
    typ = draw(type_strategy)
    if pa.types.is_null(typ):
        nullable = True
    else:
        nullable = draw(st.booleans())
    meta = draw(metadata)
    return pa.field(name, type=typ, nullable=nullable, metadata=meta)


def list_types(item_strategy=primitive_types):
    return (
        st.builds(pa.list_, item_strategy) |
        st.builds(pa.large_list, item_strategy) |
        st.builds(
            pa.list_,
            item_strategy,
            st.integers(min_value=0, max_value=16)
        )
    )


@st.composite
def struct_types(draw, item_strategy=primitive_types):
    fields_strategy = st.lists(fields(item_strategy))
    fields_rendered = draw(fields_strategy)
    field_names = [field.name for field in fields_rendered]
    # check that field names are unique, see ARROW-9997
    h.assume(len(set(field_names)) == len(field_names))
    return pa.struct(fields_rendered)


def dictionary_types(key_strategy=None, value_strategy=None):
    if key_strategy is None:
        key_strategy = signed_integer_types
    if value_strategy is None:
        value_strategy = st.one_of(
            bool_type,
            integer_types,
            st.sampled_from([pa.float32(), pa.float64()]),
            binary_type,
            string_type,
            fixed_size_binary_type,
        )
    return st.builds(pa.dictionary, key_strategy, value_strategy)


@st.composite
def map_types(draw, key_strategy=primitive_types,
              item_strategy=primitive_types):
    key_type = draw(key_strategy)
    h.assume(not pa.types.is_null(key_type))
    value_type = draw(item_strategy)
    return pa.map_(key_type, value_type)


# union type
# extension type


def schemas(type_strategy=primitive_types, max_fields=None):
    children = st.lists(fields(type_strategy), max_size=max_fields)
    return st.builds(pa.schema, children)


all_types = st.deferred(
    lambda: (
        primitive_types |
        list_types() |
        struct_types() |
        dictionary_types() |
        map_types() |
        list_types(all_types) |
        struct_types(all_types)
    )
)
all_fields = fields(all_types)
all_schemas = schemas(all_types)


_default_array_sizes = st.integers(min_value=0, max_value=20)


@st.composite
def _pylist(draw, value_type, size, nullable=True):
    arr = draw(arrays(value_type, size=size, nullable=False))
    return arr.to_pylist()


@st.composite
def _pymap(draw, key_type, value_type, size, nullable=True):
    length = draw(size)
    keys = draw(_pylist(key_type, size=length, nullable=False))
    values = draw(_pylist(value_type, size=length, nullable=nullable))
    return list(zip(keys, values))


@st.composite
def arrays(draw, type, size=None, nullable=True):
    if isinstance(type, st.SearchStrategy):
        ty = draw(type)
    elif isinstance(type, pa.DataType):
        ty = type
    else:
        raise TypeError('Type must be a pyarrow DataType')

    if isinstance(size, st.SearchStrategy):
        size = draw(size)
    elif size is None:
        size = draw(_default_array_sizes)
    elif not isinstance(size, int):
        raise TypeError('Size must be an integer')

    if pa.types.is_null(ty):
        h.assume(nullable)
        value = st.none()
    elif pa.types.is_boolean(ty):
        value = st.booleans()
    elif pa.types.is_integer(ty):
        values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,)))
        return pa.array(values, type=ty)
    elif pa.types.is_floating(ty):
        values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,)))
        # Workaround ARROW-4952: no easy way to assert array equality
        # in a NaN-tolerant way.
        values[np.isnan(values)] = -42.0
        return pa.array(values, type=ty)
    elif pa.types.is_decimal(ty):
        # TODO(kszucs): properly limit the precision
        # value = st.decimals(places=type.scale, allow_infinity=False)
        h.reject()
    elif pa.types.is_time(ty):
        value = st.times()
    elif pa.types.is_date(ty):
        value = st.dates()
    elif pa.types.is_timestamp(ty):
        if zoneinfo is None:
            pytest.skip('no module named zoneinfo (or tzdata on Windows)')
        if ty.tz is None:
            pytest.skip('requires timezone not None')
        min_int64 = -(2**63)
        max_int64 = 2**63 - 1
        min_datetime = datetime.datetime.fromtimestamp(
            min_int64 // 10**9) + datetime.timedelta(hours=12)
        max_datetime = datetime.datetime.fromtimestamp(
            max_int64 // 10**9) - datetime.timedelta(hours=12)
        try:
            offset = ty.tz.split(":")
            offset_hours = int(offset[0])
            offset_min = int(offset[1])
            tz = datetime.timedelta(hours=offset_hours, minutes=offset_min)
        except ValueError:
            tz = zoneinfo.ZoneInfo(ty.tz)
        value = st.datetimes(timezones=st.just(tz), min_value=min_datetime,
                             max_value=max_datetime)
    elif pa.types.is_duration(ty):
        value = st.timedeltas()
    elif pa.types.is_interval(ty):
        value = st.timedeltas()
    elif pa.types.is_binary(ty) or pa.types.is_large_binary(ty):
        value = st.binary()
    elif pa.types.is_string(ty) or pa.types.is_large_string(ty):
        value = st.text()
    elif pa.types.is_fixed_size_binary(ty):
        value = st.binary(min_size=ty.byte_width, max_size=ty.byte_width)
    elif pa.types.is_list(ty):
        value = _pylist(ty.value_type, size=size, nullable=nullable)
    elif pa.types.is_large_list(ty):
        value = _pylist(ty.value_type, size=size, nullable=nullable)
    elif pa.types.is_fixed_size_list(ty):
        value = _pylist(ty.value_type, size=ty.list_size, nullable=nullable)
    elif pa.types.is_dictionary(ty):
        values = _pylist(ty.value_type, size=size, nullable=nullable)
        return pa.array(draw(values), type=ty)
    elif pa.types.is_map(ty):
        value = _pymap(ty.key_type, ty.item_type, size=_default_array_sizes,
                       nullable=nullable)
    elif pa.types.is_struct(ty):
        h.assume(len(ty) > 0)
        fields, child_arrays = [], []
        for field in ty:
            fields.append(field)
            child_arrays.append(draw(arrays(field.type, size=size)))
        return pa.StructArray.from_arrays(child_arrays, fields=fields)
    else:
        raise NotImplementedError(ty)

    if nullable:
        value = st.one_of(st.none(), value)
    values = st.lists(value, min_size=size, max_size=size)

    return pa.array(draw(values), type=ty)


@st.composite
def chunked_arrays(draw, type, min_chunks=0, max_chunks=None, chunk_size=None):
    if isinstance(type, st.SearchStrategy):
        type = draw(type)

    # TODO(kszucs): remove it, field metadata is not kept
    h.assume(not pa.types.is_struct(type))

    chunk = arrays(type, size=chunk_size)
    chunks = st.lists(chunk, min_size=min_chunks, max_size=max_chunks)

    return pa.chunked_array(draw(chunks), type=type)


@st.composite
def record_batches(draw, type, rows=None, max_fields=None):
    if isinstance(rows, st.SearchStrategy):
        rows = draw(rows)
    elif rows is None:
        rows = draw(_default_array_sizes)
    elif not isinstance(rows, int):
        raise TypeError('Rows must be an integer')

    schema = draw(schemas(type, max_fields=max_fields))
    children = [draw(arrays(field.type, size=rows)) for field in schema]
    # TODO(kszucs): the names and schema arguments are not consistent with
    # Table.from_array's arguments
    return pa.RecordBatch.from_arrays(children, schema=schema)


@st.composite
def tables(draw, type, rows=None, max_fields=None):
    if isinstance(rows, st.SearchStrategy):
        rows = draw(rows)
    elif rows is None:
        rows = draw(_default_array_sizes)
    elif not isinstance(rows, int):
        raise TypeError('Rows must be an integer')

    schema = draw(schemas(type, max_fields=max_fields))
    children = [draw(arrays(field.type, size=rows)) for field in schema]
    return pa.Table.from_arrays(children, schema=schema)


all_arrays = arrays(all_types)
all_chunked_arrays = chunked_arrays(all_types)
all_record_batches = record_batches(all_types)
all_tables = tables(all_types)


# Define the same rules as above for pandas tests by excluding certain types
# from the generation because of known issues.

pandas_compatible_primitive_types = st.one_of(
    null_type,
    bool_type,
    integer_types,
    st.sampled_from([pa.float32(), pa.float64()]),
    decimal128_type,
    date_types,
    time_types,
    # Need to exclude timestamp and duration types otherwise hypothesis
    # discovers ARROW-10210
    # timestamp_types,
    # duration_types
    interval_types,
    binary_type,
    string_type,
    large_binary_type,
    large_string_type,
)

# Need to exclude floating point types otherwise hypothesis discovers
# ARROW-10211
pandas_compatible_dictionary_value_types = st.one_of(
    bool_type,
    integer_types,
    binary_type,
    string_type,
    fixed_size_binary_type,
)


def pandas_compatible_list_types(
    item_strategy=pandas_compatible_primitive_types
):
    # Need to exclude fixed size list type otherwise hypothesis discovers
    # ARROW-10194
    return (
        st.builds(pa.list_, item_strategy) |
        st.builds(pa.large_list, item_strategy)
    )


pandas_compatible_types = st.deferred(
    lambda: st.one_of(
        pandas_compatible_primitive_types,
        pandas_compatible_list_types(pandas_compatible_primitive_types),
        struct_types(pandas_compatible_primitive_types),
        dictionary_types(
            value_strategy=pandas_compatible_dictionary_value_types
        ),
        pandas_compatible_list_types(pandas_compatible_types),
        struct_types(pandas_compatible_types)
    )
)
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_acero.py
ADDED
@@ -0,0 +1,378 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

import pyarrow as pa
import pyarrow.compute as pc
from pyarrow.compute import field

try:
    from pyarrow.acero import (
        Declaration,
        TableSourceNodeOptions,
        FilterNodeOptions,
        ProjectNodeOptions,
        AggregateNodeOptions,
        OrderByNodeOptions,
        HashJoinNodeOptions,
    )
except ImportError:
    pass

try:
    import pyarrow.dataset as ds
    from pyarrow.acero import ScanNodeOptions
except ImportError:
    ds = None

pytestmark = pytest.mark.acero


@pytest.fixture
def table_source():
    table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    table_opts = TableSourceNodeOptions(table)
    table_source = Declaration("table_source", options=table_opts)
    return table_source


def test_declaration():

    table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    table_opts = TableSourceNodeOptions(table)
    filter_opts = FilterNodeOptions(field('a') > 1)

    # using sequence
    decl = Declaration.from_sequence([
        Declaration("table_source", options=table_opts),
        Declaration("filter", options=filter_opts)
    ])
    result = decl.to_table()
    assert result.equals(table.slice(1, 2))

    # using explicit inputs
    table_source = Declaration("table_source", options=table_opts)
    filtered = Declaration("filter", options=filter_opts, inputs=[table_source])
    result = filtered.to_table()
    assert result.equals(table.slice(1, 2))


def test_declaration_repr(table_source):

    assert "TableSourceNode" in str(table_source)
    assert "TableSourceNode" in repr(table_source)


def test_declaration_to_reader(table_source):
    with table_source.to_reader() as reader:
        assert reader.schema == pa.schema([("a", pa.int64()), ("b", pa.int64())])
        result = reader.read_all()
    expected = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    assert result.equals(expected)


def test_table_source():
    with pytest.raises(TypeError):
        TableSourceNodeOptions(pa.record_batch([pa.array([1, 2, 3])], ["a"]))

    table_source = TableSourceNodeOptions(None)
    decl = Declaration("table_source", table_source)
    with pytest.raises(
        ValueError, match="TableSourceNode requires table which is not null"
    ):
        _ = decl.to_table()


def test_filter(table_source):
    # referencing unknown field
    decl = Declaration.from_sequence([
        table_source,
        Declaration("filter", options=FilterNodeOptions(field("c") > 1))
    ])
    with pytest.raises(ValueError, match=r"No match for FieldRef.Name\(c\)"):
        _ = decl.to_table()

    # requires a pyarrow Expression
    with pytest.raises(TypeError):
        FilterNodeOptions(pa.array([True, False, True]))
    with pytest.raises(TypeError):
        FilterNodeOptions(None)


def test_project(table_source):
    # default name from expression
    decl = Declaration.from_sequence([
        table_source,
        Declaration("project", ProjectNodeOptions([pc.multiply(field("a"), 2)]))
    ])
    result = decl.to_table()
    assert result.schema.names == ["multiply(a, 2)"]
    assert result[0].to_pylist() == [2, 4, 6]

    # provide name
    decl = Declaration.from_sequence([
        table_source,
        Declaration("project", ProjectNodeOptions([pc.multiply(field("a"), 2)], ["a2"]))
    ])
    result = decl.to_table()
    assert result.schema.names == ["a2"]
    assert result["a2"].to_pylist() == [2, 4, 6]

    # input validation
    with pytest.raises(ValueError):
        ProjectNodeOptions([pc.multiply(field("a"), 2)], ["a2", "b2"])

    # no scalar expression
    decl = Declaration.from_sequence([
        table_source,
        Declaration("project", ProjectNodeOptions([pc.sum(field("a"))]))
    ])
    with pytest.raises(ValueError, match="cannot Execute non-scalar expression"):
        _ = decl.to_table()


def test_aggregate_scalar(table_source):
    decl = Declaration.from_sequence([
        table_source,
        Declaration("aggregate", AggregateNodeOptions([("a", "sum", None, "a_sum")]))
    ])
    result = decl.to_table()
    assert result.schema.names == ["a_sum"]
    assert result["a_sum"].to_pylist() == [6]

    # with options class
    table = pa.table({'a': [1, 2, None]})
    aggr_opts = AggregateNodeOptions(
        [("a", "sum", pc.ScalarAggregateOptions(skip_nulls=False), "a_sum")]
    )
    decl = Declaration.from_sequence([
        Declaration("table_source", TableSourceNodeOptions(table)),
        Declaration("aggregate", aggr_opts),
    ])
    result = decl.to_table()
    assert result.schema.names == ["a_sum"]
    assert result["a_sum"].to_pylist() == [None]

    # test various ways of specifying the target column
    for target in ["a", field("a"), 0, field(0), ["a"], [field("a")], [0]]:
        aggr_opts = AggregateNodeOptions([(target, "sum", None, "a_sum")])
        decl = Declaration.from_sequence(
            [table_source, Declaration("aggregate", aggr_opts)]
        )
        result = decl.to_table()
        assert result.schema.names == ["a_sum"]
        assert result["a_sum"].to_pylist() == [6]

    # proper error when specifying the wrong number of target columns
    aggr_opts = AggregateNodeOptions([(["a", "b"], "sum", None, "a_sum")])
    decl = Declaration.from_sequence(
        [table_source, Declaration("aggregate", aggr_opts)]
    )
    with pytest.raises(
        ValueError, match="Function 'sum' accepts 1 arguments but 2 passed"
    ):
        _ = decl.to_table()

    # proper error when using hash aggregation without keys
    aggr_opts = AggregateNodeOptions([("a", "hash_sum", None, "a_sum")])
    decl = Declaration.from_sequence(
        [table_source, Declaration("aggregate", aggr_opts)]
    )
    with pytest.raises(ValueError, match="is a hash aggregate function"):
        _ = decl.to_table()


def test_aggregate_hash():
    table = pa.table({'a': [1, 2, None], 'b': ["foo", "bar", "foo"]})
    table_opts = TableSourceNodeOptions(table)
    table_source = Declaration("table_source", options=table_opts)

    # default options
    aggr_opts = AggregateNodeOptions(
        [("a", "hash_count", None, "count(a)")], keys=["b"])
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    result = decl.to_table()
    expected = pa.table({"b": ["foo", "bar"], "count(a)": [1, 1]})
    assert result.equals(expected)

    # specify function options
    aggr_opts = AggregateNodeOptions(
        [("a", "hash_count", pc.CountOptions("all"), "count(a)")], keys=["b"]
    )
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    result = decl.to_table()
    expected_all = pa.table({"b": ["foo", "bar"], "count(a)": [2, 1]})
    assert result.equals(expected_all)

    # specify keys as field references
    aggr_opts = AggregateNodeOptions(
        [("a", "hash_count", None, "count(a)")], keys=[field("b")]
    )
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    result = decl.to_table()
    assert result.equals(expected)

    # wrong type of (aggregation) function
    # TODO test with kernel that matches number of arguments (arity) -> avoid segfault
    aggr_opts = AggregateNodeOptions([("a", "sum", None, "a_sum")], keys=["b"])
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    with pytest.raises(ValueError):
        _ = decl.to_table()


def test_order_by():
    table = pa.table({'a': [1, 2, 3, 4], 'b': [1, 3, None, 2]})
    table_source = Declaration("table_source", TableSourceNodeOptions(table))

    ord_opts = OrderByNodeOptions([("b", "ascending")])
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    result = decl.to_table()
    expected = pa.table({"a": [1, 4, 2, 3], "b": [1, 2, 3, None]})
    assert result.equals(expected)

    ord_opts = OrderByNodeOptions([(field("b"), "descending")])
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    result = decl.to_table()
    expected = pa.table({"a": [2, 4, 1, 3], "b": [3, 2, 1, None]})
    assert result.equals(expected)

    ord_opts = OrderByNodeOptions([(1, "descending")], null_placement="at_start")
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    result = decl.to_table()
    expected = pa.table({"a": [3, 2, 4, 1], "b": [None, 3, 2, 1]})
    assert result.equals(expected)

    # empty ordering
    ord_opts = OrderByNodeOptions([])
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    with pytest.raises(
        ValueError, match="`ordering` must be an explicit non-empty ordering"
    ):
        _ = decl.to_table()

    with pytest.raises(ValueError, match="\"decreasing\" is not a valid sort order"):
        _ = OrderByNodeOptions([("b", "decreasing")])

    with pytest.raises(ValueError, match="\"start\" is not a valid null placement"):
        _ = OrderByNodeOptions([("b", "ascending")], null_placement="start")


def test_hash_join():
    left = pa.table({'key': [1, 2, 3], 'a': [4, 5, 6]})
    left_source = Declaration("table_source", options=TableSourceNodeOptions(left))
    right = pa.table({'key': [2, 3, 4], 'b': [4, 5, 6]})
    right_source = Declaration("table_source", options=TableSourceNodeOptions(right))

    # inner join
    join_opts = HashJoinNodeOptions("inner", left_keys="key", right_keys="key")
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[2, 3], [5, 6], [2, 3], [4, 5]],
        names=["key", "a", "key", "b"])
    assert result.equals(expected)

    for keys in [field("key"), ["key"], [field("key")]]:
        join_opts = HashJoinNodeOptions("inner", left_keys=keys, right_keys=keys)
        joined = Declaration(
            "hashjoin", options=join_opts, inputs=[left_source, right_source])
        result = joined.to_table()
        assert result.equals(expected)

    # left join
    join_opts = HashJoinNodeOptions(
        "left outer", left_keys="key", right_keys="key")
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[1, 2, 3], [4, 5, 6], [None, 2, 3], [None, 4, 5]],
        names=["key", "a", "key", "b"]
    )
    assert result.sort_by("a").equals(expected)

    # suffixes
    join_opts = HashJoinNodeOptions(
        "left outer", left_keys="key", right_keys="key",
        output_suffix_for_left="_left", output_suffix_for_right="_right")
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[1, 2, 3], [4, 5, 6], [None, 2, 3], [None, 4, 5]],
        names=["key_left", "a", "key_right", "b"]
    )
    assert result.sort_by("a").equals(expected)

    # manually specifying output columns
    join_opts = HashJoinNodeOptions(
        "left outer", left_keys="key", right_keys="key",
        left_output=["key", "a"], right_output=[field("b")])
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[1, 2, 3], [4, 5, 6], [None, 4, 5]],
        names=["key", "a", "b"]
    )
    assert result.sort_by("a").equals(expected)


@pytest.mark.dataset
def test_scan(tempdir):
    table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    ds.write_dataset(table, tempdir / "dataset", format="parquet")
    dataset = ds.dataset(tempdir / "dataset", format="parquet")
    decl = Declaration("scan", ScanNodeOptions(dataset))
    result = decl.to_table()
    assert result.schema.names == [
        "a", "b", "__fragment_index", "__batch_index",
        "__last_in_fragment", "__filename"
    ]
    assert result.select(["a", "b"]).equals(table)

    # using a filter only does pushdown (depending on file format), not actual filter

    scan_opts = ScanNodeOptions(dataset, filter=field('a') > 1)
    decl = Declaration("scan", scan_opts)
    # fragment not filtered based on min/max statistics
    assert decl.to_table().num_rows == 3

    scan_opts = ScanNodeOptions(dataset, filter=field('a') > 4)
    decl = Declaration("scan", scan_opts)
    # full fragment filtered based on min/max statistics
    assert decl.to_table().num_rows == 0

    # projection scan option

    scan_opts = ScanNodeOptions(dataset, columns={"a2": pc.multiply(field("a"), 2)})
    decl = Declaration("scan", scan_opts)
    result = decl.to_table()
    # "a" is included in the result (needed later on for the actual projection)
    assert result["a"].to_pylist() == [1, 2, 3]
    # "b" is still included, but without data as it will be removed by the projection
    assert pc.all(result["b"].is_null()).as_py()
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_adhoc_memory_leak.py
ADDED
@@ -0,0 +1,43 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

import numpy as np
import pyarrow as pa

import pyarrow.tests.util as test_util

try:
    import pandas as pd
except ImportError:
    pass


@pytest.mark.memory_leak
@pytest.mark.pandas
def test_deserialize_pandas_arrow_7956():
    df = pd.DataFrame({'a': np.arange(10000),
                       'b': [test_util.rands(5) for _ in range(10000)]})

    def action():
        df_bytes = pa.ipc.serialize_pandas(df).to_pybytes()
        buf = pa.py_buffer(df_bytes)
        pa.ipc.deserialize_pandas(buf)

    # Abort at 128MB threshold
    test_util.memory_leak_check(action, threshold=1 << 27, iterations=100)
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_array.py
ADDED
The diff for this file is too large to render.
See raw diff
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_builder.py
ADDED
@@ -0,0 +1,67 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import weakref

import numpy as np

import pyarrow as pa
from pyarrow.lib import StringBuilder


def test_weakref():
    sbuilder = StringBuilder()
    wr = weakref.ref(sbuilder)
    assert wr() is not None
    del sbuilder
    assert wr() is None


def test_string_builder_append():
    sbuilder = StringBuilder()
    sbuilder.append(b"a byte string")
    sbuilder.append("a string")
    sbuilder.append(np.nan)
    sbuilder.append(None)
    assert len(sbuilder) == 4
    assert sbuilder.null_count == 2
    arr = sbuilder.finish()
    assert len(sbuilder) == 0
    assert isinstance(arr, pa.Array)
    assert arr.null_count == 2
    assert arr.type == 'str'
    expected = ["a byte string", "a string", None, None]
    assert arr.to_pylist() == expected


def test_string_builder_append_values():
    sbuilder = StringBuilder()
    sbuilder.append_values([np.nan, None, "text", None, "other text"])
    assert sbuilder.null_count == 3
    arr = sbuilder.finish()
    assert arr.null_count == 3
    expected = [None, None, "text", None, "other text"]
    assert arr.to_pylist() == expected


def test_string_builder_append_after_finish():
    sbuilder = StringBuilder()
    sbuilder.append_values([np.nan, None, "text", None, "other text"])
    arr = sbuilder.finish()
    sbuilder.append("No effect")
    expected = [None, None, "text", None, "other text"]
    assert arr.to_pylist() == expected
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cffi.py
ADDED
@@ -0,0 +1,569 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
3 |
+
# or more contributor license agreements. See the NOTICE file
|
4 |
+
# distributed with this work for additional information
|
5 |
+
# regarding copyright ownership. The ASF licenses this file
|
6 |
+
# to you under the Apache License, Version 2.0 (the
|
7 |
+
# "License"); you may not use this file except in compliance
|
8 |
+
# with the License. You may obtain a copy of the License at
|
9 |
+
#
|
10 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
11 |
+
#
|
12 |
+
# Unless required by applicable law or agreed to in writing,
|
13 |
+
# software distributed under the License is distributed on an
|
14 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
15 |
+
# KIND, either express or implied. See the License for the
|
16 |
+
# specific language governing permissions and limitations
|
17 |
+
# under the License.
|
18 |
+
|
19 |
+
import contextlib
|
20 |
+
import ctypes
|
21 |
+
import gc
|
22 |
+
|
23 |
+
import pyarrow as pa
|
24 |
+
try:
|
25 |
+
from pyarrow.cffi import ffi
|
26 |
+
except ImportError:
|
27 |
+
ffi = None
|
28 |
+
|
29 |
+
import pytest
|
30 |
+
|
31 |
+
try:
|
32 |
+
import pandas as pd
|
33 |
+
import pandas.testing as tm
|
34 |
+
except ImportError:
|
35 |
+
pd = tm = None
|
36 |
+
|
37 |
+
|
38 |
+
needs_cffi = pytest.mark.skipif(ffi is None,
|
39 |
+
reason="test needs cffi package installed")
|
40 |
+
|
41 |
+
assert_schema_released = pytest.raises(
|
42 |
+
ValueError, match="Cannot import released ArrowSchema")
|
43 |
+
|
44 |
+
assert_array_released = pytest.raises(
|
45 |
+
ValueError, match="Cannot import released ArrowArray")
|
46 |
+
|
47 |
+
assert_stream_released = pytest.raises(
|
48 |
+
ValueError, match="Cannot import released ArrowArrayStream")
|
49 |
+
|
50 |
+
|
51 |
+
def PyCapsule_IsValid(capsule, name):
|
52 |
+
return ctypes.pythonapi.PyCapsule_IsValid(ctypes.py_object(capsule), name) == 1
|
53 |
+
|
54 |
+
|
55 |
+
@contextlib.contextmanager
|
56 |
+
def registered_extension_type(ext_type):
|
57 |
+
pa.register_extension_type(ext_type)
|
58 |
+
try:
|
59 |
+
yield
|
60 |
+
finally:
|
61 |
+
pa.unregister_extension_type(ext_type.extension_name)
|
62 |
+
|
63 |
+
|
64 |
+
class ParamExtType(pa.ExtensionType):
|
65 |
+
|
66 |
+
def __init__(self, width):
|
67 |
+
self._width = width
|
68 |
+
super().__init__(pa.binary(width),
|
69 |
+
"pyarrow.tests.test_cffi.ParamExtType")
|
70 |
+
|
71 |
+
@property
|
72 |
+
def width(self):
|
73 |
+
return self._width
|
74 |
+
|
75 |
+
def __arrow_ext_serialize__(self):
|
76 |
+
return str(self.width).encode()
|
77 |
+
|
78 |
+
@classmethod
|
79 |
+
def __arrow_ext_deserialize__(cls, storage_type, serialized):
|
80 |
+
width = int(serialized.decode())
|
81 |
+
return cls(width)
|
82 |
+
|
83 |
+
|
84 |
+
def make_schema():
|
85 |
+
return pa.schema([('ints', pa.list_(pa.int32()))],
|
86 |
+
metadata={b'key1': b'value1'})
|
87 |
+
|
88 |
+
|
89 |
+
def make_extension_schema():
|
90 |
+
return pa.schema([('ext', ParamExtType(3))],
|
91 |
+
metadata={b'key1': b'value1'})
|
92 |
+
|
93 |
+
|
94 |
+
def make_extension_storage_schema():
|
95 |
+
# Should be kept in sync with make_extension_schema
|
96 |
+
return pa.schema([('ext', ParamExtType(3).storage_type)],
|
97 |
+
metadata={b'key1': b'value1'})
|
98 |
+
|
99 |
+
|
100 |
+
def make_batch():
|
101 |
+
return pa.record_batch([[[1], [2, 42]]], make_schema())
|
102 |
+
|
103 |
+
|
104 |
+
def make_extension_batch():
|
105 |
+
schema = make_extension_schema()
|
106 |
+
ext_col = schema[0].type.wrap_array(pa.array([b"foo", b"bar"],
|
107 |
+
type=pa.binary(3)))
|
108 |
+
return pa.record_batch([ext_col], schema)
|
109 |
+
|
110 |
+
|
111 |
+
def make_batches():
|
112 |
+
schema = make_schema()
|
113 |
+
return [
|
114 |
+
pa.record_batch([[[1], [2, 42]]], schema),
|
115 |
+
pa.record_batch([[None, [], [5, 6]]], schema),
|
116 |
+
]
|
117 |
+
|
118 |
+
|
119 |
+
def make_serialized(schema, batches):
|
120 |
+
with pa.BufferOutputStream() as sink:
|
121 |
+
with pa.ipc.new_stream(sink, schema) as out:
|
122 |
+
for batch in batches:
|
123 |
+
out.write(batch)
|
124 |
+
return sink.getvalue()
|
125 |
+
|
126 |
+
|
127 |
+
@needs_cffi
def test_export_import_type():
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    typ = pa.list_(pa.int32())
    typ._export_to_c(ptr_schema)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    del typ
    assert pa.total_allocated_bytes() > old_allocated
    typ_new = pa.DataType._import_from_c(ptr_schema)
    assert typ_new == pa.list_(pa.int32())
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        pa.DataType._import_from_c(ptr_schema)

    # Invalid format string
    pa.int32()._export_to_c(ptr_schema)
    bad_format = ffi.new("char[]", b"zzz")
    c_schema.format = bad_format
    with pytest.raises(ValueError,
                       match="Invalid or unsupported format string"):
        pa.DataType._import_from_c(ptr_schema)
    # Now released
    with assert_schema_released:
        pa.DataType._import_from_c(ptr_schema)


@needs_cffi
def test_export_import_field():
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    field = pa.field("test", pa.list_(pa.int32()), nullable=True)
    field._export_to_c(ptr_schema)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    del field
    assert pa.total_allocated_bytes() > old_allocated

    field_new = pa.Field._import_from_c(ptr_schema)
    assert field_new == pa.field("test", pa.list_(pa.int32()), nullable=True)
    assert pa.total_allocated_bytes() == old_allocated

    # Now released
    with assert_schema_released:
        pa.Field._import_from_c(ptr_schema)


@needs_cffi
def test_export_import_array():
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))
    c_array = ffi.new("struct ArrowArray*")
    ptr_array = int(ffi.cast("uintptr_t", c_array))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    # Type is known up front
    typ = pa.list_(pa.int32())
    arr = pa.array([[1], [2, 42]], type=typ)
    py_value = arr.to_pylist()
    arr._export_to_c(ptr_array)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    del arr
    arr_new = pa.Array._import_from_c(ptr_array, typ)
    assert arr_new.to_pylist() == py_value
    assert arr_new.type == pa.list_(pa.int32())
    assert pa.total_allocated_bytes() > old_allocated
    del arr_new, typ
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_array_released:
        pa.Array._import_from_c(ptr_array, pa.list_(pa.int32()))

    # Type is exported and imported at the same time
    arr = pa.array([[1], [2, 42]], type=pa.list_(pa.int32()))
    py_value = arr.to_pylist()
    arr._export_to_c(ptr_array, ptr_schema)
    # Delete and recreate C++ objects from exported pointers
    del arr
    arr_new = pa.Array._import_from_c(ptr_array, ptr_schema)
    assert arr_new.to_pylist() == py_value
    assert arr_new.type == pa.list_(pa.int32())
    assert pa.total_allocated_bytes() > old_allocated
    del arr_new
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        pa.Array._import_from_c(ptr_array, ptr_schema)

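# The check_* helpers below factor out the same round trip for schemas and
# record batches.  The core pattern they exercise, as a sketch:
#
#     make_schema()._export_to_c(ptr_schema)
#     schema_new = pa.Schema._import_from_c(ptr_schema)
#     assert schema_new == make_schema()
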
def check_export_import_schema(schema_factory, expected_schema_factory=None):
    if expected_schema_factory is None:
        expected_schema_factory = schema_factory

    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    schema_factory()._export_to_c(ptr_schema)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    schema_new = pa.Schema._import_from_c(ptr_schema)
    assert schema_new == expected_schema_factory()
    assert pa.total_allocated_bytes() == old_allocated
    del schema_new
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        pa.Schema._import_from_c(ptr_schema)

    # Not a struct type
    pa.int32()._export_to_c(ptr_schema)
    with pytest.raises(ValueError,
                       match="ArrowSchema describes non-struct type"):
        pa.Schema._import_from_c(ptr_schema)
    # Now released
    with assert_schema_released:
        pa.Schema._import_from_c(ptr_schema)


@needs_cffi
def test_export_import_schema():
    check_export_import_schema(make_schema)


@needs_cffi
def test_export_import_schema_with_extension():
    # Extension type is unregistered => the storage type is imported
    check_export_import_schema(make_extension_schema,
                               make_extension_storage_schema)

    # Extension type is registered => the extension type is imported
    with registered_extension_type(ParamExtType(1)):
        check_export_import_schema(make_extension_schema)


@needs_cffi
def test_export_import_schema_float_pointer():
    # Previous versions of the R Arrow library used to pass pointer
    # values as a double.
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    match = "Passing a pointer value as a float is unsafe"
    with pytest.warns(UserWarning, match=match):
        make_schema()._export_to_c(float(ptr_schema))
    with pytest.warns(UserWarning, match=match):
        schema_new = pa.Schema._import_from_c(float(ptr_schema))
    assert schema_new == make_schema()

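# A record batch crosses the C Data Interface as a struct-typed ArrowArray
# plus an ArrowSchema, which is why importing against a non-struct schema is
# rejected below.  A minimal sketch, reusing the helpers above:
#
#     batch = make_batch()
#     batch._export_to_c(ptr_array, ptr_schema)
#     batch_new = pa.RecordBatch._import_from_c(ptr_array, ptr_schema)
#     assert batch_new.to_pydict() == batch.to_pydict()
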
def check_export_import_batch(batch_factory):
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))
    c_array = ffi.new("struct ArrowArray*")
    ptr_array = int(ffi.cast("uintptr_t", c_array))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    # Schema is known up front
    batch = batch_factory()
    schema = batch.schema
    py_value = batch.to_pydict()
    batch._export_to_c(ptr_array)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    del batch
    batch_new = pa.RecordBatch._import_from_c(ptr_array, schema)
    assert batch_new.to_pydict() == py_value
    assert batch_new.schema == schema
    assert pa.total_allocated_bytes() > old_allocated
    del batch_new, schema
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_array_released:
        pa.RecordBatch._import_from_c(ptr_array, make_schema())

    # Type is exported and imported at the same time
    batch = batch_factory()
    py_value = batch.to_pydict()
    batch._export_to_c(ptr_array, ptr_schema)
    # Delete and recreate C++ objects from exported pointers
    del batch
    batch_new = pa.RecordBatch._import_from_c(ptr_array, ptr_schema)
    assert batch_new.to_pydict() == py_value
    assert batch_new.schema == batch_factory().schema
    assert pa.total_allocated_bytes() > old_allocated
    del batch_new
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        pa.RecordBatch._import_from_c(ptr_array, ptr_schema)

    # Not a struct type
    pa.int32()._export_to_c(ptr_schema)
    batch_factory()._export_to_c(ptr_array)
    with pytest.raises(ValueError,
                       match="ArrowSchema describes non-struct type"):
        pa.RecordBatch._import_from_c(ptr_array, ptr_schema)
    # Now released
    with assert_schema_released:
        pa.RecordBatch._import_from_c(ptr_array, ptr_schema)


@needs_cffi
def test_export_import_batch():
    check_export_import_batch(make_batch)


@needs_cffi
def test_export_import_batch_with_extension():
    with registered_extension_type(ParamExtType(1)):
        check_export_import_batch(make_extension_batch)

def _export_import_batch_reader(ptr_stream, reader_factory):
    # Prepare input
    batches = make_batches()
    schema = batches[0].schema

    reader = reader_factory(schema, batches)
    reader._export_to_c(ptr_stream)
    # Delete and recreate C++ object from exported pointer
    del reader, batches

    reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
    assert reader_new.schema == schema
    got_batches = list(reader_new)
    del reader_new
    assert got_batches == make_batches()

    # Test read_pandas()
    if pd is not None:
        batches = make_batches()
        schema = batches[0].schema
        expected_df = pa.Table.from_batches(batches).to_pandas()

        reader = reader_factory(schema, batches)
        reader._export_to_c(ptr_stream)
        del reader, batches

        reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
        got_df = reader_new.read_pandas()
        del reader_new
        tm.assert_frame_equal(expected_df, got_df)


def make_ipc_stream_reader(schema, batches):
    return pa.ipc.open_stream(make_serialized(schema, batches))


def make_py_record_batch_reader(schema, batches):
    return pa.RecordBatchReader.from_batches(schema, batches)


@needs_cffi
@pytest.mark.parametrize('reader_factory',
                         [make_ipc_stream_reader,
                          make_py_record_batch_reader])
def test_export_import_batch_reader(reader_factory):
    c_stream = ffi.new("struct ArrowArrayStream*")
    ptr_stream = int(ffi.cast("uintptr_t", c_stream))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    _export_import_batch_reader(ptr_stream, reader_factory)

    assert pa.total_allocated_bytes() == old_allocated

    # Now released
    with assert_stream_released:
        pa.RecordBatchReader._import_from_c(ptr_stream)

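# Errors propagate through the ArrowArrayStream callbacks: if the wrapped
# source fails mid-stream, the imported reader raises the original error on
# the next read.  The test below provokes this with a truncated IPC buffer:
#
#     reader = pa.ipc.open_stream(buf[:-16])   # incomplete stream
#     reader._export_to_c(ptr_stream)
#     pa.RecordBatchReader._import_from_c(ptr_stream).read_all()  # OSError
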
@needs_cffi
def test_imported_batch_reader_error():
    c_stream = ffi.new("struct ArrowArrayStream*")
    ptr_stream = int(ffi.cast("uintptr_t", c_stream))

    schema = pa.schema([('foo', pa.int32())])
    batches = [pa.record_batch([[1, 2, 3]], schema=schema),
               pa.record_batch([[4, 5, 6]], schema=schema)]
    buf = make_serialized(schema, batches)

    # Open a corrupt/incomplete stream and export it
    reader = pa.ipc.open_stream(buf[:-16])
    reader._export_to_c(ptr_stream)
    del reader

    reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
    batch = reader_new.read_next_batch()
    assert batch == batches[0]
    with pytest.raises(OSError,
                       match="Expected to be able to read 16 bytes "
                             "for message body, got 8"):
        reader_new.read_next_batch()

    # Again, but call read_all()
    reader = pa.ipc.open_stream(buf[:-16])
    reader._export_to_c(ptr_stream)
    del reader

    reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
    with pytest.raises(OSError,
                       match="Expected to be able to read 16 bytes "
                             "for message body, got 8"):
        reader_new.read_all()

@pytest.mark.parametrize('obj', [pa.int32(), pa.field('foo', pa.int32()),
                                 pa.schema({'foo': pa.int32()})],
                         ids=['type', 'field', 'schema'])
def test_roundtrip_schema_capsule(obj):
    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    capsule = obj.__arrow_c_schema__()
    assert PyCapsule_IsValid(capsule, b"arrow_schema") == 1
    assert pa.total_allocated_bytes() > old_allocated
    obj_out = type(obj)._import_from_c_capsule(capsule)
    assert obj_out == obj

    assert pa.total_allocated_bytes() == old_allocated

    capsule = obj.__arrow_c_schema__()

    assert pa.total_allocated_bytes() > old_allocated
    del capsule
    assert pa.total_allocated_bytes() == old_allocated


@pytest.mark.parametrize('arr,schema_accessor,bad_type,good_type', [
    (pa.array(['a', 'b', 'c']), lambda x: x.type, pa.int32(), pa.string()),
    (
        pa.record_batch([pa.array(['a', 'b', 'c'])], names=['x']),
        lambda x: x.schema,
        pa.schema({'x': pa.int32()}),
        pa.schema({'x': pa.string()})
    ),
], ids=['array', 'record_batch'])
def test_roundtrip_array_capsule(arr, schema_accessor, bad_type, good_type):
    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    import_array = type(arr)._import_from_c_capsule

    schema_capsule, capsule = arr.__arrow_c_array__()
    assert PyCapsule_IsValid(schema_capsule, b"arrow_schema") == 1
    assert PyCapsule_IsValid(capsule, b"arrow_array") == 1
    arr_out = import_array(schema_capsule, capsule)
    assert arr_out.equals(arr)

    assert pa.total_allocated_bytes() > old_allocated
    del arr_out

    assert pa.total_allocated_bytes() == old_allocated

    capsule = arr.__arrow_c_array__()

    assert pa.total_allocated_bytes() > old_allocated
    del capsule
    assert pa.total_allocated_bytes() == old_allocated

    with pytest.raises(ValueError,
                       match=r"Could not cast.* string to requested .* int32"):
        arr.__arrow_c_array__(bad_type.__arrow_c_schema__())

    schema_capsule, array_capsule = arr.__arrow_c_array__(
        good_type.__arrow_c_schema__())
    arr_out = import_array(schema_capsule, array_capsule)
    assert schema_accessor(arr_out) == good_type

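# The capsule-based tests below use the PyCapsule protocol
# (__arrow_c_schema__ / __arrow_c_array__ / __arrow_c_stream__) instead of
# raw pointers.  The capsules own the exported structs, and their
# destructors release any data that was never imported, e.g.:
#
#     capsule = pa.int32().__arrow_c_schema__()
#     typ = pa.DataType._import_from_c_capsule(capsule)
#     assert typ == pa.int32()
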
# TODO: implement requested_schema for stream
@pytest.mark.parametrize('constructor', [
    pa.RecordBatchReader.from_batches,
    # Use a lambda because we need to re-order the parameters
    lambda schema, batches: pa.Table.from_batches(batches, schema),
], ids=['recordbatchreader', 'table'])
def test_roundtrip_reader_capsule(constructor):
    batches = make_batches()
    schema = batches[0].schema

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    obj = constructor(schema, batches)

    capsule = obj.__arrow_c_stream__()
    assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1
    imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
    assert imported_reader.schema == schema
    imported_batches = list(imported_reader)
    assert len(imported_batches) == len(batches)
    for batch, expected in zip(imported_batches, batches):
        assert batch.equals(expected)

    del obj, imported_reader, batch, expected, imported_batches

    assert pa.total_allocated_bytes() == old_allocated

    obj = constructor(schema, batches)

    # TODO: turn this to ValueError once we implement validation.
    bad_schema = pa.schema({'ints': pa.int32()})
    with pytest.raises(NotImplementedError):
        obj.__arrow_c_stream__(bad_schema.__arrow_c_schema__())

    # Can work with matching schema
    matching_schema = pa.schema({'ints': pa.list_(pa.int32())})
    capsule = obj.__arrow_c_stream__(matching_schema.__arrow_c_schema__())
    imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
    assert imported_reader.schema == matching_schema
    for batch, expected in zip(imported_reader, batches):
        assert batch.equals(expected)


def test_roundtrip_batch_reader_capsule():
    batch = make_batch()

    capsule = batch.__arrow_c_stream__()
    assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1
    imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
    assert imported_reader.schema == batch.schema
    assert imported_reader.read_next_batch().equals(batch)
    with pytest.raises(StopIteration):
        imported_reader.read_next_batch()
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_compute.py
ADDED
The diff for this file is too large to render. See raw diff.
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_convert_builtin.py
ADDED
@@ -0,0 +1,2507 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import collections
import datetime
import decimal
import itertools
import math
import re
import sys

import hypothesis as h
import numpy as np
import pytest

from pyarrow.pandas_compat import _pandas_api  # noqa
import pyarrow as pa
from pyarrow.tests import util
import pyarrow.tests.strategies as past


int_type_pairs = [
    (np.int8, pa.int8()),
    (np.int16, pa.int16()),
    (np.int32, pa.int32()),
    (np.int64, pa.int64()),
    (np.uint8, pa.uint8()),
    (np.uint16, pa.uint16()),
    (np.uint32, pa.uint32()),
    (np.uint64, pa.uint64())]


np_int_types, pa_int_types = zip(*int_type_pairs)

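# int_type_pairs drives the integer tests below: each (numpy dtype, Arrow
# type) pair is expected to convert losslessly, including the extreme
# values of the dtype, e.g.:
#
#     arr = pa.array([1, None, np.iinfo(np.int8).max], type=pa.int8())
#     assert arr.to_pylist() == [1, None, 127]
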
class StrangeIterable:
    def __init__(self, lst):
        self.lst = lst

    def __iter__(self):
        return self.lst.__iter__()


class MyInt:
    def __init__(self, value):
        self.value = value

    def __int__(self):
        return self.value


class MyBrokenInt:
    def __int__(self):
        1/0  # MARKER


def check_struct_type(ty, expected):
    """
    Check that a struct type is as expected, without taking field order
    into account.
    """
    assert pa.types.is_struct(ty)
    assert set(ty) == set(expected)


def test_iterable_types():
    arr1 = pa.array(StrangeIterable([0, 1, 2, 3]))
    arr2 = pa.array((0, 1, 2, 3))

    assert arr1.equals(arr2)


def test_empty_iterable():
    arr = pa.array(StrangeIterable([]))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []


def test_limited_iterator_types():
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=3)
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)


def test_limited_iterator_size_overflow():
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=2)
    arr2 = pa.array((0, 1))
    assert arr1.equals(arr2)


def test_limited_iterator_size_underflow():
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=10)
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)


def test_iterator_without_size():
    expected = pa.array((0, 1, 2))
    arr1 = pa.array(iter(range(3)))
    assert arr1.equals(expected)
    # Same with explicit type
    arr1 = pa.array(iter(range(3)), type=pa.int64())
    assert arr1.equals(expected)


def test_infinite_iterator():
    expected = pa.array((0, 1, 2))
    arr1 = pa.array(itertools.count(0), size=3)
    assert arr1.equals(expected)
    # Same with explicit type
    arr1 = pa.array(itertools.count(0), type=pa.int64(), size=3)
    assert arr1.equals(expected)


def test_failing_iterator():
    with pytest.raises(ZeroDivisionError):
        pa.array((1 // 0 for x in range(10)))
    # ARROW-17253
    with pytest.raises(ZeroDivisionError):
        pa.array((1 // 0 for x in range(10)), size=10)


class ObjectWithOnlyGetitem:
    def __getitem__(self, key):
        return 3


def test_object_with_getitem():
    # https://github.com/apache/arrow/issues/34944
    # considered as a sequence because of __getitem__, but has no length
    with pytest.raises(TypeError, match="has no len()"):
        pa.array(ObjectWithOnlyGetitem())

def _as_list(xs):
    return xs


def _as_tuple(xs):
    return tuple(xs)


def _as_deque(xs):
    # deque is a sequence, though neither a tuple nor a list
    return collections.deque(xs)


def _as_dict_values(xs):
    # a dict values object is not a sequence, just a regular iterable
    dct = {k: v for k, v in enumerate(xs)}
    return dct.values()


def _as_numpy_array(xs):
    arr = np.empty(len(xs), dtype=object)
    arr[:] = xs
    return arr


def _as_set(xs):
    return set(xs)


SEQUENCE_TYPES = [_as_list, _as_tuple, _as_numpy_array]
ITERABLE_TYPES = [_as_set, _as_dict_values] + SEQUENCE_TYPES
COLLECTIONS_TYPES = [_as_deque] + ITERABLE_TYPES

parametrize_with_iterable_types = pytest.mark.parametrize(
    "seq", ITERABLE_TYPES
)

parametrize_with_sequence_types = pytest.mark.parametrize(
    "seq", SEQUENCE_TYPES
)

parametrize_with_collections_types = pytest.mark.parametrize(
    "seq", COLLECTIONS_TYPES
)

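# Every test parametrized with one of the marks above receives an _as_*
# converter as ``seq`` and wraps its input data through it, so a single
# test body covers lists, tuples, sets, dict views, deques and object
# ndarrays, e.g.:
#
#     pa.array(_as_deque([1, 2, 3])).equals(pa.array([1, 2, 3]))  # True
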
@parametrize_with_collections_types
def test_sequence_types(seq):
    arr1 = pa.array(seq([1, 2, 3]))
    arr2 = pa.array([1, 2, 3])

    assert arr1.equals(arr2)


@parametrize_with_iterable_types
def test_nested_sequence_types(seq):
    arr1 = pa.array([seq([1, 2, 3])])
    arr2 = pa.array([[1, 2, 3]])

    assert arr1.equals(arr2)


@parametrize_with_sequence_types
def test_sequence_boolean(seq):
    expected = [True, None, False, None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
def test_sequence_numpy_boolean(seq):
    expected = [np.bool_(True), None, np.bool_(False), None]
    arr = pa.array(seq(expected))
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == [True, None, False, None]


@parametrize_with_sequence_types
def test_sequence_mixed_numpy_python_bools(seq):
    values = np.array([True, False])
    arr = pa.array(seq([values[0], None, values[1], True, False]))
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == [True, None, False, True, False]


@parametrize_with_collections_types
def test_empty_list(seq):
    arr = pa.array(seq([]))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []


@parametrize_with_sequence_types
def test_nested_lists(seq):
    data = [[], [1, 2], None]
    arr = pa.array(seq(data))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data
    # With explicit type
    arr = pa.array(seq(data), type=pa.list_(pa.int32()))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int32())
    assert arr.to_pylist() == data


@parametrize_with_sequence_types
def test_nested_large_lists(seq):
    data = [[], [1, 2], None]
    arr = pa.array(seq(data), type=pa.large_list(pa.int16()))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.large_list(pa.int16())
    assert arr.to_pylist() == data


@parametrize_with_collections_types
def test_list_with_non_list(seq):
    # List types don't accept non-sequences
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.list_(pa.int64()))
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.large_list(pa.int64()))


@parametrize_with_sequence_types
def test_nested_arrays(seq):
    arr = pa.array(seq([np.array([], dtype=np.int64),
                        np.array([1, 2], dtype=np.int64), None]))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == [[], [1, 2], None]


@parametrize_with_sequence_types
def test_nested_fixed_size_list(seq):
    # sequence of lists
    data = [[1, 2], [3, None], None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 2)
    assert arr.to_pylist() == data

    # sequence of numpy arrays
    data = [np.array([1, 2], dtype='int64'), np.array([3, 4], dtype='int64'),
            None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 2)
    assert arr.to_pylist() == [[1, 2], [3, 4], None]

    # incorrect length of the lists or arrays
    data = [[1, 2, 4], [3, None], None]
    for data in [[[1, 2, 3]], [np.array([1, 2, 4], dtype='int64')]]:
        with pytest.raises(
                ValueError, match="Length of item not correct: expected 2"):
            pa.array(seq(data), type=pa.list_(pa.int64(), 2))

    # with list size of 0
    data = [[], [], None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 0))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 0)
    assert arr.to_pylist() == [[], [], None]


@parametrize_with_sequence_types
def test_sequence_all_none(seq):
    arr = pa.array(seq([None, None]))
    assert len(arr) == 2
    assert arr.null_count == 2
    assert arr.type == pa.null()
    assert arr.to_pylist() == [None, None]


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer(seq, np_scalar_pa_type):
    np_scalar, pa_type = np_scalar_pa_type
    expected = [1, None, 3, None,
                np.iinfo(np_scalar).min, np.iinfo(np_scalar).max]
    arr = pa.array(seq(expected), type=pa_type)
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_collections_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_np_nan(seq, np_scalar_pa_type):
    # ARROW-2806: numpy.nan is a double value and thus should produce
    # a double array.
    _, pa_type = np_scalar_pa_type
    with pytest.raises(ValueError):
        pa.array(seq([np.nan]), type=pa_type, from_pandas=False)

    arr = pa.array(seq([np.nan]), type=pa_type, from_pandas=True)
    expected = [None]
    assert len(arr) == 1
    assert arr.null_count == 1
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_nested_np_nan(seq, np_scalar_pa_type):
    # ARROW-2806: numpy.nan is a double value and thus should produce
    # a double array.
    _, pa_type = np_scalar_pa_type
    with pytest.raises(ValueError):
        pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=False)

    arr = pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=True)
    expected = [[None]]
    assert len(arr) == 1
    assert arr.null_count == 0
    assert arr.type == pa.list_(pa_type)
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
def test_sequence_integer_inferred(seq):
    expected = [1, None, 3, None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.int64()
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer(seq, np_scalar_pa_type):
    np_scalar, pa_type = np_scalar_pa_type
    expected = [np_scalar(1), None, np_scalar(3), None,
                np_scalar(np.iinfo(np_scalar).min),
                np_scalar(np.iinfo(np_scalar).max)]
    arr = pa.array(seq(expected), type=pa_type)
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer_inferred(seq, np_scalar_pa_type):
    np_scalar, pa_type = np_scalar_pa_type
    expected = [np_scalar(1), None, np_scalar(3), None]
    expected += [np_scalar(np.iinfo(np_scalar).min),
                 np_scalar(np.iinfo(np_scalar).max)]
    arr = pa.array(seq(expected))
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
def test_sequence_custom_integers(seq):
    expected = [0, 42, 2**33 + 1, -2**63]
    data = list(map(MyInt, expected))
    arr = pa.array(seq(data), type=pa.int64())
    assert arr.to_pylist() == expected

@parametrize_with_collections_types
def test_broken_integers(seq):
    data = [MyBrokenInt()]
    with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"):
        pa.array(seq(data), type=pa.int64())


def test_numpy_scalars_mixed_type():
    # ARROW-4324
    data = [np.int32(10), np.float32(0.5)]
    arr = pa.array(data)
    expected = pa.array([10, 0.5], type="float64")
    assert arr.equals(expected)

    # ARROW-9490
    data = [np.int8(10), np.float32(0.5)]
    arr = pa.array(data)
    expected = pa.array([10, 0.5], type="float32")
    assert arr.equals(expected)


@pytest.mark.xfail(reason="Type inference for uint64 not implemented",
                   raises=OverflowError)
def test_uint64_max_convert():
    data = [0, np.iinfo(np.uint64).max]

    arr = pa.array(data, type=pa.uint64())
    expected = pa.array(np.array(data, dtype='uint64'))
    assert arr.equals(expected)

    arr_inferred = pa.array(data)
    assert arr_inferred.equals(expected)


@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_signed_integer_overflow(bits):
    ty = getattr(pa, "int%d" % bits)()
    # XXX ideally would always raise OverflowError
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([2 ** (bits - 1)], ty)
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([-2 ** (bits - 1) - 1], ty)


@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_unsigned_integer_overflow(bits):
    ty = getattr(pa, "uint%d" % bits)()
    # XXX ideally would always raise OverflowError
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([2 ** bits], ty)
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([-1], ty)

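# The overflow bounds above follow from two's complement: a signed type of
# width ``bits`` holds [-2**(bits - 1), 2**(bits - 1) - 1] and an unsigned
# one holds [0, 2**bits - 1], so the first value past either end must raise:
#
#     pa.array([127], pa.int8())   # fits
#     pa.array([128], pa.int8())   # raises OverflowError/ArrowInvalid
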
@parametrize_with_collections_types
@pytest.mark.parametrize("typ", pa_int_types)
def test_integer_from_string_error(seq, typ):
    # ARROW-9451: pa.array(['1'], type=pa.uint32()) should not succeed
    with pytest.raises(pa.ArrowInvalid):
        pa.array(seq(['1']), type=typ)


def test_convert_with_mask():
    data = [1, 2, 3, 4, 5]
    mask = np.array([False, True, False, False, True])

    result = pa.array(data, mask=mask)
    expected = pa.array([1, None, 3, 4, None])

    assert result.equals(expected)

    # Mask wrong length
    with pytest.raises(ValueError):
        pa.array(data, mask=mask[1:])


def test_garbage_collection():
    import gc

    # Force the cyclic garbage collector to run
    gc.collect()

    bytes_before = pa.total_allocated_bytes()
    pa.array([1, None, 3, None])
    gc.collect()
    assert pa.total_allocated_bytes() == bytes_before


def test_sequence_double():
    data = [1.5, 1., None, 2.5, None, None]
    arr = pa.array(data)
    assert len(arr) == 6
    assert arr.null_count == 3
    assert arr.type == pa.float64()
    assert arr.to_pylist() == data


def test_double_auto_coerce_from_integer():
    # Done as part of ARROW-2814
    data = [1.5, 1., None, 2.5, None, None]
    arr = pa.array(data)

    data2 = [1.5, 1, None, 2.5, None, None]
    arr2 = pa.array(data2)

    assert arr.equals(arr2)

    data3 = [1, 1.5, None, 2.5, None, None]
    arr3 = pa.array(data3)

    data4 = [1., 1.5, None, 2.5, None, None]
    arr4 = pa.array(data4)

    assert arr3.equals(arr4)


def test_double_integer_coerce_representable_range():
    valid_values = [1.5, 1, 2, None, 1 << 53, -(1 << 53)]
    invalid_values = [1.5, 1, 2, None, (1 << 53) + 1]
    invalid_values2 = [1.5, 1, 2, None, -((1 << 53) + 1)]

    # it works
    pa.array(valid_values)

    # it fails
    with pytest.raises(ValueError):
        pa.array(invalid_values)

    with pytest.raises(ValueError):
        pa.array(invalid_values2)


def test_float32_integer_coerce_representable_range():
    f32 = np.float32
    valid_values = [f32(1.5), 1 << 24, -(1 << 24)]
    invalid_values = [f32(1.5), (1 << 24) + 1]
    invalid_values2 = [f32(1.5), -((1 << 24) + 1)]

    # it works
    pa.array(valid_values, type=pa.float32())

    # it fails
    with pytest.raises(ValueError):
        pa.array(invalid_values, type=pa.float32())

    with pytest.raises(ValueError):
        pa.array(invalid_values2, type=pa.float32())

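# The cutoffs above come from the IEEE-754 significand width: float64 has
# 53 bits of precision and float32 has 24, so integers of magnitude up to
# 2**53 (resp. 2**24) are exact while the next integer is not:
#
#     float(2**53) == float(2**53 + 1)   # True - precision is lost
#
# Hence conversion refuses to silently round (1 << 53) + 1 into a double.
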
def test_mixed_sequence_errors():
    with pytest.raises(ValueError, match="tried to convert to boolean"):
        pa.array([True, 'foo'], type=pa.bool_())

    with pytest.raises(ValueError, match="tried to convert to float32"):
        pa.array([1.5, 'foo'], type=pa.float32())

    with pytest.raises(ValueError, match="tried to convert to double"):
        pa.array([1.5, 'foo'])


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar,pa_type", [
    (np.float16, pa.float16()),
    (np.float32, pa.float32()),
    (np.float64, pa.float64())
])
@pytest.mark.parametrize("from_pandas", [True, False])
def test_sequence_numpy_double(seq, np_scalar, pa_type, from_pandas):
    data = [np_scalar(1.5), np_scalar(1), None, np_scalar(2.5), None, np.nan]
    arr = pa.array(seq(data), from_pandas=from_pandas)
    assert len(arr) == 6
    if from_pandas:
        assert arr.null_count == 3
    else:
        assert arr.null_count == 2
    if from_pandas:
        # The NaN is skipped in type inference, otherwise it forces a
        # float64 promotion
        assert arr.type == pa_type
    else:
        assert arr.type == pa.float64()

    assert arr.to_pylist()[:4] == data[:4]
    if from_pandas:
        assert arr.to_pylist()[5] is None
    else:
        assert np.isnan(arr.to_pylist()[5])


@pytest.mark.parametrize("from_pandas", [True, False])
@pytest.mark.parametrize("inner_seq", [np.array, list])
def test_ndarray_nested_numpy_double(from_pandas, inner_seq):
    # ARROW-2806
    data = np.array([
        inner_seq([1., 2.]),
        inner_seq([1., 2., 3.]),
        inner_seq([np.nan]),
        None
    ], dtype=object)
    arr = pa.array(data, from_pandas=from_pandas)
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.float64())
    if from_pandas:
        assert arr.to_pylist() == [[1.0, 2.0], [1.0, 2.0, 3.0], [None], None]
    else:
        np.testing.assert_equal(arr.to_pylist(),
                                [[1., 2.], [1., 2., 3.], [np.nan], None])


def test_nested_ndarray_in_object_array():
    # ARROW-4350
    arr = np.empty(2, dtype=object)
    arr[:] = [np.array([1, 2], dtype=np.int64),
              np.array([2, 3], dtype=np.int64)]

    arr2 = np.empty(2, dtype=object)
    arr2[0] = [3, 4]
    arr2[1] = [5, 6]

    expected_type = pa.list_(pa.list_(pa.int64()))
    assert pa.infer_type([arr]) == expected_type

    result = pa.array([arr, arr2])
    expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
                        type=expected_type)

    assert result.equals(expected)

    # test case for len-1 arrays to ensure they are interpreted as
    # sublists and not scalars
    arr = np.empty(2, dtype=object)
    arr[:] = [np.array([1]), np.array([2])]
    result = pa.array([arr, arr])
    assert result.to_pylist() == [[[1], [2]], [[1], [2]]]


@pytest.mark.xfail(reason=("Type inference for multidimensional ndarray "
                           "not yet implemented"),
                   raises=AssertionError)
def test_multidimensional_ndarray_as_nested_list():
    # TODO(wesm): see ARROW-5645
    arr = np.array([[1, 2], [2, 3]], dtype=np.int64)
    arr2 = np.array([[3, 4], [5, 6]], dtype=np.int64)

    expected_type = pa.list_(pa.list_(pa.int64()))
    assert pa.infer_type([arr]) == expected_type

    result = pa.array([arr, arr2])
    expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
                        type=expected_type)

    assert result.equals(expected)

@pytest.mark.parametrize(('data', 'value_type'), [
|
685 |
+
([True, False], pa.bool_()),
|
686 |
+
([None, None], pa.null()),
|
687 |
+
([1, 2, None], pa.int8()),
|
688 |
+
([1, 2., 3., None], pa.float32()),
|
689 |
+
([datetime.date.today(), None], pa.date32()),
|
690 |
+
([None, datetime.date.today()], pa.date64()),
|
691 |
+
([datetime.time(1, 1, 1), None], pa.time32('s')),
|
692 |
+
([None, datetime.time(2, 2, 2)], pa.time64('us')),
|
693 |
+
([datetime.datetime.now(), None], pa.timestamp('us')),
|
694 |
+
([datetime.timedelta(seconds=10)], pa.duration('s')),
|
695 |
+
([b"a", b"b"], pa.binary()),
|
696 |
+
([b"aaa", b"bbb", b"ccc"], pa.binary(3)),
|
697 |
+
([b"a", b"b", b"c"], pa.large_binary()),
|
698 |
+
(["a", "b", "c"], pa.string()),
|
699 |
+
(["a", "b", "c"], pa.large_string()),
|
700 |
+
(
|
701 |
+
[{"a": 1, "b": 2}, None, {"a": 5, "b": None}],
|
702 |
+
pa.struct([('a', pa.int8()), ('b', pa.int16())])
|
703 |
+
)
|
704 |
+
])
|
705 |
+
def test_list_array_from_object_ndarray(data, value_type):
|
706 |
+
ty = pa.list_(value_type)
|
707 |
+
ndarray = np.array(data, dtype=object)
|
708 |
+
arr = pa.array([ndarray], type=ty)
|
709 |
+
assert arr.type.equals(ty)
|
710 |
+
assert arr.to_pylist() == [data]
|
711 |
+
|
712 |
+
|
713 |
+
@pytest.mark.parametrize(('data', 'value_type'), [
|
714 |
+
([[1, 2], [3]], pa.list_(pa.int64())),
|
715 |
+
([[1, 2], [3, 4]], pa.list_(pa.int64(), 2)),
|
716 |
+
([[1], [2, 3]], pa.large_list(pa.int64()))
|
717 |
+
])
|
718 |
+
def test_nested_list_array_from_object_ndarray(data, value_type):
|
719 |
+
ndarray = np.empty(len(data), dtype=object)
|
720 |
+
ndarray[:] = [np.array(item, dtype=object) for item in data]
|
721 |
+
|
722 |
+
ty = pa.list_(value_type)
|
723 |
+
arr = pa.array([ndarray], type=ty)
|
724 |
+
assert arr.type.equals(ty)
|
725 |
+
assert arr.to_pylist() == [data]
|
726 |
+
|
727 |
+
|
728 |
+
def test_array_ignore_nan_from_pandas():
|
729 |
+
# See ARROW-4324, this reverts logic that was introduced in
|
730 |
+
# ARROW-2240
|
731 |
+
with pytest.raises(ValueError):
|
732 |
+
pa.array([np.nan, 'str'])
|
733 |
+
|
734 |
+
arr = pa.array([np.nan, 'str'], from_pandas=True)
|
735 |
+
expected = pa.array([None, 'str'])
|
736 |
+
assert arr.equals(expected)
|
737 |
+
|
738 |
+
|
739 |
+
def test_nested_ndarray_different_dtypes():
|
740 |
+
data = [
|
741 |
+
np.array([1, 2, 3], dtype='int64'),
|
742 |
+
None,
|
743 |
+
np.array([4, 5, 6], dtype='uint32')
|
744 |
+
]
|
745 |
+
|
746 |
+
arr = pa.array(data)
|
747 |
+
expected = pa.array([[1, 2, 3], None, [4, 5, 6]],
|
748 |
+
type=pa.list_(pa.int64()))
|
749 |
+
assert arr.equals(expected)
|
750 |
+
|
751 |
+
t2 = pa.list_(pa.uint32())
|
752 |
+
arr2 = pa.array(data, type=t2)
|
753 |
+
expected2 = expected.cast(t2)
|
754 |
+
assert arr2.equals(expected2)
|
755 |
+
|
756 |
+
|
757 |
+
def test_sequence_unicode():
|
758 |
+
data = ['foo', 'bar', None, 'mañana']
|
759 |
+
arr = pa.array(data)
|
760 |
+
assert len(arr) == 4
|
761 |
+
assert arr.null_count == 1
|
762 |
+
assert arr.type == pa.string()
|
763 |
+
assert arr.to_pylist() == data
|
764 |
+
|
765 |
+
|
766 |
+
def check_array_mixed_unicode_bytes(binary_type, string_type):
    values = ['qux', b'foo', bytearray(b'barz')]
    b_values = [b'qux', b'foo', b'barz']
    u_values = ['qux', 'foo', 'barz']

    arr = pa.array(values)
    expected = pa.array(b_values, type=pa.binary())
    assert arr.type == pa.binary()
    assert arr.equals(expected)

    arr = pa.array(values, type=binary_type)
    expected = pa.array(b_values, type=binary_type)
    assert arr.type == binary_type
    assert arr.equals(expected)

    arr = pa.array(values, type=string_type)
    expected = pa.array(u_values, type=string_type)
    assert arr.type == string_type
    assert arr.equals(expected)


def test_array_mixed_unicode_bytes():
    check_array_mixed_unicode_bytes(pa.binary(), pa.string())
    check_array_mixed_unicode_bytes(pa.large_binary(), pa.large_string())


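# large_binary()/large_string() use 64-bit offsets, so a single array can
# hold more than 2**32 bytes of data, unlike their 32-bit counterparts.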
@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
def test_large_binary_array(ty):
    # Construct a large binary array with more than 4GB of data
    s = b"0123456789abcdefghijklmnopqrstuvwxyz" * 10
    nrepeats = math.ceil((2**32 + 5) / len(s))
    data = [s] * nrepeats
    arr = pa.array(data, type=ty)
    assert isinstance(arr, pa.Array)
    assert arr.type == ty
    assert len(arr) == nrepeats


@pytest.mark.slow
@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
def test_large_binary_value(ty):
    # Construct a large binary array with a single value larger than 4GB
    s = b"0123456789abcdefghijklmnopqrstuvwxyz"
    nrepeats = math.ceil((2**32 + 5) / len(s))
    arr = pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty)
    assert isinstance(arr, pa.Array)
    assert arr.type == ty
    assert len(arr) == 4
    buf = arr[1].as_buffer()
    assert len(buf) == len(s) * nrepeats


@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.binary(), pa.string()])
def test_string_too_large(ty):
    # Construct a binary array with a single value larger than 4GB
    s = b"0123456789abcdefghijklmnopqrstuvwxyz"
    nrepeats = math.ceil((2**32 + 5) / len(s))
    with pytest.raises(pa.ArrowCapacityError):
        pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty)


def test_sequence_bytes():
    u1 = b'ma\xc3\xb1ana'

    data = [b'foo',
            memoryview(b'dada'),
            memoryview(b'd-a-t-a')[::2],  # non-contiguous is made contiguous
            u1.decode('utf-8'),  # unicode gets encoded
            bytearray(b'bar'),
            None]
    for ty in [None, pa.binary(), pa.large_binary()]:
        arr = pa.array(data, type=ty)
        assert len(arr) == 6
        assert arr.null_count == 1
        # with ty=None the type is inferred as binary()
        assert arr.type == (ty or pa.binary())
        assert arr.to_pylist() == [b'foo', b'dada', b'data', u1, b'bar', None]


@pytest.mark.parametrize("ty", [pa.string(), pa.large_string()])
|
848 |
+
def test_sequence_utf8_to_unicode(ty):
|
849 |
+
# ARROW-1225
|
850 |
+
data = [b'foo', None, b'bar']
|
851 |
+
arr = pa.array(data, type=ty)
|
852 |
+
assert arr.type == ty
|
853 |
+
assert arr[0].as_py() == 'foo'
|
854 |
+
|
855 |
+
# test a non-utf8 unicode string
|
856 |
+
val = ('mañana').encode('utf-16-le')
|
857 |
+
with pytest.raises(pa.ArrowInvalid):
|
858 |
+
pa.array([val], type=ty)
|
859 |
+
|
860 |
+
|
861 |
+
def test_sequence_fixed_size_bytes():
    data = [b'foof', None, bytearray(b'barb'), b'2346']
    arr = pa.array(data, type=pa.binary(4))
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == pa.binary(4)
    assert arr.to_pylist() == [b'foof', None, b'barb', b'2346']


def test_fixed_size_bytes_does_not_accept_varying_lengths():
    data = [b'foo', None, b'barb', b'2346']
    with pytest.raises(pa.ArrowInvalid):
        pa.array(data, type=pa.binary(4))


def test_fixed_size_binary_length_check():
    # ARROW-10193
    data = [b'\x19h\r\x9e\x00\x00\x00\x00\x01\x9b\x9fA']
    assert len(data[0]) == 12
    ty = pa.binary(12)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data


def test_sequence_date():
    data = [datetime.date(2000, 1, 1), None, datetime.date(1970, 1, 1),
            datetime.date(2040, 2, 26)]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.date32()
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.date(2000, 1, 1)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.date(1970, 1, 1)
    assert arr[3].as_py() == datetime.date(2040, 2, 26)


@pytest.mark.parametrize('input',
                         [(pa.date32(), [10957, None]),
                          (pa.date64(), [10957 * 86400000, None])])
def test_sequence_explicit_types(input):
    t, ex_values = input
    data = [datetime.date(2000, 1, 1), None]
    arr = pa.array(data, type=t)
    arr2 = pa.array(ex_values, type=t)

    for x in [arr, arr2]:
        assert len(x) == 2
        assert x.type == t
        assert x.null_count == 1
        assert x[0].as_py() == datetime.date(2000, 1, 1)
        assert x[1].as_py() is None


def test_date32_overflow():
    # Overflow
    data3 = [2**32, None]
    with pytest.raises((OverflowError, pa.ArrowException)):
        pa.array(data3, type=pa.date32())


@pytest.mark.parametrize(('time_type', 'unit', 'int_type'), [
    (pa.time32, 's', 'int32'),
    (pa.time32, 'ms', 'int32'),
    (pa.time64, 'us', 'int64'),
    (pa.time64, 'ns', 'int64'),
])
def test_sequence_time_with_timezone(time_type, unit, int_type):
    def expected_integer_value(t):
        # only use with a UTC time object because it doesn't adjust for
        # the offset
        units = ['s', 'ms', 'us', 'ns']
        multiplier = 10**(units.index(unit) * 3)
        if t is None:
            return None
        seconds = (
            t.hour * 3600 +
            t.minute * 60 +
            t.second +
            t.microsecond * 10**-6
        )
        return int(seconds * multiplier)

    def expected_time_value(t):
        # only use with a UTC time object because it doesn't adjust for
        # the time object's tzinfo
        if unit == 's':
            return t.replace(microsecond=0)
        elif unit == 'ms':
            return t.replace(microsecond=(t.microsecond // 1000) * 1000)
        else:
            return t

    # only timezone naive times are supported in arrow
    data = [
        datetime.time(8, 23, 34, 123456),
        datetime.time(5, 0, 0, 1000),
        None,
        datetime.time(1, 11, 56, 432539),
        datetime.time(23, 10, 0, 437699)
    ]

    ty = time_type(unit)
    arr = pa.array(data, type=ty)
    assert len(arr) == 5
    assert arr.type == ty
    assert arr.null_count == 1

    # test that the underlying integers are UTC values
    values = arr.cast(int_type)
    expected = list(map(expected_integer_value, data))
    assert values.to_pylist() == expected

    # test that the scalars are the expected datetime.time objects
    assert arr[0].as_py() == expected_time_value(data[0])
    assert arr[1].as_py() == expected_time_value(data[1])
    assert arr[2].as_py() is None
    assert arr[3].as_py() == expected_time_value(data[3])
    assert arr[4].as_py() == expected_time_value(data[4])


def tz(hours, minutes=0):
    # helper for building fixed-offset timezones in the timestamp tests
    offset = datetime.timedelta(hours=hours, minutes=minutes)
    return datetime.timezone(offset)


def test_sequence_timestamp():
    data = [
        datetime.datetime(2007, 7, 13, 1, 23, 34, 123456),
        None,
        datetime.datetime(2006, 1, 13, 12, 34, 56, 432539),
        datetime.datetime(2010, 8, 13, 5, 46, 57, 437699)
    ]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.timestamp('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                               23, 34, 123456)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
                                               34, 56, 432539)
    assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
                                               46, 57, 437699)


@pytest.mark.parametrize('timezone', [
    None,
    'UTC',
    'Etc/GMT-1',
    'Europe/Budapest',
])
@pytest.mark.parametrize('unit', [
    's',
    'ms',
    'us',
    'ns'
])
def test_sequence_timestamp_with_timezone(timezone, unit):
    pytz = pytest.importorskip("pytz")

    def expected_integer_value(dt):
        units = ['s', 'ms', 'us', 'ns']
        multiplier = 10**(units.index(unit) * 3)
        if dt is None:
            return None
        else:
            # avoid float precision issues
            ts = decimal.Decimal(str(dt.timestamp()))
            return int(ts * multiplier)

    def expected_datetime_value(dt):
        if dt is None:
            return None

        if unit == 's':
            dt = dt.replace(microsecond=0)
        elif unit == 'ms':
            dt = dt.replace(microsecond=(dt.microsecond // 1000) * 1000)

        # adjust the timezone
        if timezone is None:
            # make datetime timezone unaware
            return dt.replace(tzinfo=None)
        else:
            # convert to the expected timezone
            return dt.astimezone(pytz.timezone(timezone))

    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        None,
        pytz.timezone('US/Eastern').localize(
            datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
        ),
        pytz.timezone('Europe/Moscow').localize(
            datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
        ),
    ]
    utcdata = [
        pytz.utc.localize(data[0]),
        data[1],
        None,
        data[3].astimezone(pytz.utc),
        data[4].astimezone(pytz.utc),
    ]

    ty = pa.timestamp(unit, tz=timezone)
    arr = pa.array(data, type=ty)
    assert len(arr) == 5
    assert arr.type == ty
    assert arr.null_count == 1

    # test that the underlying integers are UTC values
    values = arr.cast('int64')
    expected = list(map(expected_integer_value, utcdata))
    assert values.to_pylist() == expected

    # test that the scalars are datetimes with the correct timezone
    for i in range(len(arr)):
        assert arr[i].as_py() == expected_datetime_value(utcdata[i])


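# With PYARROW_IGNORE_TIMEZONE set, the tzinfo of input datetimes is ignored:
# the wall-clock values are stored unchanged and then reinterpreted in the
# target timezone (hence the fromutc() in the expectation below).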
@pytest.mark.parametrize('timezone', [
    None,
    'UTC',
    'Etc/GMT-1',
    'Europe/Budapest',
])
def test_pyarrow_ignore_timezone_environment_variable(monkeypatch, timezone):
    # note that any non-empty value will evaluate to true
    pytest.importorskip("pytz")
    import pytz

    monkeypatch.setenv("PYARROW_IGNORE_TIMEZONE", "1")
    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        pytz.timezone('US/Eastern').localize(
            datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
        ),
        pytz.timezone('Europe/Moscow').localize(
            datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
        ),
    ]

    expected = [dt.replace(tzinfo=None) for dt in data]
    if timezone is not None:
        tzinfo = pytz.timezone(timezone)
        expected = [tzinfo.fromutc(dt) for dt in expected]

    ty = pa.timestamp('us', tz=timezone)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected


def test_sequence_timestamp_with_timezone_inference():
    pytest.importorskip("pytz")
    import pytz

    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        None,
        pytz.timezone('US/Eastern').localize(
            datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
        ),
        pytz.timezone('Europe/Moscow').localize(
            datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
        ),
    ]
    expected = [
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='UTC'),
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='US/Eastern'),
        pa.timestamp('us', tz='Europe/Moscow')
    ]
    for dt, expected_type in zip(data, expected):
        prepended = [dt] + data
        arr = pa.array(prepended)
        assert arr.type == expected_type


def test_sequence_timestamp_with_zoneinfo_timezone_inference():
    pytest.importorskip("zoneinfo")
    import zoneinfo

    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        datetime.datetime(2008, 1, 5, 5, 0, 0, 1000,
                          tzinfo=datetime.timezone.utc),
        None,
        datetime.datetime(2006, 1, 13, 12, 34, 56, 432539,
                          tzinfo=zoneinfo.ZoneInfo(key='US/Eastern')),
        datetime.datetime(2010, 8, 13, 5, 0, 0, 437699,
                          tzinfo=zoneinfo.ZoneInfo(key='Europe/Moscow')),
    ]
    expected = [
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='UTC'),
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='US/Eastern'),
        pa.timestamp('us', tz='Europe/Moscow')
    ]
    for dt, expected_type in zip(data, expected):
        prepended = [dt] + data
        arr = pa.array(prepended)
        assert arr.type == expected_type


@pytest.mark.pandas
def test_sequence_timestamp_from_mixed_builtin_and_pandas_datetimes():
    pytest.importorskip("pytz")
    import pytz
    import pandas as pd

    data = [
        pd.Timestamp(1184307814123456123, tz=pytz.timezone('US/Eastern'),
                     unit='ns'),
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        None,
    ]
    utcdata = [
        data[0].astimezone(pytz.utc),
        pytz.utc.localize(data[1]),
        data[2].astimezone(pytz.utc),
        None,
    ]

    arr = pa.array(data)
    assert arr.type == pa.timestamp('us', tz='US/Eastern')

    values = arr.cast('int64')
    expected = [int(dt.timestamp() * 10**6) if dt else None for dt in utcdata]
    assert values.to_pylist() == expected


def test_sequence_timestamp_out_of_bounds_nanosecond():
    # https://issues.apache.org/jira/browse/ARROW-9768
    # datetime outside of range supported for nanosecond resolution
    data = [datetime.datetime(2262, 4, 12)]
    with pytest.raises(ValueError, match="out of bounds"):
        pa.array(data, type=pa.timestamp('ns'))

    # with microsecond resolution it works fine
    arr = pa.array(data, type=pa.timestamp('us'))
    assert arr.to_pylist() == data

    # case where the naive value is within bounds, but its UTC-converted
    # equivalent is not
    tz = datetime.timezone(datetime.timedelta(hours=-1))
    data = [datetime.datetime(2262, 4, 11, 23, tzinfo=tz)]
    with pytest.raises(ValueError, match="out of bounds"):
        pa.array(data, type=pa.timestamp('ns'))

    arr = pa.array(data, type=pa.timestamp('us'))
    assert arr.to_pylist()[0] == datetime.datetime(2262, 4, 12)


def test_sequence_numpy_timestamp():
    data = [
        np.datetime64(datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)),
        None,
        np.datetime64(datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)),
        np.datetime64(datetime.datetime(2010, 8, 13, 5, 46, 57, 437699))
    ]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.timestamp('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                               23, 34, 123456)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
                                               34, 56, 432539)
    assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
                                               46, 57, 437699)


class MyDate(datetime.date):
    pass


class MyDatetime(datetime.datetime):
    pass


class MyTimedelta(datetime.timedelta):
    pass


def test_datetime_subclassing():
    data = [
        MyDate(2007, 7, 13),
    ]
    date_type = pa.date32()
    arr_date = pa.array(data, type=date_type)
    assert len(arr_date) == 1
    assert arr_date.type == date_type
    assert arr_date[0].as_py() == datetime.date(2007, 7, 13)

    data = [
        MyDatetime(2007, 7, 13, 1, 23, 34, 123456),
    ]

    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')

    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                 23, 34, 0)

    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123000)

    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123456)

    data = [
        MyTimedelta(123, 456, 1002),
    ]

    s = pa.duration('s')
    ms = pa.duration('ms')
    us = pa.duration('us')

    arr_s = pa.array(data)
    assert len(arr_s) == 1
    assert arr_s.type == us
    assert arr_s[0].as_py() == datetime.timedelta(123, 456, 1002)

    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.timedelta(123, 456)

    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.timedelta(123, 456, 1000)

    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.timedelta(123, 456, 1002)


@pytest.mark.xfail(not _pandas_api.have_pandas,
                   reason="pandas required for nanosecond conversion")
def test_sequence_timestamp_nanoseconds():
    inputs = [
        [datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)],
        [MyDatetime(2007, 7, 13, 1, 23, 34, 123456)]
    ]

    for data in inputs:
        ns = pa.timestamp('ns')
        arr_ns = pa.array(data, type=ns)
        assert len(arr_ns) == 1
        assert arr_ns.type == ns
        assert arr_ns[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                      23, 34, 123456)


@pytest.mark.pandas
@pytest.mark.skipif(sys.platform == "win32" and not util.windows_has_tzdata(),
                    reason="Timezone database is not installed on Windows")
def test_sequence_timestamp_from_int_with_unit():
    # TODO(wesm): This test might be rewritten to assert the actual behavior
    # when pandas is not installed

    data = [1]

    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')
    ns = pa.timestamp('ns')

    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert repr(arr_s[0]) == (
        "<pyarrow.TimestampScalar: '1970-01-01T00:00:01'>"
    )
    assert str(arr_s[0]) == "1970-01-01 00:00:01"

    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert repr(arr_ms[0].as_py()) == (
        "datetime.datetime(1970, 1, 1, 0, 0, 0, 1000)"
    )
    assert str(arr_ms[0]) == "1970-01-01 00:00:00.001000"

    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert repr(arr_us[0].as_py()) == (
        "datetime.datetime(1970, 1, 1, 0, 0, 0, 1)"
    )
    assert str(arr_us[0]) == "1970-01-01 00:00:00.000001"

    arr_ns = pa.array(data, type=ns)
    assert len(arr_ns) == 1
    assert arr_ns.type == ns
    assert repr(arr_ns[0].as_py()) == (
        "Timestamp('1970-01-01 00:00:00.000000001')"
    )
    assert str(arr_ns[0]) == "1970-01-01 00:00:00.000000001"

    expected_exc = TypeError

    class CustomClass():
        pass

    for ty in [ns, pa.date32(), pa.date64()]:
        with pytest.raises(expected_exc):
            pa.array([1, CustomClass()], type=ty)


@pytest.mark.parametrize('np_scalar', [True, False])
def test_sequence_duration(np_scalar):
    td1 = datetime.timedelta(2, 3601, 1)
    td2 = datetime.timedelta(1, 100, 1000)
    if np_scalar:
        data = [np.timedelta64(td1), None, np.timedelta64(td2)]
    else:
        data = [td1, None, td2]

    arr = pa.array(data)
    assert len(arr) == 3
    assert arr.type == pa.duration('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == td1
    assert arr[1].as_py() is None
    assert arr[2].as_py() == td2


@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_with_unit(unit):
    data = [
        datetime.timedelta(3, 22, 1001),
    ]
    expected = {'s': datetime.timedelta(3, 22),
                'ms': datetime.timedelta(3, 22, 1000),
                'us': datetime.timedelta(3, 22, 1001),
                'ns': datetime.timedelta(3, 22, 1001)}

    ty = pa.duration(unit)

    arr_s = pa.array(data, type=ty)
    assert len(arr_s) == 1
    assert arr_s.type == ty
    assert arr_s[0].as_py() == expected[unit]


@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_from_int_with_unit(unit):
    data = [5]

    ty = pa.duration(unit)
    arr = pa.array(data, type=ty)
    assert len(arr) == 1
    assert arr.type == ty
    assert arr[0].value == 5


def test_sequence_duration_nested_lists():
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)

    data = [[td1, None], [td1, td2]]

    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == data

    arr = pa.array(data, type=pa.list_(pa.duration('ms')))
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('ms'))
    assert arr.to_pylist() == data


def test_sequence_duration_nested_lists_numpy():
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)

    data = [[np.timedelta64(td1), None],
            [np.timedelta64(td1), np.timedelta64(td2)]]

    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == [[td1, None], [td1, td2]]

    data = [np.array([np.timedelta64(td1), None], dtype='timedelta64[us]'),
            np.array([np.timedelta64(td1), np.timedelta64(td2)])]

    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == [[td1, None], [td1, td2]]


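# Type inference walks the sequence to determine the nesting depth; elements
# at inconsistent depths cannot be unified and are rejected below.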
def test_sequence_nesting_levels():
    data = [1, 2, None]
    arr = pa.array(data)
    assert arr.type == pa.int64()
    assert arr.to_pylist() == data

    data = [[1], [2], None]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data

    data = [[1], [2, 3, 4], [None]]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data

    data = [None, [[None, 1]], [[2, 3, 4], None], [None]]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.list_(pa.int64()))
    assert arr.to_pylist() == data

    exceptions = (pa.ArrowInvalid, pa.ArrowTypeError)

    # Mixed nesting levels are rejected
    with pytest.raises(exceptions):
        pa.array([1, 2, [1]])

    with pytest.raises(exceptions):
        pa.array([1, 2, []])

    with pytest.raises(exceptions):
        pa.array([[1], [2], [None, [1]]])


def test_sequence_mixed_types_fails():
    data = ['a', 1, 2.0]
    with pytest.raises(pa.ArrowTypeError):
        pa.array(data)


def test_sequence_mixed_types_with_specified_type_fails():
    data = ['-10', '-5', {'a': 1}, '0', '5', '10']

    type = pa.string()
    with pytest.raises(TypeError):
        pa.array(data, type=type)


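# Decimal conversion: precision and scale are derived from the Decimal's
# digits and exponent, so trailing zeros and scientific notation affect the
# inferred type (see test_sequence_decimal_infer below).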
def test_sequence_decimal():
    data = [decimal.Decimal('1234.183'), decimal.Decimal('8094.234')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=7, scale=3))
        assert arr.to_pylist() == data


def test_sequence_decimal_different_precisions():
    data = [
        decimal.Decimal('1234234983.183'), decimal.Decimal('80943244.234')
    ]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=13, scale=3))
        assert arr.to_pylist() == data


def test_sequence_decimal_no_scale():
    data = [decimal.Decimal('1234234983'), decimal.Decimal('8094324')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=10))
        assert arr.to_pylist() == data


def test_sequence_decimal_negative():
    data = [decimal.Decimal('-1234.234983'), decimal.Decimal('-8.094324')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=10, scale=6))
        assert arr.to_pylist() == data


def test_sequence_decimal_no_whole_part():
    data = [decimal.Decimal('-.4234983'), decimal.Decimal('.0103943')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=7, scale=7))
        assert arr.to_pylist() == data


def test_sequence_decimal_large_integer():
    data = [decimal.Decimal('-394029506937548693.42983'),
            decimal.Decimal('32358695912932.01033')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=23, scale=5))
        assert arr.to_pylist() == data


def test_sequence_decimal_from_integers():
    data = [0, 1, -39402950693754869342983]
    expected = [decimal.Decimal(x) for x in data]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=28, scale=5))
        assert arr.to_pylist() == expected


def test_sequence_decimal_too_high_precision():
    # ARROW-6989 python decimal has too high precision
    with pytest.raises(ValueError, match="precision out of range"):
        pa.array([decimal.Decimal('1' * 80)])


def test_sequence_decimal_infer():
    for data, typ in [
        # simple case
        (decimal.Decimal('1.234'), pa.decimal128(4, 3)),
        # trailing zeros
        (decimal.Decimal('12300'), pa.decimal128(5, 0)),
        (decimal.Decimal('12300.0'), pa.decimal128(6, 1)),
        # scientific power notation
        (decimal.Decimal('1.23E+4'), pa.decimal128(5, 0)),
        (decimal.Decimal('123E+2'), pa.decimal128(5, 0)),
        (decimal.Decimal('123E+4'), pa.decimal128(7, 0)),
        # leading zeros
        (decimal.Decimal('0.0123'), pa.decimal128(4, 4)),
        (decimal.Decimal('0.01230'), pa.decimal128(5, 5)),
        (decimal.Decimal('1.230E-2'), pa.decimal128(5, 5)),
    ]:
        assert pa.infer_type([data]) == typ
        arr = pa.array([data])
        assert arr.type == typ
        assert arr.to_pylist()[0] == data


def test_sequence_decimal_infer_mixed():
    # ARROW-12150 - ensure mixed precision gets correctly inferred to
    # common type that can hold all input values
    cases = [
        ([decimal.Decimal('1.234'), decimal.Decimal('3.456')],
         pa.decimal128(4, 3)),
        ([decimal.Decimal('1.234'), decimal.Decimal('456.7')],
         pa.decimal128(6, 3)),
        ([decimal.Decimal('123.4'), decimal.Decimal('4.567')],
         pa.decimal128(6, 3)),
        ([decimal.Decimal('123e2'), decimal.Decimal('4567e3')],
         pa.decimal128(7, 0)),
        ([decimal.Decimal('123e4'), decimal.Decimal('4567e2')],
         pa.decimal128(7, 0)),
        ([decimal.Decimal('0.123'), decimal.Decimal('0.04567')],
         pa.decimal128(5, 5)),
        ([decimal.Decimal('0.001'), decimal.Decimal('1.01E5')],
         pa.decimal128(9, 3)),
    ]
    for data, typ in cases:
        assert pa.infer_type(data) == typ
        arr = pa.array(data)
        assert arr.type == typ
        assert arr.to_pylist() == data


def test_sequence_decimal_given_type():
    for data, typs, wrong_typs in [
        # simple case
        (
            decimal.Decimal('1.234'),
            [pa.decimal128(4, 3), pa.decimal128(5, 3), pa.decimal128(5, 4)],
            [pa.decimal128(4, 2), pa.decimal128(4, 4)]
        ),
        # trailing zeros
        (
            decimal.Decimal('12300'),
            [pa.decimal128(5, 0), pa.decimal128(6, 0), pa.decimal128(3, -2)],
            [pa.decimal128(4, 0), pa.decimal128(3, -3)]
        ),
        # scientific power notation
        (
            decimal.Decimal('1.23E+4'),
            [pa.decimal128(5, 0), pa.decimal128(6, 0), pa.decimal128(3, -2)],
            [pa.decimal128(4, 0), pa.decimal128(3, -3)]
        ),
    ]:
        for typ in typs:
            arr = pa.array([data], type=typ)
            assert arr.type == typ
            assert arr.to_pylist()[0] == data
        for typ in wrong_typs:
            with pytest.raises(ValueError):
                pa.array([data], type=typ)


def test_range_types():
    arr1 = pa.array(range(3))
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)


def test_empty_range():
    arr = pa.array(range(0))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []


def test_structarray():
    arr = pa.StructArray.from_arrays([], names=[])
    assert arr.type == pa.struct([])
    assert len(arr) == 0
    assert arr.to_pylist() == []

    ints = pa.array([None, 2, 3], type=pa.int64())
    strs = pa.array(['a', None, 'c'], type=pa.string())
    bools = pa.array([True, False, None], type=pa.bool_())
    arr = pa.StructArray.from_arrays(
        [ints, strs, bools],
        ['ints', 'strs', 'bools'])

    expected = [
        {'ints': None, 'strs': 'a', 'bools': True},
        {'ints': 2, 'strs': None, 'bools': False},
        {'ints': 3, 'strs': 'c', 'bools': None},
    ]

    pylist = arr.to_pylist()
    assert pylist == expected, (pylist, expected)

    # len(names) != len(arrays)
    with pytest.raises(ValueError):
        pa.StructArray.from_arrays([ints], ['ints', 'strs'])


def test_struct_from_dicts():
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    arr = pa.array([], type=ty)
    assert arr.to_pylist() == []

    data = [{'a': 5, 'b': 'foo', 'c': True},
            {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data

    # With omitted values
    data = [{'a': 5, 'c': True},
            None,
            {},
            {'a': None, 'b': 'bar'}]
    arr = pa.array(data, type=ty)
    expected = [{'a': 5, 'b': None, 'c': True},
                None,
                {'a': None, 'b': None, 'c': None},
                {'a': None, 'b': 'bar', 'c': None}]
    assert arr.to_pylist() == expected


def test_struct_from_dicts_bytes_keys():
    # ARROW-6878
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    arr = pa.array([], type=ty)
    assert arr.to_pylist() == []

    data = [{b'a': 5, b'b': 'foo'},
            {b'a': 6, b'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == [
        {'a': 5, 'b': 'foo', 'c': None},
        {'a': 6, 'b': None, 'c': False},
    ]


def test_struct_from_tuples():
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])

    data = [(5, 'foo', True),
            (6, 'bar', False)]
    expected = [{'a': 5, 'b': 'foo', 'c': True},
                {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data, type=ty)

    data_as_ndarray = np.empty(len(data), dtype=object)
    data_as_ndarray[:] = data
    arr2 = pa.array(data_as_ndarray, type=ty)
    assert arr.to_pylist() == expected

    assert arr.equals(arr2)

    # With omitted values
    data = [(5, 'foo', None),
            None,
            (6, None, False)]
    expected = [{'a': 5, 'b': 'foo', 'c': None},
                None,
                {'a': 6, 'b': None, 'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected

    # Invalid tuple size
    for tup in [(5, 'foo'), (), ('5', 'foo', True, None)]:
        with pytest.raises(ValueError, match="(?i)tuple size"):
            pa.array([tup], type=ty)


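# Struct values may also be given as lists of (name, value) pairs; the pairs
# must follow the struct type's field order (see the errors test below).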
def test_struct_from_list_of_pairs():
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('b', pa.string()),
        pa.field('c', pa.bool_())
    ])
    data = [
        [('a', 5), ('b', 'foo'), ('c', True)],
        [('a', 6), ('b', 'bar'), ('c', False)],
        None
    ]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == [
        {'a': 5, 'b': 'foo', 'c': True},
        {'a': 6, 'b': 'bar', 'c': False},
        None
    ]

    # test with duplicated field names
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('a', pa.string()),
        pa.field('b', pa.bool_())
    ])
    data = [
        [('a', 5), ('a', 'foo'), ('b', True)],
        [('a', 6), ('a', 'bar'), ('b', False)],
    ]
    arr = pa.array(data, type=ty)
    with pytest.raises(ValueError):
        # TODO(kszucs): ARROW-9997
        arr.to_pylist()

    # test with empty elements
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('b', pa.string()),
        pa.field('c', pa.bool_())
    ])
    data = [
        [],
        [('a', 5), ('b', 'foo'), ('c', True)],
        [('a', 2), ('b', 'baz')],
        [('a', 1), ('b', 'bar'), ('c', False), ('d', 'julia')],
    ]
    expected = [
        {'a': None, 'b': None, 'c': None},
        {'a': 5, 'b': 'foo', 'c': True},
        {'a': 2, 'b': 'baz', 'c': None},
        {'a': 1, 'b': 'bar', 'c': False},
    ]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected


def test_struct_from_list_of_pairs_errors():
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('b', pa.string()),
        pa.field('c', pa.bool_())
    ])

    # test that it raises if the key doesn't match the expected field name
    data = [
        [],
        [('a', 5), ('c', True), ('b', None)],
    ]
    msg = "The expected field name is `b` but `c` was given"
    with pytest.raises(ValueError, match=msg):
        pa.array(data, type=ty)

    # test various errors both at the first position and after because of key
    # type inference
    template = (
        r"Could not convert {} with type {}: was expecting tuple of "
        r"(key, value) pair"
    )
    cases = [
        tuple(),  # empty key-value pair
        tuple('a',),  # missing value
        tuple('unknown-key',),  # not known field name
        'string',  # not a tuple
    ]
    for key_value_pair in cases:
        msg = re.escape(template.format(
            repr(key_value_pair), type(key_value_pair).__name__
        ))

        with pytest.raises(TypeError, match=msg):
            pa.array([
                [key_value_pair],
                [('a', 5), ('b', 'foo'), ('c', None)],
            ], type=ty)

        with pytest.raises(TypeError, match=msg):
            pa.array([
                [('a', 5), ('b', 'foo'), ('c', None)],
                [key_value_pair],
            ], type=ty)


def test_struct_from_mixed_sequence():
    # It is forbidden to mix dicts and tuples when initializing a struct array
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    data = [(5, 'foo', True),
            {'a': 6, 'b': 'bar', 'c': False}]
    with pytest.raises(TypeError):
        pa.array(data, type=ty)


def test_struct_from_dicts_inference():
    expected_type = pa.struct([pa.field('a', pa.int64()),
                               pa.field('b', pa.string()),
                               pa.field('c', pa.bool_())])
    data = [{'a': 5, 'b': 'foo', 'c': True},
            {'a': 6, 'b': 'bar', 'c': False}]

    arr = pa.array(data)
    check_struct_type(arr.type, expected_type)
    assert arr.to_pylist() == data

    # With omitted values
    data = [{'a': 5, 'c': True},
            None,
            {},
            {'a': None, 'b': 'bar'}]
    expected = [{'a': 5, 'b': None, 'c': True},
                None,
                {'a': None, 'b': None, 'c': None},
                {'a': None, 'b': 'bar', 'c': None}]

    arr = pa.array(data)
    data_as_ndarray = np.empty(len(data), dtype=object)
    data_as_ndarray[:] = data
    arr2 = pa.array(data_as_ndarray)

    check_struct_type(arr.type, expected_type)
    assert arr.to_pylist() == expected
    assert arr.equals(arr2)

    # Nested
    expected_type = pa.struct([
        pa.field('a', pa.struct([pa.field('aa', pa.list_(pa.int64())),
                                 pa.field('ab', pa.bool_())])),
        pa.field('b', pa.string())])
    data = [{'a': {'aa': [5, 6], 'ab': True}, 'b': 'foo'},
            {'a': {'aa': None, 'ab': False}, 'b': None},
            {'a': None, 'b': 'bar'}]
    arr = pa.array(data)

    assert arr.to_pylist() == data

    # Edge cases
    arr = pa.array([{}])
    assert arr.type == pa.struct([])
    assert arr.to_pylist() == [{}]

    # Mixing structs and scalars is rejected
    with pytest.raises((pa.ArrowInvalid, pa.ArrowTypeError)):
        pa.array([1, {'a': 2}])


def test_structarray_from_arrays_coerce():
    # ARROW-1706
    ints = [None, 2, 3]
    strs = ['a', None, 'c']
    bools = [True, False, None]
    ints_nonnull = [1, 2, 3]

    arrays = [ints, strs, bools, ints_nonnull]
    result = pa.StructArray.from_arrays(arrays,
                                        ['ints', 'strs', 'bools',
                                         'int_nonnull'])
    expected = pa.StructArray.from_arrays(
        [pa.array(ints, type='int64'),
         pa.array(strs, type='utf8'),
         pa.array(bools),
         pa.array(ints_nonnull, type='int64')],
        ['ints', 'strs', 'bools', 'int_nonnull'])

    with pytest.raises(ValueError):
        pa.StructArray.from_arrays(arrays)

    assert result.equals(expected)


def test_decimal_array_with_none_and_nan():
    values = [decimal.Decimal('1.234'), None, np.nan, decimal.Decimal('nan')]

    with pytest.raises(TypeError):
        # ARROW-6227: Without from_pandas=True, NaN is considered a float
        array = pa.array(values)

    array = pa.array(values, from_pandas=True)
    assert array.type == pa.decimal128(4, 3)
    assert array.to_pylist() == values[:2] + [None, None]

    array = pa.array(values, type=pa.decimal128(10, 4), from_pandas=True)
    assert array.to_pylist() == [decimal.Decimal('1.2340'), None, None, None]


def test_map_from_dicts():
    data = [[{'key': b'a', 'value': 1}, {'key': b'b', 'value': 2}],
            [{'key': b'c', 'value': 3}],
            [{'key': b'd', 'value': 4}, {'key': b'e', 'value': 5},
             {'key': b'f', 'value': None}],
            [{'key': b'g', 'value': 7}]]
    expected = [[(d['key'], d['value']) for d in entry] for entry in data]

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # With omitted values
    data[1] = None
    expected[1] = None

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # Invalid dictionary
    for entry in [[{'value': 5}], [{}], [{'k': 1, 'v': 2}]]:
        with pytest.raises(ValueError, match="Invalid Map"):
            pa.array([entry], type=pa.map_('i4', 'i4'))

    # Invalid dictionary types
    for entry in [[{'key': '1', 'value': 5}], [{'key': {'value': 2}}]]:
        with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"):
            pa.array([entry], type=pa.map_('i4', 'i4'))


def test_map_from_tuples():
    expected = [[(b'a', 1), (b'b', 2)],
                [(b'c', 3)],
                [(b'd', 4), (b'e', 5), (b'f', None)],
                [(b'g', 7)]]

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # With omitted values
    expected[1] = None

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # Invalid tuple size
    for entry in [[(5,)], [()], [('5', 'foo', True)]]:
        with pytest.raises(ValueError, match="(?i)tuple size"):
            pa.array([entry], type=pa.map_('i4', 'i4'))


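# Requesting a dictionary type makes pa.array() dictionary-encode the input:
# `indices` point into `dictionary`, which holds the distinct values in order
# of first appearance.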
def test_dictionary_from_boolean():
    typ = pa.dictionary(pa.int8(), value_type=pa.bool_())
    a = pa.array([False, False, True, False, True], type=typ)
    assert isinstance(a.type, pa.DictionaryType)
    assert a.type.equals(typ)

    expected_indices = pa.array([0, 0, 1, 0, 1], type=pa.int8())
    expected_dictionary = pa.array([False, True], type=pa.bool_())
    assert a.indices.equals(expected_indices)
    assert a.dictionary.equals(expected_dictionary)


@pytest.mark.parametrize('value_type', [
    pa.int8(),
    pa.int16(),
    pa.int32(),
    pa.int64(),
    pa.uint8(),
    pa.uint16(),
    pa.uint32(),
    pa.uint64(),
    pa.float32(),
    pa.float64(),
])
def test_dictionary_from_integers(value_type):
    typ = pa.dictionary(pa.int8(), value_type=value_type)
    a = pa.array([1, 2, 1, 1, 2, 3], type=typ)
    assert isinstance(a.type, pa.DictionaryType)
    assert a.type.equals(typ)

    expected_indices = pa.array([0, 1, 0, 0, 1, 2], type=pa.int8())
    expected_dictionary = pa.array([1, 2, 3], type=value_type)
    assert a.indices.equals(expected_indices)
    assert a.dictionary.equals(expected_dictionary)


@pytest.mark.parametrize('input_index_type', [
    pa.int8(),
    pa.int16(),
    pa.int32(),
    pa.int64()
])
def test_dictionary_index_type(input_index_type):
    # the dictionary array is constructed using an adaptive index type
    # builder, and the given index type is treated as the minimal width to use

    typ = pa.dictionary(input_index_type, value_type=pa.int64())
    arr = pa.array(range(10), type=typ)
    assert arr.type.equals(typ)


def test_dictionary_is_always_adaptive():
    # the dictionary array is constructed using an adaptive index type
    # builder, so the output index type may be wider than the given index
    # type, depending on the input data
    typ = pa.dictionary(pa.int8(), value_type=pa.int64())

    a = pa.array(range(2**7), type=typ)
    expected = pa.dictionary(pa.int8(), pa.int64())
    assert a.type.equals(expected)

    a = pa.array(range(2**7 + 1), type=typ)
    expected = pa.dictionary(pa.int16(), pa.int64())
    assert a.type.equals(expected)


def test_dictionary_from_strings():
    for value_type in [pa.binary(), pa.string()]:
        typ = pa.dictionary(pa.int8(), value_type)
        a = pa.array(["", "a", "bb", "a", "bb", "ccc"], type=typ)

        assert isinstance(a.type, pa.DictionaryType)

        expected_indices = pa.array([0, 1, 2, 1, 2, 3], type=pa.int8())
        expected_dictionary = pa.array(["", "a", "bb", "ccc"], type=value_type)
        assert a.indices.equals(expected_indices)
        assert a.dictionary.equals(expected_dictionary)

    # fixed size binary type
    typ = pa.dictionary(pa.int8(), pa.binary(3))
    a = pa.array(["aaa", "aaa", "bbb", "ccc", "bbb"], type=typ)
    assert isinstance(a.type, pa.DictionaryType)

    expected_indices = pa.array([0, 0, 1, 2, 1], type=pa.int8())
    expected_dictionary = pa.array(["aaa", "bbb", "ccc"], type=pa.binary(3))
    assert a.indices.equals(expected_indices)
    assert a.dictionary.equals(expected_dictionary)


@pytest.mark.parametrize(('unit', 'expected'), [
    ('s', datetime.timedelta(seconds=-2147483000)),
    ('ms', datetime.timedelta(milliseconds=-2147483000)),
    ('us', datetime.timedelta(microseconds=-2147483000)),
    ('ns', datetime.timedelta(microseconds=-2147483))
])
def test_duration_array_roundtrip_corner_cases(unit, expected):
    # Corner case discovered by hypothesis: there were implicit conversions
    # to unsigned values resulting in wrong values with wrong signs.
    ty = pa.duration(unit)
    arr = pa.array([-2147483000], type=ty)
    restored = pa.array(arr.to_pylist(), type=ty)
    assert arr.equals(restored)

    expected_list = [expected]
    if unit == 'ns':
        # if pandas is available then a pandas Timedelta is returned
        try:
            import pandas as pd
        except ImportError:
            pass
        else:
            expected_list = [pd.Timedelta(-2147483000, unit='ns')]

    assert restored.to_pylist() == expected_list


@pytest.mark.pandas
def test_roundtrip_nanosecond_resolution_pandas_temporal_objects():
    # corner case discovered by hypothesis: preserving the nanoseconds on
    # conversion from a list of Timedelta and Timestamp objects
    import pandas as pd

    ty = pa.duration('ns')
    arr = pa.array([9223371273709551616], type=ty)
    data = arr.to_pylist()
    assert isinstance(data[0], pd.Timedelta)
    restored = pa.array(data, type=ty)
    assert arr.equals(restored)
    assert restored.to_pylist() == [
        pd.Timedelta(9223371273709551616, unit='ns')
    ]

    ty = pa.timestamp('ns')
    arr = pa.array([9223371273709551616], type=ty)
    data = arr.to_pylist()
    assert isinstance(data[0], pd.Timestamp)
    restored = pa.array(data, type=ty)
    assert arr.equals(restored)
    assert restored.to_pylist() == [
        pd.Timestamp(9223371273709551616, unit='ns')
    ]

    ty = pa.timestamp('ns', tz='US/Eastern')
    value = 1604119893000000000
    arr = pa.array([value], type=ty)
    data = arr.to_pylist()
    assert isinstance(data[0], pd.Timestamp)
    restored = pa.array(data, type=ty)
    assert arr.equals(restored)
    assert restored.to_pylist() == [
        pd.Timestamp(value, unit='ns').tz_localize(
            "UTC").tz_convert('US/Eastern')
    ]


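# Property-based check: arrays generated by the hypothesis strategies module
# (imported as `past`) should survive a to_pylist() / pa.array() roundtrip.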
@h.given(past.all_arrays)
def test_array_to_pylist_roundtrip(arr):
    seq = arr.to_pylist()
    restored = pa.array(seq, type=arr.type)
    assert restored.equals(arr)


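# Auto-chunking kicks in when one chunk would overflow binary()'s 32-bit
# offsets: the values below total 20 * 100000000 + 147483646 = 2**31 - 2
# bytes, so appending a few more bytes forces a second chunk.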
@pytest.mark.large_memory
def test_auto_chunking_binary_like():
    v1 = b'x' * 100000000
    v2 = b'x' * 147483646

    # single chunk
    one_chunk_data = [v1] * 20 + [b'', None, v2]
    arr = pa.array(one_chunk_data, type=pa.binary())
    assert isinstance(arr, pa.Array)
    assert len(arr) == 23
    assert arr[20].as_py() == b''
    assert arr[21].as_py() is None
    assert arr[22].as_py() == v2

    # two chunks
    two_chunk_data = one_chunk_data + [b'two']
    arr = pa.array(two_chunk_data, type=pa.binary())
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 23
    assert len(arr.chunk(1)) == 1
    assert arr.chunk(0)[20].as_py() == b''
    assert arr.chunk(0)[21].as_py() is None
    assert arr.chunk(0)[22].as_py() == v2
    assert arr.chunk(1).to_pylist() == [b'two']

    # three chunks
    three_chunk_data = one_chunk_data * 2 + [b'three', b'three']
    arr = pa.array(three_chunk_data, type=pa.binary())
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 3
    assert len(arr.chunk(0)) == 23
    assert len(arr.chunk(1)) == 23
    assert len(arr.chunk(2)) == 2
    for i in range(2):
        assert arr.chunk(i)[20].as_py() == b''
        assert arr.chunk(i)[21].as_py() is None
        assert arr.chunk(i)[22].as_py() == v2
    assert arr.chunk(2).to_pylist() == [b'three', b'three']


@pytest.mark.large_memory
def test_auto_chunking_list_of_binary():
    # ARROW-6281
    vals = [['x' * 1024]] * ((2 << 20) + 1)
    arr = pa.array(vals)
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 2**21 - 1
    assert len(arr.chunk(1)) == 2
    assert arr.chunk(1).to_pylist() == [['x' * 1024]] * 2


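# For list-like input the limit is the child array's capacity: each item
# below holds 2**28 uint8 values, so 7 items fit in one chunk
# (7 * 2**28 < 2**31 - 1) while 8 items (exactly 2**31 values) do not.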
@pytest.mark.large_memory
def test_auto_chunking_list_like():
    item = np.ones((2**28,), dtype='uint8')
    data = [item] * (2**3 - 1)
    arr = pa.array(data, type=pa.list_(pa.uint8()))
    assert isinstance(arr, pa.Array)
    assert len(arr) == 7

    item = np.ones((2**28,), dtype='uint8')
    data = [item] * 2**3
    arr = pa.array(data, type=pa.list_(pa.uint8()))
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 7
    assert len(arr.chunk(1)) == 1
    chunk = arr.chunk(1)
    scalar = chunk[0]
    assert isinstance(scalar, pa.ListScalar)
    expected = pa.array(item, type=pa.uint8())
    assert scalar.values == expected


@pytest.mark.slow
@pytest.mark.large_memory
def test_auto_chunking_map_type():
    # takes ~20 minutes locally
    ty = pa.map_(pa.int8(), pa.int8())
    item = [(1, 1)] * 2**28
    data = [item] * 2**3
    arr = pa.array(data, type=ty)
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr.chunk(0)) == 7
    assert len(arr.chunk(1)) == 1


@pytest.mark.large_memory
@pytest.mark.parametrize(('ty', 'char'), [
    (pa.string(), 'x'),
    (pa.binary(), b'x'),
])
def test_nested_auto_chunking(ty, char):
    v1 = char * 100000000
    v2 = char * 147483646

    struct_type = pa.struct([
        pa.field('bool', pa.bool_()),
        pa.field('integer', pa.int64()),
        pa.field('string-like', ty),
    ])

    data = [{'bool': True, 'integer': 1, 'string-like': v1}] * 20
    data.append({'bool': True, 'integer': 1, 'string-like': v2})
    arr = pa.array(data, type=struct_type)
    assert isinstance(arr, pa.Array)

    data.append({'bool': True, 'integer': 1, 'string-like': char})
    arr = pa.array(data, type=struct_type)
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 21
    assert len(arr.chunk(1)) == 1
    assert arr.chunk(1)[0].as_py() == {
        'bool': True,
        'integer': 1,
        'string-like': char
    }


@pytest.mark.large_memory
|
2327 |
+
def test_array_from_pylist_data_overflow():
|
2328 |
+
# Regression test for ARROW-12983
|
2329 |
+
# Data buffer overflow - should result in chunked array
|
2330 |
+
items = [b'a' * 4096] * (2 ** 19)
|
2331 |
+
arr = pa.array(items, type=pa.string())
|
2332 |
+
assert isinstance(arr, pa.ChunkedArray)
|
2333 |
+
assert len(arr) == 2**19
|
2334 |
+
assert len(arr.chunks) > 1
|
2335 |
+
|
2336 |
+
mask = np.zeros(2**19, bool)
|
2337 |
+
arr = pa.array(items, mask=mask, type=pa.string())
|
2338 |
+
assert isinstance(arr, pa.ChunkedArray)
|
2339 |
+
assert len(arr) == 2**19
|
2340 |
+
assert len(arr.chunks) > 1
|
2341 |
+
|
2342 |
+
arr = pa.array(items, type=pa.binary())
|
2343 |
+
assert isinstance(arr, pa.ChunkedArray)
|
2344 |
+
assert len(arr) == 2**19
|
2345 |
+
assert len(arr.chunks) > 1
|
2346 |
+
|
2347 |
+
|
2348 |
+
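
# Illustrative note, not part of the original test file: the data-overflow
# numbers above work out exactly, since 2**19 rows of 4096 bytes each total
# 4096 * 2**19 = 2**12 * 2**19 = 2**31 bytes, one byte past the 2**31 - 1
# that 32-bit offsets can address. The test below overflows the *offset*
# buffer instead, with 2**31 one-byte rows. A tiny runnable check of that
# arithmetic:

def _sketch_overflow_arithmetic():
    capacity = 2**31 - 1                 # single-chunk capacity in bytes
    assert 4096 * 2**19 == capacity + 1  # data-overflow case above
    assert 2**31 == capacity + 1         # offset-overflow case below
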
@pytest.mark.slow
@pytest.mark.large_memory
def test_array_from_pylist_offset_overflow():
    # Regression test for ARROW-12983
    # Offset buffer overflow - should result in chunked array
    # Note this doesn't apply to primitive arrays
    items = [b'a'] * (2 ** 31)
    arr = pa.array(items, type=pa.string())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**31
    assert len(arr.chunks) > 1

    mask = np.zeros(2**31, bool)
    arr = pa.array(items, mask=mask, type=pa.string())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**31
    assert len(arr.chunks) > 1

    arr = pa.array(items, type=pa.binary())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**31
    assert len(arr.chunks) > 1


@parametrize_with_collections_types
@pytest.mark.parametrize(('data', 'scalar_data', 'value_type'), [
    ([True, False, None], [pa.scalar(True), pa.scalar(False), None], pa.bool_()),
    (
        [1, 2, None],
        [pa.scalar(1), pa.scalar(2), pa.scalar(None, pa.int64())],
        pa.int64()
    ),
    ([1, None, None], [pa.scalar(1), None, pa.scalar(None, pa.int64())], pa.int64()),
    ([None, None], [pa.scalar(None), pa.scalar(None)], pa.null()),
    ([1., 2., None], [pa.scalar(1.), pa.scalar(2.), None], pa.float64()),
    (
        [None, datetime.date.today()],
        [None, pa.scalar(datetime.date.today())],
        pa.date32()
    ),
    (
        [None, datetime.date.today()],
        [None, pa.scalar(datetime.date.today(), pa.date64())],
        pa.date64()
    ),
    (
        [datetime.time(1, 1, 1), None],
        [pa.scalar(datetime.time(1, 1, 1)), None],
        pa.time64('us')
    ),
    (
        [datetime.timedelta(seconds=10)],
        [pa.scalar(datetime.timedelta(seconds=10))],
        pa.duration('us')
    ),
    (
        [None, datetime.datetime(2014, 1, 1)],
        [None, pa.scalar(datetime.datetime(2014, 1, 1))],
        pa.timestamp('us')
    ),
    (
        [pa.MonthDayNano([1, -1, -10100])],
        [pa.scalar(pa.MonthDayNano([1, -1, -10100]))],
        pa.month_day_nano_interval()
    ),
    (["a", "b"], [pa.scalar("a"), pa.scalar("b")], pa.string()),
    ([b"a", b"b"], [pa.scalar(b"a"), pa.scalar(b"b")], pa.binary()),
    (
        [b"a", b"b"],
        [pa.scalar(b"a", pa.binary(1)), pa.scalar(b"b", pa.binary(1))],
        pa.binary(1)
    ),
    ([[1, 2, 3]], [pa.scalar([1, 2, 3])], pa.list_(pa.int64())),
    ([["a", "b"]], [pa.scalar(["a", "b"])], pa.list_(pa.string())),
    (
        [1, 2, None],
        [pa.scalar(1, type=pa.int8()), pa.scalar(2, type=pa.int8()), None],
        pa.int8()
    ),
    ([1, None], [pa.scalar(1.0, type=pa.int32()), None], pa.int32()),
    (
        ["aaa", "bbb"],
        [pa.scalar("aaa", type=pa.binary(3)), pa.scalar("bbb", type=pa.binary(3))],
        pa.binary(3)),
    ([b"a"], [pa.scalar("a", type=pa.large_binary())], pa.large_binary()),
    (["a"], [pa.scalar("a", type=pa.large_string())], pa.large_string()),
    (
        ["a"],
        [pa.scalar("a", type=pa.dictionary(pa.int64(), pa.string()))],
        pa.dictionary(pa.int64(), pa.string())
    ),
    (
        ["a", "b"],
        [pa.scalar("a", pa.dictionary(pa.int64(), pa.string())),
         pa.scalar("b", pa.dictionary(pa.int64(), pa.string()))],
        pa.dictionary(pa.int64(), pa.string())
    ),
    (
        [1],
        [pa.scalar(1, type=pa.dictionary(pa.int64(), pa.int32()))],
        pa.dictionary(pa.int64(), pa.int32())
    ),
    (
        [(1, 2)],
        [pa.scalar([('a', 1), ('b', 2)], type=pa.struct(
            [('a', pa.int8()), ('b', pa.int8())]))],
        pa.struct([('a', pa.int8()), ('b', pa.int8())])
    ),
    (
        [(1, 'bar')],
        [pa.scalar([('a', 1), ('b', 'bar')], type=pa.struct(
            [('a', pa.int8()), ('b', pa.string())]))],
        pa.struct([('a', pa.int8()), ('b', pa.string())])
    )
])
def test_array_accepts_pyarrow_scalar(seq, data, scalar_data, value_type):
    if type(seq(scalar_data)) == set:
        pytest.skip("The elements in the set get reordered.")
    expect = pa.array(data, type=value_type)
    result = pa.array(seq(scalar_data))
    assert expect.equals(result)

    result = pa.array(seq(scalar_data), type=value_type)
    assert expect.equals(result)

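
# Illustrative sketch, not part of the original test file: the property the
# parametrization above exercises, reduced to one concrete case. A sequence
# of pa.scalar values converts exactly like the equivalent plain sequence,
# with the result type inferred from the scalars:

def _sketch_scalars_convert_like_plain_values():
    plain = pa.array([1, 2, None])
    wrapped = pa.array([pa.scalar(1), pa.scalar(2), None])
    assert wrapped.type == pa.int64()
    assert plain.equals(wrapped)
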
@parametrize_with_collections_types
def test_array_accepts_pyarrow_scalar_errors(seq):
    sequence = seq([pa.scalar(1), pa.scalar("a"), pa.scalar(3.0)])
    with pytest.raises(pa.ArrowInvalid,
                       match="cannot mix scalars with different types"):
        pa.array(sequence)

    sequence = seq([1, pa.scalar("a"), None])
    with pytest.raises(pa.ArrowInvalid,
                       match="pyarrow scalars cannot be mixed with other "
                             "Python scalar values currently"):
        pa.array(sequence)

    sequence = seq([np.float16("0.1"), pa.scalar("a"), None])
    with pytest.raises(pa.ArrowInvalid,
                       match="pyarrow scalars cannot be mixed with other "
                             "Python scalar values currently"):
        pa.array(sequence)

    sequence = seq([pa.scalar("a"), np.float16("0.1"), None])
    with pytest.raises(pa.ArrowInvalid,
                       match="pyarrow scalars cannot be mixed with other "
                             "Python scalar values currently"):
        pa.array(sequence)

    with pytest.raises(pa.ArrowInvalid,
                       match="Cannot append scalar of type string "
                             "to builder for type int32"):
        pa.array([pa.scalar("a")], type=pa.int32())

    with pytest.raises(pa.ArrowInvalid,
                       match="Cannot append scalar of type int64 "
                             "to builder for type null"):
        pa.array([pa.scalar(1)], type=pa.null())

env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_csv.py
ADDED
@@ -0,0 +1,1993 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import abc
import bz2
from datetime import date, datetime
from decimal import Decimal
import gc
import gzip
import io
import itertools
import os
import select
import shutil
import signal
import string
import tempfile
import threading
import time
import unittest
import weakref

import pytest

import numpy as np

import pyarrow as pa
from pyarrow.csv import (
    open_csv, read_csv, ReadOptions, ParseOptions, ConvertOptions, ISO8601,
    write_csv, WriteOptions, CSVWriter, InvalidRow)
from pyarrow.tests import util


def generate_col_names():
    # 'a', 'b'... 'z', then 'aa', 'ab'...
    letters = string.ascii_lowercase
    yield from letters
    for first in letters:
        for second in letters:
            yield first + second


def make_random_csv(num_cols=2, num_rows=10, linesep='\r\n', write_names=True):
    arr = np.random.RandomState(42).randint(0, 1000, size=(num_cols, num_rows))
    csv = io.StringIO()
    col_names = list(itertools.islice(generate_col_names(), num_cols))
    if write_names:
        csv.write(",".join(col_names))
        csv.write(linesep)
    for row in arr.T:
        csv.write(",".join(map(str, row)))
        csv.write(linesep)
    csv = csv.getvalue().encode()
    columns = [pa.array(a, type=pa.int64()) for a in arr]
    expected = pa.Table.from_arrays(columns, col_names)
    return csv, expected

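
# Illustrative sketch, not part of the original test file: how the helper
# above is typically used. make_random_csv returns both the encoded CSV
# bytes and the Table a correct reader should produce, so a round trip is a
# single equality check:

def _sketch_make_random_csv_usage():
    csv_bytes, expected = make_random_csv(num_cols=3, num_rows=5)
    table = read_csv(io.BytesIO(csv_bytes))
    assert table.equals(expected)
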
def make_empty_csv(column_names):
    csv = io.StringIO()
    csv.write(",".join(column_names))
    csv.write("\n")
    return csv.getvalue().encode()


def check_options_class(cls, **attr_values):
    """
    Check setting and getting attributes of an *Options class.
    """
    opts = cls()

    for name, values in attr_values.items():
        assert getattr(opts, name) == values[0], \
            "incorrect default value for " + name
        for v in values:
            setattr(opts, name, v)
            assert getattr(opts, name) == v, "failed setting value"

    with pytest.raises(AttributeError):
        opts.zzz_non_existent = True

    # Check constructor named arguments
    non_defaults = {name: values[1] for name, values in attr_values.items()}
    opts = cls(**non_defaults)
    for name, value in non_defaults.items():
        assert getattr(opts, name) == value


# The various options classes need to be picklable for dataset
def check_options_class_pickling(cls, pickler, **attr_values):
    opts = cls(**attr_values)
    new_opts = pickler.loads(pickler.dumps(opts,
                                           protocol=pickler.HIGHEST_PROTOCOL))
    for name, value in attr_values.items():
        assert getattr(new_opts, name) == value


class InvalidRowHandler:
    def __init__(self, result):
        self.result = result
        self.rows = []

    def __call__(self, row):
        self.rows.append(row)
        return self.result

    def __eq__(self, other):
        return (isinstance(other, InvalidRowHandler) and
                other.result == self.result)

    def __ne__(self, other):
        return (not isinstance(other, InvalidRowHandler) or
                other.result != self.result)


def test_read_options(pickle_module):
    cls = ReadOptions
    opts = cls()

    check_options_class(cls, use_threads=[True, False],
                        skip_rows=[0, 3],
                        column_names=[[], ["ab", "cd"]],
                        autogenerate_column_names=[False, True],
                        encoding=['utf8', 'utf16'],
                        skip_rows_after_names=[0, 27])

    check_options_class_pickling(cls, pickler=pickle_module,
                                 use_threads=True,
                                 skip_rows=3,
                                 column_names=["ab", "cd"],
                                 autogenerate_column_names=False,
                                 encoding='utf16',
                                 skip_rows_after_names=27)

    assert opts.block_size > 0
    opts.block_size = 12345
    assert opts.block_size == 12345

    opts = cls(block_size=1234)
    assert opts.block_size == 1234

    opts.validate()

    match = "ReadOptions: block_size must be at least 1: 0"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.block_size = 0
        opts.validate()

    match = "ReadOptions: skip_rows cannot be negative: -1"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.skip_rows = -1
        opts.validate()

    match = "ReadOptions: skip_rows_after_names cannot be negative: -1"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.skip_rows_after_names = -1
        opts.validate()

    match = "ReadOptions: autogenerate_column_names cannot be true when" \
            " column_names are provided"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.autogenerate_column_names = True
        opts.column_names = ('a', 'b')
        opts.validate()

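
# Illustrative sketch, not part of the original test file: the options
# validated above in ordinary use. column_names supplies names when the
# input has no header row, and block_size bounds how many bytes each parse
# block consumes (which drives the multi-block paths tested further down):

def _sketch_read_options_usage():
    opts = ReadOptions(column_names=["x", "y"], block_size=64)
    table = read_csv(io.BytesIO(b"1,2\n3,4\n"), read_options=opts)
    assert table.column_names == ["x", "y"]
    assert table.to_pydict() == {"x": [1, 3], "y": [2, 4]}
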
def test_parse_options(pickle_module):
    cls = ParseOptions
    skip_handler = InvalidRowHandler('skip')

    check_options_class(cls, delimiter=[',', 'x'],
                        escape_char=[False, 'y'],
                        quote_char=['"', 'z', False],
                        double_quote=[True, False],
                        newlines_in_values=[False, True],
                        ignore_empty_lines=[True, False],
                        invalid_row_handler=[None, skip_handler])

    check_options_class_pickling(cls, pickler=pickle_module,
                                 delimiter='x',
                                 escape_char='y',
                                 quote_char=False,
                                 double_quote=False,
                                 newlines_in_values=True,
                                 ignore_empty_lines=False,
                                 invalid_row_handler=skip_handler)

    cls().validate()
    opts = cls()
    opts.delimiter = "\t"
    opts.validate()

    match = "ParseOptions: delimiter cannot be \\\\r or \\\\n"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.delimiter = "\n"
        opts.validate()

    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.delimiter = "\r"
        opts.validate()

    match = "ParseOptions: quote_char cannot be \\\\r or \\\\n"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.quote_char = "\n"
        opts.validate()

    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.quote_char = "\r"
        opts.validate()

    match = "ParseOptions: escape_char cannot be \\\\r or \\\\n"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.escape_char = "\n"
        opts.validate()

    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.escape_char = "\r"
        opts.validate()

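
# Illustrative sketch, not part of the original test file: ParseOptions in
# ordinary use, with a non-default delimiter and quote character (the input
# here is an arbitrary example):

def _sketch_parse_options_usage():
    opts = ParseOptions(delimiter='|', quote_char="'")
    table = read_csv(io.BytesIO(b"a|b\n'x|y'|2\n"), parse_options=opts)
    assert table.to_pydict() == {'a': ['x|y'], 'b': [2]}
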
def test_convert_options(pickle_module):
    cls = ConvertOptions
    opts = cls()

    check_options_class(
        cls, check_utf8=[True, False],
        strings_can_be_null=[False, True],
        quoted_strings_can_be_null=[True, False],
        decimal_point=['.', ','],
        include_columns=[[], ['def', 'abc']],
        include_missing_columns=[False, True],
        auto_dict_encode=[False, True],
        timestamp_parsers=[[], [ISO8601, '%y-%m']])

    check_options_class_pickling(
        cls, pickler=pickle_module,
        check_utf8=False,
        strings_can_be_null=True,
        quoted_strings_can_be_null=False,
        decimal_point=',',
        include_columns=['def', 'abc'],
        include_missing_columns=False,
        auto_dict_encode=True,
        timestamp_parsers=[ISO8601, '%y-%m'])

    with pytest.raises(ValueError):
        opts.decimal_point = '..'

    assert opts.auto_dict_max_cardinality > 0
    opts.auto_dict_max_cardinality = 99999
    assert opts.auto_dict_max_cardinality == 99999

    assert opts.column_types == {}
    # Pass column_types as mapping
    opts.column_types = {'b': pa.int16(), 'c': pa.float32()}
    assert opts.column_types == {'b': pa.int16(), 'c': pa.float32()}
    opts.column_types = {'v': 'int16', 'w': 'null'}
    assert opts.column_types == {'v': pa.int16(), 'w': pa.null()}
    # Pass column_types as schema
    schema = pa.schema([('a', pa.int32()), ('b', pa.string())])
    opts.column_types = schema
    assert opts.column_types == {'a': pa.int32(), 'b': pa.string()}
    # Pass column_types as sequence
    opts.column_types = [('x', pa.binary())]
    assert opts.column_types == {'x': pa.binary()}

    with pytest.raises(TypeError, match='DataType expected'):
        opts.column_types = {'a': None}
    with pytest.raises(TypeError):
        opts.column_types = 0

    assert isinstance(opts.null_values, list)
    assert '' in opts.null_values
    assert 'N/A' in opts.null_values
    opts.null_values = ['xxx', 'yyy']
    assert opts.null_values == ['xxx', 'yyy']

    assert isinstance(opts.true_values, list)
    opts.true_values = ['xxx', 'yyy']
    assert opts.true_values == ['xxx', 'yyy']

    assert isinstance(opts.false_values, list)
    opts.false_values = ['xxx', 'yyy']
    assert opts.false_values == ['xxx', 'yyy']

    assert opts.timestamp_parsers == []
    opts.timestamp_parsers = [ISO8601]
    assert opts.timestamp_parsers == [ISO8601]

    opts = cls(column_types={'a': pa.null()},
               null_values=['N', 'nn'], true_values=['T', 'tt'],
               false_values=['F', 'ff'], auto_dict_max_cardinality=999,
               timestamp_parsers=[ISO8601, '%Y-%m-%d'])
    assert opts.column_types == {'a': pa.null()}
    assert opts.null_values == ['N', 'nn']
    assert opts.false_values == ['F', 'ff']
    assert opts.true_values == ['T', 'tt']
    assert opts.auto_dict_max_cardinality == 999
    assert opts.timestamp_parsers == [ISO8601, '%Y-%m-%d']

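
# Illustrative sketch, not part of the original test file: ConvertOptions
# steering conversion. column_types pins a column to an explicit type, and
# null_values combined with strings_can_be_null controls which strings
# become null:

def _sketch_convert_options_usage():
    opts = ConvertOptions(column_types={'a': pa.int8()},
                          null_values=['xxx'], strings_can_be_null=True)
    table = read_csv(io.BytesIO(b"a,b\n1,xxx\n2,ok\n"), convert_options=opts)
    assert table.schema.field('a').type == pa.int8()
    assert table.to_pydict() == {'a': [1, 2], 'b': [None, 'ok']}
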
def test_write_options():
    cls = WriteOptions
    opts = cls()

    check_options_class(
        cls, include_header=[True, False], delimiter=[',', '\t', '|'],
        quoting_style=['needed', 'none', 'all_valid'])

    assert opts.batch_size > 0
    opts.batch_size = 12345
    assert opts.batch_size == 12345

    opts = cls(batch_size=9876)
    assert opts.batch_size == 9876

    opts.validate()

    match = "WriteOptions: batch_size must be at least 1: 0"
    with pytest.raises(pa.ArrowInvalid, match=match):
        opts = cls()
        opts.batch_size = 0
        opts.validate()

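
# Illustrative sketch, not part of the original test file: WriteOptions in a
# write/read round trip through an in-memory buffer:

def _sketch_write_options_usage():
    table = pa.table({'a': [1, 2], 'b': ['x', 'y']})
    sink = io.BytesIO()
    write_csv(table, sink, WriteOptions(include_header=True))
    assert read_csv(io.BytesIO(sink.getvalue())).equals(table)
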
class BaseTestCSV(abc.ABC):
    """Common tests which are shared by streaming and non-streaming readers"""

    @abc.abstractmethod
    def read_bytes(self, b, **kwargs):
        """
        :param b: bytes to be parsed
        :param kwargs: arguments passed on to open the csv file
        :return: b parsed as a single RecordBatch
        """
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def use_threads(self):
        """Whether this test is multi-threaded"""
        raise NotImplementedError

    @staticmethod
    def check_names(table, names):
        assert table.num_columns == len(names)
        assert table.column_names == names

    def test_header_skip_rows(self):
        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

        opts = ReadOptions()
        opts.skip_rows = 1
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ef", "gh"])
        assert table.to_pydict() == {
            "ef": ["ij", "mn"],
            "gh": ["kl", "op"],
        }

        opts.skip_rows = 3
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["mn", "op"])
        assert table.to_pydict() == {
            "mn": [],
            "op": [],
        }

        opts.skip_rows = 4
        with pytest.raises(pa.ArrowInvalid):
            # Not enough rows
            table = self.read_bytes(rows, read_options=opts)

        # Can skip rows with a different number of columns
        rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
        opts.skip_rows = 2
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ij", "kl"])
        assert table.to_pydict() == {
            "ij": ["mn"],
            "kl": ["op"],
        }

        # Can skip all rows exactly when columns are given
        opts.skip_rows = 4
        opts.column_names = ['ij', 'kl']
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ij", "kl"])
        assert table.to_pydict() == {
            "ij": [],
            "kl": [],
        }

    def test_skip_rows_after_names(self):
        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

        opts = ReadOptions()
        opts.skip_rows_after_names = 1
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": ["ij", "mn"],
            "cd": ["kl", "op"],
        }

        # Can skip exact number of rows
        opts.skip_rows_after_names = 3
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": [],
            "cd": [],
        }

        # Can skip beyond all rows
        opts.skip_rows_after_names = 4
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": [],
            "cd": [],
        }

        # Can skip rows with a different number of columns
        rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
        opts.skip_rows_after_names = 2
        opts.column_names = ["f0", "f1"]
        table = self.read_bytes(rows, read_options=opts)
        self.check_names(table, ["f0", "f1"])
        assert table.to_pydict() == {
            "f0": ["ij", "mn"],
            "f1": ["kl", "op"],
        }
        opts = ReadOptions()

        # Can skip rows with new lines in the value
        rows = b'ab,cd\n"e\nf","g\n\nh"\n"ij","k\nl"\nmn,op'
        opts.skip_rows_after_names = 2
        parse_opts = ParseOptions()
        parse_opts.newlines_in_values = True
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": ["mn"],
            "cd": ["op"],
        }

        # Can skip rows when block ends in middle of quoted value
        opts.skip_rows_after_names = 2
        opts.block_size = 26
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        self.check_names(table, ["ab", "cd"])
        assert table.to_pydict() == {
            "ab": ["mn"],
            "cd": ["op"],
        }
        opts = ReadOptions()

        # Can skip rows that are beyond the first block without lexer
        rows, expected = make_random_csv(num_cols=5, num_rows=1000)
        opts.skip_rows_after_names = 900
        opts.block_size = len(rows) / 11
        table = self.read_bytes(rows, read_options=opts)
        assert table.schema == expected.schema
        assert table.num_rows == 100
        table_dict = table.to_pydict()
        for name, values in expected.to_pydict().items():
            assert values[900:] == table_dict[name]

        # Can skip rows that are beyond the first block with lexer
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        assert table.schema == expected.schema
        assert table.num_rows == 100
        table_dict = table.to_pydict()
        for name, values in expected.to_pydict().items():
            assert values[900:] == table_dict[name]

        # Skip rows and skip rows after names
        rows, expected = make_random_csv(num_cols=5, num_rows=200,
                                         write_names=False)
        opts = ReadOptions()
        opts.skip_rows = 37
        opts.skip_rows_after_names = 41
        opts.column_names = expected.schema.names
        table = self.read_bytes(rows, read_options=opts,
                                parse_options=parse_opts)
        assert table.schema == expected.schema
        assert (table.num_rows ==
                expected.num_rows - opts.skip_rows -
                opts.skip_rows_after_names)
        table_dict = table.to_pydict()
        for name, values in expected.to_pydict().items():
            assert (values[opts.skip_rows + opts.skip_rows_after_names:] ==
                    table_dict[name])

    def test_row_number_offset_in_errors(self):
        # Row numbers are only correctly counted in serial reads
        def format_msg(msg_format, row, *args):
            if self.use_threads:
                row_info = ""
            else:
                row_info = "Row #{}: ".format(row)
            return msg_format.format(row_info, *args)

        csv, _ = make_random_csv(4, 100, write_names=True)

        read_options = ReadOptions()
        read_options.block_size = len(csv) / 3
        convert_options = ConvertOptions()
        convert_options.column_types = {"a": pa.int32()}

        # Test without skip_rows and column names in the csv
        csv_bad_columns = csv + b"1,2\r\n"
        message_columns = format_msg("{}Expected 4 columns, got 2", 102)
        with pytest.raises(pa.ArrowInvalid, match=message_columns):
            self.read_bytes(csv_bad_columns,
                            read_options=read_options,
                            convert_options=convert_options)

        csv_bad_type = csv + b"a,b,c,d\r\n"
        message_value = format_msg(
            "In CSV column #0: {}"
            "CSV conversion error to int32: invalid value 'a'",
            102, csv)
        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type,
                            read_options=read_options,
                            convert_options=convert_options)

        long_row = (b"this is a long row" * 15) + b",3\r\n"
        csv_bad_columns_long = csv + long_row
        message_long = format_msg("{}Expected 4 columns, got 2: {} ...", 102,
                                  long_row[0:96].decode("utf-8"))
        with pytest.raises(pa.ArrowInvalid, match=message_long):
            self.read_bytes(csv_bad_columns_long,
                            read_options=read_options,
                            convert_options=convert_options)

        # Test skipping rows after the names
        read_options.skip_rows_after_names = 47

        with pytest.raises(pa.ArrowInvalid, match=message_columns):
            self.read_bytes(csv_bad_columns,
                            read_options=read_options,
                            convert_options=convert_options)

        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type,
                            read_options=read_options,
                            convert_options=convert_options)

        with pytest.raises(pa.ArrowInvalid, match=message_long):
            self.read_bytes(csv_bad_columns_long,
                            read_options=read_options,
                            convert_options=convert_options)

        read_options.skip_rows_after_names = 0

        # Test without skip_rows and column names not in the csv
        csv, _ = make_random_csv(4, 100, write_names=False)
        read_options.column_names = ["a", "b", "c", "d"]
        csv_bad_columns = csv + b"1,2\r\n"
        message_columns = format_msg("{}Expected 4 columns, got 2", 101)
        with pytest.raises(pa.ArrowInvalid, match=message_columns):
            self.read_bytes(csv_bad_columns,
                            read_options=read_options,
                            convert_options=convert_options)

        csv_bad_columns_long = csv + long_row
        message_long = format_msg("{}Expected 4 columns, got 2: {} ...", 101,
                                  long_row[0:96].decode("utf-8"))
        with pytest.raises(pa.ArrowInvalid, match=message_long):
            self.read_bytes(csv_bad_columns_long,
                            read_options=read_options,
                            convert_options=convert_options)

        csv_bad_type = csv + b"a,b,c,d\r\n"
        message_value = format_msg(
            "In CSV column #0: {}"
            "CSV conversion error to int32: invalid value 'a'",
            101)
        message_value = message_value.format(len(csv))
        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type,
                            read_options=read_options,
                            convert_options=convert_options)

        # Test with skip_rows and column names not in the csv
        read_options.skip_rows = 23
        with pytest.raises(pa.ArrowInvalid, match=message_columns):
            self.read_bytes(csv_bad_columns,
                            read_options=read_options,
                            convert_options=convert_options)

        with pytest.raises(pa.ArrowInvalid, match=message_value):
            self.read_bytes(csv_bad_type,
                            read_options=read_options,
                            convert_options=convert_options)

    def test_invalid_row_handler(self, pickle_module):
        rows = b"a,b\nc\nd,e\nf,g,h\ni,j\n"
        parse_opts = ParseOptions()
        with pytest.raises(
                ValueError,
                match="Expected 2 columns, got 1: c"):
            self.read_bytes(rows, parse_options=parse_opts)

        # Skip requested
        parse_opts.invalid_row_handler = InvalidRowHandler('skip')
        table = self.read_bytes(rows, parse_options=parse_opts)
        assert table.to_pydict() == {
            'a': ["d", "i"],
            'b': ["e", "j"],
        }

        def row_num(x):
            return None if self.use_threads else x
        expected_rows = [
            InvalidRow(2, 1, row_num(2), "c"),
            InvalidRow(2, 3, row_num(4), "f,g,h"),
        ]
        assert parse_opts.invalid_row_handler.rows == expected_rows

        # Error requested
        parse_opts.invalid_row_handler = InvalidRowHandler('error')
        with pytest.raises(
                ValueError,
                match="Expected 2 columns, got 1: c"):
            self.read_bytes(rows, parse_options=parse_opts)
        expected_rows = [InvalidRow(2, 1, row_num(2), "c")]
        assert parse_opts.invalid_row_handler.rows == expected_rows

        # Test ser/de
        parse_opts.invalid_row_handler = InvalidRowHandler('skip')
        parse_opts = pickle_module.loads(pickle_module.dumps(parse_opts))

        table = self.read_bytes(rows, parse_options=parse_opts)
        assert table.to_pydict() == {
            'a': ["d", "i"],
            'b': ["e", "j"],
        }

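
# Illustrative sketch, not part of the original test file: the
# invalid_row_handler protocol exercised above, in its simplest form. Any
# callable that accepts an InvalidRow and returns 'skip' or 'error' works;
# returning 'skip' silently drops malformed rows:

def _sketch_invalid_row_handler_usage():
    opts = ParseOptions(invalid_row_handler=lambda row: 'skip')
    table = read_csv(io.BytesIO(b"a,b\n1\n2,3\n"), parse_options=opts)
    assert table.to_pydict() == {'a': [2], 'b': [3]}
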
class BaseCSVTableRead(BaseTestCSV):
|
672 |
+
|
673 |
+
def read_csv(self, csv, *args, validate_full=True, **kwargs):
|
674 |
+
"""
|
675 |
+
Reads the CSV file into memory using pyarrow's read_csv
|
676 |
+
csv The CSV bytes
|
677 |
+
args Positional arguments to be forwarded to pyarrow's read_csv
|
678 |
+
validate_full Whether or not to fully validate the resulting table
|
679 |
+
kwargs Keyword arguments to be forwarded to pyarrow's read_csv
|
680 |
+
"""
|
681 |
+
assert isinstance(self.use_threads, bool) # sanity check
|
682 |
+
read_options = kwargs.setdefault('read_options', ReadOptions())
|
683 |
+
read_options.use_threads = self.use_threads
|
684 |
+
table = read_csv(csv, *args, **kwargs)
|
685 |
+
table.validate(full=validate_full)
|
686 |
+
return table
|
687 |
+
|
688 |
+
def read_bytes(self, b, **kwargs):
|
689 |
+
return self.read_csv(pa.py_buffer(b), **kwargs)
|
690 |
+
|
691 |
+
def test_file_object(self):
|
692 |
+
data = b"a,b\n1,2\n"
|
693 |
+
expected_data = {'a': [1], 'b': [2]}
|
694 |
+
bio = io.BytesIO(data)
|
695 |
+
table = self.read_csv(bio)
|
696 |
+
assert table.to_pydict() == expected_data
|
697 |
+
# Text files not allowed
|
698 |
+
sio = io.StringIO(data.decode())
|
699 |
+
with pytest.raises(TypeError):
|
700 |
+
self.read_csv(sio)
|
701 |
+
|
702 |
+
def test_header(self):
|
703 |
+
rows = b"abc,def,gh\n"
|
704 |
+
table = self.read_bytes(rows)
|
705 |
+
assert isinstance(table, pa.Table)
|
706 |
+
self.check_names(table, ["abc", "def", "gh"])
|
707 |
+
assert table.num_rows == 0
|
708 |
+
|
709 |
+
def test_bom(self):
|
710 |
+
rows = b"\xef\xbb\xbfa,b\n1,2\n"
|
711 |
+
expected_data = {'a': [1], 'b': [2]}
|
712 |
+
table = self.read_bytes(rows)
|
713 |
+
assert table.to_pydict() == expected_data
|
714 |
+
|
715 |
+
def test_one_chunk(self):
|
716 |
+
# ARROW-7661: lack of newline at end of file should not produce
|
717 |
+
# an additional chunk.
|
718 |
+
rows = [b"a,b", b"1,2", b"3,4", b"56,78"]
|
719 |
+
for line_ending in [b'\n', b'\r', b'\r\n']:
|
720 |
+
for file_ending in [b'', line_ending]:
|
721 |
+
data = line_ending.join(rows) + file_ending
|
722 |
+
table = self.read_bytes(data)
|
723 |
+
assert len(table.to_batches()) == 1
|
724 |
+
assert table.to_pydict() == {
|
725 |
+
"a": [1, 3, 56],
|
726 |
+
"b": [2, 4, 78],
|
727 |
+
}
|
728 |
+
|
729 |
+
def test_header_column_names(self):
|
730 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
731 |
+
|
732 |
+
opts = ReadOptions()
|
733 |
+
opts.column_names = ["x", "y"]
|
734 |
+
table = self.read_bytes(rows, read_options=opts)
|
735 |
+
self.check_names(table, ["x", "y"])
|
736 |
+
assert table.to_pydict() == {
|
737 |
+
"x": ["ab", "ef", "ij", "mn"],
|
738 |
+
"y": ["cd", "gh", "kl", "op"],
|
739 |
+
}
|
740 |
+
|
741 |
+
opts.skip_rows = 3
|
742 |
+
table = self.read_bytes(rows, read_options=opts)
|
743 |
+
self.check_names(table, ["x", "y"])
|
744 |
+
assert table.to_pydict() == {
|
745 |
+
"x": ["mn"],
|
746 |
+
"y": ["op"],
|
747 |
+
}
|
748 |
+
|
749 |
+
opts.skip_rows = 4
|
750 |
+
table = self.read_bytes(rows, read_options=opts)
|
751 |
+
self.check_names(table, ["x", "y"])
|
752 |
+
assert table.to_pydict() == {
|
753 |
+
"x": [],
|
754 |
+
"y": [],
|
755 |
+
}
|
756 |
+
|
757 |
+
opts.skip_rows = 5
|
758 |
+
with pytest.raises(pa.ArrowInvalid):
|
759 |
+
# Not enough rows
|
760 |
+
table = self.read_bytes(rows, read_options=opts)
|
761 |
+
|
762 |
+
# Unexpected number of columns
|
763 |
+
opts.skip_rows = 0
|
764 |
+
opts.column_names = ["x", "y", "z"]
|
765 |
+
with pytest.raises(pa.ArrowInvalid,
|
766 |
+
match="Expected 3 columns, got 2"):
|
767 |
+
table = self.read_bytes(rows, read_options=opts)
|
768 |
+
|
769 |
+
# Can skip rows with a different number of columns
|
770 |
+
rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
|
771 |
+
opts.skip_rows = 2
|
772 |
+
opts.column_names = ["x", "y"]
|
773 |
+
table = self.read_bytes(rows, read_options=opts)
|
774 |
+
self.check_names(table, ["x", "y"])
|
775 |
+
assert table.to_pydict() == {
|
776 |
+
"x": ["ij", "mn"],
|
777 |
+
"y": ["kl", "op"],
|
778 |
+
}
|
779 |
+
|
780 |
+
def test_header_autogenerate_column_names(self):
|
781 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
782 |
+
|
783 |
+
opts = ReadOptions()
|
784 |
+
opts.autogenerate_column_names = True
|
785 |
+
table = self.read_bytes(rows, read_options=opts)
|
786 |
+
self.check_names(table, ["f0", "f1"])
|
787 |
+
assert table.to_pydict() == {
|
788 |
+
"f0": ["ab", "ef", "ij", "mn"],
|
789 |
+
"f1": ["cd", "gh", "kl", "op"],
|
790 |
+
}
|
791 |
+
|
792 |
+
opts.skip_rows = 3
|
793 |
+
table = self.read_bytes(rows, read_options=opts)
|
794 |
+
self.check_names(table, ["f0", "f1"])
|
795 |
+
assert table.to_pydict() == {
|
796 |
+
"f0": ["mn"],
|
797 |
+
"f1": ["op"],
|
798 |
+
}
|
799 |
+
|
800 |
+
# Not enough rows, impossible to infer number of columns
|
801 |
+
opts.skip_rows = 4
|
802 |
+
with pytest.raises(pa.ArrowInvalid):
|
803 |
+
table = self.read_bytes(rows, read_options=opts)
|
804 |
+
|
805 |
+
def test_include_columns(self):
|
806 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
807 |
+
|
808 |
+
convert_options = ConvertOptions()
|
809 |
+
convert_options.include_columns = ['ab']
|
810 |
+
table = self.read_bytes(rows, convert_options=convert_options)
|
811 |
+
self.check_names(table, ["ab"])
|
812 |
+
assert table.to_pydict() == {
|
813 |
+
"ab": ["ef", "ij", "mn"],
|
814 |
+
}
|
815 |
+
|
816 |
+
# Order of include_columns is respected, regardless of CSV order
|
817 |
+
convert_options.include_columns = ['cd', 'ab']
|
818 |
+
table = self.read_bytes(rows, convert_options=convert_options)
|
819 |
+
schema = pa.schema([('cd', pa.string()),
|
820 |
+
('ab', pa.string())])
|
821 |
+
assert table.schema == schema
|
822 |
+
assert table.to_pydict() == {
|
823 |
+
"cd": ["gh", "kl", "op"],
|
824 |
+
"ab": ["ef", "ij", "mn"],
|
825 |
+
}
|
826 |
+
|
827 |
+
# Include a column not in the CSV file => raises by default
|
828 |
+
convert_options.include_columns = ['xx', 'ab', 'yy']
|
829 |
+
with pytest.raises(KeyError,
|
830 |
+
match="Column 'xx' in include_columns "
|
831 |
+
"does not exist in CSV file"):
|
832 |
+
self.read_bytes(rows, convert_options=convert_options)
|
833 |
+
|
834 |
+
def test_include_missing_columns(self):
|
835 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
836 |
+
|
837 |
+
read_options = ReadOptions()
|
838 |
+
convert_options = ConvertOptions()
|
839 |
+
convert_options.include_columns = ['xx', 'ab', 'yy']
|
840 |
+
convert_options.include_missing_columns = True
|
841 |
+
table = self.read_bytes(rows, read_options=read_options,
|
842 |
+
convert_options=convert_options)
|
843 |
+
schema = pa.schema([('xx', pa.null()),
|
844 |
+
('ab', pa.string()),
|
845 |
+
('yy', pa.null())])
|
846 |
+
assert table.schema == schema
|
847 |
+
assert table.to_pydict() == {
|
848 |
+
"xx": [None, None, None],
|
849 |
+
"ab": ["ef", "ij", "mn"],
|
850 |
+
"yy": [None, None, None],
|
851 |
+
}
|
852 |
+
|
853 |
+
# Combining with `column_names`
|
854 |
+
read_options.column_names = ["xx", "yy"]
|
855 |
+
convert_options.include_columns = ["yy", "cd"]
|
856 |
+
table = self.read_bytes(rows, read_options=read_options,
|
857 |
+
convert_options=convert_options)
|
858 |
+
schema = pa.schema([('yy', pa.string()),
|
859 |
+
('cd', pa.null())])
|
860 |
+
assert table.schema == schema
|
861 |
+
assert table.to_pydict() == {
|
862 |
+
"yy": ["cd", "gh", "kl", "op"],
|
863 |
+
"cd": [None, None, None, None],
|
864 |
+
}
|
865 |
+
|
866 |
+
# And with `column_types` as well
|
867 |
+
convert_options.column_types = {"yy": pa.binary(),
|
868 |
+
"cd": pa.int32()}
|
869 |
+
table = self.read_bytes(rows, read_options=read_options,
|
870 |
+
convert_options=convert_options)
|
871 |
+
schema = pa.schema([('yy', pa.binary()),
|
872 |
+
('cd', pa.int32())])
|
873 |
+
assert table.schema == schema
|
874 |
+
assert table.to_pydict() == {
|
875 |
+
"yy": [b"cd", b"gh", b"kl", b"op"],
|
876 |
+
"cd": [None, None, None, None],
|
877 |
+
}
|
878 |
+
|
879 |
+
def test_simple_ints(self):
|
880 |
+
# Infer integer columns
|
881 |
+
rows = b"a,b,c\n1,2,3\n4,5,6\n"
|
882 |
+
table = self.read_bytes(rows)
|
883 |
+
schema = pa.schema([('a', pa.int64()),
|
884 |
+
('b', pa.int64()),
|
885 |
+
('c', pa.int64())])
|
886 |
+
assert table.schema == schema
|
887 |
+
assert table.to_pydict() == {
|
888 |
+
'a': [1, 4],
|
889 |
+
'b': [2, 5],
|
890 |
+
'c': [3, 6],
|
891 |
+
}
|
892 |
+
|
893 |
+
def test_simple_varied(self):
|
894 |
+
# Infer various kinds of data
|
895 |
+
rows = b"a,b,c,d\n1,2,3,0\n4.0,-5,foo,True\n"
|
896 |
+
table = self.read_bytes(rows)
|
897 |
+
schema = pa.schema([('a', pa.float64()),
|
898 |
+
('b', pa.int64()),
|
899 |
+
('c', pa.string()),
|
900 |
+
('d', pa.bool_())])
|
901 |
+
assert table.schema == schema
|
902 |
+
assert table.to_pydict() == {
|
903 |
+
'a': [1.0, 4.0],
|
904 |
+
'b': [2, -5],
|
905 |
+
'c': ["3", "foo"],
|
906 |
+
'd': [False, True],
|
907 |
+
}
|
908 |
+
|
909 |
+
def test_simple_nulls(self):
|
910 |
+
# Infer various kinds of data, with nulls
|
911 |
+
rows = (b"a,b,c,d,e,f\n"
|
912 |
+
b"1,2,,,3,N/A\n"
|
913 |
+
b"nan,-5,foo,,nan,TRUE\n"
|
914 |
+
b"4.5,#N/A,nan,,\xff,false\n")
|
915 |
+
table = self.read_bytes(rows)
|
916 |
+
schema = pa.schema([('a', pa.float64()),
|
917 |
+
('b', pa.int64()),
|
918 |
+
('c', pa.string()),
|
919 |
+
('d', pa.null()),
|
920 |
+
('e', pa.binary()),
|
921 |
+
('f', pa.bool_())])
|
922 |
+
assert table.schema == schema
|
923 |
+
assert table.to_pydict() == {
|
924 |
+
'a': [1.0, None, 4.5],
|
925 |
+
'b': [2, -5, None],
|
926 |
+
'c': ["", "foo", "nan"],
|
927 |
+
'd': [None, None, None],
|
928 |
+
'e': [b"3", b"nan", b"\xff"],
|
929 |
+
'f': [None, True, False],
|
930 |
+
}
|
931 |
+
|
932 |
+
def test_decimal_point(self):
|
933 |
+
# Infer floats with a custom decimal point
|
934 |
+
parse_options = ParseOptions(delimiter=';')
|
935 |
+
rows = b"a;b\n1.25;2,5\nNA;-3\n-4;NA"
|
936 |
+
|
937 |
+
table = self.read_bytes(rows, parse_options=parse_options)
|
938 |
+
schema = pa.schema([('a', pa.float64()),
|
939 |
+
('b', pa.string())])
|
940 |
+
assert table.schema == schema
|
941 |
+
assert table.to_pydict() == {
|
942 |
+
'a': [1.25, None, -4.0],
|
943 |
+
'b': ["2,5", "-3", "NA"],
|
944 |
+
}
|
945 |
+
|
946 |
+
convert_options = ConvertOptions(decimal_point=',')
|
947 |
+
table = self.read_bytes(rows, parse_options=parse_options,
|
948 |
+
convert_options=convert_options)
|
949 |
+
schema = pa.schema([('a', pa.string()),
|
950 |
+
('b', pa.float64())])
|
951 |
+
assert table.schema == schema
|
952 |
+
assert table.to_pydict() == {
|
953 |
+
'a': ["1.25", "NA", "-4"],
|
954 |
+
'b': [2.5, -3.0, None],
|
955 |
+
}
|
956 |
+
|
957 |
+
def test_simple_timestamps(self):
|
958 |
+
# Infer a timestamp column
|
959 |
+
rows = (b"a,b,c\n"
|
960 |
+
b"1970,1970-01-01 00:00:00,1970-01-01 00:00:00.123\n"
|
961 |
+
b"1989,1989-07-14 01:00:00,1989-07-14 01:00:00.123456\n")
|
962 |
+
table = self.read_bytes(rows)
|
963 |
+
schema = pa.schema([('a', pa.int64()),
|
964 |
+
('b', pa.timestamp('s')),
|
965 |
+
('c', pa.timestamp('ns'))])
|
966 |
+
assert table.schema == schema
|
967 |
+
assert table.to_pydict() == {
|
968 |
+
'a': [1970, 1989],
|
969 |
+
'b': [datetime(1970, 1, 1), datetime(1989, 7, 14, 1)],
|
970 |
+
'c': [datetime(1970, 1, 1, 0, 0, 0, 123000),
|
971 |
+
datetime(1989, 7, 14, 1, 0, 0, 123456)],
|
972 |
+
}
|
973 |
+
|
974 |
+
def test_timestamp_parsers(self):
|
975 |
+
# Infer timestamps with custom parsers
|
976 |
+
rows = b"a,b\n1970/01/01,1980-01-01 00\n1970/01/02,1980-01-02 00\n"
|
977 |
+
opts = ConvertOptions()
|
978 |
+
|
979 |
+
table = self.read_bytes(rows, convert_options=opts)
|
980 |
+
schema = pa.schema([('a', pa.string()),
|
981 |
+
('b', pa.timestamp('s'))])
|
982 |
+
assert table.schema == schema
|
983 |
+
assert table.to_pydict() == {
|
984 |
+
'a': ['1970/01/01', '1970/01/02'],
|
985 |
+
'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
|
986 |
+
}
|
987 |
+
|
988 |
+
opts.timestamp_parsers = ['%Y/%m/%d']
|
989 |
+
table = self.read_bytes(rows, convert_options=opts)
|
990 |
+
schema = pa.schema([('a', pa.timestamp('s')),
|
991 |
+
('b', pa.string())])
|
992 |
+
assert table.schema == schema
|
993 |
+
assert table.to_pydict() == {
|
994 |
+
'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
|
995 |
+
'b': ['1980-01-01 00', '1980-01-02 00'],
|
996 |
+
}
|
997 |
+
|
998 |
+
opts.timestamp_parsers = ['%Y/%m/%d', ISO8601]
|
999 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1000 |
+
schema = pa.schema([('a', pa.timestamp('s')),
|
1001 |
+
('b', pa.timestamp('s'))])
|
1002 |
+
assert table.schema == schema
|
1003 |
+
assert table.to_pydict() == {
|
1004 |
+
'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
|
1005 |
+
'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
|
1006 |
+
}
|
1007 |
+
|
    def test_dates(self):
        # Dates are inferred as date32 by default
        rows = b"a,b\n1970-01-01,1970-01-02\n1971-01-01,1971-01-02\n"
        table = self.read_bytes(rows)
        schema = pa.schema([('a', pa.date32()),
                            ('b', pa.date32())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': [date(1970, 1, 1), date(1971, 1, 1)],
            'b': [date(1970, 1, 2), date(1971, 1, 2)],
        }

        # Can ask for date types explicitly
        opts = ConvertOptions()
        opts.column_types = {'a': pa.date32(), 'b': pa.date64()}
        table = self.read_bytes(rows, convert_options=opts)
        schema = pa.schema([('a', pa.date32()),
                            ('b', pa.date64())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': [date(1970, 1, 1), date(1971, 1, 1)],
            'b': [date(1970, 1, 2), date(1971, 1, 2)],
        }

        # Can ask for timestamp types explicitly
        opts = ConvertOptions()
        opts.column_types = {'a': pa.timestamp('s'), 'b': pa.timestamp('ms')}
        table = self.read_bytes(rows, convert_options=opts)
        schema = pa.schema([('a', pa.timestamp('s')),
                            ('b', pa.timestamp('ms'))])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': [datetime(1970, 1, 1), datetime(1971, 1, 1)],
            'b': [datetime(1970, 1, 2), datetime(1971, 1, 2)],
        }

    def test_times(self):
        # Times are inferred as time32[s] by default
        from datetime import time

        rows = b"a,b\n12:34:56,12:34:56.789\n23:59:59,23:59:59.999\n"
        table = self.read_bytes(rows)
        # Column 'b' has subseconds, so cannot be inferred as time32[s]
        schema = pa.schema([('a', pa.time32('s')),
                            ('b', pa.string())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': [time(12, 34, 56), time(23, 59, 59)],
            'b': ["12:34:56.789", "23:59:59.999"],
        }

        # Can ask for time types explicitly
        opts = ConvertOptions()
        opts.column_types = {'a': pa.time64('us'), 'b': pa.time32('ms')}
        table = self.read_bytes(rows, convert_options=opts)
        schema = pa.schema([('a', pa.time64('us')),
                            ('b', pa.time32('ms'))])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': [time(12, 34, 56), time(23, 59, 59)],
            'b': [time(12, 34, 56, 789000), time(23, 59, 59, 999000)],
        }
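    # Minimal sketch of requesting date/time types up front, outside the
    # test harness (column names are illustrative; source is any path or
    # file object):
    #
    #   opts = ConvertOptions(column_types={'a': pa.date32(),
    #                                       'b': pa.time32('ms')})
    #   table = read_csv(source, convert_options=opts)
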
    def test_auto_dict_encode(self):
        opts = ConvertOptions(auto_dict_encode=True)
        rows = "a,b\nab,1\ncdé,2\ncdé,3\nab,4".encode()
        table = self.read_bytes(rows, convert_options=opts)
        schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.string())),
                            ('b', pa.int64())])
        expected = {
            'a': ["ab", "cdé", "cdé", "ab"],
            'b': [1, 2, 3, 4],
        }
        assert table.schema == schema
        assert table.to_pydict() == expected

        opts.auto_dict_max_cardinality = 2
        table = self.read_bytes(rows, convert_options=opts)
        assert table.schema == schema
        assert table.to_pydict() == expected

        # Cardinality above max => plain-encoded
        opts.auto_dict_max_cardinality = 1
        table = self.read_bytes(rows, convert_options=opts)
        assert table.schema == pa.schema([('a', pa.string()),
                                          ('b', pa.int64())])
        assert table.to_pydict() == expected

        # With invalid UTF8, not checked
        opts.auto_dict_max_cardinality = 50
        opts.check_utf8 = False
        rows = b"a,b\nab,1\ncd\xff,2\nab,3"
        table = self.read_bytes(rows, convert_options=opts,
                                validate_full=False)
        assert table.schema == schema
        dict_values = table['a'].chunk(0).dictionary
        assert len(dict_values) == 2
        assert dict_values[0].as_py() == "ab"
        assert dict_values[1].as_buffer() == b"cd\xff"

        # With invalid UTF8, checked
        opts.check_utf8 = True
        table = self.read_bytes(rows, convert_options=opts)
        schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.binary())),
                            ('b', pa.int64())])
        expected = {
            'a': [b"ab", b"cd\xff", b"ab"],
            'b': [1, 2, 3],
        }
        assert table.schema == schema
        assert table.to_pydict() == expected
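    # Minimal sketch of dictionary encoding outside the tests (the
    # cardinality threshold is illustrative): columns whose distinct-value
    # count stays within the threshold are read back as
    # dictionary(int32, string); the rest fall back to plain encoding.
    #
    #   opts = ConvertOptions(auto_dict_encode=True,
    #                         auto_dict_max_cardinality=1000)
    #   table = read_csv(source, convert_options=opts)
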
    def test_custom_nulls(self):
        # Infer nulls with custom values
        opts = ConvertOptions(null_values=['Xxx', 'Zzz'])
        rows = b"""a,b,c,d\nZzz,"Xxx",1,2\nXxx,#N/A,,Zzz\n"""
        table = self.read_bytes(rows, convert_options=opts)
        schema = pa.schema([('a', pa.null()),
                            ('b', pa.string()),
                            ('c', pa.string()),
                            ('d', pa.int64())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': [None, None],
            'b': ["Xxx", "#N/A"],
            'c': ["1", ""],
            'd': [2, None],
        }

        opts = ConvertOptions(null_values=['Xxx', 'Zzz'],
                              strings_can_be_null=True)
        table = self.read_bytes(rows, convert_options=opts)
        assert table.to_pydict() == {
            'a': [None, None],
            'b': [None, "#N/A"],
            'c': ["1", ""],
            'd': [2, None],
        }
        opts.quoted_strings_can_be_null = False
        table = self.read_bytes(rows, convert_options=opts)
        assert table.to_pydict() == {
            'a': [None, None],
            'b': ["Xxx", "#N/A"],
            'c': ["1", ""],
            'd': [2, None],
        }

        opts = ConvertOptions(null_values=[])
        rows = b"a,b\n#N/A,\n"
        table = self.read_bytes(rows, convert_options=opts)
        schema = pa.schema([('a', pa.string()),
                            ('b', pa.string())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': ["#N/A"],
            'b': [""],
        }

    def test_custom_bools(self):
        # Infer booleans with custom values
        opts = ConvertOptions(true_values=['T', 'yes'],
                              false_values=['F', 'no'])
        rows = (b"a,b,c\n"
                b"True,T,t\n"
                b"False,F,f\n"
                b"True,yes,yes\n"
                b"False,no,no\n"
                b"N/A,N/A,N/A\n")
        table = self.read_bytes(rows, convert_options=opts)
        schema = pa.schema([('a', pa.string()),
                            ('b', pa.bool_()),
                            ('c', pa.string())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': ["True", "False", "True", "False", "N/A"],
            'b': [True, False, True, False, None],
            'c': ["t", "f", "yes", "no", "N/A"],
        }
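    # Minimal sketch combining the custom sentinels above for a typical
    # messy CSV (the sentinel strings are illustrative):
    #
    #   opts = ConvertOptions(null_values=['N/A', '-'],
    #                         strings_can_be_null=True,
    #                         true_values=['yes'], false_values=['no'])
    #   table = read_csv(source, convert_options=opts)
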
    def test_column_types(self):
        # Ask for specific column types in ConvertOptions
        opts = ConvertOptions(column_types={'b': 'float32',
                                            'c': 'string',
                                            'd': 'boolean',
                                            'e': pa.decimal128(11, 2),
                                            'zz': 'null'})
        rows = b"a,b,c,d,e\n1,2,3,true,1.0\n4,-5,6,false,0\n"
        table = self.read_bytes(rows, convert_options=opts)
        schema = pa.schema([('a', pa.int64()),
                            ('b', pa.float32()),
                            ('c', pa.string()),
                            ('d', pa.bool_()),
                            ('e', pa.decimal128(11, 2))])
        expected = {
            'a': [1, 4],
            'b': [2.0, -5.0],
            'c': ["3", "6"],
            'd': [True, False],
            'e': [Decimal("1.00"), Decimal("0.00")]
        }
        assert table.schema == schema
        assert table.to_pydict() == expected
        # Pass column_types as schema
        opts = ConvertOptions(
            column_types=pa.schema([('b', pa.float32()),
                                    ('c', pa.string()),
                                    ('d', pa.bool_()),
                                    ('e', pa.decimal128(11, 2)),
                                    ('zz', pa.bool_())]))
        table = self.read_bytes(rows, convert_options=opts)
        assert table.schema == schema
        assert table.to_pydict() == expected
        # One of the columns in column_types fails converting
        rows = b"a,b,c,d,e\n1,XXX,3,true,5\n4,-5,6,false,7\n"
        with pytest.raises(pa.ArrowInvalid) as exc:
            self.read_bytes(rows, convert_options=opts)
        err = str(exc.value)
        assert "In CSV column #1: " in err
        assert "CSV conversion error to float: invalid value 'XXX'" in err

    def test_column_types_dict(self):
        # Ask for dict-encoded column types in ConvertOptions
        column_types = [
            ('a', pa.dictionary(pa.int32(), pa.utf8())),
            ('b', pa.dictionary(pa.int32(), pa.int64())),
            ('c', pa.dictionary(pa.int32(), pa.decimal128(11, 2))),
            ('d', pa.dictionary(pa.int32(), pa.large_utf8()))]

        opts = ConvertOptions(column_types=dict(column_types))
        rows = (b"a,b,c,d\n"
                b"abc,123456,1.0,zz\n"
                b"defg,123456,0.5,xx\n"
                b"abc,N/A,1.0,xx\n")
        table = self.read_bytes(rows, convert_options=opts)

        schema = pa.schema(column_types)
        expected = {
            'a': ["abc", "defg", "abc"],
            'b': [123456, 123456, None],
            'c': [Decimal("1.00"), Decimal("0.50"), Decimal("1.00")],
            'd': ["zz", "xx", "xx"],
        }
        assert table.schema == schema
        assert table.to_pydict() == expected

        # Unsupported index type
        column_types[0] = ('a', pa.dictionary(pa.int8(), pa.utf8()))

        opts = ConvertOptions(column_types=dict(column_types))
        with pytest.raises(NotImplementedError):
            table = self.read_bytes(rows, convert_options=opts)
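    # Minimal sketch of forcing column types up front (names are
    # illustrative); as tested above, both a dict and a pa.schema work:
    #
    #   opts = ConvertOptions(column_types={'price': pa.decimal128(11, 2),
    #                                       'flag': pa.bool_()})
    #   table = read_csv(source, convert_options=opts)
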
    def test_column_types_with_column_names(self):
        # When both `column_names` and `column_types` are given, names
        # in `column_types` should refer to names in `column_names`
        rows = b"a,b\nc,d\ne,f\n"
        read_options = ReadOptions(column_names=['x', 'y'])
        convert_options = ConvertOptions(column_types={'x': pa.binary()})
        table = self.read_bytes(rows, read_options=read_options,
                                convert_options=convert_options)
        schema = pa.schema([('x', pa.binary()),
                            ('y', pa.string())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'x': [b'a', b'c', b'e'],
            'y': ['b', 'd', 'f'],
        }

    def test_no_ending_newline(self):
        # No \n after last line
        rows = b"a,b,c\n1,2,3\n4,5,6"
        table = self.read_bytes(rows)
        assert table.to_pydict() == {
            'a': [1, 4],
            'b': [2, 5],
            'c': [3, 6],
        }

    def test_trivial(self):
        # A bit pointless, but at least it shouldn't crash
        rows = b",\n\n"
        table = self.read_bytes(rows)
        assert table.to_pydict() == {'': []}

    def test_empty_lines(self):
        rows = b"a,b\n\r1,2\r\n\r\n3,4\r\n"
        table = self.read_bytes(rows)
        assert table.to_pydict() == {
            'a': [1, 3],
            'b': [2, 4],
        }
        parse_options = ParseOptions(ignore_empty_lines=False)
        table = self.read_bytes(rows, parse_options=parse_options)
        assert table.to_pydict() == {
            'a': [None, 1, None, 3],
            'b': [None, 2, None, 4],
        }
        read_options = ReadOptions(skip_rows=2)
        table = self.read_bytes(rows, parse_options=parse_options,
                                read_options=read_options)
        assert table.to_pydict() == {
            '1': [None, 3],
            '2': [None, 4],
        }
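    # Minimal sketch of the read/parse options exercised above (values are
    # illustrative; source is any path or file object):
    #
    #   parse_options = ParseOptions(ignore_empty_lines=False)
    #   read_options = ReadOptions(skip_rows=1, column_names=['x', 'y'])
    #   table = read_csv(source, read_options=read_options,
    #                    parse_options=parse_options)
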
    def test_invalid_csv(self):
        # Various CSV errors
        rows = b"a,b,c\n1,2\n4,5,6\n"
        with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 2"):
            self.read_bytes(rows)
        rows = b"a,b,c\n1,2,3\n4"
        with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 1"):
            self.read_bytes(rows)
        for rows in [b"", b"\n", b"\r\n", b"\r", b"\n\n"]:
            with pytest.raises(pa.ArrowInvalid, match="Empty CSV file"):
                self.read_bytes(rows)

    def test_options_delimiter(self):
        rows = b"a;b,c\nde,fg;eh\n"
        table = self.read_bytes(rows)
        assert table.to_pydict() == {
            'a;b': ['de'],
            'c': ['fg;eh'],
        }
        opts = ParseOptions(delimiter=';')
        table = self.read_bytes(rows, parse_options=opts)
        assert table.to_pydict() == {
            'a': ['de,fg'],
            'b,c': ['eh'],
        }

    def test_small_random_csv(self):
        csv, expected = make_random_csv(num_cols=2, num_rows=10)
        table = self.read_bytes(csv)
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()

    def test_stress_block_sizes(self):
        # Test a number of small block sizes to stress block stitching
        csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
        block_sizes = [11, 12, 13, 17, 37, 111]
        csvs = [csv_base, csv_base.rstrip(b'\r\n')]
        for csv in csvs:
            for block_size in block_sizes:
                read_options = ReadOptions(block_size=block_size)
                table = self.read_bytes(csv, read_options=read_options)
                assert table.schema == expected.schema
                if not table.equals(expected):
                    # Better error output
                    assert table.to_pydict() == expected.to_pydict()

    def test_stress_convert_options_blowup(self):
        # ARROW-6481: A convert_options with a very large number of columns
        # should not blow memory and CPU time.
        try:
            clock = time.thread_time
        except AttributeError:
            clock = time.time
        num_columns = 10000
        col_names = ["K{}".format(i) for i in range(num_columns)]
        csv = make_empty_csv(col_names)
        t1 = clock()
        convert_options = ConvertOptions(
            column_types={k: pa.string() for k in col_names[::2]})
        table = self.read_bytes(csv, convert_options=convert_options)
        dt = clock() - t1
        # Check that processing time didn't blow up.
        # This is a conservative check (it takes less than 300 ms
        # in debug mode on my local machine).
        assert dt <= 10.0
        # Check result
        assert table.num_columns == num_columns
        assert table.num_rows == 0
        assert table.column_names == col_names

    def test_cancellation(self):
        if (threading.current_thread().ident !=
                threading.main_thread().ident):
            pytest.skip("test only works from main Python thread")
        # Skips test if not available
        raise_signal = util.get_raise_signal()
        signum = signal.SIGINT

        def signal_from_thread():
            # Give our workload a chance to start up
            time.sleep(0.2)
            raise_signal(signum)

        # We start with a small CSV reading workload and increase its size
        # until it's large enough to get an interruption during it, even in
        # release mode on fast machines.
        last_duration = 0.0
        workload_size = 100_000
        attempts = 0

        while last_duration < 5.0 and attempts < 10:
            print("workload size:", workload_size)
            large_csv = b"a,b,c\n" + b"1,2,3\n" * workload_size
            exc_info = None

            try:
                # We use a signal fd to reliably ensure that the signal
                # has been delivered to Python, regardless of how exactly
                # it was caught.
                with util.signal_wakeup_fd() as sigfd:
                    try:
                        t = threading.Thread(target=signal_from_thread)
                        t.start()
                        t1 = time.time()
                        try:
                            self.read_bytes(large_csv)
                        except KeyboardInterrupt as e:
                            exc_info = e
                            last_duration = time.time() - t1
                    finally:
                        # Wait for signal to arrive if it didn't already,
                        # to avoid getting a KeyboardInterrupt after the
                        # `except` block below.
                        select.select([sigfd], [], [sigfd], 10.0)

            except KeyboardInterrupt:
                # KeyboardInterrupt didn't interrupt `read_bytes` above.
                pass

            if exc_info is not None:
                # We managed to get `self.read_bytes` interrupted, see if it
                # was actually interrupted inside Arrow C++ or in the Python
                # scaffolding.
                if exc_info.__context__ is not None:
                    # Interrupted inside Arrow C++, we're satisfied now
                    break

            # Increase workload size to get a better chance
            workload_size = workload_size * 3
            # Count the attempt so the `attempts < 10` guard above can fire
            attempts += 1

        if exc_info is None:
            pytest.fail("Failed to get an interruption during CSV reading")

        # Interruption should have arrived timely
        assert last_duration <= 1.0
        e = exc_info.__context__
        assert isinstance(e, pa.ArrowCancelled)
        assert e.signum == signum

    def test_cancellation_disabled(self):
        # ARROW-12622: reader would segfault when the cancelling signal
        # handler was not enabled (e.g. if disabled, or if not on the
        # main thread)
        t = threading.Thread(
            target=lambda: self.read_bytes(b"f64\n0.1"))
        t.start()
        t.join()


+
class TestSerialCSVTableRead(BaseCSVTableRead):
|
1464 |
+
@property
|
1465 |
+
def use_threads(self):
|
1466 |
+
return False
|
1467 |
+
|
1468 |
+
|
1469 |
+
class TestThreadedCSVTableRead(BaseCSVTableRead):
|
1470 |
+
@property
|
1471 |
+
def use_threads(self):
|
1472 |
+
return True
|
1473 |
+
|
1474 |
+
|
1475 |
+
class BaseStreamingCSVRead(BaseTestCSV):
|
1476 |
+
|
1477 |
+
def open_csv(self, csv, *args, **kwargs):
|
1478 |
+
"""
|
1479 |
+
Reads the CSV file into memory using pyarrow's open_csv
|
1480 |
+
csv The CSV bytes
|
1481 |
+
args Positional arguments to be forwarded to pyarrow's open_csv
|
1482 |
+
kwargs Keyword arguments to be forwarded to pyarrow's open_csv
|
1483 |
+
"""
|
1484 |
+
read_options = kwargs.setdefault('read_options', ReadOptions())
|
1485 |
+
read_options.use_threads = self.use_threads
|
1486 |
+
return open_csv(csv, *args, **kwargs)
|
1487 |
+
|
1488 |
+
def open_bytes(self, b, **kwargs):
|
1489 |
+
return self.open_csv(pa.py_buffer(b), **kwargs)
|
1490 |
+
|
1491 |
+
def check_reader(self, reader, expected_schema, expected_data):
|
1492 |
+
assert reader.schema == expected_schema
|
1493 |
+
batches = list(reader)
|
1494 |
+
assert len(batches) == len(expected_data)
|
1495 |
+
for batch, expected_batch in zip(batches, expected_data):
|
1496 |
+
batch.validate(full=True)
|
1497 |
+
assert batch.schema == expected_schema
|
1498 |
+
assert batch.to_pydict() == expected_batch
|
1499 |
+
|
1500 |
+
def read_bytes(self, b, **kwargs):
|
1501 |
+
return self.open_bytes(b, **kwargs).read_all()
|
1502 |
+
|
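    # Incremental consumption outside the tests follows the same shape;
    # a minimal sketch (source is any path or file object):
    #
    #   reader = open_csv(source)
    #   for batch in reader:
    #       ...  # each item is a pyarrow.RecordBatch
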
    def test_file_object(self):
        data = b"a,b\n1,2\n3,4\n"
        expected_data = {'a': [1, 3], 'b': [2, 4]}
        bio = io.BytesIO(data)
        reader = self.open_csv(bio)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64())])
        self.check_reader(reader, expected_schema, [expected_data])

    def test_header(self):
        rows = b"abc,def,gh\n"
        reader = self.open_bytes(rows)
        expected_schema = pa.schema([('abc', pa.null()),
                                     ('def', pa.null()),
                                     ('gh', pa.null())])
        self.check_reader(reader, expected_schema, [])

    def test_inference(self):
        # Inference is done on first block
        rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.binary())])

        read_options = ReadOptions()
        read_options.block_size = len(rows)
        reader = self.open_bytes(rows, read_options=read_options)
        self.check_reader(reader, expected_schema,
                          [{'a': ['123', 'abc', 'gh'],
                            'b': [b'456', b'de\xff', b'ij']}])

        read_options.block_size = len(rows) - 1
        reader = self.open_bytes(rows, read_options=read_options)
        self.check_reader(reader, expected_schema,
                          [{'a': ['123', 'abc'],
                            'b': [b'456', b'de\xff']},
                           {'a': ['gh'],
                            'b': [b'ij']}])

    def test_inference_failure(self):
        # Inference on first block, then conversion failure on second block
        rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
        read_options = ReadOptions()
        read_options.block_size = len(rows) - 7
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64())])
        assert reader.schema == expected_schema
        assert reader.read_next_batch().to_pydict() == {
            'a': [123], 'b': [456]
        }
        # Second block
        with pytest.raises(ValueError,
                           match="CSV conversion error to int64"):
            reader.read_next_batch()
        # EOF
        with pytest.raises(StopIteration):
            reader.read_next_batch()

    def test_invalid_csv(self):
        # CSV errors on first block
        rows = b"a,b\n1,2,3\n4,5\n6,7\n"
        read_options = ReadOptions()
        read_options.block_size = 10
        with pytest.raises(pa.ArrowInvalid,
                           match="Expected 2 columns, got 3"):
            reader = self.open_bytes(
                rows, read_options=read_options)

        # CSV errors on second block
        rows = b"a,b\n1,2\n3,4,5\n6,7\n"
        read_options.block_size = 8
        reader = self.open_bytes(rows, read_options=read_options)
        assert reader.read_next_batch().to_pydict() == {'a': [1], 'b': [2]}
        with pytest.raises(pa.ArrowInvalid,
                           match="Expected 2 columns, got 3"):
            reader.read_next_batch()
        # Cannot continue after a parse error
        with pytest.raises(StopIteration):
            reader.read_next_batch()

    def test_options_delimiter(self):
        rows = b"a;b,c\nde,fg;eh\n"
        reader = self.open_bytes(rows)
        expected_schema = pa.schema([('a;b', pa.string()),
                                     ('c', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a;b': ['de'],
                            'c': ['fg;eh']}])

        opts = ParseOptions(delimiter=';')
        reader = self.open_bytes(rows, parse_options=opts)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b,c', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ['de,fg'],
                            'b,c': ['eh']}])

    def test_no_ending_newline(self):
        # No \n after last line
        rows = b"a,b,c\n1,2,3\n4,5,6"
        reader = self.open_bytes(rows)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64()),
                                     ('c', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'a': [1, 4],
                            'b': [2, 5],
                            'c': [3, 6]}])

    def test_empty_file(self):
        with pytest.raises(ValueError, match="Empty CSV file"):
            self.open_bytes(b"")

    def test_column_options(self):
        # With column_names
        rows = b"1,2,3\n4,5,6"
        read_options = ReadOptions()
        read_options.column_names = ['d', 'e', 'f']
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('d', pa.int64()),
                                     ('e', pa.int64()),
                                     ('f', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'d': [1, 4],
                            'e': [2, 5],
                            'f': [3, 6]}])

        # With include_columns
        convert_options = ConvertOptions()
        convert_options.include_columns = ['f', 'e']
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('f', pa.int64()),
                                     ('e', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'e': [2, 5],
                            'f': [3, 6]}])

        # With column_types
        convert_options.column_types = {'e': pa.string()}
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'e': ["2", "5"],
                            'f': [3, 6]}])

        # Missing columns in include_columns
        convert_options.include_columns = ['g', 'f', 'e']
        with pytest.raises(
                KeyError,
                match="Column 'g' in include_columns does not exist"):
            reader = self.open_bytes(rows, read_options=read_options,
                                     convert_options=convert_options)

        convert_options.include_missing_columns = True
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('g', pa.null()),
                                     ('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'g': [None, None],
                            'e': ["2", "5"],
                            'f': [3, 6]}])

        convert_options.column_types = {'e': pa.string(), 'g': pa.float64()}
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('g', pa.float64()),
                                     ('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'g': [None, None],
                            'e': ["2", "5"],
                            'f': [3, 6]}])

    def test_encoding(self):
        # latin-1 (invalid utf-8)
        rows = b"a,b\nun,\xe9l\xe9phant"
        read_options = ReadOptions()
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.binary())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': [b"\xe9l\xe9phant"]}])

        read_options.encoding = 'latin1'
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': ["éléphant"]}])

        # utf-16
        rows = (b'\xff\xfea\x00,\x00b\x00\n\x00u\x00n\x00,'
                b'\x00\xe9\x00l\x00\xe9\x00p\x00h\x00a\x00n\x00t\x00')
        read_options.encoding = 'utf16'
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': ["éléphant"]}])

    def test_small_random_csv(self):
        csv, expected = make_random_csv(num_cols=2, num_rows=10)
        reader = self.open_bytes(csv)
        table = reader.read_all()
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()

    def test_stress_block_sizes(self):
        # Test a number of small block sizes to stress block stitching
        csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
        block_sizes = [19, 21, 23, 26, 37, 111]
        csvs = [csv_base, csv_base.rstrip(b'\r\n')]
        for csv in csvs:
            for block_size in block_sizes:
                # Need at least two lines for type inference
                assert csv[:block_size].count(b'\n') >= 2
                read_options = ReadOptions(block_size=block_size)
                reader = self.open_bytes(
                    csv, read_options=read_options)
                table = reader.read_all()
                assert table.schema == expected.schema
                if not table.equals(expected):
                    # Better error output
                    assert table.to_pydict() == expected.to_pydict()

    def test_batch_lifetime(self):
        gc.collect()
        old_allocated = pa.total_allocated_bytes()

        # Memory occupation should not grow with CSV file size
        def check_one_batch(reader, expected):
            batch = reader.read_next_batch()
            assert batch.to_pydict() == expected

        rows = b"10,11\n12,13\n14,15\n16,17\n"
        read_options = ReadOptions()
        read_options.column_names = ['a', 'b']
        read_options.block_size = 6
        reader = self.open_bytes(rows, read_options=read_options)
        check_one_batch(reader, {'a': [10], 'b': [11]})
        allocated_after_first_batch = pa.total_allocated_bytes()
        check_one_batch(reader, {'a': [12], 'b': [13]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        check_one_batch(reader, {'a': [14], 'b': [15]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        check_one_batch(reader, {'a': [16], 'b': [17]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        with pytest.raises(StopIteration):
            reader.read_next_batch()
        assert pa.total_allocated_bytes() == old_allocated
        reader = None
        assert pa.total_allocated_bytes() == old_allocated

    def test_header_skip_rows(self):
        super().test_header_skip_rows()

        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

        # Skipping all rows immediately results in end of iteration
        opts = ReadOptions()
        opts.skip_rows = 4
        opts.column_names = ['ab', 'cd']
        reader = self.open_bytes(rows, read_options=opts)
        with pytest.raises(StopIteration):
            assert reader.read_next_batch()

    def test_skip_rows_after_names(self):
        super().test_skip_rows_after_names()

        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

        # Skipping all rows immediately results in end of iteration
        opts = ReadOptions()
        opts.skip_rows_after_names = 3
        reader = self.open_bytes(rows, read_options=opts)
        with pytest.raises(StopIteration):
            assert reader.read_next_batch()

        # Skipping beyond all rows immediately results in end of iteration
        opts.skip_rows_after_names = 99999
        reader = self.open_bytes(rows, read_options=opts)
        with pytest.raises(StopIteration):
            assert reader.read_next_batch()


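# Minimal sketch of the encoding option exercised in test_encoding above
# (file name illustrative): non-UTF8 input is transcoded while reading.
#
#   opts = ReadOptions(encoding='latin1')
#   table = read_csv("latin1.csv", read_options=opts)
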
class TestSerialStreamingCSVRead(BaseStreamingCSVRead):
    @property
    def use_threads(self):
        return False


class TestThreadedStreamingCSVRead(BaseStreamingCSVRead):
    @property
    def use_threads(self):
        return True


class BaseTestCompressedCSVRead:

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='arrow-csv-test-')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def read_csv(self, csv_path):
        try:
            return read_csv(csv_path)
        except pa.ArrowNotImplementedError as e:
            pytest.skip(str(e))

    def test_random_csv(self):
        csv, expected = make_random_csv(num_cols=2, num_rows=100)
        csv_path = os.path.join(self.tmpdir, self.csv_filename)
        self.write_file(csv_path, csv)
        table = self.read_csv(csv_path)
        table.validate(full=True)
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()


class TestGZipCSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
    csv_filename = "compressed.csv.gz"

    def write_file(self, path, contents):
        with gzip.open(path, 'wb', 3) as f:
            f.write(contents)

    def test_concatenated(self):
        # ARROW-5974
        csv_path = os.path.join(self.tmpdir, self.csv_filename)
        with gzip.open(csv_path, 'wb', 3) as f:
            f.write(b"ab,cd\nef,gh\n")
        with gzip.open(csv_path, 'ab', 3) as f:
            f.write(b"ij,kl\nmn,op\n")
        table = self.read_csv(csv_path)
        assert table.to_pydict() == {
            'ab': ['ef', 'ij', 'mn'],
            'cd': ['gh', 'kl', 'op'],
        }


class TestBZ2CSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
    csv_filename = "compressed.csv.bz2"

    def write_file(self, path, contents):
        with bz2.BZ2File(path, 'w') as f:
            f.write(contents)


def test_read_csv_does_not_close_passed_file_handles():
|
1864 |
+
# ARROW-4823
|
1865 |
+
buf = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
|
1866 |
+
read_csv(buf)
|
1867 |
+
assert not buf.closed
|
1868 |
+
|
1869 |
+
|
1870 |
+
def test_write_read_round_trip():
|
1871 |
+
t = pa.Table.from_arrays([[1, 2, 3], ["a", "b", "c"]], ["c1", "c2"])
|
1872 |
+
record_batch = t.to_batches(max_chunksize=4)[0]
|
1873 |
+
for data in [t, record_batch]:
|
1874 |
+
# Test with header
|
1875 |
+
buf = io.BytesIO()
|
1876 |
+
write_csv(data, buf, WriteOptions(include_header=True))
|
1877 |
+
buf.seek(0)
|
1878 |
+
assert t == read_csv(buf)
|
1879 |
+
|
1880 |
+
# Test without header
|
1881 |
+
buf = io.BytesIO()
|
1882 |
+
write_csv(data, buf, WriteOptions(include_header=False))
|
1883 |
+
buf.seek(0)
|
1884 |
+
|
1885 |
+
read_options = ReadOptions(column_names=t.column_names)
|
1886 |
+
assert t == read_csv(buf, read_options=read_options)
|
1887 |
+
|
1888 |
+
# Test with writer
|
1889 |
+
for read_options, parse_options, write_options in [
|
1890 |
+
(None, None, WriteOptions(include_header=True)),
|
1891 |
+
(ReadOptions(column_names=t.column_names), None,
|
1892 |
+
WriteOptions(include_header=False)),
|
1893 |
+
(None, ParseOptions(delimiter='|'),
|
1894 |
+
WriteOptions(include_header=True, delimiter='|')),
|
1895 |
+
(ReadOptions(column_names=t.column_names),
|
1896 |
+
ParseOptions(delimiter='\t'),
|
1897 |
+
WriteOptions(include_header=False, delimiter='\t')),
|
1898 |
+
]:
|
1899 |
+
buf = io.BytesIO()
|
1900 |
+
with CSVWriter(buf, t.schema, write_options=write_options) as writer:
|
1901 |
+
writer.write_table(t)
|
1902 |
+
buf.seek(0)
|
1903 |
+
assert t == read_csv(buf, read_options=read_options,
|
1904 |
+
parse_options=parse_options)
|
1905 |
+
buf = io.BytesIO()
|
1906 |
+
with CSVWriter(buf, t.schema, write_options=write_options) as writer:
|
1907 |
+
for batch in t.to_batches(max_chunksize=1):
|
1908 |
+
writer.write_batch(batch)
|
1909 |
+
buf.seek(0)
|
1910 |
+
assert t == read_csv(buf, read_options=read_options,
|
1911 |
+
parse_options=parse_options)
|
1912 |
+
|
1913 |
+
|
1914 |
+
def test_write_quoting_style():
|
1915 |
+
t = pa.Table.from_arrays([[1, 2, None], ["a", None, "c"]], ["c1", "c2"])
|
1916 |
+
buf = io.BytesIO()
|
1917 |
+
for write_options, res in [
|
1918 |
+
(WriteOptions(quoting_style='none'), b'"c1","c2"\n1,a\n2,\n,c\n'),
|
1919 |
+
(WriteOptions(), b'"c1","c2"\n1,"a"\n2,\n,"c"\n'),
|
1920 |
+
(WriteOptions(quoting_style='all_valid'),
|
1921 |
+
b'"c1","c2"\n"1","a"\n"2",\n,"c"\n'),
|
1922 |
+
]:
|
1923 |
+
with CSVWriter(buf, t.schema, write_options=write_options) as writer:
|
1924 |
+
writer.write_table(t)
|
1925 |
+
assert buf.getvalue() == res
|
1926 |
+
buf.seek(0)
|
1927 |
+
|
1928 |
+
# Test writing special characters with different quoting styles
|
1929 |
+
t = pa.Table.from_arrays([[",", "\""]], ["c1"])
|
1930 |
+
buf = io.BytesIO()
|
1931 |
+
for write_options, res in [
|
1932 |
+
(WriteOptions(quoting_style='needed'), b'"c1"\n","\n""""\n'),
|
1933 |
+
(WriteOptions(quoting_style='none'), pa.lib.ArrowInvalid),
|
1934 |
+
]:
|
1935 |
+
with CSVWriter(buf, t.schema, write_options=write_options) as writer:
|
1936 |
+
try:
|
1937 |
+
writer.write_table(t)
|
1938 |
+
except Exception as e:
|
1939 |
+
# This will trigger when we try to write a comma (,)
|
1940 |
+
# without quotes, which is invalid
|
1941 |
+
assert isinstance(e, res)
|
1942 |
+
break
|
1943 |
+
assert buf.getvalue() == res
|
1944 |
+
buf.seek(0)
|
1945 |
+
|
1946 |
+
|
1947 |
+
def test_read_csv_reference_cycle():
|
1948 |
+
# ARROW-13187
|
1949 |
+
def inner():
|
1950 |
+
buf = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
|
1951 |
+
table = read_csv(buf)
|
1952 |
+
return weakref.ref(table)
|
1953 |
+
|
1954 |
+
with util.disabled_gc():
|
1955 |
+
wr = inner()
|
1956 |
+
assert wr() is None
|
1957 |
+
|
1958 |
+
|
1959 |
+
@pytest.mark.parametrize("type_factory", (
|
1960 |
+
lambda: pa.decimal128(20, 1),
|
1961 |
+
lambda: pa.decimal128(38, 15),
|
1962 |
+
lambda: pa.decimal256(20, 1),
|
1963 |
+
lambda: pa.decimal256(76, 10),
|
1964 |
+
))
|
1965 |
+
def test_write_csv_decimal(tmpdir, type_factory):
|
1966 |
+
type = type_factory()
|
1967 |
+
table = pa.table({"col": pa.array([1, 2]).cast(type)})
|
1968 |
+
|
1969 |
+
write_csv(table, tmpdir / "out.csv")
|
1970 |
+
out = read_csv(tmpdir / "out.csv")
|
1971 |
+
|
1972 |
+
assert out.column('col').cast(type) == table.column('col')
|
1973 |
+
|
1974 |
+
|
1975 |
+
def test_read_csv_gil_deadlock():
|
1976 |
+
# GH-38676
|
1977 |
+
# This test depends on several preconditions:
|
1978 |
+
# - the CSV input is a Python file object
|
1979 |
+
# - reading the CSV file produces an error
|
1980 |
+
data = b"a,b,c"
|
1981 |
+
|
1982 |
+
class MyBytesIO(io.BytesIO):
|
1983 |
+
def read(self, *args):
|
1984 |
+
time.sleep(0.001)
|
1985 |
+
return super().read(*args)
|
1986 |
+
|
1987 |
+
def readinto(self, *args):
|
1988 |
+
time.sleep(0.001)
|
1989 |
+
return super().readinto(*args)
|
1990 |
+
|
1991 |
+
for i in range(20):
|
1992 |
+
with pytest.raises(pa.ArrowInvalid):
|
1993 |
+
read_csv(MyBytesIO(data))
|
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cuda.py
ADDED
@@ -0,0 +1,794 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""
UNTESTED:
read_message
"""

import sys
import sysconfig

import pytest

import pyarrow as pa
import numpy as np


cuda = pytest.importorskip("pyarrow.cuda")

platform = sysconfig.get_platform()
# TODO: enable ppc64 when Arrow C++ supports IPC in ppc64 systems:
has_ipc_support = platform == 'linux-x86_64'  # or 'ppc64' in platform

cuda_ipc = pytest.mark.skipif(
    not has_ipc_support,
    reason='CUDA IPC not supported in platform `%s`' % (platform))

global_context = None  # for flake8
global_context1 = None  # for flake8


def setup_module(module):
    module.global_context = cuda.Context(0)
    module.global_context1 = cuda.Context(cuda.Context.get_num_devices() - 1)


def teardown_module(module):
    del module.global_context


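# Minimal sketch of the objects set up above, outside the test harness
# (requires a CUDA-capable device and a pyarrow build with CUDA enabled):
#
#   ctx = cuda.Context(0)          # first GPU
#   dbuf = ctx.new_buffer(1024)    # 1 KiB of device memory
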
def test_Context():
    assert cuda.Context.get_num_devices() > 0
    assert global_context.device_number == 0
    assert global_context1.device_number == cuda.Context.get_num_devices() - 1

    with pytest.raises(ValueError,
                       match=("device_number argument must "
                              "be non-negative less than")):
        cuda.Context(cuda.Context.get_num_devices())


@pytest.mark.parametrize("size", [0, 1, 1000])
def test_manage_allocate_free_host(size):
    buf = cuda.new_host_buffer(size)
    arr = np.frombuffer(buf, dtype=np.uint8)
    arr[size//4:3*size//4] = 1
    arr_cp = arr.copy()
    arr2 = np.frombuffer(buf, dtype=np.uint8)
    np.testing.assert_equal(arr2, arr_cp)
    assert buf.size == size


def test_context_allocate_del():
    bytes_allocated = global_context.bytes_allocated
    cudabuf = global_context.new_buffer(128)
    assert global_context.bytes_allocated == bytes_allocated + 128
    del cudabuf
    assert global_context.bytes_allocated == bytes_allocated


def make_random_buffer(size, target='host'):
    """Return a host or device buffer with random data."""
    if target == 'host':
        assert size >= 0
        buf = pa.allocate_buffer(size)
        assert buf.size == size
        arr = np.frombuffer(buf, dtype=np.uint8)
        assert arr.size == size
        arr[:] = np.random.randint(low=1, high=255, size=size, dtype=np.uint8)
        assert arr.sum() > 0 or size == 0
        arr_ = np.frombuffer(buf, dtype=np.uint8)
        np.testing.assert_equal(arr, arr_)
        return arr, buf
    elif target == 'device':
        arr, buf = make_random_buffer(size, target='host')
        dbuf = global_context.new_buffer(size)
        assert dbuf.size == size
        dbuf.copy_from_host(buf, position=0, nbytes=size)
        return arr, dbuf
    raise ValueError('invalid target value')


@pytest.mark.parametrize("size", [0, 1, 1000])
|
109 |
+
def test_context_device_buffer(size):
|
110 |
+
# Creating device buffer from host buffer;
|
111 |
+
arr, buf = make_random_buffer(size)
|
112 |
+
cudabuf = global_context.buffer_from_data(buf)
|
113 |
+
assert cudabuf.size == size
|
114 |
+
arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
115 |
+
np.testing.assert_equal(arr, arr2)
|
116 |
+
|
117 |
+
# CudaBuffer does not support buffer protocol
|
118 |
+
with pytest.raises(BufferError):
|
119 |
+
memoryview(cudabuf)
|
120 |
+
|
121 |
+
# Creating device buffer from array:
|
122 |
+
cudabuf = global_context.buffer_from_data(arr)
|
123 |
+
assert cudabuf.size == size
|
124 |
+
arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
125 |
+
np.testing.assert_equal(arr, arr2)
|
126 |
+
|
127 |
+
# Creating device buffer from bytes:
|
128 |
+
cudabuf = global_context.buffer_from_data(arr.tobytes())
|
129 |
+
assert cudabuf.size == size
|
130 |
+
arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
131 |
+
np.testing.assert_equal(arr, arr2)
|
132 |
+
|
133 |
+
# Creating a device buffer from another device buffer, view:
|
134 |
+
cudabuf2 = cudabuf.slice(0, cudabuf.size)
|
135 |
+
assert cudabuf2.size == size
|
136 |
+
arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
|
137 |
+
np.testing.assert_equal(arr, arr2)
|
138 |
+
|
139 |
+
if size > 1:
|
140 |
+
cudabuf2.copy_from_host(arr[size//2:])
|
141 |
+
arr3 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
142 |
+
np.testing.assert_equal(np.concatenate((arr[size//2:], arr[size//2:])),
|
143 |
+
arr3)
|
144 |
+
cudabuf2.copy_from_host(arr[:size//2]) # restoring arr
|
145 |
+
|
146 |
+
# Creating a device buffer from another device buffer, copy:
|
147 |
+
cudabuf2 = global_context.buffer_from_data(cudabuf)
|
148 |
+
assert cudabuf2.size == size
|
149 |
+
arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
|
150 |
+
np.testing.assert_equal(arr, arr2)
|
151 |
+
|
152 |
+
cudabuf2.copy_from_host(arr[size//2:])
|
153 |
+
arr3 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
154 |
+
np.testing.assert_equal(arr, arr3)
|
155 |
+
|
156 |
+
# Slice of a device buffer
|
157 |
+
cudabuf2 = cudabuf.slice(0, cudabuf.size+10)
|
158 |
+
assert cudabuf2.size == size
|
159 |
+
arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
|
160 |
+
np.testing.assert_equal(arr, arr2)
|
161 |
+
|
162 |
+
cudabuf2 = cudabuf.slice(size//4, size+10)
|
163 |
+
assert cudabuf2.size == size - size//4
|
164 |
+
arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
|
165 |
+
np.testing.assert_equal(arr[size//4:], arr2)
|
166 |
+
|
167 |
+
# Creating a device buffer from a slice of host buffer
|
168 |
+
soffset = size//4
|
169 |
+
ssize = 2*size//4
|
170 |
+
cudabuf = global_context.buffer_from_data(buf, offset=soffset,
|
171 |
+
size=ssize)
|
172 |
+
assert cudabuf.size == ssize
|
173 |
+
arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
174 |
+
np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)
|
175 |
+
|
176 |
+
cudabuf = global_context.buffer_from_data(buf.slice(offset=soffset,
|
177 |
+
length=ssize))
|
178 |
+
assert cudabuf.size == ssize
|
179 |
+
arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
180 |
+
np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)
|
181 |
+
|
182 |
+
# Creating a device buffer from a slice of an array
|
183 |
+
cudabuf = global_context.buffer_from_data(arr, offset=soffset, size=ssize)
|
184 |
+
assert cudabuf.size == ssize
|
185 |
+
arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
186 |
+
np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)
|
187 |
+
|
188 |
+
cudabuf = global_context.buffer_from_data(arr[soffset:soffset+ssize])
|
189 |
+
assert cudabuf.size == ssize
|
190 |
+
arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
191 |
+
np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)
|
192 |
+
|
193 |
+
# Creating a device buffer from a slice of bytes
|
194 |
+
cudabuf = global_context.buffer_from_data(arr.tobytes(),
|
195 |
+
offset=soffset,
|
196 |
+
size=ssize)
|
197 |
+
assert cudabuf.size == ssize
|
198 |
+
arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
199 |
+
np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)
|
200 |
+
|
201 |
+
# Creating a device buffer from size
|
202 |
+
cudabuf = global_context.new_buffer(size)
|
203 |
+
assert cudabuf.size == size
|
204 |
+
|
205 |
+
# Creating device buffer from a slice of another device buffer:
|
206 |
+
cudabuf = global_context.buffer_from_data(arr)
|
207 |
+
cudabuf2 = cudabuf.slice(soffset, ssize)
|
208 |
+
assert cudabuf2.size == ssize
|
209 |
+
arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
|
210 |
+
np.testing.assert_equal(arr[soffset:soffset+ssize], arr2)
|
211 |
+
|
212 |
+
# Creating device buffer from HostBuffer
|
213 |
+
|
214 |
+
buf = cuda.new_host_buffer(size)
|
215 |
+
arr_ = np.frombuffer(buf, dtype=np.uint8)
|
216 |
+
arr_[:] = arr
|
217 |
+
cudabuf = global_context.buffer_from_data(buf)
|
218 |
+
assert cudabuf.size == size
|
219 |
+
arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
220 |
+
np.testing.assert_equal(arr, arr2)
|
221 |
+
|
222 |
+
# Creating device buffer from HostBuffer slice
|
223 |
+
|
224 |
+
cudabuf = global_context.buffer_from_data(buf, offset=soffset, size=ssize)
|
225 |
+
assert cudabuf.size == ssize
|
226 |
+
arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
227 |
+
np.testing.assert_equal(arr[soffset:soffset+ssize], arr2)
|
228 |
+
|
229 |
+
cudabuf = global_context.buffer_from_data(
|
230 |
+
buf.slice(offset=soffset, length=ssize))
|
231 |
+
assert cudabuf.size == ssize
|
232 |
+
arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
|
233 |
+
np.testing.assert_equal(arr[soffset:soffset+ssize], arr2)
|
234 |
+
|
235 |
+
|
236 |
+
@pytest.mark.parametrize("size", [0, 1, 1000])
|
237 |
+
def test_context_from_object(size):
|
238 |
+
ctx = global_context
|
239 |
+
arr, cbuf = make_random_buffer(size, target='device')
|
240 |
+
dtype = arr.dtype
|
241 |
+
|
242 |
+
# Creating device buffer from a CUDA host buffer
|
243 |
+
hbuf = cuda.new_host_buffer(size * arr.dtype.itemsize)
|
244 |
+
np.frombuffer(hbuf, dtype=dtype)[:] = arr
|
245 |
+
cbuf2 = ctx.buffer_from_object(hbuf)
|
246 |
+
assert cbuf2.size == cbuf.size
|
247 |
+
arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
|
248 |
+
np.testing.assert_equal(arr, arr2)
|
249 |
+
|
250 |
+
# Creating device buffer from a device buffer
|
251 |
+
cbuf2 = ctx.buffer_from_object(cbuf2)
|
252 |
+
assert cbuf2.size == cbuf.size
|
253 |
+
arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
|
254 |
+
np.testing.assert_equal(arr, arr2)
|
255 |
+
|
256 |
+
# Trying to create a device buffer from a Buffer
|
257 |
+
with pytest.raises(pa.ArrowTypeError,
|
258 |
+
match=('buffer is not backed by a CudaBuffer')):
|
259 |
+
ctx.buffer_from_object(pa.py_buffer(b"123"))
|
260 |
+
|
261 |
+
# Trying to create a device buffer from numpy.array
|
262 |
+
with pytest.raises(pa.ArrowTypeError,
|
263 |
+
match=("cannot create device buffer view from "
|
264 |
+
".* \'numpy.ndarray\'")):
|
265 |
+
ctx.buffer_from_object(np.array([1, 2, 3]))
|
266 |
+
|
267 |
+
|
268 |
+
def test_foreign_buffer():
|
269 |
+
ctx = global_context
|
270 |
+
dtype = np.dtype(np.uint8)
|
271 |
+
size = 10
|
272 |
+
hbuf = cuda.new_host_buffer(size * dtype.itemsize)
|
273 |
+
|
274 |
+
# test host buffer memory reference counting
|
275 |
+
rc = sys.getrefcount(hbuf)
|
276 |
+
fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size, hbuf)
|
277 |
+
assert sys.getrefcount(hbuf) == rc + 1
|
278 |
+
del fbuf
|
279 |
+
assert sys.getrefcount(hbuf) == rc
|
280 |
+
|
281 |
+
# test postponed deallocation of host buffer memory
|
282 |
+
fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size, hbuf)
|
283 |
+
del hbuf
|
284 |
+
fbuf.copy_to_host()
|
285 |
+
|
286 |
+
# test deallocating the host buffer memory making it inaccessible
|
287 |
+
hbuf = cuda.new_host_buffer(size * dtype.itemsize)
|
288 |
+
fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size)
|
289 |
+
del hbuf
|
290 |
+
with pytest.raises(pa.ArrowIOError,
|
291 |
+
match=('Cuda error ')):
|
292 |
+
fbuf.copy_to_host()
|
293 |
+
|
294 |
+
|
295 |
+
@pytest.mark.parametrize("size", [0, 1, 1000])
|
296 |
+
def test_CudaBuffer(size):
|
297 |
+
arr, buf = make_random_buffer(size)
|
298 |
+
assert arr.tobytes() == buf.to_pybytes()
|
299 |
+
cbuf = global_context.buffer_from_data(buf)
|
300 |
+
assert cbuf.size == size
|
301 |
+
assert not cbuf.is_cpu
|
302 |
+
assert arr.tobytes() == cbuf.to_pybytes()
|
303 |
+
if size > 0:
|
304 |
+
assert cbuf.address > 0
|
305 |
+
|
306 |
+
for i in range(size):
|
307 |
+
assert cbuf[i] == arr[i]
|
308 |
+
|
309 |
+
for s in [
|
310 |
+
slice(None),
|
311 |
+
slice(size//4, size//2),
|
312 |
+
]:
|
313 |
+
assert cbuf[s].to_pybytes() == arr[s].tobytes()
|
314 |
+
|
315 |
+
sbuf = cbuf.slice(size//4, size//2)
|
316 |
+
assert sbuf.parent == cbuf
|
317 |
+
|
318 |
+
with pytest.raises(TypeError,
|
319 |
+
match="Do not call CudaBuffer's constructor directly"):
|
320 |
+
cuda.CudaBuffer()
|
321 |
+
|
322 |
+
|
323 |
+
@pytest.mark.parametrize("size", [0, 1, 1000])
|
324 |
+
def test_HostBuffer(size):
|
325 |
+
arr, buf = make_random_buffer(size)
|
326 |
+
assert arr.tobytes() == buf.to_pybytes()
|
327 |
+
hbuf = cuda.new_host_buffer(size)
|
328 |
+
np.frombuffer(hbuf, dtype=np.uint8)[:] = arr
|
329 |
+
assert hbuf.size == size
|
330 |
+
assert hbuf.is_cpu
|
331 |
+
assert arr.tobytes() == hbuf.to_pybytes()
|
332 |
+
for i in range(size):
|
333 |
+
assert hbuf[i] == arr[i]
|
334 |
+
for s in [
|
335 |
+
slice(None),
|
336 |
+
slice(size//4, size//2),
|
337 |
+
]:
|
338 |
+
assert hbuf[s].to_pybytes() == arr[s].tobytes()
|
339 |
+
|
340 |
+
sbuf = hbuf.slice(size//4, size//2)
|
341 |
+
assert sbuf.parent == hbuf
|
342 |
+
|
343 |
+
del hbuf
|
344 |
+
|
345 |
+
with pytest.raises(TypeError,
|
346 |
+
match="Do not call HostBuffer's constructor directly"):
|
347 |
+
cuda.HostBuffer()
|
348 |
+
|
349 |
+
|
350 |
+
@pytest.mark.parametrize("size", [0, 1, 1000])
|
351 |
+
def test_copy_from_to_host(size):
|
352 |
+
# Create a buffer in host containing range(size)
|
353 |
+
dt = np.dtype('uint16')
|
354 |
+
nbytes = size * dt.itemsize
|
355 |
+
buf = pa.allocate_buffer(nbytes, resizable=True) # in host
|
356 |
+
assert isinstance(buf, pa.Buffer)
|
357 |
+
assert not isinstance(buf, cuda.CudaBuffer)
|
358 |
+
arr = np.frombuffer(buf, dtype=dt)
|
359 |
+
assert arr.size == size
|
360 |
+
arr[:] = range(size)
|
361 |
+
arr_ = np.frombuffer(buf, dtype=dt)
|
362 |
+
np.testing.assert_equal(arr, arr_)
|
363 |
+
|
364 |
+
# Create a device buffer of the same size and copy from host
|
365 |
+
device_buffer = global_context.new_buffer(nbytes)
|
366 |
+
assert isinstance(device_buffer, cuda.CudaBuffer)
|
367 |
+
assert isinstance(device_buffer, pa.Buffer)
|
368 |
+
assert device_buffer.size == nbytes
|
369 |
+
assert not device_buffer.is_cpu
|
370 |
+
device_buffer.copy_from_host(buf, position=0, nbytes=nbytes)
|
371 |
+
|
372 |
+
# Copy back to host and compare contents
|
373 |
+
buf2 = device_buffer.copy_to_host(position=0, nbytes=nbytes)
|
374 |
+
arr2 = np.frombuffer(buf2, dtype=dt)
|
375 |
+
np.testing.assert_equal(arr, arr2)
|
376 |
+
|
377 |
+
|
378 |
+
@pytest.mark.parametrize("size", [0, 1, 1000])
|
379 |
+
def test_copy_to_host(size):
|
380 |
+
arr, dbuf = make_random_buffer(size, target='device')
|
381 |
+
|
382 |
+
buf = dbuf.copy_to_host()
|
383 |
+
assert buf.is_cpu
|
384 |
+
np.testing.assert_equal(arr, np.frombuffer(buf, dtype=np.uint8))
|
385 |
+
|
386 |
+
buf = dbuf.copy_to_host(position=size//4)
|
387 |
+
assert buf.is_cpu
|
388 |
+
np.testing.assert_equal(arr[size//4:], np.frombuffer(buf, dtype=np.uint8))
|
389 |
+
|
390 |
+
buf = dbuf.copy_to_host(position=size//4, nbytes=size//8)
|
391 |
+
assert buf.is_cpu
|
392 |
+
np.testing.assert_equal(arr[size//4:size//4+size//8],
|
393 |
+
np.frombuffer(buf, dtype=np.uint8))
|
394 |
+
|
395 |
+
buf = dbuf.copy_to_host(position=size//4, nbytes=0)
|
396 |
+
assert buf.is_cpu
|
397 |
+
assert buf.size == 0
|
398 |
+
|
399 |
+
for (position, nbytes) in [
|
400 |
+
(size+2, -1), (-2, -1), (size+1, 0), (-3, 0),
|
401 |
+
]:
|
402 |
+
with pytest.raises(ValueError,
|
403 |
+
match='position argument is out-of-range'):
|
404 |
+
dbuf.copy_to_host(position=position, nbytes=nbytes)
|
405 |
+
|
406 |
+
for (position, nbytes) in [
|
407 |
+
(0, size+1), (size//2, (size+1)//2+1), (size, 1)
|
408 |
+
]:
|
409 |
+
with pytest.raises(ValueError,
|
410 |
+
match=('requested more to copy than'
|
411 |
+
' available from device buffer')):
|
412 |
+
dbuf.copy_to_host(position=position, nbytes=nbytes)
|
413 |
+
|
414 |
+
buf = pa.allocate_buffer(size//4)
|
415 |
+
dbuf.copy_to_host(buf=buf)
|
416 |
+
np.testing.assert_equal(arr[:size//4], np.frombuffer(buf, dtype=np.uint8))
|
417 |
+
|
418 |
+
if size < 12:
|
419 |
+
return
|
420 |
+
|
421 |
+
dbuf.copy_to_host(buf=buf, position=12)
|
422 |
+
np.testing.assert_equal(arr[12:12+size//4],
|
423 |
+
np.frombuffer(buf, dtype=np.uint8))
|
424 |
+
|
425 |
+
dbuf.copy_to_host(buf=buf, nbytes=12)
|
426 |
+
np.testing.assert_equal(arr[:12], np.frombuffer(buf, dtype=np.uint8)[:12])
|
427 |
+
|
428 |
+
dbuf.copy_to_host(buf=buf, nbytes=12, position=6)
|
429 |
+
np.testing.assert_equal(arr[6:6+12],
|
430 |
+
np.frombuffer(buf, dtype=np.uint8)[:12])
|
431 |
+
|
432 |
+
for (position, nbytes) in [
|
433 |
+
(0, size+10), (10, size-5),
|
434 |
+
(0, size//2), (size//4, size//4+1)
|
435 |
+
]:
|
436 |
+
with pytest.raises(ValueError,
|
437 |
+
match=('requested copy does not '
|
438 |
+
'fit into host buffer')):
|
439 |
+
dbuf.copy_to_host(buf=buf, position=position, nbytes=nbytes)
|
440 |
+
|
441 |
+
|
442 |
+
@pytest.mark.parametrize("dest_ctx", ['same', 'another'])
|
443 |
+
@pytest.mark.parametrize("size", [0, 1, 1000])
|
444 |
+
def test_copy_from_device(dest_ctx, size):
|
445 |
+
arr, buf = make_random_buffer(size=size, target='device')
|
446 |
+
lst = arr.tolist()
|
447 |
+
if dest_ctx == 'another':
|
448 |
+
dest_ctx = global_context1
|
449 |
+
if buf.context.device_number == dest_ctx.device_number:
|
450 |
+
pytest.skip("not a multi-GPU system")
|
451 |
+
else:
|
452 |
+
dest_ctx = buf.context
|
453 |
+
dbuf = dest_ctx.new_buffer(size)
|
454 |
+
|
455 |
+
def put(*args, **kwargs):
|
456 |
+
dbuf.copy_from_device(buf, *args, **kwargs)
|
457 |
+
rbuf = dbuf.copy_to_host()
|
458 |
+
return np.frombuffer(rbuf, dtype=np.uint8).tolist()
|
459 |
+
assert put() == lst
|
460 |
+
if size > 4:
|
461 |
+
assert put(position=size//4) == lst[:size//4]+lst[:-size//4]
|
462 |
+
assert put() == lst
|
463 |
+
assert put(position=1, nbytes=size//2) == \
|
464 |
+
lst[:1] + lst[:size//2] + lst[-(size-size//2-1):]
|
465 |
+
|
466 |
+
for (position, nbytes) in [
|
467 |
+
(size+2, -1), (-2, -1), (size+1, 0), (-3, 0),
|
468 |
+
]:
|
469 |
+
with pytest.raises(ValueError,
|
470 |
+
match='position argument is out-of-range'):
|
471 |
+
put(position=position, nbytes=nbytes)
|
472 |
+
|
473 |
+
for (position, nbytes) in [
|
474 |
+
(0, size+1),
|
475 |
+
]:
|
476 |
+
with pytest.raises(ValueError,
|
477 |
+
match=('requested more to copy than'
|
478 |
+
' available from device buffer')):
|
479 |
+
put(position=position, nbytes=nbytes)
|
480 |
+
|
481 |
+
if size < 4:
|
482 |
+
return
|
483 |
+
|
484 |
+
for (position, nbytes) in [
|
485 |
+
(size//2, (size+1)//2+1)
|
486 |
+
]:
|
487 |
+
with pytest.raises(ValueError,
|
488 |
+
match=('requested more to copy than'
|
489 |
+
' available in device buffer')):
|
490 |
+
put(position=position, nbytes=nbytes)
|
491 |
+
|
492 |
+
|
493 |
+
@pytest.mark.parametrize("size", [0, 1, 1000])
|
494 |
+
def test_copy_from_host(size):
|
495 |
+
arr, buf = make_random_buffer(size=size, target='host')
|
496 |
+
lst = arr.tolist()
|
497 |
+
dbuf = global_context.new_buffer(size)
|
498 |
+
|
499 |
+
def put(*args, **kwargs):
|
500 |
+
dbuf.copy_from_host(buf, *args, **kwargs)
|
501 |
+
rbuf = dbuf.copy_to_host()
|
502 |
+
return np.frombuffer(rbuf, dtype=np.uint8).tolist()
|
503 |
+
assert put() == lst
|
504 |
+
if size > 4:
|
505 |
+
assert put(position=size//4) == lst[:size//4]+lst[:-size//4]
|
506 |
+
assert put() == lst
|
507 |
+
assert put(position=1, nbytes=size//2) == \
|
508 |
+
lst[:1] + lst[:size//2] + lst[-(size-size//2-1):]
|
509 |
+
|
510 |
+
for (position, nbytes) in [
|
511 |
+
(size+2, -1), (-2, -1), (size+1, 0), (-3, 0),
|
512 |
+
]:
|
513 |
+
with pytest.raises(ValueError,
|
514 |
+
match='position argument is out-of-range'):
|
515 |
+
put(position=position, nbytes=nbytes)
|
516 |
+
|
517 |
+
for (position, nbytes) in [
|
518 |
+
(0, size+1),
|
519 |
+
]:
|
520 |
+
with pytest.raises(ValueError,
|
521 |
+
match=('requested more to copy than'
|
522 |
+
' available from host buffer')):
|
523 |
+
put(position=position, nbytes=nbytes)
|
524 |
+
|
525 |
+
if size < 4:
|
526 |
+
return
|
527 |
+
|
528 |
+
for (position, nbytes) in [
|
529 |
+
(size//2, (size+1)//2+1)
|
530 |
+
]:
|
531 |
+
with pytest.raises(ValueError,
|
532 |
+
match=('requested more to copy than'
|
533 |
+
' available in device buffer')):
|
534 |
+
put(position=position, nbytes=nbytes)
|
535 |
+
|
536 |
+
|
537 |
+
def test_BufferWriter():
|
538 |
+
def allocate(size):
|
539 |
+
cbuf = global_context.new_buffer(size)
|
540 |
+
writer = cuda.BufferWriter(cbuf)
|
541 |
+
return cbuf, writer
|
542 |
+
|
543 |
+
def test_writes(total_size, chunksize, buffer_size=0):
|
544 |
+
cbuf, writer = allocate(total_size)
|
545 |
+
arr, buf = make_random_buffer(size=total_size, target='host')
|
546 |
+
|
547 |
+
if buffer_size > 0:
|
548 |
+
writer.buffer_size = buffer_size
|
549 |
+
|
550 |
+
position = writer.tell()
|
551 |
+
assert position == 0
|
552 |
+
writer.write(buf.slice(length=chunksize))
|
553 |
+
assert writer.tell() == chunksize
|
554 |
+
writer.seek(0)
|
555 |
+
position = writer.tell()
|
556 |
+
assert position == 0
|
557 |
+
|
558 |
+
while position < total_size:
|
559 |
+
bytes_to_write = min(chunksize, total_size - position)
|
560 |
+
writer.write(buf.slice(offset=position, length=bytes_to_write))
|
561 |
+
position += bytes_to_write
|
562 |
+
|
563 |
+
writer.flush()
|
564 |
+
assert cbuf.size == total_size
|
565 |
+
cbuf.context.synchronize()
|
566 |
+
buf2 = cbuf.copy_to_host()
|
567 |
+
cbuf.context.synchronize()
|
568 |
+
assert buf2.size == total_size
|
569 |
+
arr2 = np.frombuffer(buf2, dtype=np.uint8)
|
570 |
+
np.testing.assert_equal(arr, arr2)
|
571 |
+
|
572 |
+
total_size, chunk_size = 1 << 16, 1000
|
573 |
+
test_writes(total_size, chunk_size)
|
574 |
+
test_writes(total_size, chunk_size, total_size // 16)
|
575 |
+
|
576 |
+
cbuf, writer = allocate(100)
|
577 |
+
writer.write(np.arange(100, dtype=np.uint8))
|
578 |
+
writer.writeat(50, np.arange(25, dtype=np.uint8))
|
579 |
+
writer.write(np.arange(25, dtype=np.uint8))
|
580 |
+
writer.flush()
|
581 |
+
|
582 |
+
arr = np.frombuffer(cbuf.copy_to_host(), np.uint8)
|
583 |
+
np.testing.assert_equal(arr[:50], np.arange(50, dtype=np.uint8))
|
584 |
+
np.testing.assert_equal(arr[50:75], np.arange(25, dtype=np.uint8))
|
585 |
+
np.testing.assert_equal(arr[75:], np.arange(25, dtype=np.uint8))
|
586 |
+
|
587 |
+
|
588 |
+
def test_BufferWriter_edge_cases():
|
589 |
+
# edge cases, see cuda-test.cc for more information:
|
590 |
+
size = 1000
|
591 |
+
cbuf = global_context.new_buffer(size)
|
592 |
+
writer = cuda.BufferWriter(cbuf)
|
593 |
+
arr, buf = make_random_buffer(size=size, target='host')
|
594 |
+
|
595 |
+
assert writer.buffer_size == 0
|
596 |
+
writer.buffer_size = 100
|
597 |
+
assert writer.buffer_size == 100
|
598 |
+
|
599 |
+
writer.write(buf.slice(length=0))
|
600 |
+
assert writer.tell() == 0
|
601 |
+
|
602 |
+
writer.write(buf.slice(length=10))
|
603 |
+
writer.buffer_size = 200
|
604 |
+
assert writer.buffer_size == 200
|
605 |
+
assert writer.num_bytes_buffered == 0
|
606 |
+
|
607 |
+
writer.write(buf.slice(offset=10, length=300))
|
608 |
+
assert writer.num_bytes_buffered == 0
|
609 |
+
|
610 |
+
writer.write(buf.slice(offset=310, length=200))
|
611 |
+
assert writer.num_bytes_buffered == 0
|
612 |
+
|
613 |
+
writer.write(buf.slice(offset=510, length=390))
|
614 |
+
writer.write(buf.slice(offset=900, length=100))
|
615 |
+
|
616 |
+
writer.flush()
|
617 |
+
|
618 |
+
buf2 = cbuf.copy_to_host()
|
619 |
+
assert buf2.size == size
|
620 |
+
arr2 = np.frombuffer(buf2, dtype=np.uint8)
|
621 |
+
np.testing.assert_equal(arr, arr2)
|
622 |
+
|
623 |
+
|
624 |
+
def test_BufferReader():
|
625 |
+
size = 1000
|
626 |
+
arr, cbuf = make_random_buffer(size=size, target='device')
|
627 |
+
|
628 |
+
reader = cuda.BufferReader(cbuf)
|
629 |
+
reader.seek(950)
|
630 |
+
assert reader.tell() == 950
|
631 |
+
|
632 |
+
data = reader.read(100)
|
633 |
+
assert len(data) == 50
|
634 |
+
assert reader.tell() == 1000
|
635 |
+
|
636 |
+
reader.seek(925)
|
637 |
+
arr2 = np.zeros(100, dtype=np.uint8)
|
638 |
+
n = reader.readinto(arr2)
|
639 |
+
assert n == 75
|
640 |
+
assert reader.tell() == 1000
|
641 |
+
np.testing.assert_equal(arr[925:], arr2[:75])
|
642 |
+
|
643 |
+
reader.seek(0)
|
644 |
+
assert reader.tell() == 0
|
645 |
+
buf2 = reader.read_buffer()
|
646 |
+
arr2 = np.frombuffer(buf2.copy_to_host(), dtype=np.uint8)
|
647 |
+
np.testing.assert_equal(arr, arr2)
|
648 |
+
|
649 |
+
|
650 |
+
def test_BufferReader_zero_size():
|
651 |
+
arr, cbuf = make_random_buffer(size=0, target='device')
|
652 |
+
reader = cuda.BufferReader(cbuf)
|
653 |
+
reader.seek(0)
|
654 |
+
data = reader.read()
|
655 |
+
assert len(data) == 0
|
656 |
+
assert reader.tell() == 0
|
657 |
+
buf2 = reader.read_buffer()
|
658 |
+
arr2 = np.frombuffer(buf2.copy_to_host(), dtype=np.uint8)
|
659 |
+
np.testing.assert_equal(arr, arr2)
|
660 |
+
|
661 |
+
|
662 |
+
def make_recordbatch(length):
|
663 |
+
schema = pa.schema([pa.field('f0', pa.int16()),
|
664 |
+
pa.field('f1', pa.int16())])
|
665 |
+
a0 = pa.array(np.random.randint(0, 255, size=length, dtype=np.int16))
|
666 |
+
a1 = pa.array(np.random.randint(0, 255, size=length, dtype=np.int16))
|
667 |
+
batch = pa.record_batch([a0, a1], schema=schema)
|
668 |
+
return batch
|
669 |
+
|
670 |
+
|
671 |
+
def test_batch_serialize():
|
672 |
+
batch = make_recordbatch(10)
|
673 |
+
hbuf = batch.serialize()
|
674 |
+
cbuf = cuda.serialize_record_batch(batch, global_context)
|
675 |
+
|
676 |
+
# Test that read_record_batch works properly
|
677 |
+
cbatch = cuda.read_record_batch(cbuf, batch.schema)
|
678 |
+
assert isinstance(cbatch, pa.RecordBatch)
|
679 |
+
assert batch.schema == cbatch.schema
|
680 |
+
assert batch.num_columns == cbatch.num_columns
|
681 |
+
assert batch.num_rows == cbatch.num_rows
|
682 |
+
|
683 |
+
# Deserialize CUDA-serialized batch on host
|
684 |
+
buf = cbuf.copy_to_host()
|
685 |
+
assert hbuf.equals(buf)
|
686 |
+
batch2 = pa.ipc.read_record_batch(buf, batch.schema)
|
687 |
+
assert hbuf.equals(batch2.serialize())
|
688 |
+
|
689 |
+
assert batch.num_columns == batch2.num_columns
|
690 |
+
assert batch.num_rows == batch2.num_rows
|
691 |
+
assert batch.column(0).equals(batch2.column(0))
|
692 |
+
assert batch.equals(batch2)
|
693 |
+
|
694 |
+
|
695 |
+
def make_table():
|
696 |
+
a0 = pa.array([0, 1, 42, None], type=pa.int16())
|
697 |
+
a1 = pa.array([[0, 1], [2], [], None], type=pa.list_(pa.int32()))
|
698 |
+
a2 = pa.array([("ab", True), ("cde", False), (None, None), None],
|
699 |
+
type=pa.struct([("strs", pa.utf8()),
|
700 |
+
("bools", pa.bool_())]))
|
701 |
+
# Dictionaries are validated on the IPC read path, but that can produce
|
702 |
+
# issues for GPU-located dictionaries. Check that they work fine.
|
703 |
+
a3 = pa.DictionaryArray.from_arrays(
|
704 |
+
indices=[0, 1, 1, None],
|
705 |
+
dictionary=pa.array(['foo', 'bar']))
|
706 |
+
a4 = pa.DictionaryArray.from_arrays(
|
707 |
+
indices=[2, 1, 2, None],
|
708 |
+
dictionary=a1)
|
709 |
+
a5 = pa.DictionaryArray.from_arrays(
|
710 |
+
indices=[2, 1, 0, None],
|
711 |
+
dictionary=a2)
|
712 |
+
|
713 |
+
arrays = [a0, a1, a2, a3, a4, a5]
|
714 |
+
schema = pa.schema([('f{}'.format(i), arr.type)
|
715 |
+
for i, arr in enumerate(arrays)])
|
716 |
+
batch = pa.record_batch(arrays, schema=schema)
|
717 |
+
table = pa.Table.from_batches([batch])
|
718 |
+
return table
|
719 |
+
|
720 |
+
|
721 |
+
def make_table_cuda():
|
722 |
+
htable = make_table()
|
723 |
+
# Serialize the host table to bytes
|
724 |
+
sink = pa.BufferOutputStream()
|
725 |
+
with pa.ipc.new_stream(sink, htable.schema) as out:
|
726 |
+
out.write_table(htable)
|
727 |
+
hbuf = pa.py_buffer(sink.getvalue().to_pybytes())
|
728 |
+
|
729 |
+
# Copy the host bytes to a device buffer
|
730 |
+
dbuf = global_context.new_buffer(len(hbuf))
|
731 |
+
dbuf.copy_from_host(hbuf, nbytes=len(hbuf))
|
732 |
+
# Deserialize the device buffer into a Table
|
733 |
+
dtable = pa.ipc.open_stream(cuda.BufferReader(dbuf)).read_all()
|
734 |
+
return hbuf, htable, dbuf, dtable
|
735 |
+
|
736 |
+
|
737 |
+
def test_table_deserialize():
|
738 |
+
# ARROW-9659: make sure that we can deserialize a GPU-located table
|
739 |
+
# without crashing when initializing or validating the underlying arrays.
|
740 |
+
hbuf, htable, dbuf, dtable = make_table_cuda()
|
741 |
+
# Assert basic fields the same between host and device tables
|
742 |
+
assert htable.schema == dtable.schema
|
743 |
+
assert htable.num_rows == dtable.num_rows
|
744 |
+
assert htable.num_columns == dtable.num_columns
|
745 |
+
# Assert byte-level equality
|
746 |
+
assert hbuf.equals(dbuf.copy_to_host())
|
747 |
+
# Copy DtoH and assert the tables are still equivalent
|
748 |
+
assert htable.equals(pa.ipc.open_stream(
|
749 |
+
dbuf.copy_to_host()
|
750 |
+
).read_all())
|
751 |
+
|
752 |
+
|
753 |
+
def test_create_table_with_device_buffers():
|
754 |
+
# ARROW-11872: make sure that we can create an Arrow Table from
|
755 |
+
# GPU-located Arrays without crashing.
|
756 |
+
hbuf, htable, dbuf, dtable = make_table_cuda()
|
757 |
+
# Construct a new Table from the device Table
|
758 |
+
dtable2 = pa.Table.from_arrays(dtable.columns, dtable.column_names)
|
759 |
+
# Assert basic fields the same between host and device tables
|
760 |
+
assert htable.schema == dtable2.schema
|
761 |
+
assert htable.num_rows == dtable2.num_rows
|
762 |
+
assert htable.num_columns == dtable2.num_columns
|
763 |
+
# Assert byte-level equality
|
764 |
+
assert hbuf.equals(dbuf.copy_to_host())
|
765 |
+
# Copy DtoH and assert the tables are still equivalent
|
766 |
+
assert htable.equals(pa.ipc.open_stream(
|
767 |
+
dbuf.copy_to_host()
|
768 |
+
).read_all())
|
769 |
+
|
770 |
+
|
771 |
+
def other_process_for_test_IPC(handle_buffer, expected_arr):
|
772 |
+
other_context = pa.cuda.Context(0)
|
773 |
+
ipc_handle = pa.cuda.IpcMemHandle.from_buffer(handle_buffer)
|
774 |
+
ipc_buf = other_context.open_ipc_buffer(ipc_handle)
|
775 |
+
ipc_buf.context.synchronize()
|
776 |
+
buf = ipc_buf.copy_to_host()
|
777 |
+
assert buf.size == expected_arr.size, repr((buf.size, expected_arr.size))
|
778 |
+
arr = np.frombuffer(buf, dtype=expected_arr.dtype)
|
779 |
+
np.testing.assert_equal(arr, expected_arr)
|
780 |
+
|
781 |
+
|
782 |
+
@cuda_ipc
|
783 |
+
@pytest.mark.parametrize("size", [0, 1, 1000])
|
784 |
+
def test_IPC(size):
|
785 |
+
import multiprocessing
|
786 |
+
ctx = multiprocessing.get_context('spawn')
|
787 |
+
arr, cbuf = make_random_buffer(size=size, target='device')
|
788 |
+
ipc_handle = cbuf.export_for_ipc()
|
789 |
+
handle_buffer = ipc_handle.serialize()
|
790 |
+
p = ctx.Process(target=other_process_for_test_IPC,
|
791 |
+
args=(handle_buffer, arr))
|
792 |
+
p.start()
|
793 |
+
p.join()
|
794 |
+
assert p.exitcode == 0
|
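Note: the IPC test above condenses into a small producer/consumer pattern. A minimal sketch, assuming a CUDA-capable machine; `producer`/`consumer` are hypothetical helper names, and the consumer must run in a different process than the exporter (CUDA IPC handles cannot be opened in the exporting process, which is why the test uses a spawned multiprocessing.Process):

import numpy as np
import pyarrow as pa
from pyarrow import cuda


def producer():
    ctx = cuda.Context(0)
    payload = np.arange(64, dtype=np.uint8)      # hypothetical data to share
    dbuf = ctx.new_buffer(payload.nbytes)
    dbuf.copy_from_host(pa.py_buffer(payload.tobytes()),
                        position=0, nbytes=payload.nbytes)
    # Serialize the IPC handle so it can be shipped to the consumer process.
    # In a real program, keep dbuf alive while the consumer reads from it.
    return dbuf.export_for_ipc().serialize()


def consumer(handle_bytes):
    # Runs in a separate (e.g. spawned) process.
    ctx = cuda.Context(0)
    handle = cuda.IpcMemHandle.from_buffer(handle_bytes)
    ipc_buf = ctx.open_ipc_buffer(handle)
    ipc_buf.context.synchronize()
    return ipc_buf.copy_to_host().to_pybytes()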
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cuda_numba_interop.py
ADDED
@@ -0,0 +1,235 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest
import pyarrow as pa
import numpy as np

dtypes = ['uint8', 'int16', 'float32']
cuda = pytest.importorskip("pyarrow.cuda")
nb_cuda = pytest.importorskip("numba.cuda")

from numba.cuda.cudadrv.devicearray import DeviceNDArray  # noqa: E402


context_choices = None
context_choice_ids = ['pyarrow.cuda', 'numba.cuda']


def setup_module(module):
    np.random.seed(1234)
    ctx1 = cuda.Context()
    nb_ctx1 = ctx1.to_numba()
    nb_ctx2 = nb_cuda.current_context()
    ctx2 = cuda.Context.from_numba(nb_ctx2)
    module.context_choices = [(ctx1, nb_ctx1), (ctx2, nb_ctx2)]


def teardown_module(module):
    del module.context_choices


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
def test_context(c):
    ctx, nb_ctx = context_choices[c]
    assert ctx.handle == nb_ctx.handle.value
    assert ctx.handle == ctx.to_numba().handle.value
    ctx2 = cuda.Context.from_numba(nb_ctx)
    assert ctx.handle == ctx2.handle
    size = 10
    buf = ctx.new_buffer(size)
    assert ctx.handle == buf.context.handle


def make_random_buffer(size, target='host', dtype='uint8', ctx=None):
    """Return a host or device buffer with random data.
    """
    dtype = np.dtype(dtype)
    if target == 'host':
        assert size >= 0
        buf = pa.allocate_buffer(size*dtype.itemsize)
        arr = np.frombuffer(buf, dtype=dtype)
        arr[:] = np.random.randint(low=0, high=255, size=size,
                                   dtype=np.uint8)
        return arr, buf
    elif target == 'device':
        arr, buf = make_random_buffer(size, target='host', dtype=dtype)
        dbuf = ctx.new_buffer(size * dtype.itemsize)
        dbuf.copy_from_host(buf, position=0, nbytes=buf.size)
        return arr, dbuf
    raise ValueError('invalid target value')


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
@pytest.mark.parametrize("size", [0, 1, 8, 1000])
def test_from_object(c, dtype, size):
    ctx, nb_ctx = context_choices[c]
    arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)

    # Creating device buffer from numba DeviceNDArray:
    darr = nb_cuda.to_device(arr)
    cbuf2 = ctx.buffer_from_object(darr)
    assert cbuf2.size == cbuf.size
    arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
    np.testing.assert_equal(arr, arr2)

    # Creating device buffer from a slice of numba DeviceNDArray:
    if size >= 8:
        # 1-D arrays
        for s in [slice(size//4, None, None),
                  slice(size//4, -(size//4), None)]:
            cbuf2 = ctx.buffer_from_object(darr[s])
            arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
            np.testing.assert_equal(arr[s], arr2)

        # cannot test negative strides due to numba bug, see its issue 3705
        if 0:
            rdarr = darr[::-1]
            cbuf2 = ctx.buffer_from_object(rdarr)
            assert cbuf2.size == cbuf.size
            arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
            np.testing.assert_equal(arr, arr2)

        with pytest.raises(ValueError,
                           match=('array data is non-contiguous')):
            ctx.buffer_from_object(darr[::2])

        # a rectangular 2-D array
        s1 = size//4
        s2 = size//s1
        assert s1 * s2 == size
        cbuf2 = ctx.buffer_from_object(darr.reshape(s1, s2))
        assert cbuf2.size == cbuf.size
        arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
        np.testing.assert_equal(arr, arr2)

        with pytest.raises(ValueError,
                           match=('array data is non-contiguous')):
            ctx.buffer_from_object(darr.reshape(s1, s2)[:, ::2])

        # a 3-D array
        s1 = 4
        s2 = size//8
        s3 = size//(s1*s2)
        assert s1 * s2 * s3 == size
        cbuf2 = ctx.buffer_from_object(darr.reshape(s1, s2, s3))
        assert cbuf2.size == cbuf.size
        arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
        np.testing.assert_equal(arr, arr2)

        with pytest.raises(ValueError,
                           match=('array data is non-contiguous')):
            ctx.buffer_from_object(darr.reshape(s1, s2, s3)[::2])

    # Creating device buffer from an object implementing the cuda array
    # interface:
    class MyObj:
        def __init__(self, darr):
            self.darr = darr

        @property
        def __cuda_array_interface__(self):
            return self.darr.__cuda_array_interface__

    cbuf2 = ctx.buffer_from_object(MyObj(darr))
    assert cbuf2.size == cbuf.size
    arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
    np.testing.assert_equal(arr, arr2)


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_numba_memalloc(c, dtype):
    ctx, nb_ctx = context_choices[c]
    dtype = np.dtype(dtype)
    # Allocate memory using numba context
    # Warning: this will not be reflected in the pyarrow context manager
    # (e.g. bytes_allocated does not change)
    size = 10
    mem = nb_ctx.memalloc(size * dtype.itemsize)
    darr = DeviceNDArray((size,), (dtype.itemsize,), dtype, gpu_data=mem)
    darr[:5] = 99
    darr[5:] = 88
    np.testing.assert_equal(darr.copy_to_host()[:5], 99)
    np.testing.assert_equal(darr.copy_to_host()[5:], 88)

    # wrap numba allocated memory with CudaBuffer
    cbuf = cuda.CudaBuffer.from_numba(mem)
    arr2 = np.frombuffer(cbuf.copy_to_host(), dtype=dtype)
    np.testing.assert_equal(arr2, darr.copy_to_host())


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_pyarrow_memalloc(c, dtype):
    ctx, nb_ctx = context_choices[c]
    size = 10
    arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)

    # wrap CudaBuffer with numba device array
    mem = cbuf.to_numba()
    darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
    np.testing.assert_equal(darr.copy_to_host(), arr)


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_numba_context(c, dtype):
    ctx, nb_ctx = context_choices[c]
    size = 10
    with nb_cuda.gpus[0]:
        arr, cbuf = make_random_buffer(size, target='device',
                                       dtype=dtype, ctx=ctx)
        assert cbuf.context.handle == nb_ctx.handle.value
        mem = cbuf.to_numba()
        darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
        np.testing.assert_equal(darr.copy_to_host(), arr)
        darr[0] = 99
        cbuf.context.synchronize()
        arr2 = np.frombuffer(cbuf.copy_to_host(), dtype=dtype)
        assert arr2[0] == 99


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_pyarrow_jit(c, dtype):
    ctx, nb_ctx = context_choices[c]

    @nb_cuda.jit
    def increment_by_one(an_array):
        pos = nb_cuda.grid(1)
        if pos < an_array.size:
            an_array[pos] += 1

    # applying a numba.cuda kernel to memory held by a CudaBuffer
    size = 10
    arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)
    threadsperblock = 32
    blockspergrid = (arr.size + (threadsperblock - 1)) // threadsperblock
    mem = cbuf.to_numba()
    darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
    increment_by_one[blockspergrid, threadsperblock](darr)
    cbuf.context.synchronize()
    arr1 = np.frombuffer(cbuf.copy_to_host(), dtype=arr.dtype)
    np.testing.assert_equal(arr1, arr + 1)
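Note: the core of the interop the file above exercises is a zero-copy round trip between numba device arrays and pyarrow CudaBuffers. A minimal sketch, assuming a CUDA machine with numba installed (the data values are made up):

import numpy as np
from pyarrow import cuda
from numba import cuda as nb_cuda
from numba.cuda.cudadrv.devicearray import DeviceNDArray

ctx = cuda.Context()
host = np.arange(10, dtype=np.uint8)
darr = nb_cuda.to_device(host)               # numba-owned device memory
cbuf = ctx.buffer_from_object(darr)          # zero-copy CudaBuffer view

# Back to numba: wrap the CudaBuffer's memory as a DeviceNDArray.
darr2 = DeviceNDArray(host.shape, host.strides, host.dtype,
                      gpu_data=cbuf.to_numba())
np.testing.assert_equal(darr2.copy_to_host(), host)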
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_dataset.py
ADDED
The diff for this file is too large to render.
See raw diff
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_deprecations.py
ADDED
@@ -0,0 +1,23 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# Check that various deprecation warnings are raised

# flake8: noqa

import pyarrow as pa
import pytest
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_dlpack.py
ADDED
@@ -0,0 +1,142 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import ctypes
from functools import wraps
import pytest

import numpy as np

import pyarrow as pa
from pyarrow.vendored.version import Version


def PyCapsule_IsValid(capsule, name):
    return ctypes.pythonapi.PyCapsule_IsValid(ctypes.py_object(capsule), name) == 1


def check_dlpack_export(arr, expected_arr):
    DLTensor = arr.__dlpack__()
    assert PyCapsule_IsValid(DLTensor, b"dltensor") is True

    result = np.from_dlpack(arr)
    np.testing.assert_array_equal(result, expected_arr, strict=True)

    assert arr.__dlpack_device__() == (1, 0)


def check_bytes_allocated(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        allocated_bytes = pa.total_allocated_bytes()
        try:
            return f(*args, **kwargs)
        finally:
            assert pa.total_allocated_bytes() == allocated_bytes
    return wrapper


@check_bytes_allocated
@pytest.mark.parametrize(
    ('value_type', 'np_type'),
    [
        (pa.uint8(), np.uint8),
        (pa.uint16(), np.uint16),
        (pa.uint32(), np.uint32),
        (pa.uint64(), np.uint64),
        (pa.int8(), np.int8),
        (pa.int16(), np.int16),
        (pa.int32(), np.int32),
        (pa.int64(), np.int64),
        (pa.float16(), np.float16),
        (pa.float32(), np.float32),
        (pa.float64(), np.float64),
    ]
)
def test_dlpack(value_type, np_type):
    if Version(np.__version__) < Version("1.24.0"):
        pytest.skip("No dlpack support in numpy versions older than 1.22.0, "
                    "strict keyword in assert_array_equal added in numpy version "
                    "1.24.0")

    expected = np.array([1, 2, 3], dtype=np_type)
    arr = pa.array(expected, type=value_type)
    check_dlpack_export(arr, expected)

    arr_sliced = arr.slice(1, 1)
    expected = np.array([2], dtype=np_type)
    check_dlpack_export(arr_sliced, expected)

    arr_sliced = arr.slice(0, 1)
    expected = np.array([1], dtype=np_type)
    check_dlpack_export(arr_sliced, expected)

    arr_sliced = arr.slice(1)
    expected = np.array([2, 3], dtype=np_type)
    check_dlpack_export(arr_sliced, expected)

    arr_zero = pa.array([], type=value_type)
    expected = np.array([], dtype=np_type)
    check_dlpack_export(arr_zero, expected)


def test_dlpack_not_supported():
    if Version(np.__version__) < Version("1.22.0"):
        pytest.skip("No dlpack support in numpy versions older than 1.22.0.")

    arr = pa.array([1, None, 3])
    with pytest.raises(TypeError, match="Can only use DLPack "
                       "on arrays with no nulls."):
        np.from_dlpack(arr)

    arr = pa.array(
        [[0, 1], [3, 4]],
        type=pa.list_(pa.int32())
    )
    with pytest.raises(TypeError, match="DataType is not compatible with DLPack spec"):
        np.from_dlpack(arr)

    arr = pa.array([])
    with pytest.raises(TypeError, match="DataType is not compatible with DLPack spec"):
        np.from_dlpack(arr)

    # DLPack doesn't support bit-packed boolean values
    arr = pa.array([True, False, True])
    with pytest.raises(TypeError, match="Bit-packed boolean data type "
                       "not supported by DLPack."):
        np.from_dlpack(arr)


def test_dlpack_cuda_not_supported():
    cuda = pytest.importorskip("pyarrow.cuda")

    schema = pa.schema([pa.field('f0', pa.int16())])
    a0 = pa.array([1, 2, 3], type=pa.int16())
    batch = pa.record_batch([a0], schema=schema)

    cbuf = cuda.serialize_record_batch(batch, cuda.Context(0))
    cbatch = cuda.read_record_batch(cbuf, batch.schema)
    carr = cbatch["f0"]

    # CudaBuffers not yet supported
    with pytest.raises(NotImplementedError, match="DLPack support is implemented "
                       "only for buffers on CPU device."):
        np.from_dlpack(carr)

    with pytest.raises(NotImplementedError, match="DLPack support is implemented "
                       "only for buffers on CPU device."):
        carr.__dlpack_device__()
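Note: the happy path these tests cover is a zero-copy NumPy import of a pyarrow array through the DLPack protocol. A minimal sketch, assuming numpy >= 1.22 (the tests require 1.24 only for assert_array_equal's strict keyword); the array values are made up:

import numpy as np
import pyarrow as pa

arr = pa.array([1, 2, 3], type=pa.int64())   # primitive type, no nulls
view = np.from_dlpack(arr)                   # zero-copy view of arr's buffer
assert view.dtype == np.int64
assert arr.__dlpack_device__() == (1, 0)     # CPU device, id 0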
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_exec_plan.py
ADDED
@@ -0,0 +1,337 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest
import pyarrow as pa
import pyarrow.compute as pc
from .test_extension_type import IntegerType

try:
    import pyarrow.dataset as ds
except ImportError:
    pass

try:
    from pyarrow.acero import _perform_join, _filter_table
except ImportError:
    pass

pytestmark = pytest.mark.acero


def test_joins_corner_cases():
    t1 = pa.Table.from_pydict({
        "colA": [1, 2, 3, 4, 5, 6],
        "col2": ["a", "b", "c", "d", "e", "f"]
    })

    t2 = pa.Table.from_pydict({
        "colB": [1, 2, 3, 4, 5],
        "col3": ["A", "B", "C", "D", "E"]
    })

    with pytest.raises(pa.ArrowInvalid):
        _perform_join("left outer", t1, "", t2, "")

    with pytest.raises(TypeError):
        _perform_join("left outer", None, "colA", t2, "colB")

    with pytest.raises(ValueError):
        _perform_join("super mario join", t1, "colA", t2, "colB")


@pytest.mark.parametrize("jointype,expected", [
    ("left semi", {
        "colA": [1, 2],
        "col2": ["a", "b"]
    }),
    ("right semi", {
        "colB": [1, 2],
        "col3": ["A", "B"]
    }),
    ("left anti", {
        "colA": [6],
        "col2": ["f"]
    }),
    ("right anti", {
        "colB": [99],
        "col3": ["Z"]
    }),
    ("inner", {
        "colA": [1, 2],
        "col2": ["a", "b"],
        "colB": [1, 2],
        "col3": ["A", "B"]
    }),
    ("left outer", {
        "colA": [1, 2, 6],
        "col2": ["a", "b", "f"],
        "colB": [1, 2, None],
        "col3": ["A", "B", None]
    }),
    ("right outer", {
        "colA": [1, 2, None],
        "col2": ["a", "b", None],
        "colB": [1, 2, 99],
        "col3": ["A", "B", "Z"]
    }),
    ("full outer", {
        "colA": [1, 2, 6, None],
        "col2": ["a", "b", "f", None],
        "colB": [1, 2, None, 99],
        "col3": ["A", "B", None, "Z"]
    })
])
@pytest.mark.parametrize("use_threads", [True, False])
@pytest.mark.parametrize("coalesce_keys", [True, False])
@pytest.mark.parametrize("use_datasets",
                         [False, pytest.param(True, marks=pytest.mark.dataset)])
def test_joins(jointype, expected, use_threads, coalesce_keys, use_datasets):
    # Allocate the table here instead of via parametrize;
    # this prevents keeping Arrow-allocated memory around forever.
    expected = pa.table(expected)

    t1 = pa.Table.from_pydict({
        "colA": [1, 2, 6],
        "col2": ["a", "b", "f"]
    })

    t2 = pa.Table.from_pydict({
        "colB": [99, 2, 1],
        "col3": ["Z", "B", "A"]
    })

    if use_datasets:
        t1 = ds.dataset([t1])
        t2 = ds.dataset([t2])

    r = _perform_join(jointype, t1, "colA", t2, "colB",
                      use_threads=use_threads, coalesce_keys=coalesce_keys)
    r = r.combine_chunks()
    if "right" in jointype:
        r = r.sort_by("colB")
    else:
        r = r.sort_by("colA")
    if coalesce_keys:
        if jointype in ("inner", "left outer"):
            expected = expected.drop(["colB"])
        elif jointype == "right outer":
            expected = expected.drop(["colA"])
        elif jointype == "full outer":
            expected = expected.drop(["colB"]).set_column(0, "colA", [[1, 2, 6, 99]])
    assert r == expected


def test_table_join_collisions():
    t1 = pa.table({
        "colA": [1, 2, 6],
        "colB": [10, 20, 60],
        "colVals": ["a", "b", "f"]
    })

    t2 = pa.table({
        "colB": [99, 20, 10],
        "colVals": ["Z", "B", "A"],
        "colUniq": [100, 200, 300],
        "colA": [99, 2, 1],
    })

    result = _perform_join(
        "full outer", t1, ["colA", "colB"], t2, ["colA", "colB"])
    result = result.combine_chunks()
    result = result.sort_by("colUniq")
    assert result == pa.table([
        [None, 2, 1, 6],
        [None, 20, 10, 60],
        [None, "b", "a", "f"],
        [99, 20, 10, None],
        ["Z", "B", "A", None],
        [100, 200, 300, None],
        [99, 2, 1, None],
    ], names=["colA", "colB", "colVals", "colB", "colVals", "colUniq", "colA"])

    result = _perform_join("full outer", t1, "colA",
                           t2, "colA", right_suffix="_r",
                           coalesce_keys=False)
    result = result.combine_chunks()
    result = result.sort_by("colA")
    assert result == pa.table({
        "colA": [1, 2, 6, None],
        "colB": [10, 20, 60, None],
        "colVals": ["a", "b", "f", None],
        "colB_r": [10, 20, None, 99],
        "colVals_r": ["A", "B", None, "Z"],
        "colUniq": [300, 200, None, 100],
        "colA_r": [1, 2, None, 99],
    })

    result = _perform_join("full outer", t1, "colA",
                           t2, "colA", right_suffix="_r",
                           coalesce_keys=True)
    result = result.combine_chunks()
    result = result.sort_by("colA")
    assert result == pa.table({
        "colA": [1, 2, 6, 99],
        "colB": [10, 20, 60, None],
        "colVals": ["a", "b", "f", None],
        "colB_r": [10, 20, None, 99],
        "colVals_r": ["A", "B", None, "Z"],
        "colUniq": [300, 200, None, 100]
    })


def test_table_join_keys_order():
    t1 = pa.table({
        "colB": [10, 20, 60],
        "colA": [1, 2, 6],
        "colVals": ["a", "b", "f"]
    })

    t2 = pa.table({
        "colVals": ["Z", "B", "A"],
        "colX": [99, 2, 1],
    })

    result = _perform_join("full outer", t1, "colA", t2, "colX",
                           left_suffix="_l", right_suffix="_r",
                           coalesce_keys=True)
    result = result.combine_chunks()
    result = result.sort_by("colA")
    assert result == pa.table({
        "colB": [10, 20, 60, None],
        "colA": [1, 2, 6, 99],
        "colVals_l": ["a", "b", "f", None],
        "colVals_r": ["A", "B", None, "Z"],
    })


def test_filter_table_errors():
    t = pa.table({
        "a": [1, 2, 3, 4, 5],
        "b": [10, 20, 30, 40, 50]
    })

    with pytest.raises(pa.ArrowTypeError):
        _filter_table(t, pc.divide(pc.field("a"), pc.scalar(2)))

    with pytest.raises(pa.ArrowInvalid):
        _filter_table(t, (pc.field("Z") <= pc.scalar(2)))


def test_filter_table():
    t = pa.table({
        "a": [1, 2, 3, 4, 5],
        "b": [10, 20, 30, 40, 50]
    })

    result = _filter_table(
        t, (pc.field("a") <= pc.scalar(3)) & (pc.field("b") == pc.scalar(20)),
    )
    assert result == pa.table({
        "a": [2],
        "b": [20]
    })

    result = _filter_table(t, pc.field("b") > pc.scalar(30))
    assert result == pa.table({
        "a": [4, 5],
        "b": [40, 50]
    })


def test_filter_table_ordering():
    table1 = pa.table({'a': [1, 2, 3, 4], 'b': ['a'] * 4})
    table2 = pa.table({'a': [1, 2, 3, 4], 'b': ['b'] * 4})
    table = pa.concat_tables([table1, table2])

    for _ in range(20):
        # 20 iterations seem to consistently cause errors when order is not
        # preserved. If the order problem is reintroduced this test will
        # become flaky, which is still a signal that the order is not
        # preserved.
        r = _filter_table(table, pc.field('a') == 1)
        assert r["b"] == pa.chunked_array([["a"], ["b"]])


def test_complex_filter_table():
    t = pa.table({
        "a": [1, 2, 3, 4, 5, 6, 6],
        "b": [10, 20, 30, 40, 50, 60, 61]
    })

    result = _filter_table(
        t, ((pc.bit_wise_and(pc.field("a"), pc.scalar(1)) == pc.scalar(0)) &
            (pc.multiply(pc.field("a"), pc.scalar(10)) == pc.field("b")))
    )

    assert result == pa.table({
        "a": [2, 4, 6],  # the second 6 must be omitted because 6*10 != 61
        "b": [20, 40, 60]
    })


def test_join_extension_array_column():
    storage = pa.array([1, 2, 3], type=pa.int64())
    ty = IntegerType()
    ext_array = pa.ExtensionArray.from_storage(ty, storage)
    dict_array = pa.DictionaryArray.from_arrays(
        pa.array([0, 2, 1]), pa.array(['a', 'b', 'c']))
    t1 = pa.table({
        "colA": [1, 2, 6],
        "colB": ext_array,
        "colVals": ext_array,
    })

    t2 = pa.table({
        "colA": [99, 2, 1],
        "colC": ext_array,
    })

    t3 = pa.table({
        "colA": [99, 2, 1],
        "colC": ext_array,
        "colD": dict_array,
    })

    result = _perform_join(
        "left outer", t1, ["colA"], t2, ["colA"])
    assert result["colVals"] == pa.chunked_array(ext_array)

    result = _perform_join(
        "left outer", t1, ["colB"], t2, ["colC"])
    assert result["colB"] == pa.chunked_array(ext_array)

    result = _perform_join(
        "left outer", t1, ["colA"], t3, ["colA"])
    assert result["colVals"] == pa.chunked_array(ext_array)

    result = _perform_join(
        "left outer", t1, ["colB"], t3, ["colC"])
    assert result["colB"] == pa.chunked_array(ext_array)


def test_group_by_ordering():
    # GH-36709 - preserve ordering in groupby by setting use_threads=False
    table1 = pa.table({'a': [1, 2, 3, 4], 'b': ['a'] * 4})
    table2 = pa.table({'a': [1, 2, 3, 4], 'b': ['b'] * 4})
    table = pa.concat_tables([table1, table2])

    for _ in range(50):
        # 50 iterations seem to consistently cause errors when order is not
        # preserved. If the order problem is reintroduced this test will
        # become flaky, which is still a signal that the order is not
        # preserved.
        result = table.group_by("b", use_threads=False).aggregate([])
        assert result["b"] == pa.chunked_array([["a"], ["b"]])
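Note: the tests above drive the private pyarrow.acero._perform_join helper directly; the same Acero join should also be reachable through the public pa.Table.join API. A minimal sketch with made-up data (the exact output column order is an assumption):

import pyarrow as pa

t1 = pa.table({"colA": [1, 2, 6], "col2": ["a", "b", "f"]})
t2 = pa.table({"colB": [99, 2, 1], "col3": ["Z", "B", "A"]})

# Left outer join on colA == colB; keys are coalesced by default.
joined = t1.join(t2, keys="colA", right_keys="colB",
                 join_type="left outer")
print(joined.sort_by("colA").to_pydict())
# expected roughly: {'colA': [1, 2, 6], 'col2': ['a', 'b', 'f'],
#                    'col3': ['A', 'B', None]}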
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_extension_type.py
ADDED
@@ -0,0 +1,1496 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
# or more contributor license agreements. See the NOTICE file
|
3 |
+
# distributed with this work for additional information
|
4 |
+
# regarding copyright ownership. The ASF licenses this file
|
5 |
+
# to you under the Apache License, Version 2.0 (the
|
6 |
+
# "License"); you may not use this file except in compliance
|
7 |
+
# with the License. You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing,
|
12 |
+
# software distributed under the License is distributed on an
|
13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
# KIND, either express or implied. See the License for the
|
15 |
+
# specific language governing permissions and limitations
|
16 |
+
# under the License.
|
17 |
+
|
18 |
+
import contextlib
|
19 |
+
import os
|
20 |
+
import shutil
|
21 |
+
import subprocess
|
22 |
+
import weakref
|
23 |
+
from uuid import uuid4, UUID
|
24 |
+
import sys
|
25 |
+
|
26 |
+
import numpy as np
|
27 |
+
import pyarrow as pa
|
28 |
+
from pyarrow.vendored.version import Version
|
29 |
+
|
30 |
+
import pytest
|
31 |
+
|
32 |
+
|
33 |
+
@contextlib.contextmanager
|
34 |
+
def registered_extension_type(ext_type):
|
35 |
+
pa.register_extension_type(ext_type)
|
36 |
+
try:
|
37 |
+
yield
|
38 |
+
finally:
|
39 |
+
pa.unregister_extension_type(ext_type.extension_name)
|
40 |
+
|
41 |
+
|
42 |
+
@contextlib.contextmanager
|
43 |
+
def enabled_auto_load():
|
44 |
+
pa.PyExtensionType.set_auto_load(True)
|
45 |
+
try:
|
46 |
+
yield
|
47 |
+
finally:
|
48 |
+
pa.PyExtensionType.set_auto_load(False)
|
49 |
+
|
50 |
+
|
51 |
+


class TinyIntType(pa.ExtensionType):

    def __init__(self):
        super().__init__(pa.int8(), 'pyarrow.tests.TinyIntType')

    def __arrow_ext_serialize__(self):
        return b''

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        assert serialized == b''
        assert storage_type == pa.int8()
        return cls()


class IntegerType(pa.ExtensionType):

    def __init__(self):
        super().__init__(pa.int64(), 'pyarrow.tests.IntegerType')

    def __arrow_ext_serialize__(self):
        return b''

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        assert serialized == b''
        assert storage_type == pa.int64()
        return cls()


class IntegerEmbeddedType(pa.ExtensionType):

    def __init__(self):
        super().__init__(IntegerType(), 'pyarrow.tests.IntegerType')

    def __arrow_ext_serialize__(self):
        # XXX pa.BaseExtensionType should expose C++ serialization method
        return self.storage_type.__arrow_ext_serialize__()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        deserialized_storage_type = storage_type.__arrow_ext_deserialize__(
            serialized)
        assert deserialized_storage_type == storage_type
        return cls()


class UuidScalarType(pa.ExtensionScalar):
    def as_py(self):
        return None if self.value is None else UUID(bytes=self.value.as_py())


class UuidType(pa.ExtensionType):

    def __init__(self):
        super().__init__(pa.binary(16), 'pyarrow.tests.UuidType')

    def __arrow_ext_scalar_class__(self):
        return UuidScalarType

    def __arrow_ext_serialize__(self):
        return b''

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        return cls()
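

# UuidType demonstrates the __arrow_ext_scalar_class__ hook: by returning
# a pa.ExtensionScalar subclass, the type customizes what arr[i].as_py()
# yields. A minimal sketch of the effect, using the names defined above:
#
#     ty = UuidType()
#     scalar = pa.ExtensionScalar.from_storage(ty, uuid4().bytes)
#     scalar.as_py()   # -> a uuid.UUID instance, not raw bytes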


class UuidType2(pa.ExtensionType):

    def __init__(self):
        super().__init__(pa.binary(16), 'pyarrow.tests.UuidType2')

    def __arrow_ext_serialize__(self):
        return b''

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        return cls()


class LabelType(pa.ExtensionType):

    def __init__(self):
        super().__init__(pa.string(), 'pyarrow.tests.LabelType')

    def __arrow_ext_serialize__(self):
        return b''

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        return cls()


class ParamExtType(pa.ExtensionType):

    def __init__(self, width):
        self._width = width
        super().__init__(pa.binary(width), 'pyarrow.tests.ParamExtType')

    @property
    def width(self):
        return self._width

    def __arrow_ext_serialize__(self):
        return str(self._width).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        width = int(serialized.decode())
        assert storage_type == pa.binary(width)
        return cls(width)


class MyStructType(pa.ExtensionType):
    storage_type = pa.struct([('left', pa.int64()),
                              ('right', pa.int64())])

    def __init__(self):
        super().__init__(self.storage_type, 'pyarrow.tests.MyStructType')

    def __arrow_ext_serialize__(self):
        return b''

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        assert serialized == b''
        assert storage_type == cls.storage_type
        return cls()


class MyListType(pa.ExtensionType):

    def __init__(self, storage_type):
        assert isinstance(storage_type, pa.ListType)
        super().__init__(storage_type, 'pyarrow.tests.MyListType')

    def __arrow_ext_serialize__(self):
        return b''

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        assert serialized == b''
        return cls(storage_type)


class AnnotatedType(pa.ExtensionType):
    """
    Generic extension type that can store any storage type.
    """

    def __init__(self, storage_type, annotation):
        self.annotation = annotation
        super().__init__(storage_type, 'pyarrow.tests.AnnotatedType')

    def __arrow_ext_serialize__(self):
        return b''

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        assert serialized == b''
        # The annotation is not serialized, so it cannot be recovered here;
        # the original `cls(storage_type)` would raise a TypeError because
        # __init__ requires two arguments.
        return cls(storage_type, None)


class LegacyIntType(pa.PyExtensionType):

    def __init__(self):
        pa.PyExtensionType.__init__(self, pa.int8())

    def __reduce__(self):
        return LegacyIntType, ()


def ipc_write_batch(batch):
    stream = pa.BufferOutputStream()
    writer = pa.RecordBatchStreamWriter(stream, batch.schema)
    writer.write_batch(batch)
    writer.close()
    return stream.getvalue()


def ipc_read_batch(buf):
    reader = pa.RecordBatchStreamReader(buf)
    return reader.read_next_batch()
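

# The two helpers above round-trip a record batch through the Arrow IPC
# stream format entirely in memory. A minimal usage sketch:
#
#     batch = pa.RecordBatch.from_arrays(
#         [pa.array([1, 2, 3], pa.int64())], ["ints"])
#     buf = ipc_write_batch(batch)       # pa.Buffer holding the stream bytes
#     roundtripped = ipc_read_batch(buf)
#     assert roundtripped.equals(batch)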


def test_ext_type_basics():
    ty = UuidType()
    assert ty.extension_name == "pyarrow.tests.UuidType"


def test_ext_type_str():
    ty = IntegerType()
    expected = "extension<pyarrow.tests.IntegerType<IntegerType>>"
    assert str(ty) == expected
    assert pa.DataType.__str__(ty) == expected


def test_ext_type_repr():
    ty = IntegerType()
    assert repr(ty) == "IntegerType(DataType(int64))"


def test_ext_type_lifetime():
    ty = UuidType()
    wr = weakref.ref(ty)
    del ty
    assert wr() is None


def test_ext_type_storage_type():
    ty = UuidType()
    assert ty.storage_type == pa.binary(16)
    assert ty.__class__ is UuidType
    ty = ParamExtType(5)
    assert ty.storage_type == pa.binary(5)
    assert ty.__class__ is ParamExtType


def test_ext_type_as_py():
    ty = UuidType()
    expected = uuid4()
    scalar = pa.ExtensionScalar.from_storage(ty, expected.bytes)
    assert scalar.as_py() == expected

    # test array
    uuids = [uuid4() for _ in range(3)]
    storage = pa.array([uuid.bytes for uuid in uuids], type=pa.binary(16))
    arr = pa.ExtensionArray.from_storage(ty, storage)

    # works for __getitem__
    for i, expected in enumerate(uuids):
        assert arr[i].as_py() == expected

    # works for __iter__
    for result, expected in zip(arr, uuids):
        assert result.as_py() == expected

    # test chunked array
    data = [
        pa.ExtensionArray.from_storage(ty, storage),
        pa.ExtensionArray.from_storage(ty, storage)
    ]
    carr = pa.chunked_array(data)
    for i, expected in enumerate(uuids + uuids):
        assert carr[i].as_py() == expected

    for result, expected in zip(carr, uuids + uuids):
        assert result.as_py() == expected


def test_uuid_type_pickle(pickle_module):
    for proto in range(0, pickle_module.HIGHEST_PROTOCOL + 1):
        ty = UuidType()
        ser = pickle_module.dumps(ty, protocol=proto)
        del ty
        ty = pickle_module.loads(ser)
        wr = weakref.ref(ty)
        assert ty.extension_name == "pyarrow.tests.UuidType"
        del ty
        assert wr() is None


def test_ext_type_equality():
    a = ParamExtType(5)
    b = ParamExtType(6)
    c = ParamExtType(6)
    assert a != b
    assert b == c
    d = UuidType()
    e = UuidType()
    assert a != d
    assert d == e


def test_ext_array_basics():
    ty = ParamExtType(3)
    storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
    arr = pa.ExtensionArray.from_storage(ty, storage)
    arr.validate()
    assert arr.type is ty
    assert arr.storage.equals(storage)


def test_ext_array_lifetime():
    ty = ParamExtType(3)
    storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
    arr = pa.ExtensionArray.from_storage(ty, storage)

    refs = [weakref.ref(ty), weakref.ref(arr), weakref.ref(storage)]
    del ty, storage, arr
    for ref in refs:
        assert ref() is None


def test_ext_array_to_pylist():
    ty = ParamExtType(3)
    storage = pa.array([b"foo", b"bar", None], type=pa.binary(3))
    arr = pa.ExtensionArray.from_storage(ty, storage)

    assert arr.to_pylist() == [b"foo", b"bar", None]


def test_ext_array_errors():
    ty = ParamExtType(4)
    storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
    with pytest.raises(TypeError, match="Incompatible storage type"):
        pa.ExtensionArray.from_storage(ty, storage)


def test_ext_array_equality():
    storage1 = pa.array([b"0123456789abcdef"], type=pa.binary(16))
    storage2 = pa.array([b"0123456789abcdef"], type=pa.binary(16))
    storage3 = pa.array([], type=pa.binary(16))
    ty1 = UuidType()
    ty2 = ParamExtType(16)

    a = pa.ExtensionArray.from_storage(ty1, storage1)
    b = pa.ExtensionArray.from_storage(ty1, storage2)
    assert a.equals(b)
    c = pa.ExtensionArray.from_storage(ty1, storage3)
    assert not a.equals(c)
    d = pa.ExtensionArray.from_storage(ty2, storage1)
    assert not a.equals(d)
    e = pa.ExtensionArray.from_storage(ty2, storage2)
    assert d.equals(e)
    f = pa.ExtensionArray.from_storage(ty2, storage3)
    assert not d.equals(f)


def test_ext_array_wrap_array():
    ty = ParamExtType(3)
    storage = pa.array([b"foo", b"bar", None], type=pa.binary(3))
    arr = ty.wrap_array(storage)
    arr.validate(full=True)
    assert isinstance(arr, pa.ExtensionArray)
    assert arr.type == ty
    assert arr.storage == storage

    storage = pa.chunked_array([[b"abc", b"def"], [b"ghi"]],
                               type=pa.binary(3))
    arr = ty.wrap_array(storage)
    arr.validate(full=True)
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.type == ty
    assert arr.chunk(0).storage == storage.chunk(0)
    assert arr.chunk(1).storage == storage.chunk(1)

    # Wrong storage type
    storage = pa.array([b"foo", b"bar", None])
    with pytest.raises(TypeError, match="Incompatible storage type"):
        ty.wrap_array(storage)

    # Not an array or chunked array
    with pytest.raises(TypeError, match="Expected array or chunked array"):
        ty.wrap_array(None)
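

# Note the difference between the two wrapping APIs exercised above:
# pa.ExtensionArray.from_storage() only accepts a plain Array, while
# ExtensionType.wrap_array() accepts an Array or a ChunkedArray and
# wraps each chunk individually.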


def test_ext_scalar_from_array():
    data = [b"0123456789abcdef", b"0123456789abcdef",
            b"zyxwvutsrqponmlk", None]
    storage = pa.array(data, type=pa.binary(16))
    ty1 = UuidType()
    ty2 = ParamExtType(16)
    ty3 = UuidType2()

    a = pa.ExtensionArray.from_storage(ty1, storage)
    b = pa.ExtensionArray.from_storage(ty2, storage)
    c = pa.ExtensionArray.from_storage(ty3, storage)

    scalars_a = list(a)
    assert len(scalars_a) == 4

    assert ty1.__arrow_ext_scalar_class__() == UuidScalarType
    assert isinstance(a[0], UuidScalarType)
    assert isinstance(scalars_a[0], UuidScalarType)

    for s, val in zip(scalars_a, data):
        assert isinstance(s, pa.ExtensionScalar)
        assert s.is_valid == (val is not None)
        assert s.type == ty1
        if val is not None:
            assert s.value == pa.scalar(val, storage.type)
            assert s.as_py() == UUID(bytes=val)
        else:
            assert s.value is None

    scalars_b = list(b)
    assert len(scalars_b) == 4

    for sa, sb in zip(scalars_a, scalars_b):
        assert isinstance(sb, pa.ExtensionScalar)
        assert sa.is_valid == sb.is_valid
        if sa.as_py() is None:
            assert sa.as_py() == sb.as_py()
        else:
            assert sa.as_py().bytes == sb.as_py()
        assert sa != sb

    scalars_c = list(c)
    assert len(scalars_c) == 4

    for s, val in zip(scalars_c, data):
        assert isinstance(s, pa.ExtensionScalar)
        assert s.is_valid == (val is not None)
        assert s.type == ty3
        if val is not None:
            assert s.value == pa.scalar(val, storage.type)
            assert s.as_py() == val
        else:
            assert s.value is None

    assert a.to_pylist() == [UUID(bytes=x) if x else None for x in data]


def test_ext_scalar_from_storage():
    ty = UuidType()

    s = pa.ExtensionScalar.from_storage(ty, None)
    assert isinstance(s, pa.ExtensionScalar)
    assert s.type == ty
    assert s.is_valid is False
    assert s.value is None

    s = pa.ExtensionScalar.from_storage(ty, b"0123456789abcdef")
    assert isinstance(s, pa.ExtensionScalar)
    assert s.type == ty
    assert s.is_valid is True
    assert s.value == pa.scalar(b"0123456789abcdef", ty.storage_type)

    s = pa.ExtensionScalar.from_storage(ty, pa.scalar(None, ty.storage_type))
    assert isinstance(s, pa.ExtensionScalar)
    assert s.type == ty
    assert s.is_valid is False
    assert s.value is None

    s = pa.ExtensionScalar.from_storage(
        ty, pa.scalar(b"0123456789abcdef", ty.storage_type))
    assert isinstance(s, pa.ExtensionScalar)
    assert s.type == ty
    assert s.is_valid is True
    assert s.value == pa.scalar(b"0123456789abcdef", ty.storage_type)


def test_ext_array_pickling(pickle_module):
    for proto in range(0, pickle_module.HIGHEST_PROTOCOL + 1):
        ty = ParamExtType(3)
        storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
        arr = pa.ExtensionArray.from_storage(ty, storage)
        ser = pickle_module.dumps(arr, protocol=proto)
        del ty, storage, arr
        arr = pickle_module.loads(ser)
        arr.validate()
        assert isinstance(arr, pa.ExtensionArray)
        assert arr.type == ParamExtType(3)
        assert arr.type.storage_type == pa.binary(3)
        assert arr.storage.type == pa.binary(3)
        assert arr.storage.to_pylist() == [b"foo", b"bar"]


def test_ext_array_conversion_to_numpy():
    storage1 = pa.array([1, 2, 3], type=pa.int64())
    storage2 = pa.array([b"123", b"456", b"789"], type=pa.binary(3))
    ty1 = IntegerType()
    ty2 = ParamExtType(3)

    arr1 = pa.ExtensionArray.from_storage(ty1, storage1)
    arr2 = pa.ExtensionArray.from_storage(ty2, storage2)

    result = arr1.to_numpy()
    expected = np.array([1, 2, 3], dtype="int64")
    np.testing.assert_array_equal(result, expected)

    with pytest.raises(ValueError, match="zero_copy_only was True"):
        arr2.to_numpy()
    result = arr2.to_numpy(zero_copy_only=False)
    expected = np.array([b"123", b"456", b"789"])
    np.testing.assert_array_equal(result, expected)


@pytest.mark.pandas
def test_ext_array_conversion_to_pandas():
    import pandas as pd

    storage1 = pa.array([1, 2, 3], type=pa.int64())
    storage2 = pa.array([b"123", b"456", b"789"], type=pa.binary(3))
    ty1 = IntegerType()
    ty2 = ParamExtType(3)

    arr1 = pa.ExtensionArray.from_storage(ty1, storage1)
    arr2 = pa.ExtensionArray.from_storage(ty2, storage2)

    result = arr1.to_pandas()
    expected = pd.Series([1, 2, 3], dtype="int64")
    pd.testing.assert_series_equal(result, expected)

    result = arr2.to_pandas()
    expected = pd.Series([b"123", b"456", b"789"], dtype=object)
    pd.testing.assert_series_equal(result, expected)


@pytest.fixture
def struct_w_ext_data():
    storage1 = pa.array([1, 2, 3], type=pa.int64())
    storage2 = pa.array([b"123", b"456", b"789"], type=pa.binary(3))
    ty1 = IntegerType()
    ty2 = ParamExtType(3)

    arr1 = pa.ExtensionArray.from_storage(ty1, storage1)
    arr2 = pa.ExtensionArray.from_storage(ty2, storage2)

    sarr1 = pa.StructArray.from_arrays([arr1], ["f0"])
    sarr2 = pa.StructArray.from_arrays([arr2], ["f1"])

    return [sarr1, sarr2]


def test_struct_w_ext_array_to_numpy(struct_w_ext_data):
    # ARROW-15291
    # Check that we don't segfault when trying to build
    # a numpy array from a StructArray with a field being
    # an ExtensionArray

    result = struct_w_ext_data[0].to_numpy(zero_copy_only=False)
    expected = np.array([{'f0': 1}, {'f0': 2},
                         {'f0': 3}], dtype=object)
    np.testing.assert_array_equal(result, expected)

    result = struct_w_ext_data[1].to_numpy(zero_copy_only=False)
    expected = np.array([{'f1': b'123'}, {'f1': b'456'},
                         {'f1': b'789'}], dtype=object)
    np.testing.assert_array_equal(result, expected)


@pytest.mark.pandas
def test_struct_w_ext_array_to_pandas(struct_w_ext_data):
    # ARROW-15291
    # Check that we don't segfault when trying to build
    # a Pandas dataframe from a StructArray with a field
    # being an ExtensionArray
    import pandas as pd

    result = struct_w_ext_data[0].to_pandas()
    expected = pd.Series([{'f0': 1}, {'f0': 2},
                          {'f0': 3}], dtype=object)
    pd.testing.assert_series_equal(result, expected)

    result = struct_w_ext_data[1].to_pandas()
    expected = pd.Series([{'f1': b'123'}, {'f1': b'456'},
                          {'f1': b'789'}], dtype=object)
    pd.testing.assert_series_equal(result, expected)


def test_cast_kernel_on_extension_arrays():
    # test array casting
    storage = pa.array([1, 2, 3, 4], pa.int64())
    arr = pa.ExtensionArray.from_storage(IntegerType(), storage)

    # test that no allocation happens during identity cast
    allocated_before_cast = pa.total_allocated_bytes()
    casted = arr.cast(pa.int64())
    assert pa.total_allocated_bytes() == allocated_before_cast

    cases = [
        (pa.int64(), pa.Int64Array),
        (pa.int32(), pa.Int32Array),
        (pa.int16(), pa.Int16Array),
        (pa.uint64(), pa.UInt64Array),
        (pa.uint32(), pa.UInt32Array),
        (pa.uint16(), pa.UInt16Array)
    ]
    for typ, klass in cases:
        casted = arr.cast(typ)
        assert casted.type == typ
        assert isinstance(casted, klass)

    # test chunked array casting
    arr = pa.chunked_array([arr, arr])
    casted = arr.cast(pa.int16())
    assert casted.type == pa.int16()
    assert isinstance(casted, pa.ChunkedArray)


@pytest.mark.parametrize("data,ty", (
    ([1, 2], pa.int32),
    ([1, 2], pa.int64),
    (["1", "2"], pa.string),
    ([b"1", b"2"], pa.binary),
    ([1.0, 2.0], pa.float32),
    ([1.0, 2.0], pa.float64)
))
def test_casting_to_extension_type(data, ty):
    arr = pa.array(data, ty())
    out = arr.cast(IntegerType())
    assert isinstance(out, pa.ExtensionArray)
    assert out.type == IntegerType()
    assert out.to_pylist() == [1, 2]


def test_cast_between_extension_types():
    array = pa.array([1, 2, 3], pa.int8())

    tiny_int_arr = array.cast(TinyIntType())
    assert tiny_int_arr.type == TinyIntType()

    # Casting between extension types with different storage types is
    # not allowed.
    msg = ("Casting from 'extension<.*?<TinyIntType>>' "
           "to different extension type "
           "'extension<.*?<IntegerType>>' not permitted. "
           "One can first cast to the storage type, "
           "then to the extension type."
           )
    with pytest.raises(TypeError, match=msg):
        tiny_int_arr.cast(IntegerType())
    tiny_int_arr.cast(pa.int64()).cast(IntegerType())

    # Casting between the same extension types is okay
    array = pa.array([b'1' * 16, b'2' * 16], pa.binary(16)).cast(UuidType())
    out = array.cast(UuidType())
    assert out.type == UuidType()

    # Casting between extension types that merely share a storage type
    # still fails; only casts between exactly the same extension type
    # are permitted.
    with pytest.raises(TypeError, match='Casting from *'):
        array.cast(UuidType2())


def test_cast_to_extension_with_extension_storage():
    # Test casting directly, and IntegerType -> IntegerEmbeddedType
    array = pa.array([1, 2, 3], pa.int64())
    array.cast(IntegerEmbeddedType())
    array.cast(IntegerType()).cast(IntegerEmbeddedType())


@pytest.mark.parametrize("data,type_factory", (
    # list<extension>
    ([[1, 2, 3]], lambda: pa.list_(IntegerType())),
    # struct<extension>
    ([{"foo": 1}], lambda: pa.struct([("foo", IntegerType())])),
    # list<struct<extension>>
    ([[{"foo": 1}]], lambda: pa.list_(pa.struct([("foo", IntegerType())]))),
    # struct<list<extension>>
    ([{"foo": [1, 2, 3]}], lambda: pa.struct(
        [("foo", pa.list_(IntegerType()))])),
))
def test_cast_nested_extension_types(data, type_factory):
    ty = type_factory()
    a = pa.array(data)
    b = a.cast(ty)
    assert b.type == ty  # casted to target extension
    assert b.cast(a.type)  # and can cast back


def test_casting_dict_array_to_extension_type():
    storage = pa.array([b"0123456789abcdef"], type=pa.binary(16))
    arr = pa.ExtensionArray.from_storage(UuidType(), storage)
    dict_arr = pa.DictionaryArray.from_arrays(pa.array([0, 0], pa.int32()),
                                              arr)
    out = dict_arr.cast(UuidType())
    assert isinstance(out, pa.ExtensionArray)
    assert out.to_pylist() == [UUID('30313233-3435-3637-3839-616263646566'),
                               UUID('30313233-3435-3637-3839-616263646566')]


def test_concat():
    arr1 = pa.array([1, 2, 3], IntegerType())
    arr2 = pa.array([4, 5, 6], IntegerType())

    result = pa.concat_arrays([arr1, arr2])
    expected = pa.array([1, 2, 3, 4, 5, 6], IntegerType())
    assert result.equals(expected)

    # nested in a struct
    struct_arr1 = pa.StructArray.from_arrays([arr1], names=["a"])
    struct_arr2 = pa.StructArray.from_arrays([arr2], names=["a"])
    result = pa.concat_arrays([struct_arr1, struct_arr2])
    expected = pa.StructArray.from_arrays([expected], names=["a"])
    assert result.equals(expected)


def test_null_storage_type():
    ext_type = AnnotatedType(pa.null(), {"key": "value"})
    storage = pa.array([None] * 10, pa.null())
    arr = pa.ExtensionArray.from_storage(ext_type, storage)
    assert arr.null_count == 10
    arr.validate(full=True)


def example_batch():
    ty = ParamExtType(3)
    storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
    arr = pa.ExtensionArray.from_storage(ty, storage)
    return pa.RecordBatch.from_arrays([arr], ["exts"])


def check_example_batch(batch, *, expect_extension):
    arr = batch.column(0)
    if expect_extension:
        assert isinstance(arr, pa.ExtensionArray)
        assert arr.type.storage_type == pa.binary(3)
        assert arr.storage.to_pylist() == [b"foo", b"bar"]
    else:
        assert arr.type == pa.binary(3)
        assert arr.to_pylist() == [b"foo", b"bar"]
    return arr
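

# The next two tests pin down both sides of the IPC contract: if the
# extension type is not registered when the stream is read back, the
# column comes back as its plain storage array (with the extension
# metadata kept on the schema field); if it is registered, the full
# extension array and type are reconstructed.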


def test_ipc_unregistered():
    batch = example_batch()
    buf = ipc_write_batch(batch)
    del batch

    batch = ipc_read_batch(buf)
    batch.validate(full=True)
    check_example_batch(batch, expect_extension=False)


def test_ipc_registered():
    with registered_extension_type(ParamExtType(1)):
        batch = example_batch()
        buf = ipc_write_batch(batch)
        del batch

        batch = ipc_read_batch(buf)
        batch.validate(full=True)
        arr = check_example_batch(batch, expect_extension=True)
        assert arr.type == ParamExtType(3)


class PeriodArray(pa.ExtensionArray):
    pass


class PeriodType(pa.ExtensionType):
    def __init__(self, freq):
        # attributes need to be set first before calling
        # super init (as that calls serialize)
        self._freq = freq
        pa.ExtensionType.__init__(self, pa.int64(), 'test.period')

    @property
    def freq(self):
        return self._freq

    def __arrow_ext_serialize__(self):
        return "freq={}".format(self.freq).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        serialized = serialized.decode()
        assert serialized.startswith("freq=")
        freq = serialized.split('=')[1]
        return PeriodType(freq)

    def __eq__(self, other):
        if isinstance(other, pa.BaseExtensionType):
            return (isinstance(self, type(other)) and
                    self.freq == other.freq)
        else:
            return NotImplemented
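

# PeriodType is the parametrized case: unlike the b'' payloads above,
# its serialized form actually carries state. A sketch of the round trip
# performed by the serialize/deserialize pair:
#
#     ty = PeriodType('D')
#     payload = ty.__arrow_ext_serialize__()                   # b'freq=D'
#     PeriodType.__arrow_ext_deserialize__(pa.int64(), payload).freq  # 'D'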


class PeriodTypeWithClass(PeriodType):
    def __init__(self, freq):
        PeriodType.__init__(self, freq)

    def __arrow_ext_class__(self):
        return PeriodArray

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        freq = PeriodType.__arrow_ext_deserialize__(
            storage_type, serialized).freq
        return PeriodTypeWithClass(freq)


class PeriodTypeWithToPandasDtype(PeriodType):
    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        freq = PeriodType.__arrow_ext_deserialize__(
            storage_type, serialized).freq
        return PeriodTypeWithToPandasDtype(freq)

    def to_pandas_dtype(self):
        import pandas as pd
        return pd.PeriodDtype(freq=self.freq)


@pytest.fixture(params=[PeriodType('D'),
                        PeriodTypeWithClass('D'),
                        PeriodTypeWithToPandasDtype('D')])
def registered_period_type(request):
    # setup
    period_type = request.param
    period_class = period_type.__arrow_ext_class__()
    pa.register_extension_type(period_type)
    yield period_type, period_class
    # teardown
    try:
        pa.unregister_extension_type('test.period')
    except KeyError:
        pass
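

# The teardown above tolerates KeyError because a test body may itself
# unregister 'test.period' (test_generic_ext_type_ipc_unknown and
# test_parquet_period below both do); the fixture must not fail in that
# case.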


def test_generic_ext_type():
    period_type = PeriodType('D')
    assert period_type.extension_name == "test.period"
    assert period_type.storage_type == pa.int64()
    # default ext_class expected.
    assert period_type.__arrow_ext_class__() == pa.ExtensionArray


def test_generic_ext_type_ipc(registered_period_type):
    period_type, period_class = registered_period_type
    storage = pa.array([1, 2, 3, 4], pa.int64())
    arr = pa.ExtensionArray.from_storage(period_type, storage)
    batch = pa.RecordBatch.from_arrays([arr], ["ext"])
    # check the built array has exactly the expected class
    assert isinstance(arr, period_class)

    buf = ipc_write_batch(batch)
    del batch
    batch = ipc_read_batch(buf)

    result = batch.column(0)
    # check the deserialized array class is the expected one
    assert isinstance(result, period_class)
    assert result.type.extension_name == "test.period"
    assert arr.storage.to_pylist() == [1, 2, 3, 4]

    # we get back an actual PeriodType
    assert isinstance(result.type, PeriodType)
    assert result.type.freq == 'D'
    assert result.type == period_type

    # using a different parametrization than how it was registered
    period_type_H = period_type.__class__('H')
    assert period_type_H.extension_name == "test.period"
    assert period_type_H.freq == 'H'

    arr = pa.ExtensionArray.from_storage(period_type_H, storage)
    batch = pa.RecordBatch.from_arrays([arr], ["ext"])

    buf = ipc_write_batch(batch)
    del batch
    batch = ipc_read_batch(buf)
    result = batch.column(0)
    assert isinstance(result.type, PeriodType)
    assert result.type.freq == 'H'
    assert isinstance(result, period_class)


def test_generic_ext_type_ipc_unknown(registered_period_type):
    period_type, _ = registered_period_type
    storage = pa.array([1, 2, 3, 4], pa.int64())
    arr = pa.ExtensionArray.from_storage(period_type, storage)
    batch = pa.RecordBatch.from_arrays([arr], ["ext"])

    buf = ipc_write_batch(batch)
    del batch

    # unregister the type before reading again => the unknown extension
    # type is read as a plain array (but the metadata on the schema's
    # field is preserved)
    pa.unregister_extension_type('test.period')

    batch = ipc_read_batch(buf)
    result = batch.column(0)

    assert isinstance(result, pa.Int64Array)
    ext_field = batch.schema.field('ext')
    assert ext_field.metadata == {
        b'ARROW:extension:metadata': b'freq=D',
        b'ARROW:extension:name': b'test.period'
    }
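

# The b'ARROW:extension:name' and b'ARROW:extension:metadata' keys
# checked above are the standard Arrow field-metadata convention for
# extension types; they are what let an unregistered extension column
# survive a round trip without losing information.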


def test_generic_ext_type_equality():
    period_type = PeriodType('D')
    assert period_type.extension_name == "test.period"

    period_type2 = PeriodType('D')
    period_type3 = PeriodType('H')
    assert period_type == period_type2
    assert not period_type == period_type3


def test_generic_ext_type_pickling(registered_period_type, pickle_module):
    # GH-36038
    for proto in range(0, pickle_module.HIGHEST_PROTOCOL + 1):
        period_type, _ = registered_period_type
        ser = pickle_module.dumps(period_type, protocol=proto)
        period_type_pickled = pickle_module.loads(ser)
        assert period_type == period_type_pickled


def test_generic_ext_array_pickling(registered_period_type, pickle_module):
    for proto in range(0, pickle_module.HIGHEST_PROTOCOL + 1):
        period_type, _ = registered_period_type
        storage = pa.array([1, 2, 3, 4], pa.int64())
        arr = pa.ExtensionArray.from_storage(period_type, storage)
        ser = pickle_module.dumps(arr, protocol=proto)
        del storage, arr
        arr = pickle_module.loads(ser)
        arr.validate()
        assert isinstance(arr, pa.ExtensionArray)
        assert arr.type == period_type
        assert arr.type.storage_type == pa.int64()
        assert arr.storage.type == pa.int64()
        assert arr.storage.to_pylist() == [1, 2, 3, 4]


def test_generic_ext_type_register(registered_period_type):
    # test that trying to register another kind of type does not segfault
    with pytest.raises(TypeError):
        pa.register_extension_type(pa.string())

    # registering a second time raises KeyError
    period_type = PeriodType('D')
    with pytest.raises(KeyError):
        pa.register_extension_type(period_type)


@pytest.mark.parquet
def test_parquet_period(tmpdir, registered_period_type):
    # Parquet support for primitive extension types
    period_type, period_class = registered_period_type
    storage = pa.array([1, 2, 3, 4], pa.int64())
    arr = pa.ExtensionArray.from_storage(period_type, storage)
    table = pa.table([arr], names=["ext"])

    import pyarrow.parquet as pq

    filename = tmpdir / 'period_extension_type.parquet'
    pq.write_table(table, filename)

    # Stored in parquet as storage type but with extension metadata saved
    # in the serialized arrow schema
    meta = pq.read_metadata(filename)
    assert meta.schema.column(0).physical_type == "INT64"
    assert b"ARROW:schema" in meta.metadata

    import base64
    decoded_schema = base64.b64decode(meta.metadata[b"ARROW:schema"])
    schema = pa.ipc.read_schema(pa.BufferReader(decoded_schema))
    # Since the type could be reconstructed, the extension type metadata is
    # absent.
    assert schema.field("ext").metadata == {}

    # When reading in, properly create extension type if it is registered
    result = pq.read_table(filename)
    result.validate(full=True)
    assert result.schema.field("ext").type == period_type
    assert result.schema.field("ext").metadata == {}
    # Get the exact array class defined by the registered type.
    result_array = result.column("ext").chunk(0)
    assert type(result_array) is period_class

    # When the type is not registered, read in as storage type
    pa.unregister_extension_type(period_type.extension_name)
    result = pq.read_table(filename)
    result.validate(full=True)
    assert result.schema.field("ext").type == pa.int64()
    # The extension metadata is present for roundtripping.
    assert result.schema.field("ext").metadata == {
        b'ARROW:extension:metadata': b'freq=D',
        b'ARROW:extension:name': b'test.period'
    }


@pytest.mark.parquet
def test_parquet_extension_with_nested_storage(tmpdir):
    # Parquet support for extension types with nested storage type
    import pyarrow.parquet as pq

    struct_array = pa.StructArray.from_arrays(
        [pa.array([0, 1], type="int64"), pa.array([4, 5], type="int64")],
        names=["left", "right"])
    list_array = pa.array([[1, 2, 3], [4, 5]], type=pa.list_(pa.int32()))

    mystruct_array = pa.ExtensionArray.from_storage(MyStructType(),
                                                    struct_array)
    mylist_array = pa.ExtensionArray.from_storage(
        MyListType(list_array.type), list_array)

    orig_table = pa.table({'structs': mystruct_array,
                           'lists': mylist_array})
    filename = tmpdir / 'nested_extension_storage.parquet'
    pq.write_table(orig_table, filename)

    # Unregistered
    table = pq.read_table(filename)
    table.validate(full=True)
    assert table.column('structs').type == struct_array.type
    assert table.column('structs').combine_chunks() == struct_array
    assert table.column('lists').type == list_array.type
    assert table.column('lists').combine_chunks() == list_array

    # Registered
    with registered_extension_type(mystruct_array.type):
        with registered_extension_type(mylist_array.type):
            table = pq.read_table(filename)
            table.validate(full=True)
            assert table.column('structs').type == mystruct_array.type
            assert table.column('lists').type == mylist_array.type
            assert table == orig_table

    # Cannot select a subfield of an extension type with
    # a struct storage type.
    with pytest.raises(pa.ArrowInvalid,
                       match='without all of its fields'):
        pq.ParquetFile(filename).read(columns=['structs.left'])


@pytest.mark.parquet
def test_parquet_nested_extension(tmpdir):
    # Parquet support for extension types nested in struct or list
    import pyarrow.parquet as pq

    ext_type = IntegerType()
    storage = pa.array([4, 5, 6, 7], type=pa.int64())
    ext_array = pa.ExtensionArray.from_storage(ext_type, storage)

    # Struct of extensions
    struct_array = pa.StructArray.from_arrays(
        [storage, ext_array],
        names=['ints', 'exts'])

    orig_table = pa.table({'structs': struct_array})
    filename = tmpdir / 'struct_of_ext.parquet'
    pq.write_table(orig_table, filename)

    table = pq.read_table(filename)
    table.validate(full=True)
    assert table.column(0).type == pa.struct({'ints': pa.int64(),
                                              'exts': pa.int64()})
    with registered_extension_type(ext_type):
        table = pq.read_table(filename)
        table.validate(full=True)
        assert table.column(0).type == struct_array.type
        assert table == orig_table

    # List of extensions
    list_array = pa.ListArray.from_arrays([0, 1, None, 3], ext_array)

    orig_table = pa.table({'lists': list_array})
    filename = tmpdir / 'list_of_ext.parquet'
    pq.write_table(orig_table, filename)

    table = pq.read_table(filename)
    table.validate(full=True)
    assert table.column(0).type == pa.list_(pa.int64())
    with registered_extension_type(ext_type):
        table = pq.read_table(filename)
        table.validate(full=True)
        assert table.column(0).type == list_array.type
        assert table == orig_table

    # Large list of extensions
    list_array = pa.LargeListArray.from_arrays([0, 1, None, 3], ext_array)

    orig_table = pa.table({'lists': list_array})
    filename = tmpdir / 'list_of_ext.parquet'
    pq.write_table(orig_table, filename)

    table = pq.read_table(filename)
    table.validate(full=True)
    assert table.column(0).type == pa.large_list(pa.int64())
    with registered_extension_type(ext_type):
        table = pq.read_table(filename)
        table.validate(full=True)
        assert table.column(0).type == list_array.type
        assert table == orig_table


@pytest.mark.parquet
def test_parquet_extension_nested_in_extension(tmpdir):
    # Parquet support for extension<list<extension>>
    import pyarrow.parquet as pq

    inner_ext_type = IntegerType()
    inner_storage = pa.array([4, 5, 6, 7], type=pa.int64())
    inner_ext_array = pa.ExtensionArray.from_storage(inner_ext_type,
                                                     inner_storage)

    list_array = pa.ListArray.from_arrays([0, 1, None, 3], inner_ext_array)
    mylist_array = pa.ExtensionArray.from_storage(
        MyListType(list_array.type), list_array)

    orig_table = pa.table({'lists': mylist_array})
    filename = tmpdir / 'ext_of_list_of_ext.parquet'
    pq.write_table(orig_table, filename)

    table = pq.read_table(filename)
    assert table.column(0).type == pa.list_(pa.int64())
    with registered_extension_type(mylist_array.type):
        with registered_extension_type(inner_ext_array.type):
            table = pq.read_table(filename)
            assert table.column(0).type == mylist_array.type
            assert table == orig_table


def test_to_numpy():
    period_type = PeriodType('D')
    storage = pa.array([1, 2, 3, 4], pa.int64())
    arr = pa.ExtensionArray.from_storage(period_type, storage)

    expected = storage.to_numpy()
    result = arr.to_numpy()
    np.testing.assert_array_equal(result, expected)

    result = np.asarray(arr)
    np.testing.assert_array_equal(result, expected)

    # chunked array
    a1 = pa.chunked_array([arr, arr])
    a2 = pa.chunked_array([arr, arr], type=period_type)
    expected = np.hstack([expected, expected])

    for charr in [a1, a2]:
        assert charr.type == period_type
        for result in [np.asarray(charr), charr.to_numpy()]:
            assert result.dtype == np.int64
            np.testing.assert_array_equal(result, expected)

    # zero chunks
    charr = pa.chunked_array([], type=period_type)
    assert charr.type == period_type

    for result in [np.asarray(charr), charr.to_numpy()]:
        assert result.dtype == np.int64
        np.testing.assert_array_equal(result, np.array([], dtype='int64'))


def test_empty_take():
    # https://issues.apache.org/jira/browse/ARROW-13474
    ext_type = IntegerType()
    storage = pa.array([], type=pa.int64())
    empty_arr = pa.ExtensionArray.from_storage(ext_type, storage)

    result = empty_arr.filter(pa.array([], pa.bool_()))
    assert len(result) == 0
    assert result.equals(empty_arr)

    result = empty_arr.take(pa.array([], pa.int32()))
    assert len(result) == 0
    assert result.equals(empty_arr)


@pytest.mark.parametrize("data,ty", (
    ([1, 2, 3], IntegerType),
    (["cat", "dog", "horse"], LabelType)
))
@pytest.mark.parametrize(
    "into", ["to_numpy", pytest.param("to_pandas", marks=pytest.mark.pandas)])
def test_extension_array_to_numpy_pandas(data, ty, into):
    storage = pa.array(data)
    ext_arr = pa.ExtensionArray.from_storage(ty(), storage)
    offsets = pa.array([0, 1, 2, 3])
    list_arr = pa.ListArray.from_arrays(offsets, ext_arr)
    result = getattr(list_arr, into)(zero_copy_only=False)

    list_arr_storage_type = list_arr.cast(pa.list_(ext_arr.type.storage_type))
    expected = getattr(list_arr_storage_type, into)(zero_copy_only=False)
    if into == "to_pandas":
        assert result.equals(expected)
    else:
        assert np.array_equal(result, expected)
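

# A plausible reading of the constructor tests below: pa.array() can
# target an extension type directly, building (or converting) the input
# values as the storage type and then wrapping them, so plain lists,
# numpy integer arrays, and safely-castable float arrays all produce the
# same extension array.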


def test_array_constructor():
    ext_type = IntegerType()
    storage = pa.array([1, 2, 3], type=pa.int64())
    expected = pa.ExtensionArray.from_storage(ext_type, storage)

    result = pa.array([1, 2, 3], type=IntegerType())
    assert result.equals(expected)

    result = pa.array(np.array([1, 2, 3]), type=IntegerType())
    assert result.equals(expected)

    result = pa.array(np.array([1.0, 2.0, 3.0]), type=IntegerType())
    assert result.equals(expected)


@pytest.mark.pandas
def test_array_constructor_from_pandas():
    import pandas as pd

    ext_type = IntegerType()
    storage = pa.array([1, 2, 3], type=pa.int64())
    expected = pa.ExtensionArray.from_storage(ext_type, storage)

    result = pa.array(pd.Series([1, 2, 3]), type=IntegerType())
    assert result.equals(expected)

    result = pa.array(
        pd.Series([1, 2, 3], dtype="category"), type=IntegerType()
    )
    assert result.equals(expected)


@pytest.mark.cython
def test_cpp_extension_in_python(tmpdir):
    from .test_cython import (
        setup_template, compiler_opts, test_ld_path, test_util, here)
    with tmpdir.as_cwd():
        # Set up temporary workspace
        pyx_file = 'extensions.pyx'
        shutil.copyfile(os.path.join(here, pyx_file),
                        os.path.join(str(tmpdir), pyx_file))
        # Create setup.py file
        setup_code = setup_template.format(pyx_file=pyx_file,
                                           compiler_opts=compiler_opts,
                                           test_ld_path=test_ld_path)
        with open('setup.py', 'w') as f:
            f.write(setup_code)

        subprocess_env = test_util.get_modified_env_with_pythonpath()

        # Compile extension module
        subprocess.check_call([sys.executable, 'setup.py',
                               'build_ext', '--inplace'],
                              env=subprocess_env)

    sys.path.insert(0, str(tmpdir))
    mod = __import__('extensions')

    uuid_type = mod._make_uuid_type()
    assert uuid_type.extension_name == "uuid"
    assert uuid_type.storage_type == pa.binary(16)

    array = mod._make_uuid_array()
    assert array.type == uuid_type
    assert array.to_pylist() == [b'abcdefghijklmno0', b'0onmlkjihgfedcba']
    assert array[0].as_py() == b'abcdefghijklmno0'
    assert array[1].as_py() == b'0onmlkjihgfedcba'

    buf = ipc_write_batch(pa.RecordBatch.from_arrays([array], ["uuid"]))

    batch = ipc_read_batch(buf)
    reconstructed_array = batch.column(0)
    assert reconstructed_array.type == uuid_type
    assert reconstructed_array == array


def test_tensor_type():
    tensor_type = pa.fixed_shape_tensor(pa.int8(), [2, 3])
    assert tensor_type.extension_name == "arrow.fixed_shape_tensor"
    assert tensor_type.storage_type == pa.list_(pa.int8(), 6)
    assert tensor_type.shape == [2, 3]
    assert tensor_type.dim_names is None
    assert tensor_type.permutation is None

    tensor_type = pa.fixed_shape_tensor(pa.float64(), [2, 2, 3],
                                        permutation=[0, 2, 1])
    assert tensor_type.extension_name == "arrow.fixed_shape_tensor"
    assert tensor_type.storage_type == pa.list_(pa.float64(), 12)
    assert tensor_type.shape == [2, 2, 3]
    assert tensor_type.dim_names is None
    assert tensor_type.permutation == [0, 2, 1]

    tensor_type = pa.fixed_shape_tensor(pa.bool_(), [2, 2, 3],
                                        dim_names=['C', 'H', 'W'])
    assert tensor_type.extension_name == "arrow.fixed_shape_tensor"
    assert tensor_type.storage_type == pa.list_(pa.bool_(), 12)
    assert tensor_type.shape == [2, 2, 3]
    assert tensor_type.dim_names == ['C', 'H', 'W']
    assert tensor_type.permutation is None
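

# fixed_shape_tensor stores each tensor as a fixed_size_list whose
# length is the product of the shape, in row-major (C) order; the tests
# below exercise the numpy round trip of that layout. For example:
#
#     ty = pa.fixed_shape_tensor(pa.float32(), [2, 3])
#     ty.storage_type   # -> fixed_size_list<item: float>[6]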


def test_tensor_class_methods():
    tensor_type = pa.fixed_shape_tensor(pa.float32(), [2, 3])
    storage = pa.array([[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]],
                       pa.list_(pa.float32(), 6))
    arr = pa.ExtensionArray.from_storage(tensor_type, storage)
    expected = np.array(
        [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]], dtype=np.float32)
    result = arr.to_numpy_ndarray()
    np.testing.assert_array_equal(result, expected)

    expected = np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float32)
    result = arr[:1].to_numpy_ndarray()
    np.testing.assert_array_equal(result, expected)

    arr = np.array(
        [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]],
        dtype=np.float32, order="C")
    tensor_array_from_numpy = pa.FixedShapeTensorArray.from_numpy_ndarray(arr)
    assert isinstance(tensor_array_from_numpy.type, pa.FixedShapeTensorType)
    assert tensor_array_from_numpy.type.value_type == pa.float32()
    assert tensor_array_from_numpy.type.shape == [2, 3]

    arr = np.array(
        [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]],
        dtype=np.float32, order="F")
    with pytest.raises(ValueError, match="C-style contiguous segment"):
        pa.FixedShapeTensorArray.from_numpy_ndarray(arr)

    tensor_type = pa.fixed_shape_tensor(pa.int8(), [2, 2, 3],
                                        permutation=[0, 2, 1])
    storage = pa.array([[1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6]],
                       pa.list_(pa.int8(), 12))
    arr = pa.ExtensionArray.from_storage(tensor_type, storage)
    with pytest.raises(ValueError, match="non-permuted tensors"):
        arr.to_numpy_ndarray()


@pytest.mark.parametrize("tensor_type", (
    pa.fixed_shape_tensor(pa.int8(), [2, 2, 3]),
    pa.fixed_shape_tensor(pa.int8(), [2, 2, 3], permutation=[0, 2, 1]),
    pa.fixed_shape_tensor(pa.int8(), [2, 2, 3], dim_names=['C', 'H', 'W'])
))
def test_tensor_type_ipc(tensor_type):
    storage = pa.array([[1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6]],
                       pa.list_(pa.int8(), 12))
    arr = pa.ExtensionArray.from_storage(tensor_type, storage)
    batch = pa.RecordBatch.from_arrays([arr], ["ext"])

    # check the built array has exactly the expected class
    tensor_class = tensor_type.__arrow_ext_class__()
    assert isinstance(arr, tensor_class)

    buf = ipc_write_batch(batch)
    del batch
    batch = ipc_read_batch(buf)

    result = batch.column(0)
    # check the deserialized array class is the expected one
    assert isinstance(result, tensor_class)
    assert result.type.extension_name == "arrow.fixed_shape_tensor"
    assert arr.storage.to_pylist() == [[1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6]]

    # we get back an actual TensorType
    assert isinstance(result.type, pa.FixedShapeTensorType)
    assert result.type.value_type == pa.int8()
    assert result.type.shape == [2, 2, 3]


def test_tensor_type_equality():
    tensor_type = pa.fixed_shape_tensor(pa.int8(), [2, 2, 3])
    assert tensor_type.extension_name == "arrow.fixed_shape_tensor"

    tensor_type2 = pa.fixed_shape_tensor(pa.int8(), [2, 2, 3])
    tensor_type3 = pa.fixed_shape_tensor(pa.uint8(), [2, 2, 3])
    assert tensor_type == tensor_type2
    assert not tensor_type == tensor_type3


@pytest.mark.pandas
def test_extension_to_pandas_storage_type(registered_period_type):
    period_type, _ = registered_period_type
    np_arr = np.array([1, 2, 3, 4], dtype='i8')
    storage = pa.array([1, 2, 3, 4], pa.int64())
    arr = pa.ExtensionArray.from_storage(period_type, storage)

    if isinstance(period_type, PeriodTypeWithToPandasDtype):
        pandas_dtype = period_type.to_pandas_dtype()
    else:
        pandas_dtype = np_arr.dtype

    # Test arrays
    result = arr.to_pandas()
    assert result.dtype == pandas_dtype

    # Test chunked arrays
    chunked_arr = pa.chunked_array([arr])
    result = chunked_arr.to_numpy()
    assert result.dtype == np_arr.dtype

    result = chunked_arr.to_pandas()
    assert result.dtype == pandas_dtype

    # Test Table.to_pandas
    data = [
        pa.array([1, 2, 3, 4]),
        pa.array(['foo', 'bar', None, None]),
        pa.array([True, None, True, False]),
        arr
    ]
    my_schema = pa.schema([('f0', pa.int8()),
                           ('f1', pa.string()),
                           ('f2', pa.bool_()),
                           ('ext', period_type)])
    table = pa.Table.from_arrays(data, schema=my_schema)
    result = table.to_pandas()
    assert result["ext"].dtype == pandas_dtype

    import pandas as pd
    # Skip for pandas 2.0.x, see: GH-36821... kept conditional on 2.1.0+
    # (see GH-35821 in the original comment)
    if Version(pd.__version__) >= Version("2.1.0"):
        # Check the usage of types_mapper
        result = table.to_pandas(types_mapper=pd.ArrowDtype)
        assert isinstance(result["ext"].dtype, pd.ArrowDtype)


def test_tensor_type_is_picklable(pickle_module):
    # GH-35599

    expected_type = pa.fixed_shape_tensor(pa.int32(), (2, 2))
    result = pickle_module.loads(pickle_module.dumps(expected_type))

    assert result == expected_type

    arr = [[1, 2, 3, 4], [10, 20, 30, 40], [100, 200, 300, 400]]
    storage = pa.array(arr, pa.list_(pa.int32(), 4))
    expected_arr = pa.ExtensionArray.from_storage(expected_type, storage)
    result = pickle_module.loads(pickle_module.dumps(expected_arr))

    assert result == expected_arr


@pytest.mark.parametrize(("tensor_type", "text"), [
    (
        pa.fixed_shape_tensor(pa.int8(), [2, 2, 3]),
        'fixed_shape_tensor[value_type=int8, shape=[2,2,3]]'
    ),
    (
        pa.fixed_shape_tensor(pa.int32(), [2, 2, 3], permutation=[0, 2, 1]),
        'fixed_shape_tensor[value_type=int32, shape=[2,2,3], permutation=[0,2,1]]'
    ),
    (
        pa.fixed_shape_tensor(pa.int64(), [2, 2, 3], dim_names=['C', 'H', 'W']),
        'fixed_shape_tensor[value_type=int64, shape=[2,2,3], dim_names=[C,H,W]]'
    )
])
def test_tensor_type_str(tensor_type, text):
    tensor_type_str = tensor_type.__str__()
    assert text in tensor_type_str


def test_legacy_int_type():
    with pytest.warns(FutureWarning, match="PyExtensionType is deprecated"):
        ext_ty = LegacyIntType()
    arr = pa.array([1, 2, 3], type=ext_ty.storage_type)
    ext_arr = pa.ExtensionArray.from_storage(ext_ty, arr)
    batch = pa.RecordBatch.from_arrays([ext_arr], names=['ext'])
    buf = ipc_write_batch(batch)

    with pytest.warns((RuntimeWarning, FutureWarning)):
        batch = ipc_read_batch(buf)
    assert isinstance(batch.column(0).type, pa.UnknownExtensionType)

    with enabled_auto_load():
        with pytest.warns(FutureWarning, match="PyExtensionType is deprecated"):
            batch = ipc_read_batch(buf)
        assert isinstance(batch.column(0).type, LegacyIntType)
        assert batch.column(0) == ext_arr
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_feather.py
ADDED
@@ -0,0 +1,863 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import io
import os
import sys
import tempfile
import pytest
import hypothesis as h
import hypothesis.strategies as st

import numpy as np

import pyarrow as pa
import pyarrow.tests.strategies as past
from pyarrow.feather import (read_feather, write_feather, read_table,
                             FeatherDataset)

try:
    from pandas.testing import assert_frame_equal
    import pandas as pd
    import pyarrow.pandas_compat
except ImportError:
    pass


@pytest.fixture(scope='module')
def datadir(base_datadir):
    return base_datadir / 'feather'


def random_path(prefix='feather_'):
    return tempfile.mktemp(prefix=prefix)


@pytest.fixture(scope="module", params=[1, 2])
def version(request):
    yield request.param


@pytest.fixture(scope="module", params=[None, "uncompressed", "lz4", "zstd"])
def compression(request):
    if request.param in ['lz4', 'zstd'] and not pa.Codec.is_available(
            request.param):
        pytest.skip(f'{request.param} is not available')
    yield request.param


TEST_FILES = None


def setup_module(module):
    global TEST_FILES
    TEST_FILES = []


def teardown_module(module):
    for path in TEST_FILES:
        try:
            os.remove(path)
        except os.error:
            pass


@pytest.mark.pandas
def test_file_not_exist():
    with pytest.raises(pa.ArrowIOError):
        read_feather('test_invalid_file')


def _check_pandas_roundtrip(df, expected=None, path=None,
                            columns=None, use_threads=False,
                            version=None, compression=None,
                            compression_level=None):
    if path is None:
        path = random_path()

    if version is None:
        version = 2

    TEST_FILES.append(path)
    write_feather(df, path, compression=compression,
                  compression_level=compression_level, version=version)

    if not os.path.exists(path):
        raise Exception('file not written')

    result = read_feather(path, columns, use_threads=use_threads)

    if expected is None:
        expected = df

    assert_frame_equal(result, expected)


def _check_arrow_roundtrip(table, path=None, compression=None):
    if path is None:
        path = random_path()

    TEST_FILES.append(path)
    write_feather(table, path, compression=compression)
    if not os.path.exists(path):
        raise Exception('file not written')

    result = read_table(path)
    assert result.equals(table)


def _assert_error_on_write(df, exc, path=None, version=2):
    # check that we are raising the exception
    # on writing

    if path is None:
        path = random_path()

    TEST_FILES.append(path)

    def f():
        write_feather(df, path, version=version)

    pytest.raises(exc, f)


def test_dataset(version):
    num_values = (100, 100)
    num_files = 5
    paths = [random_path() for i in range(num_files)]
    data = {
        "col_" + str(i): np.random.randn(num_values[0])
        for i in range(num_values[1])
    }
    table = pa.table(data)

    TEST_FILES.extend(paths)
    for index, path in enumerate(paths):
        rows = (
            index * (num_values[0] // num_files),
            (index + 1) * (num_values[0] // num_files),
        )

        write_feather(table[rows[0]: rows[1]], path, version=version)

    data = FeatherDataset(paths).read_table()
    assert data.equals(table)


@pytest.mark.pandas
def test_float_no_nulls(version):
    data = {}
    numpy_dtypes = ['f4', 'f8']
    num_values = 100

    for dtype in numpy_dtypes:
        values = np.random.randn(num_values)
        data[dtype] = values.astype(dtype)

    df = pd.DataFrame(data)
    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
def test_read_table(version):
    num_values = (100, 100)
    path = random_path()

    TEST_FILES.append(path)

    values = np.random.randint(0, 100, size=num_values)
    columns = ['col_' + str(i) for i in range(100)]
    table = pa.Table.from_arrays(values, columns)

    write_feather(table, path, version=version)

    result = read_table(path)
    assert result.equals(table)

    # Test without memory mapping
    result = read_table(path, memory_map=False)
    assert result.equals(table)

    result = read_feather(path, memory_map=False)
    assert_frame_equal(table.to_pandas(), result)


@pytest.mark.pandas
def test_use_threads(version):
    # ARROW-14470
    num_values = (10, 10)
    path = random_path()

    TEST_FILES.append(path)

    values = np.random.randint(0, 10, size=num_values)
    columns = ['col_' + str(i) for i in range(10)]
    table = pa.Table.from_arrays(values, columns)

    write_feather(table, path, version=version)

    result = read_feather(path)
    assert_frame_equal(table.to_pandas(), result)

    # Test read_feather with use_threads=False
    result = read_feather(path, use_threads=False)
    assert_frame_equal(table.to_pandas(), result)

    # Test read_table with use_threads=False
    result = read_table(path, use_threads=False)
    assert result.equals(table)


@pytest.mark.pandas
def test_float_nulls(version):
    num_values = 100

    path = random_path()
    TEST_FILES.append(path)

    null_mask = np.random.randint(0, 10, size=num_values) < 3
    dtypes = ['f4', 'f8']
    expected_cols = []

    arrays = []
    for name in dtypes:
        values = np.random.randn(num_values).astype(name)
        arrays.append(pa.array(values, mask=null_mask))

        values[null_mask] = np.nan

        expected_cols.append(values)

    table = pa.table(arrays, names=dtypes)
    _check_arrow_roundtrip(table)

    df = table.to_pandas()
    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
def test_integer_no_nulls(version):
    data, arr = {}, []

    numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
                    'u1', 'u2', 'u4', 'u8']
    num_values = 100

    for dtype in numpy_dtypes:
        values = np.random.randint(0, 100, size=num_values)
        data[dtype] = values.astype(dtype)
        arr.append(values.astype(dtype))

    df = pd.DataFrame(data)
    _check_pandas_roundtrip(df, version=version)

    table = pa.table(arr, names=numpy_dtypes)
    _check_arrow_roundtrip(table)


@pytest.mark.pandas
def test_platform_numpy_integers(version):
    data = {}

    numpy_dtypes = ['longlong']
    num_values = 100

    for dtype in numpy_dtypes:
        values = np.random.randint(0, 100, size=num_values)
        data[dtype] = values.astype(dtype)

    df = pd.DataFrame(data)
    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
def test_integer_with_nulls(version):
    # pandas requires upcast to float dtype
    path = random_path()
    TEST_FILES.append(path)

    int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
    num_values = 100

    arrays = []
    null_mask = np.random.randint(0, 10, size=num_values) < 3
    expected_cols = []
    for name in int_dtypes:
        values = np.random.randint(0, 100, size=num_values)
        arrays.append(pa.array(values, mask=null_mask))

        expected = values.astype('f8')
        expected[null_mask] = np.nan

        expected_cols.append(expected)

    table = pa.table(arrays, names=int_dtypes)
    _check_arrow_roundtrip(table)

    df = table.to_pandas()
    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
def test_boolean_no_nulls(version):
    num_values = 100

    np.random.seed(0)

    df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
def test_boolean_nulls(version):
    # pandas requires upcast to object dtype
    path = random_path()
    TEST_FILES.append(path)

    num_values = 100
    np.random.seed(0)

    mask = np.random.randint(0, 10, size=num_values) < 3
    values = np.random.randint(0, 10, size=num_values) < 5

    table = pa.table([pa.array(values, mask=mask)], names=['bools'])
    _check_arrow_roundtrip(table)

    df = table.to_pandas()
    _check_pandas_roundtrip(df, version=version)


def test_buffer_bounds_error(version):
    # ARROW-1676
    path = random_path()
    TEST_FILES.append(path)

    for i in range(16, 256):
        table = pa.Table.from_arrays(
            [pa.array([None] + list(range(i)), type=pa.float64())],
            names=["arr"]
        )
        _check_arrow_roundtrip(table)


def test_boolean_object_nulls(version):
    repeats = 100
    table = pa.Table.from_arrays(
        [np.array([False, None, True] * repeats, dtype=object)],
        names=["arr"]
    )
    _check_arrow_roundtrip(table)


@pytest.mark.pandas
def test_delete_partial_file_on_error(version):
    if sys.platform == 'win32':
        pytest.skip('Windows hangs on to file handle for some reason')

    class CustomClass:
        pass

    # strings will fail
    df = pd.DataFrame(
        {
            'numbers': range(5),
            'strings': [b'foo', None, 'bar', CustomClass(), np.nan]},
        columns=['numbers', 'strings'])

    path = random_path()
    try:
        write_feather(df, path, version=version)
    except Exception:
        pass

    assert not os.path.exists(path)


@pytest.mark.pandas
def test_strings(version):
    repeats = 1000

    # Mixed bytes, unicode, strings coerced to binary
    values = [b'foo', None, 'bar', 'qux', np.nan]
    df = pd.DataFrame({'strings': values * repeats})

    ex_values = [b'foo', None, b'bar', b'qux', None]
    expected = pd.DataFrame({'strings': ex_values * repeats})
    _check_pandas_roundtrip(df, expected, version=version)

    # embedded nulls are ok
    values = ['foo', None, 'bar', 'qux', None]
    df = pd.DataFrame({'strings': values * repeats})
    expected = pd.DataFrame({'strings': values * repeats})
    _check_pandas_roundtrip(df, expected, version=version)

    values = ['foo', None, 'bar', 'qux', np.nan]
    df = pd.DataFrame({'strings': values * repeats})
    ex_values = ['foo', None, 'bar', 'qux', None]
    expected = pd.DataFrame({'strings': ex_values * repeats})
    _check_pandas_roundtrip(df, expected, version=version)


@pytest.mark.pandas
def test_empty_strings(version):
    df = pd.DataFrame({'strings': [''] * 10})
    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
def test_all_none(version):
    df = pd.DataFrame({'all_none': [None] * 10})
    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
def test_all_null_category(version):
    # ARROW-1188
    df = pd.DataFrame({"A": (1, 2, 3), "B": (None, None, None)})
    df = df.assign(B=df.B.astype("category"))
    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
def test_multithreaded_read(version):
    data = {'c{}'.format(i): [''] * 10
            for i in range(100)}
    df = pd.DataFrame(data)
    _check_pandas_roundtrip(df, use_threads=True, version=version)


@pytest.mark.pandas
def test_nan_as_null(version):
    # Create a nan that is not numpy.nan
    values = np.array(['foo', np.nan, np.nan * 2, 'bar'] * 10)
    df = pd.DataFrame({'strings': values})
    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
def test_category(version):
    repeats = 1000
    values = ['foo', None, 'bar', 'qux', np.nan]
    df = pd.DataFrame({'strings': values * repeats})
    df['strings'] = df['strings'].astype('category')

    values = ['foo', None, 'bar', 'qux', None]
    expected = pd.DataFrame({'strings': pd.Categorical(values * repeats)})
    _check_pandas_roundtrip(df, expected, version=version)


@pytest.mark.pandas
def test_timestamp(version):
    df = pd.DataFrame({'naive': pd.date_range('2016-03-28', periods=10)})
    df['with_tz'] = (df.naive.dt.tz_localize('utc')
                     .dt.tz_convert('America/Los_Angeles'))

    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
def test_timestamp_with_nulls(version):
    df = pd.DataFrame({'test': [pd.Timestamp(2016, 1, 1),
                                None,
                                pd.Timestamp(2016, 1, 3)]})
    df['with_tz'] = df.test.dt.tz_localize('utc')

    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
@pytest.mark.xfail(reason="not supported", raises=TypeError)
def test_timedelta_with_nulls_v1():
    df = pd.DataFrame({'test': [pd.Timedelta('1 day'),
                                None,
                                pd.Timedelta('3 day')]})
    _check_pandas_roundtrip(df, version=1)


@pytest.mark.pandas
def test_timedelta_with_nulls():
    df = pd.DataFrame({'test': [pd.Timedelta('1 day'),
                                None,
                                pd.Timedelta('3 day')]})
    _check_pandas_roundtrip(df, version=2)


@pytest.mark.pandas
def test_out_of_float64_timestamp_with_nulls(version):
    df = pd.DataFrame(
        {'test': pd.DatetimeIndex([1451606400000000001,
                                   None, 14516064000030405])})
    df['with_tz'] = df.test.dt.tz_localize('utc')
    _check_pandas_roundtrip(df, version=version)


@pytest.mark.pandas
def test_non_string_columns(version):
    df = pd.DataFrame({0: [1, 2, 3, 4],
                       1: [True, False, True, False]})
    expected = df

    if version == 1:
        expected = df.rename(columns=str)
    _check_pandas_roundtrip(df, expected, version=version)


@pytest.mark.pandas
@pytest.mark.skipif(not os.path.supports_unicode_filenames,
                    reason='unicode filenames not supported')
def test_unicode_filename(version):
    # GH #209
    name = (b'Besa_Kavaj\xc3\xab.feather').decode('utf-8')
    df = pd.DataFrame({'foo': [1, 2, 3, 4]})
    _check_pandas_roundtrip(df, path=random_path(prefix=name),
                            version=version)


@pytest.mark.pandas
def test_read_columns(version):
    df = pd.DataFrame({
        'foo': [1, 2, 3, 4],
        'boo': [5, 6, 7, 8],
        'woo': [1, 3, 5, 7]
    })
    expected = df[['boo', 'woo']]

    _check_pandas_roundtrip(df, expected, version=version,
                            columns=['boo', 'woo'])


def test_overwritten_file(version):
    path = random_path()
    TEST_FILES.append(path)

    num_values = 100
    np.random.seed(0)

    values = np.random.randint(0, 10, size=num_values)

    table = pa.table({'ints': values})
    write_feather(table, path)

    table = pa.table({'more_ints': values[0:num_values//2]})
    _check_arrow_roundtrip(table, path=path)


@pytest.mark.pandas
def test_filelike_objects(version):
    buf = io.BytesIO()

    # the copy makes it non-strided
    df = pd.DataFrame(np.arange(12).reshape(4, 3),
                      columns=['a', 'b', 'c']).copy()
    write_feather(df, buf, version=version)

    buf.seek(0)

    result = read_feather(buf)
    assert_frame_equal(result, df)


@pytest.mark.pandas
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
def test_sparse_dataframe(version):
    if not pa.pandas_compat._pandas_api.has_sparse:
        pytest.skip("version of pandas does not support SparseDataFrame")
    # GH #221
    data = {'A': [0, 1, 2],
            'B': [1, 0, 1]}
    df = pd.DataFrame(data).to_sparse(fill_value=1)
    expected = df.to_dense()
    _check_pandas_roundtrip(df, expected, version=version)


@pytest.mark.pandas
def test_duplicate_columns_pandas():

    # https://github.com/wesm/feather/issues/53
    # not currently able to handle duplicate columns
    df = pd.DataFrame(np.arange(12).reshape(4, 3),
                      columns=list('aaa')).copy()
    _assert_error_on_write(df, ValueError)


def test_duplicate_columns():
    # only works for version 2
    table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'a', 'b'])
    _check_arrow_roundtrip(table)
    _assert_error_on_write(table, ValueError, version=1)


@pytest.mark.pandas
def test_unsupported():
    # https://github.com/wesm/feather/issues/240
    # serializing actual python objects

    # custom python objects
    class A:
        pass

    df = pd.DataFrame({'a': [A(), A()]})
    _assert_error_on_write(df, ValueError)

    # non-strings
    df = pd.DataFrame({'a': ['a', 1, 2.0]})
    _assert_error_on_write(df, TypeError)


@pytest.mark.pandas
def test_v2_set_chunksize():
    df = pd.DataFrame({'A': np.arange(1000)})
    table = pa.table(df)

    buf = io.BytesIO()
    write_feather(table, buf, chunksize=250, version=2)

    result = buf.getvalue()

    ipc_file = pa.ipc.open_file(pa.BufferReader(result))
    assert ipc_file.num_record_batches == 4
    assert len(ipc_file.get_batch(0)) == 250


@pytest.mark.pandas
@pytest.mark.lz4
@pytest.mark.snappy
@pytest.mark.zstd
def test_v2_compression_options():
    df = pd.DataFrame({'A': np.arange(1000)})

    cases = [
        # compression, compression_level
        ('uncompressed', None),
        ('lz4', None),
        ('lz4', 1),
        ('lz4', 12),
        ('zstd', 1),
        ('zstd', 10)
    ]

    for compression, compression_level in cases:
        _check_pandas_roundtrip(df, compression=compression,
                                compression_level=compression_level)

    buf = io.BytesIO()

    # Trying to compress with V1
    with pytest.raises(
            ValueError,
            match="Feather V1 files do not support compression option"):
        write_feather(df, buf, compression='lz4', version=1)

    # Trying to set chunksize with V1
    with pytest.raises(
            ValueError,
            match="Feather V1 files do not support chunksize option"):
        write_feather(df, buf, chunksize=4096, version=1)

    # Unsupported compressor
    with pytest.raises(ValueError,
                       match='compression="snappy" not supported'):
        write_feather(df, buf, compression='snappy')


def test_v2_lz4_default_compression():
    # ARROW-8750: Make sure that the compression=None option selects lz4 if
    # it's available
    if not pa.Codec.is_available('lz4_frame'):
        pytest.skip("LZ4 compression support is not built in C++")

    # some highly compressible data
    t = pa.table([np.repeat(0, 100000)], names=['f0'])

    buf = io.BytesIO()
    write_feather(t, buf)
    default_result = buf.getvalue()

    buf = io.BytesIO()
    write_feather(t, buf, compression='uncompressed')
    uncompressed_result = buf.getvalue()

    assert len(default_result) < len(uncompressed_result)


def test_v1_unsupported_types():
    table = pa.table([pa.array([[1, 2, 3], [], None])], names=['f0'])

    buf = io.BytesIO()
    with pytest.raises(TypeError,
                       match=("Unsupported Feather V1 type: "
                              "list<item: int64>. "
                              "Use V2 format to serialize all Arrow types.")):
        write_feather(table, buf, version=1)


@pytest.mark.slow
@pytest.mark.pandas
def test_large_dataframe(version):
    df = pd.DataFrame({'A': np.arange(400000000)})
    _check_pandas_roundtrip(df, version=version)


@pytest.mark.large_memory
@pytest.mark.pandas
def test_chunked_binary_error_message():
    # ARROW-3058: As Feather does not yet support chunked columns, we at least
    # make sure it's clear to the user what is going on

    # 2^31 + 1 bytes
    values = [b'x'] + [
        b'x' * (1 << 20)
    ] * 2 * (1 << 10)
    df = pd.DataFrame({'byte_col': values})

    # Works fine with version 2
    buf = io.BytesIO()
    write_feather(df, buf, version=2)
    result = read_feather(pa.BufferReader(buf.getvalue()))
    assert_frame_equal(result, df)

    with pytest.raises(ValueError, match="'byte_col' exceeds 2GB maximum "
                       "capacity of a Feather binary column. This restriction "
                       "may be lifted in the future"):
        write_feather(df, io.BytesIO(), version=1)


def test_feather_without_pandas(tempdir, version):
    # ARROW-8345
    table = pa.table([pa.array([1, 2, 3])], names=['f0'])
    path = str(tempdir / "data.feather")
    _check_arrow_roundtrip(table, path)


@pytest.mark.pandas
def test_read_column_selection(version):
    # ARROW-8641
    df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=['a', 'b', 'c'])

    # select columns as string names or integer indices
    _check_pandas_roundtrip(
        df, columns=['a', 'c'], expected=df[['a', 'c']], version=version)
    _check_pandas_roundtrip(
        df, columns=[0, 2], expected=df[['a', 'c']], version=version)

    # different order is followed
    _check_pandas_roundtrip(
        df, columns=['b', 'a'], expected=df[['b', 'a']], version=version)
    _check_pandas_roundtrip(
        df, columns=[1, 0], expected=df[['b', 'a']], version=version)


def test_read_column_duplicated_selection(tempdir, version):
    # duplicated columns in the column selection
    table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'b', 'c'])
    path = str(tempdir / "data.feather")
    write_feather(table, path, version=version)

    expected = pa.table([[1, 2, 3], [4, 5, 6], [1, 2, 3]],
                        names=['a', 'b', 'a'])
    for col_selection in [['a', 'b', 'a'], [0, 1, 0]]:
        result = read_table(path, columns=col_selection)
        assert result.equals(expected)


def test_read_column_duplicated_in_file(tempdir):
    # duplicated columns in feather file (only works for feather v2)
    table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'b', 'a'])
    path = str(tempdir / "data.feather")
    write_feather(table, path, version=2)

    # no selection works fine
    result = read_table(path)
    assert result.equals(table)

    # selection with indices works
    result = read_table(path, columns=[0, 2])
    assert result.column_names == ['a', 'a']

    # selection with column names errors
    with pytest.raises(ValueError):
        read_table(path, columns=['a', 'b'])


def test_nested_types(compression):
    # https://issues.apache.org/jira/browse/ARROW-8860
    table = pa.table({'col': pa.StructArray.from_arrays(
        [[0, 1, 2], [1, 2, 3]], names=["f1", "f2"])})
    _check_arrow_roundtrip(table, compression=compression)

    table = pa.table({'col': pa.array([[1, 2], [3, 4]])})
    _check_arrow_roundtrip(table, compression=compression)

    table = pa.table({'col': pa.array([[[1, 2], [3, 4]], [[5, 6], None]])})
    _check_arrow_roundtrip(table, compression=compression)


@h.given(past.all_tables, st.sampled_from(["uncompressed", "lz4", "zstd"]))
def test_roundtrip(table, compression):
    _check_arrow_roundtrip(table, compression=compression)


@pytest.mark.lz4
def test_feather_v017_experimental_compression_backward_compatibility(datadir):
    # ARROW-11163 - ensure newer pyarrow versions can read the old feather
    # files from version 0.17.0 with experimental compression support (before
    # it was officially added to IPC format in 1.0.0)

    # file generated with:
    # table = pa.table({'a': range(5)})
    # from pyarrow import feather
    # feather.write_feather(
    #     table, "v0.17.0.version.2-compression.lz4.feather",
    #     compression="lz4", version=2)
    expected = pa.table({'a': range(5)})
    result = read_table(datadir / "v0.17.0.version.2-compression.lz4.feather")
    assert result.equals(expected)


@pytest.mark.pandas
def test_preserve_index_pandas(version):
    df = pd.DataFrame({'a': [1, 2, 3]}, index=['a', 'b', 'c'])

    if version == 1:
        expected = df.reset_index(drop=True).rename(columns=str)
    else:
        expected = df

    _check_pandas_roundtrip(df, expected, version=version)


@pytest.mark.pandas
def test_feather_datetime_resolution_arrow_to_pandas(tempdir):
    # ARROW-17192 - ensure timestamp_as_object=True (together with other
    # **kwargs) can be passed in read_feather to to_pandas.

    from datetime import datetime
    df = pd.DataFrame({"date": [
        datetime.fromisoformat("1654-01-01"),
        datetime.fromisoformat("1920-01-01"), ],
    })
    write_feather(df, tempdir / "test_resolution.feather")

    expected_0 = datetime.fromisoformat("1654-01-01")
    expected_1 = datetime.fromisoformat("1920-01-01")

    result = read_feather(tempdir / "test_resolution.feather",
                          timestamp_as_object=True)

    assert expected_0 == result['date'][0]
    assert expected_1 == result['date'][1]
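The roundtrip pattern these helpers exercise reduces to a few calls against the public pyarrow.feather API. A minimal sketch, with an illustrative file name and assuming the zstd codec is built in:

import pyarrow as pa
from pyarrow import feather

table = pa.table({'a': [1, 2, 3]})
# Feather V2 (the default) is the Arrow IPC file format and supports
# lz4/zstd compression; V1 is the legacy format with a restricted type set.
feather.write_feather(table, 'example.feather', compression='zstd')
roundtripped = feather.read_table('example.feather')
assert roundtripped.equals(table)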
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_filesystem.py
ADDED
@@ -0,0 +1,75 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import pyarrow as pa
from pyarrow import filesystem

import os
import pytest


def test_filesystem_deprecated():
    with pytest.warns(FutureWarning):
        filesystem.LocalFileSystem()

    with pytest.warns(FutureWarning):
        filesystem.LocalFileSystem.get_instance()


def test_filesystem_deprecated_toplevel():
    with pytest.warns(FutureWarning):
        pa.localfs

    with pytest.warns(FutureWarning):
        pa.FileSystem

    with pytest.warns(FutureWarning):
        pa.LocalFileSystem

    with pytest.warns(FutureWarning):
        pa.HadoopFileSystem


def test_resolve_uri():
    uri = "file:///home/user/myfile.parquet"
    fs, path = filesystem.resolve_filesystem_and_path(uri)
    assert isinstance(fs, filesystem.LocalFileSystem)
    assert path == "/home/user/myfile.parquet"


def test_resolve_local_path():
    for uri in ['/home/user/myfile.parquet',
                'myfile.parquet',
                'my # file ? parquet',
                'C:/Windows/myfile.parquet',
                r'C:\\Windows\\myfile.parquet',
                ]:
        fs, path = filesystem.resolve_filesystem_and_path(uri)
        assert isinstance(fs, filesystem.LocalFileSystem)
        assert path == uri


@pytest.mark.filterwarnings("ignore:pyarrow.filesystem.LocalFileSystem")
def test_resolve_home_directory():
    uri = '~/myfile.parquet'
    fs, path = filesystem.resolve_filesystem_and_path(uri)
    assert isinstance(fs, filesystem.LocalFileSystem)
    assert path == os.path.expanduser(uri)

    local_fs = filesystem.LocalFileSystem()
    fs, path = filesystem.resolve_filesystem_and_path(uri, local_fs)
    assert path == os.path.expanduser(uri)
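The pyarrow.filesystem module exercised above is the deprecated legacy API; its replacement lives in pyarrow.fs. A minimal sketch of the equivalent URI resolution with the new API (not part of this test file):

from pyarrow import fs

# FileSystem.from_uri returns the filesystem implementation together with
# the path portion of the URI, analogous to resolve_filesystem_and_path.
local, path = fs.FileSystem.from_uri("file:///home/user/myfile.parquet")
assert isinstance(local, fs.LocalFileSystem)
assert path == "/home/user/myfile.parquet"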
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_flight.py
ADDED
@@ -0,0 +1,2367 @@
1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
# or more contributor license agreements. See the NOTICE file
|
3 |
+
# distributed with this work for additional information
|
4 |
+
# regarding copyright ownership. The ASF licenses this file
|
5 |
+
# to you under the Apache License, Version 2.0 (the
|
6 |
+
# "License"); you may not use this file except in compliance
|
7 |
+
# with the License. You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing,
|
12 |
+
# software distributed under the License is distributed on an
|
13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
# KIND, either express or implied. See the License for the
|
15 |
+
# specific language governing permissions and limitations
|
16 |
+
# under the License.
|
17 |
+
|
18 |
+
import ast
|
19 |
+
import base64
|
20 |
+
import itertools
|
21 |
+
import os
|
22 |
+
import pathlib
|
23 |
+
import signal
|
24 |
+
import struct
|
25 |
+
import tempfile
|
26 |
+
import threading
|
27 |
+
import time
|
28 |
+
import traceback
|
29 |
+
import json
|
30 |
+
|
31 |
+
import numpy as np
|
32 |
+
import pytest
|
33 |
+
import pyarrow as pa
|
34 |
+
|
35 |
+
from pyarrow.lib import IpcReadOptions, tobytes
|
36 |
+
from pyarrow.util import find_free_port
|
37 |
+
from pyarrow.tests import util
|
38 |
+
|
39 |
+
try:
|
40 |
+
from pyarrow import flight
|
41 |
+
from pyarrow.flight import (
|
42 |
+
FlightClient, FlightServerBase,
|
43 |
+
ServerAuthHandler, ClientAuthHandler,
|
44 |
+
ServerMiddleware, ServerMiddlewareFactory,
|
45 |
+
ClientMiddleware, ClientMiddlewareFactory,
|
46 |
+
)
|
47 |
+
except ImportError:
|
48 |
+
flight = None
|
49 |
+
FlightClient, FlightServerBase = object, object
|
50 |
+
ServerAuthHandler, ClientAuthHandler = object, object
|
51 |
+
ServerMiddleware, ServerMiddlewareFactory = object, object
|
52 |
+
ClientMiddleware, ClientMiddlewareFactory = object, object
|
53 |
+
|
54 |
+
# Marks all of the tests in this module
|
55 |
+
# Ignore these with pytest ... -m 'not flight'
|
56 |
+
pytestmark = pytest.mark.flight
|
57 |
+
|
58 |
+
|
59 |
+
def test_import():
|
60 |
+
# So we see the ImportError somewhere
|
61 |
+
import pyarrow.flight # noqa
|
62 |
+
|
63 |
+
|
64 |
+
def resource_root():
|
65 |
+
"""Get the path to the test resources directory."""
|
66 |
+
if not os.environ.get("ARROW_TEST_DATA"):
|
67 |
+
raise RuntimeError("Test resources not found; set "
|
68 |
+
"ARROW_TEST_DATA to <repo root>/testing/data")
|
69 |
+
return pathlib.Path(os.environ["ARROW_TEST_DATA"]) / "flight"
|
70 |
+
|
71 |
+
|
72 |
+
def read_flight_resource(path):
|
73 |
+
"""Get the contents of a test resource file."""
|
74 |
+
root = resource_root()
|
75 |
+
if not root:
|
76 |
+
return None
|
77 |
+
try:
|
78 |
+
with (root / path).open("rb") as f:
|
79 |
+
return f.read()
|
80 |
+
except FileNotFoundError:
|
81 |
+
raise RuntimeError(
|
82 |
+
"Test resource {} not found; did you initialize the "
|
83 |
+
"test resource submodule?\n{}".format(root / path,
|
84 |
+
traceback.format_exc()))
|
85 |
+
|
86 |
+
|
87 |
+
def example_tls_certs():
|
88 |
+
"""Get the paths to test TLS certificates."""
|
89 |
+
return {
|
90 |
+
"root_cert": read_flight_resource("root-ca.pem"),
|
91 |
+
"certificates": [
|
92 |
+
flight.CertKeyPair(
|
93 |
+
cert=read_flight_resource("cert0.pem"),
|
94 |
+
key=read_flight_resource("cert0.key"),
|
95 |
+
),
|
96 |
+
flight.CertKeyPair(
|
97 |
+
cert=read_flight_resource("cert1.pem"),
|
98 |
+
key=read_flight_resource("cert1.key"),
|
99 |
+
),
|
100 |
+
]
|
101 |
+
}
|
102 |
+
|
103 |
+
|
104 |
+
def simple_ints_table():
|
105 |
+
data = [
|
106 |
+
pa.array([-10, -5, 0, 5, 10])
|
107 |
+
]
|
108 |
+
return pa.Table.from_arrays(data, names=['some_ints'])
|
109 |
+
|
110 |
+
|
111 |
+
def simple_dicts_table():
|
112 |
+
dict_values = pa.array(["foo", "baz", "quux"], type=pa.utf8())
|
113 |
+
data = [
|
114 |
+
pa.chunked_array([
|
115 |
+
pa.DictionaryArray.from_arrays([1, 0, None], dict_values),
|
116 |
+
pa.DictionaryArray.from_arrays([2, 1], dict_values)
|
117 |
+
])
|
118 |
+
]
|
119 |
+
return pa.Table.from_arrays(data, names=['some_dicts'])
|
120 |
+
|
121 |
+
|
122 |
+
def multiple_column_table():
|
123 |
+
return pa.Table.from_arrays([pa.array(['foo', 'bar', 'baz', 'qux']),
|
124 |
+
pa.array([1, 2, 3, 4])],
|
125 |
+
names=['a', 'b'])
|
126 |
+
|
127 |
+
|
128 |
+
class ConstantFlightServer(FlightServerBase):
|
129 |
+
"""A Flight server that always returns the same data.
|
130 |
+
|
131 |
+
See ARROW-4796: this server implementation will segfault if Flight
|
132 |
+
does not properly hold a reference to the Table object.
|
133 |
+
"""
|
134 |
+
|
135 |
+
CRITERIA = b"the expected criteria"
|
136 |
+
|
137 |
+
def __init__(self, location=None, options=None, **kwargs):
|
138 |
+
super().__init__(location, **kwargs)
|
139 |
+
# Ticket -> Table
|
140 |
+
self.table_factories = {
|
141 |
+
b'ints': simple_ints_table,
|
142 |
+
b'dicts': simple_dicts_table,
|
143 |
+
b'multi': multiple_column_table,
|
144 |
+
}
|
145 |
+
self.options = options
|
146 |
+
|
147 |
+
def list_flights(self, context, criteria):
|
148 |
+
if criteria == self.CRITERIA:
|
149 |
+
yield flight.FlightInfo(
|
150 |
+
pa.schema([]),
|
151 |
+
flight.FlightDescriptor.for_path('/foo'),
|
152 |
+
[],
|
153 |
+
-1, -1
|
154 |
+
)
|
155 |
+
|
156 |
+
def do_get(self, context, ticket):
|
157 |
+
# Return a fresh table, so that Flight is the only one keeping a
|
158 |
+
# reference.
|
159 |
+
table = self.table_factories[ticket.ticket]()
|
160 |
+
return flight.RecordBatchStream(table, options=self.options)
|
161 |
+
|
162 |
+
|
163 |
+
class MetadataFlightServer(FlightServerBase):
|
164 |
+
"""A Flight server that numbers incoming/outgoing data."""
|
165 |
+
|
166 |
+
def __init__(self, options=None, **kwargs):
|
167 |
+
super().__init__(**kwargs)
|
168 |
+
self.options = options
|
169 |
+
|
170 |
+
def do_get(self, context, ticket):
|
171 |
+
data = [
|
172 |
+
pa.array([-10, -5, 0, 5, 10])
|
173 |
+
]
|
174 |
+
table = pa.Table.from_arrays(data, names=['a'])
|
175 |
+
return flight.GeneratorStream(
|
176 |
+
table.schema,
|
177 |
+
self.number_batches(table),
|
178 |
+
options=self.options)
|
179 |
+
|
180 |
+
def do_put(self, context, descriptor, reader, writer):
|
181 |
+
counter = 0
|
182 |
+
expected_data = [-10, -5, 0, 5, 10]
|
183 |
+
while True:
|
184 |
+
try:
|
185 |
+
batch, buf = reader.read_chunk()
|
186 |
+
assert batch.equals(pa.RecordBatch.from_arrays(
|
187 |
+
[pa.array([expected_data[counter]])],
|
188 |
+
['a']
|
189 |
+
))
|
190 |
+
assert buf is not None
|
191 |
+
client_counter, = struct.unpack('<i', buf.to_pybytes())
|
192 |
+
assert counter == client_counter
|
193 |
+
writer.write(struct.pack('<i', counter))
|
194 |
+
counter += 1
|
195 |
+
except StopIteration:
|
196 |
+
return
|
197 |
+
|
198 |
+
@staticmethod
|
199 |
+
def number_batches(table):
|
200 |
+
for idx, batch in enumerate(table.to_batches()):
|
201 |
+
buf = struct.pack('<i', idx)
|
202 |
+
yield batch, buf
|
203 |
+
|
204 |
+
|
205 |
+
class EchoFlightServer(FlightServerBase):
|
206 |
+
"""A Flight server that returns the last data uploaded."""
|
207 |
+
|
208 |
+
def __init__(self, location=None, expected_schema=None, **kwargs):
|
209 |
+
super().__init__(location, **kwargs)
|
210 |
+
self.last_message = None
|
211 |
+
self.expected_schema = expected_schema
|
212 |
+
|
213 |
+
def do_get(self, context, ticket):
|
214 |
+
return flight.RecordBatchStream(self.last_message)
|
215 |
+
|
216 |
+
def do_put(self, context, descriptor, reader, writer):
|
217 |
+
if self.expected_schema:
|
218 |
+
assert self.expected_schema == reader.schema
|
219 |
+
self.last_message = reader.read_all()
|
220 |
+
|
221 |
+
def do_exchange(self, context, descriptor, reader, writer):
|
222 |
+
for chunk in reader:
|
223 |
+
pass
|
224 |
+
|
225 |
+
|
226 |
+
class EchoStreamFlightServer(EchoFlightServer):
|
227 |
+
"""An echo server that streams individual record batches."""
|
228 |
+
|
229 |
+
def do_get(self, context, ticket):
|
230 |
+
return flight.GeneratorStream(
|
231 |
+
self.last_message.schema,
|
232 |
+
self.last_message.to_batches(max_chunksize=1024))
|
233 |
+
|
234 |
+
def list_actions(self, context):
|
235 |
+
return []
|
236 |
+
|
237 |
+
def do_action(self, context, action):
|
238 |
+
if action.type == "who-am-i":
|
239 |
+
return [context.peer_identity(), context.peer().encode("utf-8")]
|
240 |
+
raise NotImplementedError
|
241 |
+
|
242 |
+
|
243 |
+
class GetInfoFlightServer(FlightServerBase):
|
244 |
+
"""A Flight server that tests GetFlightInfo."""
|
245 |
+
|
246 |
+
def get_flight_info(self, context, descriptor):
|
247 |
+
return flight.FlightInfo(
|
248 |
+
pa.schema([('a', pa.int32())]),
|
249 |
+
descriptor,
|
250 |
+
[
|
251 |
+
flight.FlightEndpoint(b'', ['grpc://test']),
|
252 |
+
flight.FlightEndpoint(
|
253 |
+
b'',
|
254 |
+
[flight.Location.for_grpc_tcp('localhost', 5005)],
|
255 |
+
),
|
256 |
+
],
|
257 |
+
-1,
|
258 |
+
-1,
|
259 |
+
)
|
260 |
+
|
261 |
+
def get_schema(self, context, descriptor):
|
262 |
+
info = self.get_flight_info(context, descriptor)
|
263 |
+
return flight.SchemaResult(info.schema)
|
264 |
+
|
265 |
+
|
266 |
+
class ListActionsFlightServer(FlightServerBase):
|
267 |
+
"""A Flight server that tests ListActions."""
|
268 |
+
|
269 |
+
@classmethod
|
270 |
+
def expected_actions(cls):
|
271 |
+
return [
|
272 |
+
("action-1", "description"),
|
273 |
+
("action-2", ""),
|
274 |
+
flight.ActionType("action-3", "more detail"),
|
275 |
+
]
|
276 |
+
|
277 |
+
def list_actions(self, context):
|
278 |
+
yield from self.expected_actions()
|
279 |
+
|
280 |
+
|
281 |
+
class ListActionsErrorFlightServer(FlightServerBase):
|
282 |
+
"""A Flight server that tests ListActions."""
|
283 |
+
|
284 |
+
def list_actions(self, context):
|
285 |
+
yield ("action-1", "")
|
286 |
+
yield "foo"
|
287 |
+
|
288 |
+
|
289 |
+
class CheckTicketFlightServer(FlightServerBase):
|
290 |
+
"""A Flight server that compares the given ticket to an expected value."""
|
291 |
+
|
292 |
+
def __init__(self, expected_ticket, location=None, **kwargs):
|
293 |
+
super().__init__(location, **kwargs)
|
294 |
+
self.expected_ticket = expected_ticket
|
295 |
+
|
296 |
+
def do_get(self, context, ticket):
|
297 |
+
assert self.expected_ticket == ticket.ticket
|
298 |
+
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
|
299 |
+
table = pa.Table.from_arrays(data1, names=['a'])
|
300 |
+
return flight.RecordBatchStream(table)
|
301 |
+
|
302 |
+
def do_put(self, context, descriptor, reader):
|
303 |
+
self.last_message = reader.read_all()
|
304 |
+
|
305 |
+
|
306 |
+


class InvalidStreamFlightServer(FlightServerBase):
    """A Flight server that tries to return messages with differing schemas."""

    schema = pa.schema([('a', pa.int32())])

    def do_get(self, context, ticket):
        data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
        data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())]
        assert data1[0].type != data2[0].type
        table1 = pa.Table.from_arrays(data1, names=['a'])
        table2 = pa.Table.from_arrays(data2, names=['a'])
        assert table1.schema == self.schema

        return flight.GeneratorStream(self.schema, [table1, table2])


class NeverSendsDataFlightServer(FlightServerBase):
    """A Flight server that never actually yields data."""

    schema = pa.schema([('a', pa.int32())])

    def do_get(self, context, ticket):
        if ticket.ticket == b'yield_data':
            # Check that the server handler will ignore empty tables
            # up to a certain extent
            data = [
                self.schema.empty_table(),
                self.schema.empty_table(),
                pa.RecordBatch.from_arrays([range(5)], schema=self.schema),
            ]
            return flight.GeneratorStream(self.schema, data)
        return flight.GeneratorStream(
            self.schema, itertools.repeat(self.schema.empty_table()))


class SlowFlightServer(FlightServerBase):
    """A Flight server that delays its responses to test timeouts."""

    def do_get(self, context, ticket):
        return flight.GeneratorStream(pa.schema([('a', pa.int32())]),
                                      self.slow_stream())

    def do_action(self, context, action):
        time.sleep(0.5)
        return []

    @staticmethod
    def slow_stream():
        data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
        yield pa.Table.from_arrays(data1, names=['a'])
        # The second message should never get sent; the client should
        # cancel before we send this
        time.sleep(10)
        yield pa.Table.from_arrays(data1, names=['a'])


class ErrorFlightServer(FlightServerBase):
    """A Flight server that uses all the Flight-specific errors."""

    @staticmethod
    def error_cases():
        return {
            "internal": flight.FlightInternalError,
            "timedout": flight.FlightTimedOutError,
            "cancel": flight.FlightCancelledError,
            "unauthenticated": flight.FlightUnauthenticatedError,
            "unauthorized": flight.FlightUnauthorizedError,
            "notimplemented": NotImplementedError,
            "invalid": pa.ArrowInvalid,
            "key": KeyError,
        }

    def do_action(self, context, action):
        error_cases = ErrorFlightServer.error_cases()
        if action.type in error_cases:
            raise error_cases[action.type]("foo")
        elif action.type == "protobuf":
            err_msg = b'this is an error message'
            raise flight.FlightUnauthorizedError("foo", err_msg)
        raise NotImplementedError

    def list_flights(self, context, criteria):
        yield flight.FlightInfo(
            pa.schema([]),
            flight.FlightDescriptor.for_path('/foo'),
            [],
            -1, -1
        )
        raise flight.FlightInternalError("foo")

    def do_put(self, context, descriptor, reader, writer):
        if descriptor.command == b"internal":
            raise flight.FlightInternalError("foo")
        elif descriptor.command == b"timedout":
            raise flight.FlightTimedOutError("foo")
        elif descriptor.command == b"cancel":
            raise flight.FlightCancelledError("foo")
        elif descriptor.command == b"unauthenticated":
            raise flight.FlightUnauthenticatedError("foo")
        elif descriptor.command == b"unauthorized":
            raise flight.FlightUnauthorizedError("foo")
        elif descriptor.command == b"protobuf":
            err_msg = b'this is an error message'
            raise flight.FlightUnauthorizedError("foo", err_msg)


class ExchangeFlightServer(FlightServerBase):
    """A server for testing DoExchange."""

    def __init__(self, options=None, **kwargs):
        super().__init__(**kwargs)
        self.options = options

    def do_exchange(self, context, descriptor, reader, writer):
        if descriptor.descriptor_type != flight.DescriptorType.CMD:
            raise pa.ArrowInvalid("Must provide a command descriptor")
        elif descriptor.command == b"echo":
            return self.exchange_echo(context, reader, writer)
        elif descriptor.command == b"get":
            return self.exchange_do_get(context, reader, writer)
        elif descriptor.command == b"put":
            return self.exchange_do_put(context, reader, writer)
        elif descriptor.command == b"transform":
            return self.exchange_transform(context, reader, writer)
        else:
            raise pa.ArrowInvalid(
                "Unknown command: {}".format(descriptor.command))

    def exchange_do_get(self, context, reader, writer):
        """Emulate DoGet with DoExchange."""
        data = pa.Table.from_arrays([
            pa.array(range(0, 10 * 1024))
        ], names=["a"])
        writer.begin(data.schema)
        writer.write_table(data)

    def exchange_do_put(self, context, reader, writer):
        """Emulate DoPut with DoExchange."""
        num_batches = 0
        for chunk in reader:
            if not chunk.data:
                raise pa.ArrowInvalid("All chunks must have data.")
            num_batches += 1
        writer.write_metadata(str(num_batches).encode("utf-8"))

    def exchange_echo(self, context, reader, writer):
        """Run a simple echo server."""
        started = False
        for chunk in reader:
            if not started and chunk.data:
                writer.begin(chunk.data.schema, options=self.options)
                started = True
            if chunk.app_metadata and chunk.data:
                writer.write_with_metadata(chunk.data, chunk.app_metadata)
            elif chunk.app_metadata:
                writer.write_metadata(chunk.app_metadata)
            elif chunk.data:
                writer.write_batch(chunk.data)
            else:
                assert False, "Should not happen"

    def exchange_transform(self, context, reader, writer):
        """Sum rows in an uploaded table."""
        for field in reader.schema:
            if not pa.types.is_integer(field.type):
                raise pa.ArrowInvalid("Invalid field: " + repr(field))
        table = reader.read_all()
        sums = [0] * table.num_rows
        for column in table:
            for row, value in enumerate(column):
                sums[row] += value.as_py()
        result = pa.Table.from_arrays([pa.array(sums)], names=["sum"])
        writer.begin(result.schema)
        writer.write_table(result)
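

# A minimal client-side sketch (not exercised by the tests below) of how
# ExchangeFlightServer's "transform" command could be driven. The helper
# name and `port` argument are illustrative only; `port` is assumed to be
# the port of a running ExchangeFlightServer.
def _example_exchange_transform(port):
    with flight.connect(('localhost', port)) as client:
        descriptor = flight.FlightDescriptor.for_command(b"transform")
        writer, reader = client.do_exchange(descriptor)
        table = pa.table({'a': pa.array([1, 2, 3], type=pa.int64())})
        with writer:
            writer.begin(table.schema)
            writer.write_table(table)
            writer.done_writing()
            # The server replies with a single "sum" column of per-row sums
            return reader.read_all()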


class HttpBasicServerAuthHandler(ServerAuthHandler):
    """An example implementation of HTTP basic authentication."""

    def __init__(self, creds):
        super().__init__()
        self.creds = creds

    def authenticate(self, outgoing, incoming):
        buf = incoming.read()
        auth = flight.BasicAuth.deserialize(buf)
        if auth.username not in self.creds:
            raise flight.FlightUnauthenticatedError("unknown user")
        if self.creds[auth.username] != auth.password:
            raise flight.FlightUnauthenticatedError("wrong password")
        outgoing.write(tobytes(auth.username))

    def is_valid(self, token):
        if not token:
            raise flight.FlightUnauthenticatedError("token not provided")
        if token not in self.creds:
            raise flight.FlightUnauthenticatedError("unknown user")
        return token


class HttpBasicClientAuthHandler(ClientAuthHandler):
    """An example implementation of HTTP basic authentication."""

    def __init__(self, username, password):
        super().__init__()
        self.basic_auth = flight.BasicAuth(username, password)
        self.token = None

    def authenticate(self, outgoing, incoming):
        auth = self.basic_auth.serialize()
        outgoing.write(auth)
        self.token = incoming.read()

    def get_token(self):
        return self.token


class TokenServerAuthHandler(ServerAuthHandler):
    """An example implementation of authentication via handshake."""

    def __init__(self, creds):
        super().__init__()
        self.creds = creds

    def authenticate(self, outgoing, incoming):
        username = incoming.read()
        password = incoming.read()
        if username in self.creds and self.creds[username] == password:
            outgoing.write(base64.b64encode(b'secret:' + username))
        else:
            raise flight.FlightUnauthenticatedError(
                "invalid username/password")

    def is_valid(self, token):
        token = base64.b64decode(token)
        if not token.startswith(b'secret:'):
            raise flight.FlightUnauthenticatedError("invalid token")
        return token[7:]


class TokenClientAuthHandler(ClientAuthHandler):
    """An example implementation of authentication via handshake."""

    def __init__(self, username, password):
        super().__init__()
        self.username = username
        self.password = password
        self.token = b''

    def authenticate(self, outgoing, incoming):
        outgoing.write(self.username)
        outgoing.write(self.password)
        self.token = incoming.read()

    def get_token(self):
        return self.token
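

# The handshake flow implemented by the token handlers above: the client
# writes its username and password over the handshake stream, the server
# replies with an opaque token (here, base64 of b'secret:' + username),
# and the client then presents that token, via get_token(), on every
# subsequent call, where the server's is_valid() checks the prefix and
# returns the peer identity.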


class NoopAuthHandler(ServerAuthHandler):
    """A no-op auth handler."""

    def authenticate(self, outgoing, incoming):
        """Do nothing."""

    def is_valid(self, token):
        """
        Return an empty string (returning None causes a TypeError).
        """
        return ""


def case_insensitive_header_lookup(headers, lookup_key):
    """Look up the value of the given key in the given headers.

    The key lookup is case-insensitive.
    """
    for key in headers:
        if key.lower() == lookup_key.lower():
            return headers.get(key)
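

# For example, with middleware-style headers that map each key to a list
# of values:
#
#   case_insensitive_header_lookup(
#       {'Authorization': ['Bearer token1234']}, 'authorization')
#   # -> ['Bearer token1234']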


class ClientHeaderAuthMiddlewareFactory(ClientMiddlewareFactory):
    """ClientMiddlewareFactory that creates ClientHeaderAuthMiddleware."""

    def __init__(self):
        self.call_credential = []

    def start_call(self, info):
        return ClientHeaderAuthMiddleware(self)

    def set_call_credential(self, call_credential):
        self.call_credential = call_credential


class ClientHeaderAuthMiddleware(ClientMiddleware):
    """
    ClientMiddleware that extracts the authorization header
    from the server.

    This is an example of a ClientMiddleware that can extract
    the bearer token authorization header from a server with
    HTTP header authentication enabled.

    Parameters
    ----------
    factory : ClientHeaderAuthMiddlewareFactory
        This factory is used to set call credentials if an
        authorization header is found in the headers from the server.
    """

    def __init__(self, factory):
        self.factory = factory

    def received_headers(self, headers):
        auth_header = case_insensitive_header_lookup(headers, 'Authorization')
        self.factory.set_call_credential([
            b'authorization',
            auth_header[0].encode("utf-8")])


class HeaderAuthServerMiddlewareFactory(ServerMiddlewareFactory):
    """Validates incoming username and password."""

    def start_call(self, info, headers):
        auth_header = case_insensitive_header_lookup(
            headers,
            'Authorization'
        )
        values = auth_header[0].split(' ')
        token = ''
        error_message = 'Invalid credentials'

        if values[0] == 'Basic':
            decoded = base64.b64decode(values[1])
            pair = decoded.decode("utf-8").split(':')
            if not (pair[0] == 'test' and pair[1] == 'password'):
                raise flight.FlightUnauthenticatedError(error_message)
            token = 'token1234'
        elif values[0] == 'Bearer':
            token = values[1]
            if token != 'token1234':
                raise flight.FlightUnauthenticatedError(error_message)
        else:
            raise flight.FlightUnauthenticatedError(error_message)

        return HeaderAuthServerMiddleware(token)
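

# A small sketch of the header this factory accepts on first contact: a
# standard HTTP Basic credential. The helper name is illustrative; it is
# not used by the tests below.
def _example_basic_auth_header(username, password):
    credentials = base64.b64encode(
        username.encode("utf-8") + b':' + password.encode("utf-8"))
    return b'Basic ' + credentials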


class HeaderAuthServerMiddleware(ServerMiddleware):
    """A ServerMiddleware that transports incoming username and password."""

    def __init__(self, token):
        self.token = token

    def sending_headers(self):
        return {'authorization': 'Bearer ' + self.token}


class HeaderAuthFlightServer(FlightServerBase):
    """A Flight server that tests with basic token authentication."""

    def do_action(self, context, action):
        middleware = context.get_middleware("auth")
        if middleware:
            auth_header = case_insensitive_header_lookup(
                middleware.sending_headers(), 'Authorization')
            values = auth_header.split(' ')
            return [values[1].encode("utf-8")]
        raise flight.FlightUnauthenticatedError(
            'No token auth middleware found.')


class ArbitraryHeadersServerMiddlewareFactory(ServerMiddlewareFactory):
    """A ServerMiddlewareFactory that transports arbitrary headers."""

    def start_call(self, info, headers):
        return ArbitraryHeadersServerMiddleware(headers)


class ArbitraryHeadersServerMiddleware(ServerMiddleware):
    """A ServerMiddleware that transports arbitrary headers."""

    def __init__(self, incoming):
        self.incoming = incoming

    def sending_headers(self):
        return self.incoming


class ArbitraryHeadersFlightServer(FlightServerBase):
    """A Flight server that tests multiple arbitrary headers."""

    def do_action(self, context, action):
        middleware = context.get_middleware("arbitrary-headers")
        if middleware:
            headers = middleware.sending_headers()
            header_1 = case_insensitive_header_lookup(
                headers,
                'test-header-1'
            )
            header_2 = case_insensitive_header_lookup(
                headers,
                'test-header-2'
            )
            value1 = header_1[0].encode("utf-8")
            value2 = header_2[0].encode("utf-8")
            return [value1, value2]
        raise flight.FlightServerError("No headers middleware found")


class HeaderServerMiddleware(ServerMiddleware):
    """Expose a per-call value to the RPC method body."""

    def __init__(self, special_value):
        self.special_value = special_value


class HeaderServerMiddlewareFactory(ServerMiddlewareFactory):
    """Expose a per-call hard-coded value to the RPC method body."""

    def start_call(self, info, headers):
        return HeaderServerMiddleware("right value")


class HeaderFlightServer(FlightServerBase):
    """Echo back the per-call hard-coded value."""

    def do_action(self, context, action):
        middleware = context.get_middleware("test")
        if middleware:
            return [middleware.special_value.encode()]
        return [b""]


class MultiHeaderFlightServer(FlightServerBase):
    """Test sending/receiving multiple (binary-valued) headers."""

    def do_action(self, context, action):
        middleware = context.get_middleware("test")
        headers = repr(middleware.client_headers).encode("utf-8")
        return [headers]


class SelectiveAuthServerMiddlewareFactory(ServerMiddlewareFactory):
    """Deny access to certain methods based on a header."""

    def start_call(self, info, headers):
        if info.method == flight.FlightMethod.LIST_ACTIONS:
            # No auth needed
            return

        token = headers.get("x-auth-token")
        if not token:
            raise flight.FlightUnauthenticatedError("No token")

        token = token[0]
        if token != "password":
            raise flight.FlightUnauthenticatedError("Invalid token")

        return HeaderServerMiddleware(token)


class SelectiveAuthClientMiddlewareFactory(ClientMiddlewareFactory):
    def start_call(self, info):
        return SelectiveAuthClientMiddleware()


class SelectiveAuthClientMiddleware(ClientMiddleware):
    def sending_headers(self):
        return {
            "x-auth-token": "password",
        }


class RecordingServerMiddlewareFactory(ServerMiddlewareFactory):
    """Record what methods were called."""

    def __init__(self):
        super().__init__()
        self.methods = []

    def start_call(self, info, headers):
        self.methods.append(info.method)
        return None


class RecordingClientMiddlewareFactory(ClientMiddlewareFactory):
    """Record what methods were called."""

    def __init__(self):
        super().__init__()
        self.methods = []

    def start_call(self, info):
        self.methods.append(info.method)
        return None


class MultiHeaderClientMiddlewareFactory(ClientMiddlewareFactory):
    """Test sending/receiving multiple (binary-valued) headers."""

    def __init__(self):
        # Read in test_middleware_multi_header below.
        # The middleware instance will update this value.
        self.last_headers = {}

    def start_call(self, info):
        return MultiHeaderClientMiddleware(self)


class MultiHeaderClientMiddleware(ClientMiddleware):
    """Test sending/receiving multiple (binary-valued) headers."""

    EXPECTED = {
        "x-text": ["foo", "bar"],
        "x-binary-bin": [b"\x00", b"\x01"],
        # ARROW-16606: ensure mixed-case headers are accepted
        "x-MIXED-case": ["baz"],
        b"x-other-MIXED-case": ["baz"],
    }

    def __init__(self, factory):
        self.factory = factory

    def sending_headers(self):
        return self.EXPECTED

    def received_headers(self, headers):
        # Let the test code know what the last set of headers we
        # received were.
        self.factory.last_headers.update(headers)
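

# Note on EXPECTED above: following the gRPC metadata convention that
# Flight uses, binary-valued headers need a key ending in "-bin" (hence
# "x-binary-bin"), while text-valued headers carry str values; header
# keys are matched case-insensitively on the wire.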


class MultiHeaderServerMiddlewareFactory(ServerMiddlewareFactory):
    """Test sending/receiving multiple (binary-valued) headers."""

    def start_call(self, info, headers):
        return MultiHeaderServerMiddleware(headers)


class MultiHeaderServerMiddleware(ServerMiddleware):
    """Test sending/receiving multiple (binary-valued) headers."""

    def __init__(self, client_headers):
        self.client_headers = client_headers

    def sending_headers(self):
        return MultiHeaderClientMiddleware.EXPECTED


class LargeMetadataFlightServer(FlightServerBase):
    """Regression test for ARROW-13253."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._metadata = b' ' * (2 ** 31 + 1)

    def do_get(self, context, ticket):
        schema = pa.schema([('a', pa.int64())])
        return flight.GeneratorStream(schema, [
            (pa.record_batch([[1]], schema=schema), self._metadata),
        ])

    def do_exchange(self, context, descriptor, reader, writer):
        writer.write_metadata(self._metadata)
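

# The metadata buffer above is deliberately one byte past 2 GiB
# (2 ** 31 + 1), chosen so its length cannot be represented by a signed
# 32-bit integer, which is the boundary the ARROW-13253 regression
# involved.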


def test_repr():
    action_repr = "<pyarrow.flight.Action type='foo' body=(0 bytes)>"
    action_type_repr = "ActionType(type='foo', description='bar')"
    basic_auth_repr = \
        "<pyarrow.flight.BasicAuth username=b'user' password=(redacted)>"
    descriptor_repr = "<pyarrow.flight.FlightDescriptor cmd=b'foo'>"
    endpoint_repr = ("<pyarrow.flight.FlightEndpoint "
                     "ticket=<pyarrow.flight.Ticket ticket=b'foo'> "
                     "locations=[]>")
    info_repr = (
        "<pyarrow.flight.FlightInfo "
        "schema= "
        "descriptor=<pyarrow.flight.FlightDescriptor path=[]> "
        "endpoints=[] "
        "total_records=-1 "
        "total_bytes=-1>")
    location_repr = "<pyarrow.flight.Location b'grpc+tcp://localhost:1234'>"
    result_repr = "<pyarrow.flight.Result body=(3 bytes)>"
    schema_result_repr = "<pyarrow.flight.SchemaResult schema=()>"
    ticket_repr = "<pyarrow.flight.Ticket ticket=b'foo'>"

    assert repr(flight.Action("foo", b"")) == action_repr
    assert repr(flight.ActionType("foo", "bar")) == action_type_repr
    assert repr(flight.BasicAuth("user", "pass")) == basic_auth_repr
    assert repr(flight.FlightDescriptor.for_command("foo")) == descriptor_repr
    assert repr(flight.FlightEndpoint(b"foo", [])) == endpoint_repr
    info = flight.FlightInfo(
        pa.schema([]), flight.FlightDescriptor.for_path(), [], -1, -1)
    assert repr(info) == info_repr
    assert repr(flight.Location("grpc+tcp://localhost:1234")) == location_repr
    assert repr(flight.Result(b"foo")) == result_repr
    assert repr(flight.SchemaResult(pa.schema([]))) == schema_result_repr
    assert repr(flight.SchemaResult(pa.schema([("int", "int64")]))) == \
        "<pyarrow.flight.SchemaResult schema=(int: int64)>"
    assert repr(flight.Ticket(b"foo")) == ticket_repr

    with pytest.raises(TypeError):
        flight.Action("foo", None)


def test_eq():
    items = [
        lambda: (flight.Action("foo", b""), flight.Action("foo", b"bar")),
        lambda: (flight.ActionType("foo", "bar"),
                 flight.ActionType("foo", "baz")),
        lambda: (flight.BasicAuth("user", "pass"),
                 flight.BasicAuth("user2", "pass")),
        lambda: (flight.FlightDescriptor.for_command("foo"),
                 flight.FlightDescriptor.for_path("foo")),
        lambda: (flight.FlightEndpoint(b"foo", []),
                 flight.FlightEndpoint(b"", [])),
        lambda: (
            flight.FlightInfo(
                pa.schema([]),
                flight.FlightDescriptor.for_path(), [], -1, -1),
            flight.FlightInfo(
                pa.schema([]),
                flight.FlightDescriptor.for_command(b"foo"), [], -1, 42)),
        lambda: (flight.Location("grpc+tcp://localhost:1234"),
                 flight.Location("grpc+tls://localhost:1234")),
        lambda: (flight.Result(b"foo"), flight.Result(b"bar")),
        lambda: (flight.SchemaResult(pa.schema([])),
                 flight.SchemaResult(pa.schema([("ints", pa.int64())]))),
        lambda: (flight.Ticket(b""), flight.Ticket(b"foo")),
    ]

    for gen in items:
        lhs1, rhs1 = gen()
        lhs2, rhs2 = gen()
        assert lhs1 == lhs2
        assert rhs1 == rhs2
        assert lhs1 != rhs1


def test_flight_server_location_argument():
    locations = [
        None,
        'grpc://localhost:0',
        ('localhost', find_free_port()),
    ]
    for location in locations:
        with FlightServerBase(location) as server:
            assert isinstance(server, FlightServerBase)


def test_server_exit_reraises_exception():
    with pytest.raises(ValueError):
        with FlightServerBase():
            raise ValueError()


@pytest.mark.slow
def test_client_wait_for_available():
    location = ('localhost', find_free_port())
    server = None

    def serve():
        nonlocal server
        time.sleep(0.5)
        server = FlightServerBase(location)
        server.serve()

    with FlightClient(location) as client:
        thread = threading.Thread(target=serve, daemon=True)
        thread.start()

        started = time.time()
        client.wait_for_available(timeout=5)
        elapsed = time.time() - started
        assert elapsed >= 0.5


def test_flight_list_flights():
    """Try a simple list_flights call."""
    with ConstantFlightServer() as server, \
            flight.connect(('localhost', server.port)) as client:
        assert list(client.list_flights()) == []
        flights = client.list_flights(ConstantFlightServer.CRITERIA)
        assert len(list(flights)) == 1


def test_flight_client_close():
    with ConstantFlightServer() as server, \
            flight.connect(('localhost', server.port)) as client:
        assert list(client.list_flights()) == []
        client.close()
        client.close()  # Idempotent
        with pytest.raises(pa.ArrowInvalid):
            list(client.list_flights())


def test_flight_do_get_ints():
    """Try a simple do_get call."""
    table = simple_ints_table()

    with ConstantFlightServer() as server, \
            flight.connect(('localhost', server.port)) as client:
        data = client.do_get(flight.Ticket(b'ints')).read_all()
        assert data.equals(table)

    options = pa.ipc.IpcWriteOptions(
        metadata_version=pa.ipc.MetadataVersion.V4)
    with ConstantFlightServer(options=options) as server, \
            flight.connect(('localhost', server.port)) as client:
        data = client.do_get(flight.Ticket(b'ints')).read_all()
        assert data.equals(table)

        # Also test via RecordBatchReader interface
        data = client.do_get(flight.Ticket(b'ints')).to_reader().read_all()
        assert data.equals(table)

    with pytest.raises(flight.FlightServerError,
                       match="expected IpcWriteOptions, got <class 'int'>"):
        with ConstantFlightServer(options=42) as server, \
                flight.connect(('localhost', server.port)) as client:
            data = client.do_get(flight.Ticket(b'ints')).read_all()


@pytest.mark.pandas
def test_do_get_ints_pandas():
    """Try a simple do_get call."""
    table = simple_ints_table()

    with ConstantFlightServer() as server, \
            flight.connect(('localhost', server.port)) as client:
        data = client.do_get(flight.Ticket(b'ints')).read_pandas()
        assert list(data['some_ints']) == table.column(0).to_pylist()


def test_flight_do_get_dicts():
    table = simple_dicts_table()

    with ConstantFlightServer() as server, \
            flight.connect(('localhost', server.port)) as client:
        data = client.do_get(flight.Ticket(b'dicts')).read_all()
        assert data.equals(table)


def test_flight_do_get_ticket():
    """Make sure Tickets get passed to the server."""
    data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
    table = pa.Table.from_arrays(data1, names=['a'])
    with CheckTicketFlightServer(expected_ticket=b'the-ticket') as server, \
            flight.connect(('localhost', server.port)) as client:
        data = client.do_get(flight.Ticket(b'the-ticket')).read_all()
        assert data.equals(table)


def test_flight_get_info():
    """Make sure FlightEndpoint accepts string and object URIs."""
    with GetInfoFlightServer() as server:
        client = FlightClient(('localhost', server.port))
        info = client.get_flight_info(
            flight.FlightDescriptor.for_command(b''))
        assert info.total_records == -1
        assert info.total_bytes == -1
        assert info.schema == pa.schema([('a', pa.int32())])
        assert len(info.endpoints) == 2
        assert len(info.endpoints[0].locations) == 1
        assert info.endpoints[0].locations[0] == flight.Location('grpc://test')
        assert info.endpoints[1].locations[0] == \
            flight.Location.for_grpc_tcp('localhost', 5005)


def test_flight_get_schema():
    """Make sure GetSchema returns correct schema."""
    with GetInfoFlightServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        info = client.get_schema(flight.FlightDescriptor.for_command(b''))
        assert info.schema == pa.schema([('a', pa.int32())])


def test_list_actions():
    """Make sure the return type of ListActions is validated."""
    # ARROW-6392
    with ListActionsErrorFlightServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        with pytest.raises(
                flight.FlightServerError,
                match=("Results of list_actions must be "
                       "ActionType or tuple")
        ):
            list(client.list_actions())

    with ListActionsFlightServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        assert list(client.list_actions()) == \
            ListActionsFlightServer.expected_actions()


class ConvenienceServer(FlightServerBase):
    """
    Server for testing various implementation conveniences (auto-boxing, etc.)
    """

    @property
    def simple_action_results(self):
        return [b'foo', b'bar', b'baz']

    def do_action(self, context, action):
        if action.type == 'simple-action':
            return self.simple_action_results
        elif action.type == 'echo':
            return [action.body]
        elif action.type == 'bad-action':
            return ['foo']
        elif action.type == 'arrow-exception':
            raise pa.ArrowMemoryError()
        elif action.type == 'forever':
            def gen():
                while not context.is_cancelled():
                    yield b'foo'
            return gen()


def test_do_action_result_convenience():
    with ConvenienceServer() as server, \
            FlightClient(('localhost', server.port)) as client:

        # do_action as action type without body
        results = [x.body for x in client.do_action('simple-action')]
        assert results == server.simple_action_results

        # do_action with tuple of type and body
        body = b'the-body'
        results = [x.body for x in client.do_action(('echo', body))]
        assert results == [body]


def test_nicer_server_exceptions():
    with ConvenienceServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        with pytest.raises(flight.FlightServerError,
                           match="a bytes-like object is required"):
            list(client.do_action('bad-action'))
        # While Flight/C++ sends across the original status code, it
        # doesn't get mapped to the equivalent code here, since we
        # want to be able to distinguish between client- and server-
        # side errors.
        with pytest.raises(flight.FlightServerError,
                           match="ArrowMemoryError"):
            list(client.do_action('arrow-exception'))


def test_get_port():
    """Make sure port() works."""
    server = GetInfoFlightServer("grpc://localhost:0")
    try:
        assert server.port > 0
    finally:
        server.shutdown()


@pytest.mark.skipif(os.name == 'nt',
                    reason="Unix sockets can't be tested on Windows")
def test_flight_domain_socket():
    """Try a simple do_get call over a Unix domain socket."""
    with tempfile.NamedTemporaryFile() as sock:
        sock.close()
        location = flight.Location.for_grpc_unix(sock.name)
        with ConstantFlightServer(location=location), \
                FlightClient(location) as client:

            reader = client.do_get(flight.Ticket(b'ints'))
            table = simple_ints_table()
            assert reader.schema.equals(table.schema)
            data = reader.read_all()
            assert data.equals(table)

            reader = client.do_get(flight.Ticket(b'dicts'))
            table = simple_dicts_table()
            assert reader.schema.equals(table.schema)
            data = reader.read_all()
            assert data.equals(table)


@pytest.mark.slow
def test_flight_large_message():
    """Try sending/receiving a large message via Flight.

    See ARROW-4421: by default, gRPC won't allow us to send messages >
    4MiB in size.
    """
    data = pa.Table.from_arrays([
        pa.array(range(0, 10 * 1024 * 1024))
    ], names=['a'])

    with EchoFlightServer(expected_schema=data.schema) as server, \
            FlightClient(('localhost', server.port)) as client:
        writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
                                  data.schema)
        # Write a single giant chunk
        writer.write_table(data, 10 * 1024 * 1024)
        writer.close()
        result = client.do_get(flight.Ticket(b'')).read_all()
        assert result.equals(data)


def test_flight_generator_stream():
    """Try downloading a flight of RecordBatches in a GeneratorStream."""
    data = pa.Table.from_arrays([
        pa.array(range(0, 10 * 1024))
    ], names=['a'])

    with EchoStreamFlightServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
                                  data.schema)
        writer.write_table(data)
        writer.close()
        result = client.do_get(flight.Ticket(b'')).read_all()
        assert result.equals(data)


def test_flight_invalid_generator_stream():
    """Try streaming data with mismatched schemas."""
    with InvalidStreamFlightServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        with pytest.raises(pa.ArrowException):
            client.do_get(flight.Ticket(b'')).read_all()


def test_timeout_fires():
    """Make sure timeouts fire on slow requests."""
    # Do this in a separate thread so that if it fails, we don't hang
    # the entire test process
    with SlowFlightServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        action = flight.Action("", b"")
        options = flight.FlightCallOptions(timeout=0.2)
        # gRPC error messages change based on version, so don't look
        # for a particular error
        with pytest.raises(flight.FlightTimedOutError):
            list(client.do_action(action, options=options))
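

# SlowFlightServer.do_action sleeps for 0.5 s, comfortably longer than the
# 0.2 s deadline set above, so the call reliably surfaces as
# FlightTimedOutError.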


def test_timeout_passes():
    """Make sure timeouts do not fire on fast requests."""
    with ConstantFlightServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        options = flight.FlightCallOptions(timeout=5.0)
        client.do_get(flight.Ticket(b'ints'), options=options).read_all()


def test_read_options():
    """Make sure ReadOptions can be used."""
    expected = pa.Table.from_arrays([pa.array([1, 2, 3, 4])], names=["b"])
    with ConstantFlightServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        options = flight.FlightCallOptions(
            read_options=IpcReadOptions(included_fields=[1]))
        response1 = client.do_get(flight.Ticket(
            b'multi'), options=options).read_all()
        response2 = client.do_get(flight.Ticket(b'multi')).read_all()

        assert response2.num_columns == 2
        assert response1.num_columns == 1
        assert response1 == expected
        assert response2 == multiple_column_table()
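

# IpcReadOptions(included_fields=[1]) selects fields by position at read
# time: only the second column ('b') of the two-column table is
# deserialized, which is why response1 above comes back with one column.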
|
1271 |
+
|
1272 |
+
basic_auth_handler = HttpBasicServerAuthHandler(creds={
|
1273 |
+
b"test": b"p4ssw0rd",
|
1274 |
+
})
|
1275 |
+
|
1276 |
+
token_auth_handler = TokenServerAuthHandler(creds={
|
1277 |
+
b"test": b"p4ssw0rd",
|
1278 |
+
})
|
1279 |
+
|
1280 |
+
|
1281 |
+
@pytest.mark.slow
|
1282 |
+
def test_http_basic_unauth():
|
1283 |
+
"""Test that auth fails when not authenticated."""
|
1284 |
+
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server, \
|
1285 |
+
FlightClient(('localhost', server.port)) as client:
|
1286 |
+
action = flight.Action("who-am-i", b"")
|
1287 |
+
with pytest.raises(flight.FlightUnauthenticatedError,
|
1288 |
+
match=".*unauthenticated.*"):
|
1289 |
+
list(client.do_action(action))
|
1290 |
+
|
1291 |
+
|
1292 |
+
@pytest.mark.skipif(os.name == 'nt',
|
1293 |
+
reason="ARROW-10013: gRPC on Windows corrupts peer()")
|
1294 |
+
def test_http_basic_auth():
|
1295 |
+
"""Test a Python implementation of HTTP basic authentication."""
|
1296 |
+
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server, \
|
1297 |
+
FlightClient(('localhost', server.port)) as client:
|
1298 |
+
action = flight.Action("who-am-i", b"")
|
1299 |
+
client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd'))
|
1300 |
+
results = client.do_action(action)
|
1301 |
+
identity = next(results)
|
1302 |
+
assert identity.body.to_pybytes() == b'test'
|
1303 |
+
peer_address = next(results)
|
1304 |
+
assert peer_address.body.to_pybytes() != b''
|
1305 |
+
|
1306 |
+
|
1307 |
+
def test_http_basic_auth_invalid_password():
|
1308 |
+
"""Test that auth fails with the wrong password."""
|
1309 |
+
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server, \
|
1310 |
+
FlightClient(('localhost', server.port)) as client:
|
1311 |
+
action = flight.Action("who-am-i", b"")
|
1312 |
+
with pytest.raises(flight.FlightUnauthenticatedError,
|
1313 |
+
match=".*wrong password.*"):
|
1314 |
+
client.authenticate(HttpBasicClientAuthHandler('test', 'wrong'))
|
1315 |
+
next(client.do_action(action))
|
1316 |
+
|
1317 |
+
|
1318 |
+
def test_token_auth():
|
1319 |
+
"""Test an auth mechanism that uses a handshake."""
|
1320 |
+
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server, \
|
1321 |
+
FlightClient(('localhost', server.port)) as client:
|
1322 |
+
action = flight.Action("who-am-i", b"")
|
1323 |
+
client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd'))
|
1324 |
+
identity = next(client.do_action(action))
|
1325 |
+
assert identity.body.to_pybytes() == b'test'
|
1326 |
+
|
1327 |
+
|
1328 |
+
def test_token_auth_invalid():
|
1329 |
+
"""Test an auth mechanism that uses a handshake."""
|
1330 |
+
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server, \
|
1331 |
+
FlightClient(('localhost', server.port)) as client:
|
1332 |
+
with pytest.raises(flight.FlightUnauthenticatedError):
|
1333 |
+
client.authenticate(TokenClientAuthHandler('test', 'wrong'))
|
1334 |
+
|
1335 |
+
|
1336 |
+
header_auth_server_middleware_factory = HeaderAuthServerMiddlewareFactory()
|
1337 |
+
no_op_auth_handler = NoopAuthHandler()
|
1338 |
+
|
1339 |
+
|
1340 |
+
def test_authenticate_basic_token():
|
1341 |
+
"""Test authenticate_basic_token with bearer token and auth headers."""
|
1342 |
+
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
|
1343 |
+
"auth": HeaderAuthServerMiddlewareFactory()
|
1344 |
+
}) as server, \
|
1345 |
+
FlightClient(('localhost', server.port)) as client:
|
1346 |
+
token_pair = client.authenticate_basic_token(b'test', b'password')
|
1347 |
+
assert token_pair[0] == b'authorization'
|
1348 |
+
assert token_pair[1] == b'Bearer token1234'
|
1349 |
+
|
1350 |
+
|
1351 |
+
def test_authenticate_basic_token_invalid_password():
|
1352 |
+
"""Test authenticate_basic_token with an invalid password."""
|
1353 |
+
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
|
1354 |
+
"auth": HeaderAuthServerMiddlewareFactory()
|
1355 |
+
}) as server, \
|
1356 |
+
FlightClient(('localhost', server.port)) as client:
|
1357 |
+
with pytest.raises(flight.FlightUnauthenticatedError):
|
1358 |
+
client.authenticate_basic_token(b'test', b'badpassword')
|
1359 |
+
|
1360 |
+
|
1361 |
+
def test_authenticate_basic_token_and_action():
|
1362 |
+
"""Test authenticate_basic_token and doAction after authentication."""
|
1363 |
+
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
|
1364 |
+
"auth": HeaderAuthServerMiddlewareFactory()
|
1365 |
+
}) as server, \
|
1366 |
+
FlightClient(('localhost', server.port)) as client:
|
1367 |
+
token_pair = client.authenticate_basic_token(b'test', b'password')
|
1368 |
+
assert token_pair[0] == b'authorization'
|
1369 |
+
assert token_pair[1] == b'Bearer token1234'
|
1370 |
+
options = flight.FlightCallOptions(headers=[token_pair])
|
1371 |
+
result = list(client.do_action(
|
1372 |
+
action=flight.Action('test-action', b''), options=options))
|
1373 |
+
assert result[0].body.to_pybytes() == b'token1234'
|
1374 |
+
|
1375 |
+
|
1376 |
+
def test_authenticate_basic_token_with_client_middleware():
|
1377 |
+
"""Test authenticate_basic_token with client middleware
|
1378 |
+
to intercept authorization header returned by the
|
1379 |
+
HTTP header auth enabled server.
|
1380 |
+
"""
|
1381 |
+
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
|
1382 |
+
"auth": HeaderAuthServerMiddlewareFactory()
|
1383 |
+
}) as server:
|
1384 |
+
client_auth_middleware = ClientHeaderAuthMiddlewareFactory()
|
1385 |
+
client = FlightClient(
|
1386 |
+
('localhost', server.port),
|
1387 |
+
middleware=[client_auth_middleware]
|
1388 |
+
)
|
1389 |
+
encoded_credentials = base64.b64encode(b'test:password')
|
1390 |
+
options = flight.FlightCallOptions(headers=[
|
1391 |
+
(b'authorization', b'Basic ' + encoded_credentials)
|
1392 |
+
])
|
1393 |
+
result = list(client.do_action(
|
1394 |
+
action=flight.Action('test-action', b''), options=options))
|
1395 |
+
assert result[0].body.to_pybytes() == b'token1234'
|
1396 |
+
assert client_auth_middleware.call_credential[0] == b'authorization'
|
1397 |
+
assert client_auth_middleware.call_credential[1] == \
|
1398 |
+
b'Bearer ' + b'token1234'
|
1399 |
+
result2 = list(client.do_action(
|
1400 |
+
action=flight.Action('test-action', b''), options=options))
|
1401 |
+
assert result2[0].body.to_pybytes() == b'token1234'
|
1402 |
+
assert client_auth_middleware.call_credential[0] == b'authorization'
|
1403 |
+
assert client_auth_middleware.call_credential[1] == \
|
1404 |
+
b'Bearer ' + b'token1234'
|
1405 |
+
client.close()
|
1406 |
+
|
1407 |
+
|
1408 |
+
def test_arbitrary_headers_in_flight_call_options():
|
1409 |
+
"""Test passing multiple arbitrary headers to the middleware."""
|
1410 |
+
with ArbitraryHeadersFlightServer(
|
1411 |
+
auth_handler=no_op_auth_handler,
|
1412 |
+
middleware={
|
1413 |
+
"auth": HeaderAuthServerMiddlewareFactory(),
|
1414 |
+
"arbitrary-headers": ArbitraryHeadersServerMiddlewareFactory()
|
1415 |
+
}) as server, \
|
1416 |
+
FlightClient(('localhost', server.port)) as client:
|
1417 |
+
token_pair = client.authenticate_basic_token(b'test', b'password')
|
1418 |
+
assert token_pair[0] == b'authorization'
|
1419 |
+
assert token_pair[1] == b'Bearer token1234'
|
1420 |
+
options = flight.FlightCallOptions(headers=[
|
1421 |
+
token_pair,
|
1422 |
+
(b'test-header-1', b'value1'),
|
1423 |
+
(b'test-header-2', b'value2')
|
1424 |
+
])
|
1425 |
+
result = list(client.do_action(flight.Action(
|
1426 |
+
"test-action", b""), options=options))
|
1427 |
+
assert result[0].body.to_pybytes() == b'value1'
|
1428 |
+
assert result[1].body.to_pybytes() == b'value2'
|
1429 |
+
|
1430 |
+
|
1431 |
+
def test_location_invalid():
|
1432 |
+
"""Test constructing invalid URIs."""
|
1433 |
+
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
|
1434 |
+
flight.connect("%")
|
1435 |
+
|
1436 |
+
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
|
1437 |
+
ConstantFlightServer("%")
|
1438 |
+
|
1439 |
+
|
1440 |
+
def test_location_unknown_scheme():
|
1441 |
+
"""Test creating locations for unknown schemes."""
|
1442 |
+
assert flight.Location("s3://foo").uri == b"s3://foo"
|
1443 |
+
assert flight.Location("https://example.com/bar.parquet").uri == \
|
1444 |
+
b"https://example.com/bar.parquet"
|
1445 |
+
|
1446 |
+
|
1447 |
+
@pytest.mark.slow
|
1448 |
+
@pytest.mark.requires_testing_data
|
1449 |
+
def test_tls_fails():
|
1450 |
+
"""Make sure clients cannot connect when cert verification fails."""
|
1451 |
+
certs = example_tls_certs()
|
1452 |
+
|
1453 |
+
# Ensure client doesn't connect when certificate verification
|
1454 |
+
# fails (this is a slow test since gRPC does retry a few times)
|
1455 |
+
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s, \
|
1456 |
+
FlightClient("grpc+tls://localhost:" + str(s.port)) as client:
|
1457 |
+
# gRPC error messages change based on version, so don't look
|
1458 |
+
# for a particular error
|
1459 |
+
with pytest.raises(flight.FlightUnavailableError):
|
1460 |
+
client.do_get(flight.Ticket(b'ints')).read_all()
|
1461 |
+
|
1462 |
+
|
1463 |
+
@pytest.mark.requires_testing_data
|
1464 |
+
def test_tls_do_get():
|
1465 |
+
"""Try a simple do_get call over TLS."""
|
1466 |
+
table = simple_ints_table()
|
1467 |
+
certs = example_tls_certs()
|
1468 |
+
|
1469 |
+
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s, \
|
1470 |
+
FlightClient(('localhost', s.port),
|
1471 |
+
tls_root_certs=certs["root_cert"]) as client:
|
1472 |
+
data = client.do_get(flight.Ticket(b'ints')).read_all()
|
1473 |
+
assert data.equals(table)
|
1474 |
+
|
1475 |
+
|
1476 |
+
@pytest.mark.requires_testing_data
|
1477 |
+
def test_tls_disable_server_verification():
|
1478 |
+
"""Try a simple do_get call over TLS with server verification disabled."""
|
1479 |
+
table = simple_ints_table()
|
1480 |
+
certs = example_tls_certs()
|
1481 |
+
|
1482 |
+
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
|
1483 |
+
try:
|
1484 |
+
client = FlightClient(('localhost', s.port),
|
1485 |
+
disable_server_verification=True)
|
1486 |
+
except NotImplementedError:
|
1487 |
+
pytest.skip('disable_server_verification feature is not available')
|
1488 |
+
data = client.do_get(flight.Ticket(b'ints')).read_all()
|
1489 |
+
assert data.equals(table)
|
1490 |
+
client.close()
|
1491 |
+
|
1492 |
+
|
1493 |
+
@pytest.mark.requires_testing_data
|
1494 |
+
def test_tls_override_hostname():
|
1495 |
+
"""Check that incorrectly overriding the hostname fails."""
|
1496 |
+
certs = example_tls_certs()
|
1497 |
+
|
1498 |
+
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s, \
|
1499 |
+
flight.connect(('localhost', s.port),
|
1500 |
+
tls_root_certs=certs["root_cert"],
|
1501 |
+
override_hostname="fakehostname") as client:
|
1502 |
+
with pytest.raises(flight.FlightUnavailableError):
|
1503 |
+
client.do_get(flight.Ticket(b'ints'))
|
1504 |
+
|
1505 |
+
|
1506 |
+
def test_flight_do_get_metadata():
|
1507 |
+
"""Try a simple do_get call with metadata."""
|
1508 |
+
data = [
|
1509 |
+
pa.array([-10, -5, 0, 5, 10])
|
1510 |
+
]
|
1511 |
+
table = pa.Table.from_arrays(data, names=['a'])
|
1512 |
+
|
1513 |
+
batches = []
|
1514 |
+
with MetadataFlightServer() as server, \
|
1515 |
+
FlightClient(('localhost', server.port)) as client:
|
1516 |
+
reader = client.do_get(flight.Ticket(b''))
|
1517 |
+
idx = 0
|
1518 |
+
while True:
|
1519 |
+
try:
|
1520 |
+
batch, metadata = reader.read_chunk()
|
1521 |
+
batches.append(batch)
|
1522 |
+
server_idx, = struct.unpack('<i', metadata.to_pybytes())
|
1523 |
+
assert idx == server_idx
|
1524 |
+
idx += 1
|
1525 |
+
except StopIteration:
|
1526 |
+
break
|
1527 |
+
data = pa.Table.from_batches(batches)
|
1528 |
+
assert data.equals(table)
|
1529 |
+
|
1530 |
+
|
1531 |
+
def test_flight_do_get_metadata_v4():
|
1532 |
+
"""Try a simple do_get call with V4 metadata version."""
|
1533 |
+
table = pa.Table.from_arrays(
|
1534 |
+
[pa.array([-10, -5, 0, 5, 10])], names=['a'])
|
1535 |
+
options = pa.ipc.IpcWriteOptions(
|
1536 |
+
metadata_version=pa.ipc.MetadataVersion.V4)
|
1537 |
+
with MetadataFlightServer(options=options) as server, \
|
1538 |
+
FlightClient(('localhost', server.port)) as client:
|
1539 |
+
reader = client.do_get(flight.Ticket(b''))
|
1540 |
+
data = reader.read_all()
|
1541 |
+
assert data.equals(table)
|
1542 |
+
|
1543 |
+
|
1544 |
+
def test_flight_do_put_metadata():
|
1545 |
+
"""Try a simple do_put call with metadata."""
|
1546 |
+
data = [
|
1547 |
+
pa.array([-10, -5, 0, 5, 10])
|
1548 |
+
]
|
1549 |
+
table = pa.Table.from_arrays(data, names=['a'])
|
1550 |
+
|
1551 |
+
with MetadataFlightServer() as server, \
|
1552 |
+
FlightClient(('localhost', server.port)) as client:
|
1553 |
+
writer, metadata_reader = client.do_put(
|
1554 |
+
flight.FlightDescriptor.for_path(''),
|
1555 |
+
table.schema)
|
1556 |
+
with writer:
|
1557 |
+
for idx, batch in enumerate(table.to_batches(max_chunksize=1)):
|
1558 |
+
metadata = struct.pack('<i', idx)
|
1559 |
+
writer.write_with_metadata(batch, metadata)
|
1560 |
+
buf = metadata_reader.read()
|
1561 |
+
assert buf is not None
|
1562 |
+
server_idx, = struct.unpack('<i', buf.to_pybytes())
|
1563 |
+
assert idx == server_idx
|
1564 |
+
|
1565 |
+
|
1566 |
+
def test_flight_do_put_limit():
|
1567 |
+
"""Try a simple do_put call with a size limit."""
|
1568 |
+
large_batch = pa.RecordBatch.from_arrays([
|
1569 |
+
pa.array(np.ones(768, dtype=np.int64())),
|
1570 |
+
], names=['a'])
|
1571 |
+
|
1572 |
+
with EchoFlightServer() as server, \
|
1573 |
+
FlightClient(('localhost', server.port),
|
1574 |
+
write_size_limit_bytes=4096) as client:
|
1575 |
+
writer, metadata_reader = client.do_put(
|
1576 |
+
flight.FlightDescriptor.for_path(''),
|
1577 |
+
large_batch.schema)
|
1578 |
+
with writer:
|
1579 |
+
with pytest.raises(flight.FlightWriteSizeExceededError,
|
1580 |
+
match="exceeded soft limit") as excinfo:
|
1581 |
+
writer.write_batch(large_batch)
|
1582 |
+
assert excinfo.value.limit == 4096
|
1583 |
+
smaller_batches = [
|
1584 |
+
large_batch.slice(0, 384),
|
1585 |
+
large_batch.slice(384),
|
1586 |
+
]
|
1587 |
+
for batch in smaller_batches:
|
1588 |
+
writer.write_batch(batch)
|
1589 |
+
expected = pa.Table.from_batches([large_batch])
|
1590 |
+
actual = client.do_get(flight.Ticket(b'')).read_all()
|
1591 |
+
assert expected == actual
|
1592 |
+
|
1593 |
+
|
1594 |
+
@pytest.mark.slow
|
1595 |
+
def test_cancel_do_get():
|
1596 |
+
"""Test canceling a DoGet operation on the client side."""
|
1597 |
+
with ConstantFlightServer() as server, \
|
1598 |
+
FlightClient(('localhost', server.port)) as client:
|
1599 |
+
reader = client.do_get(flight.Ticket(b'ints'))
|
1600 |
+
reader.cancel()
|
1601 |
+
with pytest.raises(flight.FlightCancelledError,
|
1602 |
+
match="(?i).*cancel.*"):
|
1603 |
+
reader.read_chunk()
|
1604 |
+
|
1605 |
+
|
1606 |
+
@pytest.mark.slow
|
1607 |
+
def test_cancel_do_get_threaded():
|
1608 |
+
"""Test canceling a DoGet operation from another thread."""
|
1609 |
+
with SlowFlightServer() as server, \
|
1610 |
+
FlightClient(('localhost', server.port)) as client:
|
1611 |
+
reader = client.do_get(flight.Ticket(b'ints'))
|
1612 |
+
|
1613 |
+
read_first_message = threading.Event()
|
1614 |
+
stream_canceled = threading.Event()
|
1615 |
+
result_lock = threading.Lock()
|
1616 |
+
raised_proper_exception = threading.Event()
|
1617 |
+
|
1618 |
+
def block_read():
|
1619 |
+
reader.read_chunk()
|
1620 |
+
read_first_message.set()
|
1621 |
+
stream_canceled.wait(timeout=5)
|
1622 |
+
try:
|
1623 |
+
reader.read_chunk()
|
1624 |
+
except flight.FlightCancelledError:
|
1625 |
+
with result_lock:
|
1626 |
+
raised_proper_exception.set()
|
1627 |
+
|
1628 |
+
thread = threading.Thread(target=block_read, daemon=True)
|
1629 |
+
thread.start()
|
1630 |
+
read_first_message.wait(timeout=5)
|
1631 |
+
reader.cancel()
|
1632 |
+
stream_canceled.set()
|
1633 |
+
thread.join(timeout=1)
|
1634 |
+
|
1635 |
+
with result_lock:
|
1636 |
+
assert raised_proper_exception.is_set()
|
1637 |
+
|
1638 |
+
|
1639 |
+
def test_streaming_do_action():
|
1640 |
+
with ConvenienceServer() as server, \
|
1641 |
+
FlightClient(('localhost', server.port)) as client:
|
1642 |
+
results = client.do_action(flight.Action('forever', b''))
|
1643 |
+
assert next(results).body == b'foo'
|
1644 |
+
# Implicit cancel when destructed
|
1645 |
+
del results
|
1646 |
+
|
1647 |
+
|
1648 |
+
def test_roundtrip_types():
|
1649 |
+
"""Make sure serializable types round-trip."""
|
1650 |
+
action = flight.Action("action1", b"action1-body")
|
1651 |
+
assert action == flight.Action.deserialize(action.serialize())
|
1652 |
+
|
1653 |
+
ticket = flight.Ticket("foo")
|
1654 |
+
assert ticket == flight.Ticket.deserialize(ticket.serialize())
|
1655 |
+
|
1656 |
+
result = flight.Result(b"result1")
|
1657 |
+
assert result == flight.Result.deserialize(result.serialize())
|
1658 |
+
|
1659 |
+
basic_auth = flight.BasicAuth("username1", "password1")
|
1660 |
+
assert basic_auth == flight.BasicAuth.deserialize(basic_auth.serialize())
|
1661 |
+
|
1662 |
+
schema_result = flight.SchemaResult(pa.schema([('a', pa.int32())]))
|
1663 |
+
assert schema_result == flight.SchemaResult.deserialize(
|
1664 |
+
schema_result.serialize())
|
1665 |
+
|
1666 |
+
desc = flight.FlightDescriptor.for_command("test")
|
1667 |
+
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
|
1668 |
+
|
1669 |
+
desc = flight.FlightDescriptor.for_path("a", "b", "test.arrow")
|
1670 |
+
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
|
1671 |
+
|
1672 |
+
info = flight.FlightInfo(
|
1673 |
+
pa.schema([('a', pa.int32())]),
|
1674 |
+
desc,
|
1675 |
+
[
|
1676 |
+
flight.FlightEndpoint(b'', ['grpc://test']),
|
1677 |
+
flight.FlightEndpoint(
|
1678 |
+
b'',
|
1679 |
+
[flight.Location.for_grpc_tcp('localhost', 5005)],
|
1680 |
+
),
|
1681 |
+
],
|
1682 |
+
-1,
|
1683 |
+
-1,
|
1684 |
+
)
|
1685 |
+
info2 = flight.FlightInfo.deserialize(info.serialize())
|
1686 |
+
assert info.schema == info2.schema
|
1687 |
+
assert info.descriptor == info2.descriptor
|
1688 |
+
assert info.total_bytes == info2.total_bytes
|
1689 |
+
assert info.total_records == info2.total_records
|
1690 |
+
assert info.endpoints == info2.endpoints
|
1691 |
+
|
1692 |
+
endpoint = flight.FlightEndpoint(
|
1693 |
+
ticket,
|
1694 |
+
['grpc://test', flight.Location.for_grpc_tcp('localhost', 5005)]
|
1695 |
+
)
|
1696 |
+
assert endpoint == flight.FlightEndpoint.deserialize(endpoint.serialize())
|
1697 |
+
|
1698 |
+
|
1699 |
+
def test_roundtrip_errors():
|
1700 |
+
"""Ensure that Flight errors propagate from server to client."""
|
1701 |
+
with ErrorFlightServer() as server, \
|
1702 |
+
FlightClient(('localhost', server.port)) as client:
|
1703 |
+
|
1704 |
+
for arg, exc_type in ErrorFlightServer.error_cases().items():
|
1705 |
+
with pytest.raises(exc_type, match=".*foo.*"):
|
1706 |
+
list(client.do_action(flight.Action(arg, b"")))
|
1707 |
+
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
|
1708 |
+
list(client.list_flights())
|
1709 |
+
|
1710 |
+
data = [pa.array([-10, -5, 0, 5, 10])]
|
1711 |
+
table = pa.Table.from_arrays(data, names=['a'])
|
1712 |
+
|
1713 |
+
exceptions = {
|
1714 |
+
'internal': flight.FlightInternalError,
|
1715 |
+
'timedout': flight.FlightTimedOutError,
|
1716 |
+
'cancel': flight.FlightCancelledError,
|
1717 |
+
'unauthenticated': flight.FlightUnauthenticatedError,
|
1718 |
+
'unauthorized': flight.FlightUnauthorizedError,
|
1719 |
+
}
|
1720 |
+
|
1721 |
+
for command, exception in exceptions.items():
|
1722 |
+
|
1723 |
+
with pytest.raises(exception, match=".*foo.*"):
|
1724 |
+
writer, reader = client.do_put(
|
1725 |
+
flight.FlightDescriptor.for_command(command),
|
1726 |
+
table.schema)
|
1727 |
+
writer.write_table(table)
|
1728 |
+
writer.close()
|
1729 |
+
|
1730 |
+
with pytest.raises(exception, match=".*foo.*"):
|
1731 |
+
writer, reader = client.do_put(
|
1732 |
+
flight.FlightDescriptor.for_command(command),
|
1733 |
+
table.schema)
|
1734 |
+
writer.close()
|
1735 |
+
|
1736 |
+
|
1737 |
+
def test_do_put_independent_read_write():
    """Ensure that separate threads can read/write on a DoPut."""
    # ARROW-6063: previously this would cause gRPC to abort when the
    # writer was closed (due to simultaneous reads), or would hang
    # forever.
    data = [
        pa.array([-10, -5, 0, 5, 10])
    ]
    table = pa.Table.from_arrays(data, names=['a'])

    with MetadataFlightServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        writer, metadata_reader = client.do_put(
            flight.FlightDescriptor.for_path(''),
            table.schema)

        count = [0]

        def _reader_thread():
            while metadata_reader.read() is not None:
                count[0] += 1

        thread = threading.Thread(target=_reader_thread)
        thread.start()

        batches = table.to_batches(max_chunksize=1)
        with writer:
            for idx, batch in enumerate(batches):
                metadata = struct.pack('<i', idx)
                writer.write_with_metadata(batch, metadata)
            # Causes the server to stop writing and end the call
            writer.done_writing()
            # Thus the reader thread will break out of its loop
            thread.join()
        # writer.close() won't segfault since the reader thread has stopped
        assert count[0] == len(batches)


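# In the next test, the middleware factory attaches a per-call value and
# the handler (defined earlier in this file) is expected to fetch it via
# context.get_middleware("test"); the b"right value" assertion only holds
# if the middleware ran as part of the same RPC.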
def test_server_middleware_same_thread():
    """Ensure that server middleware runs on the same thread as the RPC."""
    with HeaderFlightServer(middleware={
        "test": HeaderServerMiddlewareFactory(),
    }) as server, \
            FlightClient(('localhost', server.port)) as client:
        results = list(client.do_action(flight.Action(b"test", b"")))
        assert len(results) == 1
        value = results[0].body.to_pybytes()
        assert b"right value" == value


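# Middleware can reject a call before the handler runs: below, calls
# without credentials surface as FlightUnauthenticatedError, while a
# client middleware that replays the expected header is let through and
# gets b"password" back.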
def test_middleware_reject():
    """Test rejecting an RPC with server middleware."""
    with HeaderFlightServer(middleware={
        "test": SelectiveAuthServerMiddlewareFactory(),
    }) as server, \
            FlightClient(('localhost', server.port)) as client:
        # The middleware allows this through without auth.
        with pytest.raises(pa.ArrowNotImplementedError):
            list(client.list_actions())

        # But not anything else.
        with pytest.raises(flight.FlightUnauthenticatedError):
            list(client.do_action(flight.Action(b"", b"")))

        client = FlightClient(
            ('localhost', server.port),
            middleware=[SelectiveAuthClientMiddlewareFactory()]
        )
        response = next(client.do_action(flight.Action(b"", b"")))
        assert b"password" == response.body.to_pybytes()


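# A bare FlightServerBase implements no RPCs, so every call below raises
# NotImplementedError; the point is only that both recording middleware
# see one FlightMethod per RPC kind, in the same order on the client and
# the server.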
def test_middleware_mapping():
    """Test that middleware records methods correctly."""
    server_middleware = RecordingServerMiddlewareFactory()
    client_middleware = RecordingClientMiddlewareFactory()
    with FlightServerBase(middleware={"test": server_middleware}) as server, \
            FlightClient(
                ('localhost', server.port),
                middleware=[client_middleware]
            ) as client:

        descriptor = flight.FlightDescriptor.for_command(b"")
        with pytest.raises(NotImplementedError):
            list(client.list_flights())
        with pytest.raises(NotImplementedError):
            client.get_flight_info(descriptor)
        with pytest.raises(NotImplementedError):
            client.get_schema(descriptor)
        with pytest.raises(NotImplementedError):
            client.do_get(flight.Ticket(b""))
        with pytest.raises(NotImplementedError):
            writer, _ = client.do_put(descriptor, pa.schema([]))
            writer.close()
        with pytest.raises(NotImplementedError):
            list(client.do_action(flight.Action(b"", b"")))
        with pytest.raises(NotImplementedError):
            list(client.list_actions())
        with pytest.raises(NotImplementedError):
            writer, _ = client.do_exchange(descriptor)
            writer.close()

        expected = [
            flight.FlightMethod.LIST_FLIGHTS,
            flight.FlightMethod.GET_FLIGHT_INFO,
            flight.FlightMethod.GET_SCHEMA,
            flight.FlightMethod.DO_GET,
            flight.FlightMethod.DO_PUT,
            flight.FlightMethod.DO_ACTION,
            flight.FlightMethod.LIST_ACTIONS,
            flight.FlightMethod.DO_EXCHANGE,
        ]
        assert server_middleware.methods == expected
        assert client_middleware.methods == expected


def test_extra_info():
    with ErrorFlightServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        try:
            list(client.do_action(flight.Action("protobuf", b"")))
            assert False
        except flight.FlightUnauthorizedError as e:
            assert e.extra_info is not None
            ei = e.extra_info
            assert ei == b'this is an error message'


@pytest.mark.requires_testing_data
def test_mtls():
    """Test mutual TLS (mTLS) with gRPC."""
    certs = example_tls_certs()
    table = simple_ints_table()

    with ConstantFlightServer(
            tls_certificates=[certs["certificates"][0]],
            verify_client=True,
            root_certificates=certs["root_cert"]) as s, \
        FlightClient(
            ('localhost', s.port),
            tls_root_certs=certs["root_cert"],
            cert_chain=certs["certificates"][0].cert,
            private_key=certs["certificates"][0].key) as client:
        data = client.do_get(flight.Ticket(b'ints')).read_all()
        assert data.equals(table)


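# The DoExchange tests that follow all drive a single bidirectional
# stream. A rough sketch of the client-side protocol they rely on:
#
#     writer, reader = client.do_exchange(descriptor)
#     with writer:
#         writer.begin(schema)        # must precede any data batches
#         writer.write_batch(batch)   # data, optionally with app metadata
#         writer.done_writing()       # half-close; server may keep sending
#         chunk = reader.read_chunk() # each chunk has .data/.app_metadata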
def test_doexchange_get():
    """Emulate DoGet with DoExchange."""
    expected = pa.Table.from_arrays([
        pa.array(range(0, 10 * 1024))
    ], names=["a"])

    with ExchangeFlightServer() as server, \
            FlightClient(("localhost", server.port)) as client:
        descriptor = flight.FlightDescriptor.for_command(b"get")
        writer, reader = client.do_exchange(descriptor)
        with writer:
            table = reader.read_all()
        assert expected == table


def test_doexchange_put():
    """Emulate DoPut with DoExchange."""
    data = pa.Table.from_arrays([
        pa.array(range(0, 10 * 1024))
    ], names=["a"])
    batches = data.to_batches(max_chunksize=512)

    with ExchangeFlightServer() as server, \
            FlightClient(("localhost", server.port)) as client:
        descriptor = flight.FlightDescriptor.for_command(b"put")
        writer, reader = client.do_exchange(descriptor)
        with writer:
            writer.begin(data.schema)
            for batch in batches:
                writer.write_batch(batch)
            writer.done_writing()
            chunk = reader.read_chunk()
            assert chunk.data is None
            expected_buf = str(len(batches)).encode("utf-8")
            assert chunk.app_metadata == expected_buf


def test_doexchange_echo():
    """Try a DoExchange echo server."""
    data = pa.Table.from_arrays([
        pa.array(range(0, 10 * 1024))
    ], names=["a"])
    batches = data.to_batches(max_chunksize=512)

    with ExchangeFlightServer() as server, \
            FlightClient(("localhost", server.port)) as client:
        descriptor = flight.FlightDescriptor.for_command(b"echo")
        writer, reader = client.do_exchange(descriptor)
        with writer:
            # Read/write metadata before starting data.
            for i in range(10):
                buf = str(i).encode("utf-8")
                writer.write_metadata(buf)
                chunk = reader.read_chunk()
                assert chunk.data is None
                assert chunk.app_metadata == buf

            # Now write data without metadata.
            writer.begin(data.schema)
            for batch in batches:
                writer.write_batch(batch)
                assert reader.schema == data.schema
                chunk = reader.read_chunk()
                assert chunk.data == batch
                assert chunk.app_metadata is None

            # And write data with metadata.
            for i, batch in enumerate(batches):
                buf = str(i).encode("utf-8")
                writer.write_with_metadata(batch, buf)
                chunk = reader.read_chunk()
                assert chunk.data == batch
                assert chunk.app_metadata == buf


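# Same echo exchange as above, but pinning the IPC metadata version to V4
# (the pre-1.0 format) through IpcWriteOptions; the server and
# writer.begin() must agree on the options for the round trip to work.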
def test_doexchange_echo_v4():
    """Try a DoExchange echo server using the V4 metadata version."""
    data = pa.Table.from_arrays([
        pa.array(range(0, 10 * 1024))
    ], names=["a"])
    batches = data.to_batches(max_chunksize=512)

    options = pa.ipc.IpcWriteOptions(
        metadata_version=pa.ipc.MetadataVersion.V4)
    with ExchangeFlightServer(options=options) as server, \
            FlightClient(("localhost", server.port)) as client:
        descriptor = flight.FlightDescriptor.for_command(b"echo")
        writer, reader = client.do_exchange(descriptor)
        with writer:
            # Now write data without metadata.
            writer.begin(data.schema, options=options)
            for batch in batches:
                writer.write_batch(batch)
                assert reader.schema == data.schema
                chunk = reader.read_chunk()
                assert chunk.data == batch
                assert chunk.app_metadata is None


def test_doexchange_transform():
    """Transform a table with a service."""
    data = pa.Table.from_arrays([
        pa.array(range(0, 1024)),
        pa.array(range(1, 1025)),
        pa.array(range(2, 1026)),
    ], names=["a", "b", "c"])
    expected = pa.Table.from_arrays([
        pa.array(range(3, 1024 * 3 + 3, 3)),
    ], names=["sum"])

    with ExchangeFlightServer() as server, \
            FlightClient(("localhost", server.port)) as client:
        descriptor = flight.FlightDescriptor.for_command(b"transform")
        writer, reader = client.do_exchange(descriptor)
        with writer:
            writer.begin(data.schema)
            writer.write_table(data)
            writer.done_writing()
            table = reader.read_all()
        assert expected == table


def test_middleware_multi_header():
    """Test sending/receiving multiple (binary-valued) headers."""
    with MultiHeaderFlightServer(middleware={
        "test": MultiHeaderServerMiddlewareFactory(),
    }) as server:
        headers = MultiHeaderClientMiddlewareFactory()
        with FlightClient(
                ('localhost', server.port),
                middleware=[headers]) as client:
            response = next(client.do_action(flight.Action(b"", b"")))
            # The server echoes the headers it got back to us.
            raw_headers = response.body.to_pybytes().decode("utf-8")
            client_headers = ast.literal_eval(raw_headers)
            # Don't directly compare; gRPC may add headers like User-Agent.
            for header, values in MultiHeaderClientMiddleware.EXPECTED.items():
                header = header.lower()
                if isinstance(header, bytes):
                    header = header.decode("ascii")
                assert client_headers.get(header) == values
                assert headers.last_headers.get(header) == values


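# generic_options passes raw gRPC channel arguments straight through.
# Both cases below pick values that deliberately break the connection:
# "grpc.ssl_target_name_override" defeats TLS hostname verification, and
# a 32-byte "grpc.max_receive_message_length" is too small for any reply.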
@pytest.mark.requires_testing_data
def test_generic_options():
    """Test setting generic client options."""
    certs = example_tls_certs()

    with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
        # Try setting a string argument that will make requests fail
        options = [("grpc.ssl_target_name_override", "fakehostname")]
        client = flight.connect(('localhost', s.port),
                                tls_root_certs=certs["root_cert"],
                                generic_options=options)
        with pytest.raises(flight.FlightUnavailableError):
            client.do_get(flight.Ticket(b'ints'))
        client.close()
        # Try setting an int argument that will make requests fail
        options = [("grpc.max_receive_message_length", 32)]
        client = flight.connect(('localhost', s.port),
                                tls_root_certs=certs["root_cert"],
                                generic_options=options)
        with pytest.raises((pa.ArrowInvalid, flight.FlightCancelledError)):
            client.do_get(flight.Ticket(b'ints'))
        client.close()


class CancelFlightServer(FlightServerBase):
    """A server for testing StopToken."""

    def do_get(self, context, ticket):
        schema = pa.schema([])
        rb = pa.RecordBatch.from_arrays([], schema=schema)
        return flight.GeneratorStream(schema, itertools.repeat(rb))

    def do_exchange(self, context, descriptor, reader, writer):
        schema = pa.schema([])
        rb = pa.RecordBatch.from_arrays([], schema=schema)
        writer.begin(schema)
        while not context.is_cancelled():
            writer.write_batch(rb)
            time.sleep(0.5)


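# Python only runs signal handlers on the main thread, hence the skip
# below. A helper thread raises SIGINT while the main thread is blocked
# in read_all(); depending on where the interrupt lands, either
# KeyboardInterrupt or pa.ArrowCancelled (possibly chained via
# __context__) is acceptable.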
def test_interrupt():
    if threading.current_thread().ident != threading.main_thread().ident:
        pytest.skip("test only works from main Python thread")
    # Skips test if not available
    raise_signal = util.get_raise_signal()

    def signal_from_thread():
        time.sleep(0.5)
        raise_signal(signal.SIGINT)

    exc_types = (KeyboardInterrupt, pa.ArrowCancelled)

    def test(read_all):
        try:
            try:
                t = threading.Thread(target=signal_from_thread)
                with pytest.raises(exc_types) as exc_info:
                    t.start()
                    read_all()
            finally:
                t.join()
        except KeyboardInterrupt:
            # In case KeyboardInterrupt didn't interrupt read_all
            # above, at least prevent it from stopping the test suite
            pytest.fail("KeyboardInterrupt didn't interrupt Flight read_all")
        # __context__ is sometimes None
        e = exc_info.value
        assert isinstance(e, (pa.ArrowCancelled, KeyboardInterrupt)) or \
            isinstance(e.__context__, (pa.ArrowCancelled, KeyboardInterrupt))

    with CancelFlightServer() as server, \
            FlightClient(("localhost", server.port)) as client:

        reader = client.do_get(flight.Ticket(b""))
        test(reader.read_all)

        descriptor = flight.FlightDescriptor.for_command(b"echo")
        writer, reader = client.do_exchange(descriptor)
        test(reader.read_all)
        try:
            writer.close()
        except (KeyboardInterrupt, flight.FlightCancelledError):
            # Silence the Cancelled/Interrupt exception
            pass


def test_never_sends_data():
    # Regression test for ARROW-12779
    match = "application server implementation error"
    with NeverSendsDataFlightServer() as server, \
            flight.connect(('localhost', server.port)) as client:
        with pytest.raises(flight.FlightServerError, match=match):
            client.do_get(flight.Ticket(b'')).read_all()

        # Check that the server handler will ignore empty tables
        # up to a certain extent
        table = client.do_get(flight.Ticket(b'yield_data')).read_all()
        assert table.num_rows == 5


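# The two large-memory tests below build payloads just over 2**31 bytes;
# a single Protobuf message cannot be that large, so the descriptor and
# app_metadata paths must fail with a clean error instead of crashing.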
@pytest.mark.large_memory
@pytest.mark.slow
def test_large_descriptor():
    # Regression test for ARROW-13253. Placed here with appropriate marks
    # since some CI pipelines can't run the C++ equivalent
    large_descriptor = flight.FlightDescriptor.for_command(
        b' ' * (2 ** 31 + 1))
    with FlightServerBase() as server, \
            flight.connect(('localhost', server.port)) as client:
        with pytest.raises(OSError,
                           match="Failed to serialize Flight descriptor"):
            writer, _ = client.do_put(large_descriptor, pa.schema([]))
            writer.close()
        with pytest.raises(pa.ArrowException,
                           match="Failed to serialize Flight descriptor"):
            client.do_exchange(large_descriptor)


@pytest.mark.large_memory
@pytest.mark.slow
def test_large_metadata_client():
    # Regression test for ARROW-13253
    descriptor = flight.FlightDescriptor.for_command(b'')
    metadata = b' ' * (2 ** 31 + 1)
    with EchoFlightServer() as server, \
            flight.connect(('localhost', server.port)) as client:
        with pytest.raises(pa.ArrowCapacityError,
                           match="app_metadata size overflow"):
            writer, _ = client.do_put(descriptor, pa.schema([]))
            with writer:
                writer.write_metadata(metadata)
                writer.close()
        with pytest.raises(pa.ArrowCapacityError,
                           match="app_metadata size overflow"):
            writer, reader = client.do_exchange(descriptor)
            with writer:
                writer.write_metadata(metadata)

    del metadata
    with LargeMetadataFlightServer() as server, \
            flight.connect(('localhost', server.port)) as client:
        with pytest.raises(flight.FlightServerError,
                           match="app_metadata size overflow"):
            reader = client.do_get(flight.Ticket(b''))
            reader.read_all()
        with pytest.raises(pa.ArrowException,
                           match="app_metadata size overflow"):
            writer, reader = client.do_exchange(descriptor)
            with writer:
                reader.read_all()


class ActionNoneFlightServer(EchoFlightServer):
    """A server that implements a side effect to a non-iterable action."""
    VALUES = []

    def do_action(self, context, action):
        if action.type == "get_value":
            return [json.dumps(self.VALUES).encode('utf-8')]
        elif action.type == "append":
            self.VALUES.append(True)
            return None
        raise NotImplementedError


def test_none_action_side_effect():
    """Ensure that actions are executed even when we don't consume the iterator.

    See https://issues.apache.org/jira/browse/ARROW-14255
    """

    with ActionNoneFlightServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        client.do_action(flight.Action("append", b""))
        r = client.do_action(flight.Action("get_value", b""))
        assert json.loads(next(r).body.to_pybytes()) == [True]


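# gRPC acknowledges writes asynchronously, so a failed DoPut/DoExchange
# write may not raise until a later write or close(); the next test spins
# in a write loop until the cancellation surfaces, then checks that
# extra_info survived the round trip.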
@pytest.mark.slow # Takes a while for gRPC to "realize" writes fail
def test_write_error_propagation():
    """
    Ensure that exceptions during writing preserve error context.

    See https://issues.apache.org/jira/browse/ARROW-16592.
    """
    expected_message = "foo"
    expected_info = b"bar"
    exc = flight.FlightCancelledError(
        expected_message, extra_info=expected_info)
    descriptor = flight.FlightDescriptor.for_command(b"")
    schema = pa.schema([("int64", pa.int64())])

    class FailServer(flight.FlightServerBase):
        def do_put(self, context, descriptor, reader, writer):
            raise exc

        def do_exchange(self, context, descriptor, reader, writer):
            raise exc

    with FailServer() as server, \
            FlightClient(('localhost', server.port)) as client:
        # DoPut
        writer, reader = client.do_put(descriptor, schema)

        # Set up a concurrent reader - ensure this doesn't block the
        # writer side from calling Close()
        def _reader():
            try:
                while True:
                    reader.read()
            except flight.FlightError:
                return

        thread = threading.Thread(target=_reader, daemon=True)
        thread.start()

        with pytest.raises(flight.FlightCancelledError) as exc_info:
            while True:
                writer.write_batch(pa.record_batch([[1]], schema=schema))
        assert exc_info.value.extra_info == expected_info

        with pytest.raises(flight.FlightCancelledError) as exc_info:
            writer.close()
        assert exc_info.value.extra_info == expected_info
        thread.join()

        # DoExchange
        writer, reader = client.do_exchange(descriptor)

        def _reader():
            try:
                while True:
                    reader.read_chunk()
            except flight.FlightError:
                return

        thread = threading.Thread(target=_reader, daemon=True)
        thread.start()
        with pytest.raises(flight.FlightCancelledError) as exc_info:
            while True:
                writer.write_metadata(b" ")
        assert exc_info.value.extra_info == expected_info

        with pytest.raises(flight.FlightCancelledError) as exc_info:
            writer.close()
        assert exc_info.value.extra_info == expected_info
        thread.join()


def test_interpreter_shutdown():
    """
    Ensure that the gRPC server is stopped at interpreter shutdown.

    See https://issues.apache.org/jira/browse/ARROW-16597.
    """
    util.invoke_script("arrow_16597.py")


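# The tracing middleware below exposes whatever W3C trace context the
# client attached (traceparent/tracestate headers); the server simply
# echoes the key/value pairs back as action results.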
class TracingFlightServer(FlightServerBase):
    """A server that echoes back trace context values."""

    def do_action(self, context, action):
        trace_context = context.get_middleware("tracing").trace_context
        # Don't turn this method into a generator since then
        # trace_context will be evaluated after we've exited the scope
        # of the OTel span (and so the value we want won't be present)
        return ((f"{key}: {value}").encode("utf-8")
                for (key, value) in trace_context.items())


def test_tracing():
    with TracingFlightServer(middleware={
        "tracing": flight.TracingServerMiddlewareFactory(),
    }) as server, \
            FlightClient(('localhost', server.port)) as client:
        # We can't tell if Arrow was built with OpenTelemetry support,
        # so we can't count on any particular values being there; we
        # can only ensure things don't blow up either way.
        options = flight.FlightCallOptions(headers=[
            # Pretend we have an OTel implementation
            (b"traceparent", b"00-000ff00f00f0ff000f0f00ff0f00fff0-"
                             b"000f0000f0f00000-00"),
            (b"tracestate", b""),
        ])
        for value in client.do_action((b"", b""), options=options):
            pass


def test_do_put_does_not_crash_when_schema_is_none():
    client = FlightClient('grpc+tls://localhost:9643',
                          disable_server_verification=True)
    msg = ("Argument 'schema' has incorrect type "
           r"\(expected pyarrow.lib.Schema, got NoneType\)")
    with pytest.raises(TypeError, match=msg):
        client.do_put(flight.FlightDescriptor.for_command('foo'),
                      schema=None)


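# Note the asymmetry asserted below: gRPC metadata keys ending in "-bin"
# carry binary values, so although the server adds all four values as
# Python strings, the client observes bytes for the "-bin" keys and str
# for the plain ones.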
def test_headers_trailers():
    """Ensure that server-sent headers/trailers make it through."""

    class HeadersTrailersFlightServer(FlightServerBase):
        def get_flight_info(self, context, descriptor):
            context.add_header("x-header", "header-value")
            context.add_header("x-header-bin", "header\x01value")
            context.add_trailer("x-trailer", "trailer-value")
            context.add_trailer("x-trailer-bin", "trailer\x01value")
            return flight.FlightInfo(
                pa.schema([]),
                descriptor,
                [],
                -1, -1
            )

    class HeadersTrailersMiddlewareFactory(ClientMiddlewareFactory):
        def __init__(self):
            self.headers = []

        def start_call(self, info):
            return HeadersTrailersMiddleware(self)

    class HeadersTrailersMiddleware(ClientMiddleware):
        def __init__(self, factory):
            self.factory = factory

        def received_headers(self, headers):
            for key, values in headers.items():
                for value in values:
                    self.factory.headers.append((key, value))

    factory = HeadersTrailersMiddlewareFactory()
    with HeadersTrailersFlightServer() as server, \
            FlightClient(("localhost", server.port),
                         middleware=[factory]) as client:
        client.get_flight_info(flight.FlightDescriptor.for_path(""))
        assert ("x-header", "header-value") in factory.headers
        assert ("x-header-bin", b"header\x01value") in factory.headers
        assert ("x-trailer", "trailer-value") in factory.headers
        assert ("x-trailer-bin", b"trailer\x01value") in factory.headers
env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_flight_async.py
ADDED
@@ -0,0 +1,93 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import asyncio

import pytest

import pyarrow

flight = pytest.importorskip("pyarrow.flight")
pytestmark = pytest.mark.flight


class ExampleServer(flight.FlightServerBase):
    simple_info = flight.FlightInfo(
        pyarrow.schema([("a", "int32")]),
        flight.FlightDescriptor.for_command(b"simple"),
        [],
        -1,
        -1,
    )

    def get_flight_info(self, context, descriptor):
        if descriptor.command == b"simple":
            return self.simple_info
        elif descriptor.command == b"unknown":
            raise NotImplementedError("Unknown command")

        raise NotImplementedError("Unknown descriptor")


def async_or_skip(client):
    if not client.supports_async:
        # Use async error message as skip message
        with pytest.raises(NotImplementedError) as e:
            client.as_async()
        pytest.skip(str(e.value))


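# Module-scoped fixtures: a single server and blocking client are shared
# by all tests in this file, and the async client is derived from the
# blocking one (the whole module is skipped if the build lacks async
# support).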
@pytest.fixture(scope="module")
def flight_client():
    with ExampleServer() as server:
        with flight.connect(f"grpc://localhost:{server.port}") as client:
            yield client


@pytest.fixture(scope="module")
def async_client(flight_client):
    async_or_skip(flight_client)
    yield flight_client.as_async()


def test_async_support_property(flight_client):
    assert isinstance(flight_client.supports_async, bool)
    if flight_client.supports_async:
        flight_client.as_async()
    else:
        with pytest.raises(NotImplementedError):
            flight_client.as_async()


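# The async tests keep pytest itself synchronous: each one wraps its
# coroutine in asyncio.run() instead of relying on an async test plugin
# such as pytest-asyncio.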
def test_get_flight_info(async_client):
    async def _test():
        descriptor = flight.FlightDescriptor.for_command(b"simple")
        info = await async_client.get_flight_info(descriptor)
        assert info == ExampleServer.simple_info

    asyncio.run(_test())


def test_get_flight_info_error(async_client):
    async def _test():
        descriptor = flight.FlightDescriptor.for_command(b"unknown")
        with pytest.raises(NotImplementedError) as excinfo:
            await async_client.get_flight_info(descriptor)

        assert "Unknown command" in repr(excinfo.value)

    asyncio.run(_test())