Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes.
- llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/_compute_docstrings.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/_generated_version.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/jvm.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/orc.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/pandas_threaded_import.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_acero.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_convert_builtin.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cpp_internals.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cuda_numba_interop.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cython.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dataset.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dlpack.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_memory.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_pandas.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_scalars.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_sparse_tensor.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_strategies.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_substrait.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_table.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_util.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/arrow_16597.py +37 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/arrow_39313.py +47 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/arrow_7980.py +30 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/bound_function_visit_strings.pyx +67 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/conftest.py +312 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.emptyFile.orc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.test1.orc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/data/orc/decimal.orc +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/extensions.pyx +94 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/pandas_examples.py +172 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/pandas_threaded_import.py +44 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/pyarrow_cython_example.pyx +61 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/read_record_batch.py +25 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/strategies.py +457 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_acero.py +413 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_adhoc_memory_leak.py +43 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_builder.py +86 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_cffi.py +707 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_compute.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_convert_builtin.py +2536 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_cpp_internals.py +50 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_csv.py +2018 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_cuda.py +794 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_cuda_numba_interop.py +235 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_cython.py +200 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_dataset.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_dataset_encryption.py +217 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_deprecations.py +23 -0
- llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_dlpack.py +142 -0
llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/_compute_docstrings.cpython-310.pyc
ADDED
Binary file (1.08 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/_generated_version.cpython-310.pyc
ADDED
Binary file (514 Bytes)

llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/jvm.cpython-310.pyc
ADDED
Binary file (8.08 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/__pycache__/orc.cpython-310.pyc
ADDED
Binary file (11.6 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__init__.py
ADDED
Empty file

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/pandas_threaded_import.cpython-310.pyc
ADDED
Binary file (862 Bytes)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_acero.cpython-310.pyc
ADDED
Binary file (9.5 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_convert_builtin.cpython-310.pyc
ADDED
Binary file (71.4 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cpp_internals.cpython-310.pyc
ADDED
Binary file (1.12 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cuda_numba_interop.cpython-310.pyc
ADDED
Binary file (6.08 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cython.cpython-310.pyc
ADDED
Binary file (5.09 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dataset.cpython-310.pyc
ADDED
Binary file (141 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dlpack.cpython-310.pyc
ADDED
Binary file (3.71 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_memory.cpython-310.pyc
ADDED
Binary file (7.51 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_pandas.cpython-310.pyc
ADDED
Binary file (146 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_scalars.cpython-310.pyc
ADDED
Binary file (23.7 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_sparse_tensor.cpython-310.pyc
ADDED
Binary file (11 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_strategies.cpython-310.pyc
ADDED
Binary file (1.67 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_substrait.cpython-310.pyc
ADDED
Binary file (28.1 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_table.cpython-310.pyc
ADDED
Binary file (77 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_util.cpython-310.pyc
ADDED
Binary file (4.43 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/arrow_16597.py
ADDED
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# This file is called from a test in test_flight.py.
+import time
+
+import pyarrow as pa
+import pyarrow.flight as flight
+
+
+class Server(flight.FlightServerBase):
+    def do_put(self, context, descriptor, reader, writer):
+        time.sleep(1)
+        raise flight.FlightCancelledError("")
+
+
+if __name__ == "__main__":
+    server = Server("grpc://localhost:0")
+    client = flight.connect(f"grpc://localhost:{server.port}")
+    schema = pa.schema([])
+    writer, reader = client.do_put(
+        flight.FlightDescriptor.for_command(b""), schema)
+    writer.done_writing()

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/arrow_39313.py
ADDED
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# This file is called from a test in test_pandas.py.
+
+from threading import Thread
+
+import pandas as pd
+from pyarrow.pandas_compat import _pandas_api
+
+if __name__ == "__main__":
+    wait = True
+    num_threads = 10
+    df = pd.DataFrame()
+    results = []
+
+    def rc():
+        while wait:
+            pass
+        results.append(_pandas_api.is_data_frame(df))
+
+    threads = [Thread(target=rc) for _ in range(num_threads)]
+
+    for t in threads:
+        t.start()
+
+    wait = False
+
+    for t in threads:
+        t.join()
+
+    assert len(results) == num_threads
+    assert all(results), "`is_data_frame` returned False when given a DataFrame"

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/arrow_7980.py
ADDED
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# This file is called from a test in test_schema.py.
+
+import pyarrow as pa
+
+
+# the types where to_pandas_dtype returns a non-numpy dtype
+cases = [
+    (pa.timestamp('ns', tz='UTC'), "datetime64[ns, UTC]"),
+]
+
+
+for arrow_type, pandas_type in cases:
+    assert str(arrow_type.to_pandas_dtype()) == pandas_type

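For contrast with the non-numpy case above, most Arrow types map to plain numpy dtypes; a minimal sketch, assuming pyarrow and numpy are installed:

    import numpy as np
    import pyarrow as pa

    # The common case: to_pandas_dtype returns an ordinary numpy type.
    assert pa.int64().to_pandas_dtype() == np.int64
    assert pa.float32().to_pandas_dtype() == np.float32
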
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/bound_function_visit_strings.pyx
ADDED
@@ -0,0 +1,67 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language=c++
+# cython: language_level = 3
+
+from pyarrow.lib cimport *
+from pyarrow.lib import frombytes, tobytes
+
+# basic test to roundtrip through a BoundFunction
+
+ctypedef CStatus visit_string_cb(const c_string&)
+
+cdef extern from * namespace "arrow::py" nogil:
+    """
+    #include <functional>
+    #include <string>
+    #include <vector>
+
+    #include "arrow/status.h"
+
+    namespace arrow {
+    namespace py {
+
+    Status VisitStrings(const std::vector<std::string>& strs,
+                        std::function<Status(const std::string&)> cb) {
+      for (const std::string& str : strs) {
+        RETURN_NOT_OK(cb(str));
+      }
+      return Status::OK();
+    }
+
+    }  // namespace py
+    }  // namespace arrow
+    """
+    cdef CStatus CVisitStrings" arrow::py::VisitStrings"(
+        vector[c_string], function[visit_string_cb])
+
+
+cdef void _visit_strings_impl(py_cb, const c_string& s) except *:
+    py_cb(frombytes(s))
+
+
+def _visit_strings(strings, cb):
+    cdef:
+        function[visit_string_cb] c_cb
+        vector[c_string] c_strings
+
+    c_cb = BindFunction[visit_string_cb](&_visit_strings_impl, cb)
+    for s in strings:
+        c_strings.push_back(tobytes(s))
+
+    check_status(CVisitStrings(c_strings, c_cb))

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/conftest.py
ADDED
@@ -0,0 +1,312 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import functools
+import os
+import pathlib
+import subprocess
+import sys
+import time
+import urllib.request
+
+import pytest
+import hypothesis as h
+from ..conftest import groups, defaults
+
+from pyarrow import set_timezone_db_path
+from pyarrow.util import find_free_port
+
+
+# setup hypothesis profiles
+h.settings.register_profile('ci', max_examples=1000)
+h.settings.register_profile('dev', max_examples=50)
+h.settings.register_profile('debug', max_examples=10,
+                            verbosity=h.Verbosity.verbose)
+
+# load default hypothesis profile, either set HYPOTHESIS_PROFILE environment
+# variable or pass --hypothesis-profile option to pytest, to see the generated
+# examples try:
+# pytest pyarrow -sv --enable-hypothesis --hypothesis-profile=debug
+h.settings.load_profile(os.environ.get('HYPOTHESIS_PROFILE', 'dev'))
+
+# Set this at the beginning before the AWS SDK was loaded to avoid reading in
+# user configuration values.
+os.environ['AWS_CONFIG_FILE'] = "/dev/null"
+
+
+if sys.platform == 'win32':
+    tzdata_set_path = os.environ.get('PYARROW_TZDATA_PATH', None)
+    if tzdata_set_path:
+        set_timezone_db_path(tzdata_set_path)
+
+
+def pytest_addoption(parser):
+    # Create options to selectively enable test groups
+    def bool_env(name, default=None):
+        value = os.environ.get(name.upper())
+        if not value:  # missing or empty
+            return default
+        value = value.lower()
+        if value in {'1', 'true', 'on', 'yes', 'y'}:
+            return True
+        elif value in {'0', 'false', 'off', 'no', 'n'}:
+            return False
+        else:
+            raise ValueError('{}={} is not parsable as boolean'
+                             .format(name.upper(), value))
+
+    for group in groups:
+        default = bool_env('PYARROW_TEST_{}'.format(group), defaults[group])
+        parser.addoption('--enable-{}'.format(group),
+                         action='store_true', default=default,
+                         help=('Enable the {} test group'.format(group)))
+        parser.addoption('--disable-{}'.format(group),
+                         action='store_true', default=False,
+                         help=('Disable the {} test group'.format(group)))
+
+
+class PyArrowConfig:
+    def __init__(self):
+        self.is_enabled = {}
+
+    def apply_mark(self, mark):
+        group = mark.name
+        if group in groups:
+            self.requires(group)
+
+    def requires(self, group):
+        if not self.is_enabled[group]:
+            pytest.skip('{} NOT enabled'.format(group))
+
+
+def pytest_configure(config):
+    # Apply command-line options to initialize PyArrow-specific config object
+    config.pyarrow = PyArrowConfig()
+
+    for mark in groups:
+        config.addinivalue_line(
+            "markers", mark,
+        )
+
+        enable_flag = '--enable-{}'.format(mark)
+        disable_flag = '--disable-{}'.format(mark)
+
+        is_enabled = (config.getoption(enable_flag) and not
+                      config.getoption(disable_flag))
+        config.pyarrow.is_enabled[mark] = is_enabled
+
+
+def pytest_runtest_setup(item):
+    # Apply test markers to skip tests selectively
+    for mark in item.iter_markers():
+        item.config.pyarrow.apply_mark(mark)
+
+
+@pytest.fixture
+def tempdir(tmpdir):
+    # convert pytest's LocalPath to pathlib.Path
+    return pathlib.Path(tmpdir.strpath)
+
+
+@pytest.fixture(scope='session')
+def base_datadir():
+    return pathlib.Path(__file__).parent / 'data'
+
+
+@pytest.fixture(autouse=True)
+def disable_aws_metadata(monkeypatch):
+    """Stop the AWS SDK from trying to contact the EC2 metadata server.
+
+    Otherwise, this causes a 5 second delay in tests that exercise the
+    S3 filesystem.
+    """
+    monkeypatch.setenv("AWS_EC2_METADATA_DISABLED", "true")
+
+
+# TODO(kszucs): move the following fixtures to test_fs.py once the previous
+# parquet dataset implementation and hdfs implementation are removed.
+
+@pytest.fixture(scope='session')
+def hdfs_connection():
+    host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
+    port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
+    user = os.environ.get('ARROW_HDFS_TEST_USER', 'hdfs')
+    return host, port, user
+
+
+@pytest.fixture(scope='session')
+def s3_connection():
+    host, port = 'localhost', find_free_port()
+    access_key, secret_key = 'arrow', 'apachearrow'
+    return host, port, access_key, secret_key
+
+
+def retry(attempts=3, delay=1.0, max_delay=None, backoff=1):
+    """
+    Retry decorator
+
+    Parameters
+    ----------
+    attempts : int, default 3
+        The number of attempts.
+    delay : float, default 1
+        Initial delay in seconds.
+    max_delay : float, optional
+        The max delay between attempts.
+    backoff : float, default 1
+        The multiplier to delay after each attempt.
+    """
+    def decorate(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            remaining_attempts = attempts
+            curr_delay = delay
+            while remaining_attempts > 0:
+                try:
+                    return func(*args, **kwargs)
+                except Exception as err:
+                    remaining_attempts -= 1
+                    last_exception = err
+                    curr_delay *= backoff
+                    if max_delay:
+                        curr_delay = min(curr_delay, max_delay)
+                    time.sleep(curr_delay)
+            raise last_exception
+        return wrapper
+    return decorate
+
+
+@pytest.fixture(scope='session')
+def s3_server(s3_connection, tmpdir_factory):
+    @retry(attempts=5, delay=0.1, backoff=2)
+    def minio_server_health_check(address):
+        resp = urllib.request.urlopen(f"http://{address}/minio/health/cluster")
+        assert resp.getcode() == 200
+
+    tmpdir = tmpdir_factory.getbasetemp()
+    host, port, access_key, secret_key = s3_connection
+
+    address = '{}:{}'.format(host, port)
+    env = os.environ.copy()
+    env.update({
+        'MINIO_ACCESS_KEY': access_key,
+        'MINIO_SECRET_KEY': secret_key
+    })
+
+    args = ['minio', '--compat', 'server', '--quiet', '--address',
+            address, tmpdir]
+    proc = None
+    try:
+        proc = subprocess.Popen(args, env=env)
+    except OSError:
+        pytest.skip('`minio` command cannot be located')
+    else:
+        # Wait for the server to startup before yielding
+        minio_server_health_check(address)
+
+        yield {
+            'connection': s3_connection,
+            'process': proc,
+            'tempdir': tmpdir
+        }
+    finally:
+        if proc is not None:
+            proc.kill()
+            proc.wait()
+
+
+@pytest.fixture(scope='session')
+def gcs_server():
+    port = find_free_port()
+    env = os.environ.copy()
+    args = [sys.executable, '-m', 'testbench', '--port', str(port)]
+    proc = None
+    try:
+        # check first if testbench module is available
+        import testbench  # noqa:F401
+        # start server
+        proc = subprocess.Popen(args, env=env)
+        # Make sure the server is alive.
+        if proc.poll() is not None:
+            pytest.skip(f"Command {args} did not start server successfully!")
+    except (ModuleNotFoundError, OSError) as e:
+        pytest.skip(f"Command {args} failed to execute: {e}")
+    else:
+        yield {
+            'connection': ('localhost', port),
+            'process': proc,
+        }
+    finally:
+        if proc is not None:
+            proc.kill()
+            proc.wait()
+
+
+@pytest.fixture(scope='session')
+def azure_server(tmpdir_factory):
+    port = find_free_port()
+    env = os.environ.copy()
+    tmpdir = tmpdir_factory.getbasetemp()
+    # We only need blob service emulator, not queue or table.
+    args = ['azurite-blob', "--location", tmpdir, "--blobPort", str(port)]
+    proc = None
+    try:
+        proc = subprocess.Popen(args, env=env)
+        # Make sure the server is alive.
+        if proc.poll() is not None:
+            pytest.skip(f"Command {args} did not start server successfully!")
+    except (ModuleNotFoundError, OSError) as e:
+        pytest.skip(f"Command {args} failed to execute: {e}")
+    else:
+        yield {
+            # Use the standard azurite account_name and account_key.
+            # https://learn.microsoft.com/en-us/azure/storage/common/storage-use-emulator#authorize-with-shared-key-credentials
+            'connection': ('127.0.0.1', port, 'devstoreaccount1',
+                           'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2'
+                           'UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='),
+            'process': proc,
+            'tempdir': tmpdir,
+        }
+    finally:
+        if proc is not None:
+            proc.kill()
+            proc.wait()
+
+
+@pytest.fixture(
+    params=[
+        'builtin_pickle',
+        'cloudpickle'
+    ],
+    scope='session'
+)
+def pickle_module(request):
+    return request.getfixturevalue(request.param)
+
+
+@pytest.fixture(scope='session')
+def builtin_pickle():
+    import pickle
+    return pickle
+
+
+@pytest.fixture(scope='session')
+def cloudpickle():
+    cp = pytest.importorskip('cloudpickle')
+    if 'HIGHEST_PROTOCOL' not in cp.__dict__:
+        cp.HIGHEST_PROTOCOL = cp.DEFAULT_PROTOCOL
+    return cp

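A minimal usage sketch of the `retry` decorator defined in the conftest.py above. The `flaky_fetch` helper is hypothetical; note that because the delay is multiplied by `backoff` before the first sleep, the waits here grow as 0.2s, 0.4s, 0.8s, and so on:

    import urllib.request

    @retry(attempts=5, delay=0.1, backoff=2)
    def flaky_fetch(url):
        # Re-invoked up to 5 times; the last exception is re-raised
        # if every attempt fails.
        return urllib.request.urlopen(url).getcode()
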
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.emptyFile.orc
ADDED
Binary file (523 Bytes)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.test1.orc
ADDED
Binary file (1.71 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/data/orc/decimal.orc
ADDED
Binary file (16.3 kB)

llmeval-env/lib/python3.10/site-packages/pyarrow/tests/extensions.pyx
ADDED
@@ -0,0 +1,94 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language=c++
+# cython: language_level = 3
+
+from pyarrow.lib cimport *
+
+cdef extern from * namespace "arrow::py" nogil:
+    """
+    #include "arrow/status.h"
+    #include "arrow/extension_type.h"
+    #include "arrow/ipc/json_simple.h"
+
+    namespace arrow {
+    namespace py {
+
+    class UuidArray : public ExtensionArray {
+     public:
+      using ExtensionArray::ExtensionArray;
+    };
+
+    class UuidType : public ExtensionType {
+     public:
+      UuidType() : ExtensionType(fixed_size_binary(16)) {}
+      std::string extension_name() const override { return "uuid"; }
+
+      bool ExtensionEquals(const ExtensionType& other) const override {
+        return other.extension_name() == this->extension_name();
+      }
+
+      std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override {
+        return std::make_shared<ExtensionArray>(data);
+      }
+
+      Result<std::shared_ptr<DataType>> Deserialize(
+          std::shared_ptr<DataType> storage_type,
+          const std::string& serialized) const override {
+        return std::make_shared<UuidType>();
+      }
+
+      std::string Serialize() const override { return ""; }
+    };
+
+
+    std::shared_ptr<DataType> MakeUuidType() {
+      return std::make_shared<UuidType>();
+    }
+
+    std::shared_ptr<Array> MakeUuidArray() {
+      auto uuid_type = MakeUuidType();
+      auto json = "[\\"abcdefghijklmno0\\", \\"0onmlkjihgfedcba\\"]";
+      auto result = ipc::internal::json::ArrayFromJSON(fixed_size_binary(16), json);
+      return ExtensionType::WrapArray(uuid_type, result.ValueOrDie());
+    }
+
+    std::once_flag uuid_registered;
+
+    static bool RegisterUuidType() {
+      std::call_once(uuid_registered, RegisterExtensionType,
+                     std::make_shared<UuidType>());
+      return true;
+    }
+
+    static auto uuid_type_registered = RegisterUuidType();
+
+    }  // namespace py
+    }  // namespace arrow
+    """
+
+    cdef shared_ptr[CDataType] CMakeUuidType" arrow::py::MakeUuidType"()
+    cdef shared_ptr[CArray] CMakeUuidArray" arrow::py::MakeUuidArray"()
+
+
+def _make_uuid_type():
+    return pyarrow_wrap_data_type(CMakeUuidType())
+
+
+def _make_uuid_array():
+    return pyarrow_wrap_array(CMakeUuidArray())

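Once this .pyx module is compiled (the test suite builds it on the fly with Cython), the two wrapper functions can be exercised from Python. A sketch, assuming the compiled module is importable as `extensions`:

    import extensions

    ty = extensions._make_uuid_type()    # extension type over fixed_size_binary(16)
    arr = extensions._make_uuid_array()  # two 16-byte values wrapped in that type
    assert arr.type == ty                # UuidType compares by extension name
    assert len(arr) == 2
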
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/pandas_examples.py
ADDED
@@ -0,0 +1,172 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from collections import OrderedDict
+from datetime import date, time
+
+import numpy as np
+import pandas as pd
+import pyarrow as pa
+
+
+def dataframe_with_arrays(include_index=False):
+    """
+    Dataframe with numpy arrays columns of every possible primitive type.
+
+    Returns
+    -------
+    df: pandas.DataFrame
+    schema: pyarrow.Schema
+        Arrow schema definition that is in line with the constructed df.
+    """
+    dtypes = [('i1', pa.int8()), ('i2', pa.int16()),
+              ('i4', pa.int32()), ('i8', pa.int64()),
+              ('u1', pa.uint8()), ('u2', pa.uint16()),
+              ('u4', pa.uint32()), ('u8', pa.uint64()),
+              ('f4', pa.float32()), ('f8', pa.float64())]
+
+    arrays = OrderedDict()
+    fields = []
+    for dtype, arrow_dtype in dtypes:
+        fields.append(pa.field(dtype, pa.list_(arrow_dtype)))
+        arrays[dtype] = [
+            np.arange(10, dtype=dtype),
+            np.arange(5, dtype=dtype),
+            None,
+            np.arange(1, dtype=dtype)
+        ]
+
+    fields.append(pa.field('str', pa.list_(pa.string())))
+    arrays['str'] = [
+        np.array(["1", "ä"], dtype="object"),
+        None,
+        np.array(["1"], dtype="object"),
+        np.array(["1", "2", "3"], dtype="object")
+    ]
+
+    fields.append(pa.field('datetime64', pa.list_(pa.timestamp('ms'))))
+    arrays['datetime64'] = [
+        np.array(['2007-07-13T01:23:34.123456789',
+                  None,
+                  '2010-08-13T05:46:57.437699912'],
+                 dtype='datetime64[ms]'),
+        None,
+        None,
+        np.array(['2007-07-13T02',
+                  None,
+                  '2010-08-13T05:46:57.437699912'],
+                 dtype='datetime64[ms]'),
+    ]
+
+    if include_index:
+        fields.append(pa.field('__index_level_0__', pa.int64()))
+    df = pd.DataFrame(arrays)
+    schema = pa.schema(fields)
+
+    return df, schema
+
+
+def dataframe_with_lists(include_index=False, parquet_compatible=False):
+    """
+    Dataframe with list columns of every possible primitive type.
+
+    Returns
+    -------
+    df: pandas.DataFrame
+    schema: pyarrow.Schema
+        Arrow schema definition that is in line with the constructed df.
+    parquet_compatible: bool
+        Exclude types not supported by parquet
+    """
+    arrays = OrderedDict()
+    fields = []
+
+    fields.append(pa.field('int64', pa.list_(pa.int64())))
+    arrays['int64'] = [
+        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
+        [0, 1, 2, 3, 4],
+        None,
+        [],
+        np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9] * 2,
+                 dtype=np.int64)[::2]
+    ]
+    fields.append(pa.field('double', pa.list_(pa.float64())))
+    arrays['double'] = [
+        [0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
+        [0., 1., 2., 3., 4.],
+        None,
+        [],
+        np.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.] * 2)[::2],
+    ]
+    fields.append(pa.field('bytes_list', pa.list_(pa.binary())))
+    arrays['bytes_list'] = [
+        [b"1", b"f"],
+        None,
+        [b"1"],
+        [b"1", b"2", b"3"],
+        [],
+    ]
+    fields.append(pa.field('str_list', pa.list_(pa.string())))
+    arrays['str_list'] = [
+        ["1", "ä"],
+        None,
+        ["1"],
+        ["1", "2", "3"],
+        [],
+    ]
+
+    date_data = [
+        [],
+        [date(2018, 1, 1), date(2032, 12, 30)],
+        [date(2000, 6, 7)],
+        None,
+        [date(1969, 6, 9), date(1972, 7, 3)]
+    ]
+    time_data = [
+        [time(23, 11, 11), time(1, 2, 3), time(23, 59, 59)],
+        [],
+        [time(22, 5, 59)],
+        None,
+        [time(0, 0, 0), time(18, 0, 2), time(12, 7, 3)]
+    ]
+
+    temporal_pairs = [
+        (pa.date32(), date_data),
+        (pa.date64(), date_data),
+        (pa.time32('s'), time_data),
+        (pa.time32('ms'), time_data),
+        (pa.time64('us'), time_data)
+    ]
+    if not parquet_compatible:
+        temporal_pairs += [
+            (pa.time64('ns'), time_data),
+        ]
+
+    for value_type, data in temporal_pairs:
+        field_name = '{}_list'.format(value_type)
+        field_type = pa.list_(value_type)
+        field = pa.field(field_name, field_type)
+        fields.append(field)
+        arrays[field_name] = data
+
+    if include_index:
+        fields.append(pa.field('__index_level_0__', pa.int64()))
+
+    df = pd.DataFrame(arrays)
+    schema = pa.schema(fields)
+
+    return df, schema

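A sketch of how these helpers are typically consumed in the pandas conversion tests: build the frame, convert with the matching schema, and round-trip. This is illustrative usage, not a fragment of the test suite itself:

    import pyarrow as pa
    from pyarrow.tests.pandas_examples import dataframe_with_lists

    df, schema = dataframe_with_lists()
    # Convert using the schema returned alongside the frame.
    table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
    assert table.schema.equals(schema)
    roundtripped = table.to_pandas()
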
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/pandas_threaded_import.py
ADDED
@@ -0,0 +1,44 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# This file is called from a test in test_pandas.py.
+
+from concurrent.futures import ThreadPoolExecutor
+import faulthandler
+import sys
+
+import pyarrow as pa
+
+num_threads = 60
+timeout = 10  # seconds
+
+
+def thread_func(i):
+    pa.array([i]).to_pandas()
+
+
+def main():
+    # In case of import deadlock, crash after a finite timeout
+    faulthandler.dump_traceback_later(timeout, exit=True)
+    with ThreadPoolExecutor(num_threads) as pool:
+        assert "pandas" not in sys.modules  # pandas is imported lazily
+        list(pool.map(thread_func, range(num_threads)))
+    assert "pandas" in sys.modules
+
+
+if __name__ == "__main__":
+    main()

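Because the script asserts that pandas has not yet been imported, the calling test presumably runs it in a fresh interpreter. A hedged sketch of such an invocation:

    import subprocess
    import sys

    # A separate process guarantees a clean import state for `pandas`.
    subprocess.run([sys.executable, "pandas_threaded_import.py"], check=True)
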
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/pyarrow_cython_example.pyx
ADDED
@@ -0,0 +1,61 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# distutils: language=c++
+# cython: language_level = 3
+
+from pyarrow.lib cimport *
+
+
+def get_array_length(obj):
+    # An example function accessing both the pyarrow Cython API
+    # and the Arrow C++ API
+    cdef shared_ptr[CArray] arr = pyarrow_unwrap_array(obj)
+    if arr.get() == NULL:
+        raise TypeError("not an array")
+    return arr.get().length()
+
+
+def make_null_array(length):
+    # An example function that returns a PyArrow object without PyArrow
+    # being imported explicitly at the Python level.
+    cdef shared_ptr[CArray] null_array
+    null_array.reset(new CNullArray(length))
+    return pyarrow_wrap_array(null_array)
+
+
+def cast_scalar(scalar, to_type):
+    cdef:
+        shared_ptr[CScalar] c_scalar
+        shared_ptr[CDataType] c_type
+        CCastOptions cast_options
+        CDatum c_datum
+        CResult[CDatum] c_cast_result
+
+    c_scalar = pyarrow_unwrap_scalar(scalar)
+    if c_scalar.get() == NULL:
+        raise TypeError("not a scalar")
+    c_type = pyarrow_unwrap_data_type(to_type)
+    if c_type.get() == NULL:
+        raise TypeError("not a type")
+
+    c_datum = CDatum(c_scalar)
+    cast_options = CCastOptions()
+    cast_options.to_type = c_type
+    c_cast_result = Cast(c_datum, cast_options)
+    c_datum = GetResultValue(c_cast_result)
+    return pyarrow_wrap_scalar(c_datum.scalar())

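Once test_cython.py compiles this module, its functions behave as below; a sketch, assuming the built module is importable as `pyarrow_cython_example`:

    import pyarrow as pa
    import pyarrow_cython_example as ex

    assert ex.get_array_length(pa.array([1, 2, 3])) == 3
    assert ex.make_null_array(5).null_count == 5
    assert ex.cast_scalar(pa.scalar(1), pa.float64()).as_py() == 1.0
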
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/read_record_batch.py
ADDED
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# This file is called from a test in test_ipc.py.
+
+import sys
+
+import pyarrow as pa
+
+with open(sys.argv[1], 'rb') as f:
+    pa.ipc.open_file(f).read_all().to_pandas()

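For reference, a minimal sketch of producing an IPC file this script can read, using the stable pyarrow writer API (the file name is illustrative):

    import pyarrow as pa

    batch = pa.RecordBatch.from_pydict({"x": [1, 2, 3]})
    with pa.ipc.new_file("batch.arrow", batch.schema) as writer:
        writer.write_batch(batch)
    # then: python read_record_batch.py batch.arrow
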
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/strategies.py
ADDED
@@ -0,0 +1,457 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import sys
+
+import pytest
+import hypothesis as h
+import hypothesis.strategies as st
+import hypothesis.extra.numpy as npst
+try:
+    import hypothesis.extra.pytz as tzst
+except ImportError:
+    tzst = None
+try:
+    import zoneinfo
+except ImportError:
+    zoneinfo = None
+if sys.platform == 'win32':
+    try:
+        import tzdata  # noqa:F401
+    except ImportError:
+        zoneinfo = None
+import numpy as np
+
+import pyarrow as pa
+
+
+# TODO(kszucs): alphanum_text, surrogate_text
+custom_text = st.text(
+    alphabet=st.characters(
+        min_codepoint=0x41,
+        max_codepoint=0x7E
+    )
+)
+
+null_type = st.just(pa.null())
+bool_type = st.just(pa.bool_())
+
+binary_type = st.just(pa.binary())
+string_type = st.just(pa.string())
+large_binary_type = st.just(pa.large_binary())
+large_string_type = st.just(pa.large_string())
+fixed_size_binary_type = st.builds(
+    pa.binary,
+    st.integers(min_value=0, max_value=16)
+)
+binary_like_types = st.one_of(
+    binary_type,
+    string_type,
+    large_binary_type,
+    large_string_type,
+    fixed_size_binary_type
+)
+
+signed_integer_types = st.sampled_from([
+    pa.int8(),
+    pa.int16(),
+    pa.int32(),
+    pa.int64()
+])
+unsigned_integer_types = st.sampled_from([
+    pa.uint8(),
+    pa.uint16(),
+    pa.uint32(),
+    pa.uint64()
+])
+integer_types = st.one_of(signed_integer_types, unsigned_integer_types)
+
+floating_types = st.sampled_from([
+    pa.float16(),
+    pa.float32(),
+    pa.float64()
+])
+decimal128_type = st.builds(
+    pa.decimal128,
+    precision=st.integers(min_value=1, max_value=38),
+    scale=st.integers(min_value=1, max_value=38)
+)
+decimal256_type = st.builds(
+    pa.decimal256,
+    precision=st.integers(min_value=1, max_value=76),
+    scale=st.integers(min_value=1, max_value=76)
+)
+numeric_types = st.one_of(integer_types, floating_types,
+                          decimal128_type, decimal256_type)
+
+date_types = st.sampled_from([
+    pa.date32(),
+    pa.date64()
+])
+time_types = st.sampled_from([
+    pa.time32('s'),
+    pa.time32('ms'),
+    pa.time64('us'),
+    pa.time64('ns')
+])
+
+if tzst and zoneinfo:
+    timezones = st.one_of(st.none(), tzst.timezones(), st.timezones())
+elif tzst:
+    timezones = st.one_of(st.none(), tzst.timezones())
+elif zoneinfo:
+    timezones = st.one_of(st.none(), st.timezones())
+else:
+    timezones = st.none()
+timestamp_types = st.builds(
+    pa.timestamp,
+    unit=st.sampled_from(['s', 'ms', 'us', 'ns']),
+    tz=timezones
+)
+duration_types = st.builds(
+    pa.duration,
+    st.sampled_from(['s', 'ms', 'us', 'ns'])
+)
+interval_types = st.just(pa.month_day_nano_interval())
+temporal_types = st.one_of(
+    date_types,
+    time_types,
+    timestamp_types,
+    duration_types,
+    interval_types
+)
+
+primitive_types = st.one_of(
+    null_type,
+    bool_type,
+    numeric_types,
+    temporal_types,
+    binary_like_types
+)
+
+metadata = st.dictionaries(st.text(), st.text())
+
+
+@st.composite
+def fields(draw, type_strategy=primitive_types):
+    name = draw(custom_text)
+    typ = draw(type_strategy)
+    if pa.types.is_null(typ):
+        nullable = True
+    else:
+        nullable = draw(st.booleans())
+    meta = draw(metadata)
+    return pa.field(name, type=typ, nullable=nullable, metadata=meta)
+
+
+def list_types(item_strategy=primitive_types):
+    return (
+        st.builds(pa.list_, item_strategy) |
+        st.builds(pa.large_list, item_strategy) |
+        st.builds(
+            pa.list_,
+            item_strategy,
+            st.integers(min_value=0, max_value=16)
+        ) |
+        st.builds(pa.list_view, item_strategy) |
+        st.builds(pa.large_list_view, item_strategy)
+    )
+
+
+@st.composite
+def struct_types(draw, item_strategy=primitive_types):
+    fields_strategy = st.lists(fields(item_strategy))
+    fields_rendered = draw(fields_strategy)
+    field_names = [field.name for field in fields_rendered]
+    # check that field names are unique, see ARROW-9997
+    h.assume(len(set(field_names)) == len(field_names))
+    return pa.struct(fields_rendered)
+
+
+def dictionary_types(key_strategy=None, value_strategy=None):
+    if key_strategy is None:
+        key_strategy = signed_integer_types
+    if value_strategy is None:
+        value_strategy = st.one_of(
+            bool_type,
+            integer_types,
+            st.sampled_from([pa.float32(), pa.float64()]),
+            binary_type,
+            string_type,
+            fixed_size_binary_type,
+        )
+    return st.builds(pa.dictionary, key_strategy, value_strategy)
+
+
+@st.composite
+def map_types(draw, key_strategy=primitive_types,
+              item_strategy=primitive_types):
+    key_type = draw(key_strategy)
+    h.assume(not pa.types.is_null(key_type))
+    value_type = draw(item_strategy)
+    return pa.map_(key_type, value_type)
+
+
+# union type
+# extension type
+
+
+def schemas(type_strategy=primitive_types, max_fields=None):
+    children = st.lists(fields(type_strategy), max_size=max_fields)
+    return st.builds(pa.schema, children)
+
+
+all_types = st.deferred(
+    lambda: (
+        primitive_types |
+        list_types() |
+        struct_types() |
+        dictionary_types() |
+        map_types() |
+        list_types(all_types) |
+        struct_types(all_types)
+    )
+)
+all_fields = fields(all_types)
+all_schemas = schemas(all_types)
+
+
+_default_array_sizes = st.integers(min_value=0, max_value=20)
+
+
+@st.composite
+def _pylist(draw, value_type, size, nullable=True):
+    arr = draw(arrays(value_type, size=size, nullable=False))
+    return arr.to_pylist()
+
+
+@st.composite
+def _pymap(draw, key_type, value_type, size, nullable=True):
+    length = draw(size)
+    keys = draw(_pylist(key_type, size=length, nullable=False))
+    values = draw(_pylist(value_type, size=length, nullable=nullable))
+    return list(zip(keys, values))
+
+
+@st.composite
+def arrays(draw, type, size=None, nullable=True):
+    if isinstance(type, st.SearchStrategy):
+        ty = draw(type)
+    elif isinstance(type, pa.DataType):
+        ty = type
+    else:
+        raise TypeError('Type must be a pyarrow DataType')
+
+    if isinstance(size, st.SearchStrategy):
+        size = draw(size)
+    elif size is None:
+        size = draw(_default_array_sizes)
+    elif not isinstance(size, int):
+        raise TypeError('Size must be an integer')
+
+    if pa.types.is_null(ty):
+        h.assume(nullable)
+        value = st.none()
+    elif pa.types.is_boolean(ty):
+        value = st.booleans()
+    elif pa.types.is_integer(ty):
+        values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,)))
+        return pa.array(values, type=ty)
+    elif pa.types.is_floating(ty):
+        values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,)))
+        # Workaround ARROW-4952: no easy way to assert array equality
+        # in a NaN-tolerant way.
+        values[np.isnan(values)] = -42.0
+        return pa.array(values, type=ty)
+    elif pa.types.is_decimal(ty):
+        # TODO(kszucs): properly limit the precision
+        # value = st.decimals(places=type.scale, allow_infinity=False)
+        h.reject()
+    elif pa.types.is_time(ty):
+        value = st.times()
+    elif pa.types.is_date(ty):
+        value = st.dates()
+    elif pa.types.is_timestamp(ty):
+        if zoneinfo is None:
+            pytest.skip('no module named zoneinfo (or tzdata on Windows)')
+        if ty.tz is None:
+            pytest.skip('requires timezone not None')
+        min_int64 = -(2**63)
+        max_int64 = 2**63 - 1
+        min_datetime = datetime.datetime.fromtimestamp(
+            min_int64 // 10**9) + datetime.timedelta(hours=12)
+        max_datetime = datetime.datetime.fromtimestamp(
+            max_int64 // 10**9) - datetime.timedelta(hours=12)
+        try:
+            offset = ty.tz.split(":")
+            offset_hours = int(offset[0])
+            offset_min = int(offset[1])
+            tz = datetime.timedelta(hours=offset_hours, minutes=offset_min)
+        except ValueError:
+            tz = zoneinfo.ZoneInfo(ty.tz)
+        value = st.datetimes(timezones=st.just(tz), min_value=min_datetime,
+                             max_value=max_datetime)
+    elif pa.types.is_duration(ty):
+        value = st.timedeltas()
+    elif pa.types.is_interval(ty):
+        value = st.timedeltas()
+    elif pa.types.is_binary(ty) or pa.types.is_large_binary(ty):
+        value = st.binary()
+    elif pa.types.is_string(ty) or pa.types.is_large_string(ty):
+        value = st.text()
+    elif pa.types.is_fixed_size_binary(ty):
+        value = st.binary(min_size=ty.byte_width, max_size=ty.byte_width)
+    elif pa.types.is_list(ty):
+        value = _pylist(ty.value_type, size=size, nullable=nullable)
+    elif pa.types.is_large_list(ty):
+        value = _pylist(ty.value_type, size=size, nullable=nullable)
+    elif pa.types.is_fixed_size_list(ty):
+        value = _pylist(ty.value_type, size=ty.list_size, nullable=nullable)
+    elif pa.types.is_list_view(ty):
+        value = _pylist(ty.value_type, size=size, nullable=nullable)
+    elif pa.types.is_large_list_view(ty):
+        value = _pylist(ty.value_type, size=size, nullable=nullable)
+    elif pa.types.is_dictionary(ty):
+        values = _pylist(ty.value_type, size=size, nullable=nullable)
+        return pa.array(draw(values), type=ty)
+    elif pa.types.is_map(ty):
+        value = _pymap(ty.key_type, ty.item_type, size=_default_array_sizes,
+                       nullable=nullable)
+    elif pa.types.is_struct(ty):
+        h.assume(len(ty) > 0)
+        fields, child_arrays = [], []
+        for field in ty:
+            fields.append(field)
+            child_arrays.append(draw(arrays(field.type, size=size)))
+        return pa.StructArray.from_arrays(child_arrays, fields=fields)
+    else:
+        raise NotImplementedError(ty)
+
+    if nullable:
+        value = st.one_of(st.none(), value)
+    values = st.lists(value, min_size=size, max_size=size)
+
+    return pa.array(draw(values), type=ty)
+
+
+@st.composite
+def chunked_arrays(draw, type, min_chunks=0, max_chunks=None, chunk_size=None):
+    if isinstance(type, st.SearchStrategy):
+        type = draw(type)
+
+    # TODO(kszucs): remove it, field metadata is not kept
|
358 |
+
h.assume(not pa.types.is_struct(type))
|
359 |
+
|
360 |
+
chunk = arrays(type, size=chunk_size)
|
361 |
+
chunks = st.lists(chunk, min_size=min_chunks, max_size=max_chunks)
|
362 |
+
|
363 |
+
return pa.chunked_array(draw(chunks), type=type)
|
364 |
+
|
365 |
+
|
366 |
+
@st.composite
|
367 |
+
def record_batches(draw, type, rows=None, max_fields=None):
|
368 |
+
if isinstance(rows, st.SearchStrategy):
|
369 |
+
rows = draw(rows)
|
370 |
+
elif rows is None:
|
371 |
+
rows = draw(_default_array_sizes)
|
372 |
+
elif not isinstance(rows, int):
|
373 |
+
raise TypeError('Rows must be an integer')
|
374 |
+
|
375 |
+
schema = draw(schemas(type, max_fields=max_fields))
|
376 |
+
children = [draw(arrays(field.type, size=rows)) for field in schema]
|
377 |
+
# TODO(kszucs): the names and schema arguments are not consistent with
|
378 |
+
# Table.from_array's arguments
|
379 |
+
return pa.RecordBatch.from_arrays(children, schema=schema)
|
380 |
+
|
381 |
+
|
382 |
+
@st.composite
|
383 |
+
def tables(draw, type, rows=None, max_fields=None):
|
384 |
+
if isinstance(rows, st.SearchStrategy):
|
385 |
+
rows = draw(rows)
|
386 |
+
elif rows is None:
|
387 |
+
rows = draw(_default_array_sizes)
|
388 |
+
elif not isinstance(rows, int):
|
389 |
+
raise TypeError('Rows must be an integer')
|
390 |
+
|
391 |
+
schema = draw(schemas(type, max_fields=max_fields))
|
392 |
+
children = [draw(arrays(field.type, size=rows)) for field in schema]
|
393 |
+
return pa.Table.from_arrays(children, schema=schema)
|
394 |
+
|
395 |
+
|
396 |
+
all_arrays = arrays(all_types)
|
397 |
+
all_chunked_arrays = chunked_arrays(all_types)
|
398 |
+
all_record_batches = record_batches(all_types)
|
399 |
+
all_tables = tables(all_types)
|
400 |
+
|
401 |
+
|
402 |
+
# Define the same rules as above for pandas tests by excluding certain types
|
403 |
+
# from the generation because of known issues.
|
404 |
+
|
405 |
+
pandas_compatible_primitive_types = st.one_of(
|
406 |
+
null_type,
|
407 |
+
bool_type,
|
408 |
+
integer_types,
|
409 |
+
st.sampled_from([pa.float32(), pa.float64()]),
|
410 |
+
decimal128_type,
|
411 |
+
date_types,
|
412 |
+
time_types,
|
413 |
+
# Need to exclude timestamp and duration types otherwise hypothesis
|
414 |
+
# discovers ARROW-10210
|
415 |
+
# timestamp_types,
|
416 |
+
# duration_types
|
417 |
+
interval_types,
|
418 |
+
binary_type,
|
419 |
+
string_type,
|
420 |
+
large_binary_type,
|
421 |
+
large_string_type,
|
422 |
+
)
|
423 |
+
|
424 |
+
# Need to exclude floating point types otherwise hypothesis discovers
|
425 |
+
# ARROW-10211
|
426 |
+
pandas_compatible_dictionary_value_types = st.one_of(
|
427 |
+
bool_type,
|
428 |
+
integer_types,
|
429 |
+
binary_type,
|
430 |
+
string_type,
|
431 |
+
fixed_size_binary_type,
|
432 |
+
)
|
433 |
+
|
434 |
+
|
435 |
+
def pandas_compatible_list_types(
|
436 |
+
item_strategy=pandas_compatible_primitive_types
|
437 |
+
):
|
438 |
+
# Need to exclude fixed size list type otherwise hypothesis discovers
|
439 |
+
# ARROW-10194
|
440 |
+
return (
|
441 |
+
st.builds(pa.list_, item_strategy) |
|
442 |
+
st.builds(pa.large_list, item_strategy)
|
443 |
+
)
|
444 |
+
|
445 |
+
|
446 |
+
pandas_compatible_types = st.deferred(
|
447 |
+
lambda: st.one_of(
|
448 |
+
pandas_compatible_primitive_types,
|
449 |
+
pandas_compatible_list_types(pandas_compatible_primitive_types),
|
450 |
+
struct_types(pandas_compatible_primitive_types),
|
451 |
+
dictionary_types(
|
452 |
+
value_strategy=pandas_compatible_dictionary_value_types
|
453 |
+
),
|
454 |
+
pandas_compatible_list_types(pandas_compatible_types),
|
455 |
+
struct_types(pandas_compatible_types)
|
456 |
+
)
|
457 |
+
)
|
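These strategies are meant to be consumed with hypothesis's @h.given decorator elsewhere in the test suite. A minimal sketch of that usage, assuming the module is importable as pyarrow.tests.strategies (the two test functions below are illustrative, not part of this file):

    import pickle

    import hypothesis as h
    import pyarrow as pa
    import pyarrow.tests.strategies as past

    @h.given(past.all_types)
    def test_type_pickle_roundtrip(ty):
        # any generated DataType should survive pickling unchanged
        assert pickle.loads(pickle.dumps(ty)) == ty

    @h.given(past.arrays(pa.int8(), size=5))
    def test_array_size_is_pinned(arr):
        # passing size=5 fixes the length of every generated array
        assert len(arr) == 5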
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_acero.py
ADDED
@@ -0,0 +1,413 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

import pyarrow as pa
import pyarrow.compute as pc
from pyarrow.compute import field

try:
    from pyarrow.acero import (
        Declaration,
        TableSourceNodeOptions,
        FilterNodeOptions,
        ProjectNodeOptions,
        AggregateNodeOptions,
        OrderByNodeOptions,
        HashJoinNodeOptions,
        AsofJoinNodeOptions,
    )
except ImportError:
    pass

try:
    import pyarrow.dataset as ds
    from pyarrow.acero import ScanNodeOptions
except ImportError:
    ds = None

pytestmark = pytest.mark.acero


@pytest.fixture
def table_source():
    table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    table_opts = TableSourceNodeOptions(table)
    table_source = Declaration("table_source", options=table_opts)
    return table_source


def test_declaration():

    table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    table_opts = TableSourceNodeOptions(table)
    filter_opts = FilterNodeOptions(field('a') > 1)

    # using sequence
    decl = Declaration.from_sequence([
        Declaration("table_source", options=table_opts),
        Declaration("filter", options=filter_opts)
    ])
    result = decl.to_table()
    assert result.equals(table.slice(1, 2))

    # using explicit inputs
    table_source = Declaration("table_source", options=table_opts)
    filtered = Declaration("filter", options=filter_opts, inputs=[table_source])
    result = filtered.to_table()
    assert result.equals(table.slice(1, 2))


def test_declaration_repr(table_source):

    assert "TableSourceNode" in str(table_source)
    assert "TableSourceNode" in repr(table_source)


def test_declaration_to_reader(table_source):
    with table_source.to_reader() as reader:
        assert reader.schema == pa.schema([("a", pa.int64()), ("b", pa.int64())])
        result = reader.read_all()
    expected = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    assert result.equals(expected)


def test_table_source():
    with pytest.raises(TypeError):
        TableSourceNodeOptions(pa.record_batch([pa.array([1, 2, 3])], ["a"]))

    table_source = TableSourceNodeOptions(None)
    decl = Declaration("table_source", table_source)
    with pytest.raises(
        ValueError, match="TableSourceNode requires table which is not null"
    ):
        _ = decl.to_table()


def test_filter(table_source):
    # referencing unknown field
    decl = Declaration.from_sequence([
        table_source,
        Declaration("filter", options=FilterNodeOptions(field("c") > 1))
    ])
    with pytest.raises(ValueError, match=r"No match for FieldRef.Name\(c\)"):
        _ = decl.to_table()

    # requires a pyarrow Expression
    with pytest.raises(TypeError):
        FilterNodeOptions(pa.array([True, False, True]))
    with pytest.raises(TypeError):
        FilterNodeOptions(None)


def test_project(table_source):
    # default name from expression
    decl = Declaration.from_sequence([
        table_source,
        Declaration("project", ProjectNodeOptions([pc.multiply(field("a"), 2)]))
    ])
    result = decl.to_table()
    assert result.schema.names == ["multiply(a, 2)"]
    assert result[0].to_pylist() == [2, 4, 6]

    # provide name
    decl = Declaration.from_sequence([
        table_source,
        Declaration("project", ProjectNodeOptions([pc.multiply(field("a"), 2)], ["a2"]))
    ])
    result = decl.to_table()
    assert result.schema.names == ["a2"]
    assert result["a2"].to_pylist() == [2, 4, 6]

    # input validation
    with pytest.raises(ValueError):
        ProjectNodeOptions([pc.multiply(field("a"), 2)], ["a2", "b2"])

    # no scalar expression
    decl = Declaration.from_sequence([
        table_source,
        Declaration("project", ProjectNodeOptions([pc.sum(field("a"))]))
    ])
    with pytest.raises(ValueError, match="cannot Execute non-scalar expression"):
        _ = decl.to_table()


def test_aggregate_scalar(table_source):
    decl = Declaration.from_sequence([
        table_source,
        Declaration("aggregate", AggregateNodeOptions([("a", "sum", None, "a_sum")]))
    ])
    result = decl.to_table()
    assert result.schema.names == ["a_sum"]
    assert result["a_sum"].to_pylist() == [6]

    # with options class
    table = pa.table({'a': [1, 2, None]})
    aggr_opts = AggregateNodeOptions(
        [("a", "sum", pc.ScalarAggregateOptions(skip_nulls=False), "a_sum")]
    )
    decl = Declaration.from_sequence([
        Declaration("table_source", TableSourceNodeOptions(table)),
        Declaration("aggregate", aggr_opts),
    ])
    result = decl.to_table()
    assert result.schema.names == ["a_sum"]
    assert result["a_sum"].to_pylist() == [None]

    # test various ways of specifying the target column
    for target in ["a", field("a"), 0, field(0), ["a"], [field("a")], [0]]:
        aggr_opts = AggregateNodeOptions([(target, "sum", None, "a_sum")])
        decl = Declaration.from_sequence(
            [table_source, Declaration("aggregate", aggr_opts)]
        )
        result = decl.to_table()
        assert result.schema.names == ["a_sum"]
        assert result["a_sum"].to_pylist() == [6]

    # proper error when specifying the wrong number of target columns
    aggr_opts = AggregateNodeOptions([(["a", "b"], "sum", None, "a_sum")])
    decl = Declaration.from_sequence(
        [table_source, Declaration("aggregate", aggr_opts)]
    )
    with pytest.raises(
        ValueError, match="Function 'sum' accepts 1 arguments but 2 passed"
    ):
        _ = decl.to_table()

    # proper error when using hash aggregation without keys
    aggr_opts = AggregateNodeOptions([("a", "hash_sum", None, "a_sum")])
    decl = Declaration.from_sequence(
        [table_source, Declaration("aggregate", aggr_opts)]
    )
    with pytest.raises(ValueError, match="is a hash aggregate function"):
        _ = decl.to_table()


def test_aggregate_hash():
    table = pa.table({'a': [1, 2, None], 'b': ["foo", "bar", "foo"]})
    table_opts = TableSourceNodeOptions(table)
    table_source = Declaration("table_source", options=table_opts)

    # default options
    aggr_opts = AggregateNodeOptions(
        [("a", "hash_count", None, "count(a)")], keys=["b"])
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    result = decl.to_table()
    expected = pa.table({"b": ["foo", "bar"], "count(a)": [1, 1]})
    assert result.equals(expected)

    # specify function options
    aggr_opts = AggregateNodeOptions(
        [("a", "hash_count", pc.CountOptions("all"), "count(a)")], keys=["b"]
    )
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    result = decl.to_table()
    expected_all = pa.table({"b": ["foo", "bar"], "count(a)": [2, 1]})
    assert result.equals(expected_all)

    # specify keys as field references
    aggr_opts = AggregateNodeOptions(
        [("a", "hash_count", None, "count(a)")], keys=[field("b")]
    )
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    result = decl.to_table()
    assert result.equals(expected)

    # wrong type of (aggregation) function
    # TODO test with kernel that matches number of arguments (arity) -> avoid segfault
    aggr_opts = AggregateNodeOptions([("a", "sum", None, "a_sum")], keys=["b"])
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    with pytest.raises(ValueError):
        _ = decl.to_table()


def test_order_by():
    table = pa.table({'a': [1, 2, 3, 4], 'b': [1, 3, None, 2]})
    table_source = Declaration("table_source", TableSourceNodeOptions(table))

    ord_opts = OrderByNodeOptions([("b", "ascending")])
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    result = decl.to_table()
    expected = pa.table({"a": [1, 4, 2, 3], "b": [1, 2, 3, None]})
    assert result.equals(expected)

    ord_opts = OrderByNodeOptions([(field("b"), "descending")])
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    result = decl.to_table()
    expected = pa.table({"a": [2, 4, 1, 3], "b": [3, 2, 1, None]})
    assert result.equals(expected)

    ord_opts = OrderByNodeOptions([(1, "descending")], null_placement="at_start")
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    result = decl.to_table()
    expected = pa.table({"a": [3, 2, 4, 1], "b": [None, 3, 2, 1]})
    assert result.equals(expected)

    # empty ordering
    ord_opts = OrderByNodeOptions([])
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    with pytest.raises(
        ValueError, match="`ordering` must be an explicit non-empty ordering"
    ):
        _ = decl.to_table()

    with pytest.raises(ValueError, match="\"decreasing\" is not a valid sort order"):
        _ = OrderByNodeOptions([("b", "decreasing")])

    with pytest.raises(ValueError, match="\"start\" is not a valid null placement"):
        _ = OrderByNodeOptions([("b", "ascending")], null_placement="start")


def test_hash_join():
    left = pa.table({'key': [1, 2, 3], 'a': [4, 5, 6]})
    left_source = Declaration("table_source", options=TableSourceNodeOptions(left))
    right = pa.table({'key': [2, 3, 4], 'b': [4, 5, 6]})
    right_source = Declaration("table_source", options=TableSourceNodeOptions(right))

    # inner join
    join_opts = HashJoinNodeOptions("inner", left_keys="key", right_keys="key")
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[2, 3], [5, 6], [2, 3], [4, 5]],
        names=["key", "a", "key", "b"])
    assert result.equals(expected)

    for keys in [field("key"), ["key"], [field("key")]]:
        join_opts = HashJoinNodeOptions("inner", left_keys=keys, right_keys=keys)
        joined = Declaration(
            "hashjoin", options=join_opts, inputs=[left_source, right_source])
        result = joined.to_table()
        assert result.equals(expected)

    # left join
    join_opts = HashJoinNodeOptions(
        "left outer", left_keys="key", right_keys="key")
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[1, 2, 3], [4, 5, 6], [None, 2, 3], [None, 4, 5]],
        names=["key", "a", "key", "b"]
    )
    assert result.sort_by("a").equals(expected)

    # suffixes
    join_opts = HashJoinNodeOptions(
        "left outer", left_keys="key", right_keys="key",
        output_suffix_for_left="_left", output_suffix_for_right="_right")
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[1, 2, 3], [4, 5, 6], [None, 2, 3], [None, 4, 5]],
        names=["key_left", "a", "key_right", "b"]
    )
    assert result.sort_by("a").equals(expected)

    # manually specifying output columns
    join_opts = HashJoinNodeOptions(
        "left outer", left_keys="key", right_keys="key",
        left_output=["key", "a"], right_output=[field("b")])
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[1, 2, 3], [4, 5, 6], [None, 4, 5]],
        names=["key", "a", "b"]
    )
    assert result.sort_by("a").equals(expected)


def test_asof_join():
    left = pa.table({'key': [1, 2, 3], 'ts': [1, 1, 1], 'a': [4, 5, 6]})
    left_source = Declaration("table_source", options=TableSourceNodeOptions(left))
    right = pa.table({'key': [2, 3, 4], 'ts': [2, 5, 2], 'b': [4, 5, 6]})
    right_source = Declaration("table_source", options=TableSourceNodeOptions(right))

    # asof join
    join_opts = AsofJoinNodeOptions(
        left_on="ts", left_by=["key"],
        right_on="ts", right_by=["key"],
        tolerance=1,
    )
    joined = Declaration(
        "asofjoin", options=join_opts, inputs=[left_source, right_source]
    )
    result = joined.to_table()
    expected = pa.table(
        [[1, 2, 3], [1, 1, 1], [4, 5, 6], [None, 4, None]],
        names=["key", "ts", "a", "b"])
    assert result == expected

    for by in [field("key"), ["key"], [field("key")]]:
        for on in [field("ts"), "ts"]:
            join_opts = AsofJoinNodeOptions(
                left_on=on, left_by=by,
                right_on=on, right_by=by,
                tolerance=1,
            )
            joined = Declaration(
                "asofjoin", options=join_opts, inputs=[left_source, right_source])
            result = joined.to_table()
            assert result == expected


@pytest.mark.dataset
def test_scan(tempdir):
    table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    ds.write_dataset(table, tempdir / "dataset", format="parquet")
    dataset = ds.dataset(tempdir / "dataset", format="parquet")
    decl = Declaration("scan", ScanNodeOptions(dataset))
    result = decl.to_table()
    assert result.schema.names == [
        "a", "b", "__fragment_index", "__batch_index",
        "__last_in_fragment", "__filename"
    ]
    assert result.select(["a", "b"]).equals(table)

    # using a filter only does pushdown (depending on file format), not actual filter

    scan_opts = ScanNodeOptions(dataset, filter=field('a') > 1)
    decl = Declaration("scan", scan_opts)
    # fragment not filtered based on min/max statistics
    assert decl.to_table().num_rows == 3

    scan_opts = ScanNodeOptions(dataset, filter=field('a') > 4)
    decl = Declaration("scan", scan_opts)
    # full fragment filtered based on min/max statistics
    assert decl.to_table().num_rows == 0

    # projection scan option

    scan_opts = ScanNodeOptions(dataset, columns={"a2": pc.multiply(field("a"), 2)})
    decl = Declaration("scan", scan_opts)
    result = decl.to_table()
    # "a" is included in the result (needed later on for the actual projection)
    assert result["a"].to_pylist() == [1, 2, 3]
    # "b" is still included, but without data as it will be removed by the projection
    assert pc.all(result["b"].is_null()).as_py()
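The pattern these tests exercise is consistent: wrap a table in TableSourceNodeOptions, chain transformation nodes with Declaration.from_sequence (or explicit inputs= for joins), and materialize with to_table() or to_reader(). A compact sketch combining the nodes shown above, using only the API imported at the top of this file:

    import pyarrow as pa
    import pyarrow.compute as pc
    from pyarrow.acero import (
        Declaration, TableSourceNodeOptions, FilterNodeOptions,
        ProjectNodeOptions,
    )

    table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    decl = Declaration.from_sequence([
        # source -> filter -> project; nothing executes until to_table()
        Declaration("table_source", TableSourceNodeOptions(table)),
        Declaration("filter", FilterNodeOptions(pc.field("a") > 1)),
        Declaration("project", ProjectNodeOptions([pc.field("b")], ["b"])),
    ])
    result = decl.to_table()  # expected: pa.table({'b': [5, 6]})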
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_adhoc_memory_leak.py
ADDED
@@ -0,0 +1,43 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

import numpy as np
import pyarrow as pa

import pyarrow.tests.util as test_util

try:
    import pandas as pd
except ImportError:
    pass


@pytest.mark.memory_leak
@pytest.mark.pandas
def test_deserialize_pandas_arrow_7956():
    df = pd.DataFrame({'a': np.arange(10000),
                       'b': [test_util.rands(5) for _ in range(10000)]})

    def action():
        df_bytes = pa.ipc.serialize_pandas(df).to_pybytes()
        buf = pa.py_buffer(df_bytes)
        pa.ipc.deserialize_pandas(buf)

    # Abort at 128MB threshold
    test_util.memory_leak_check(action, threshold=1 << 27, iterations=100)
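The heavy lifting here happens inside test_util.memory_leak_check. As a rough illustration of what such a check can do with only public API, here is a hypothetical simplification based on pa.total_allocated_bytes() (the real helper in pyarrow.tests.util is more careful about how it measures growth):

    import pyarrow as pa

    def naive_leak_check(action, threshold, iterations):
        # run the action repeatedly and fail if Arrow-tracked memory
        # grows beyond the threshold; a deliberately simplified sketch
        baseline = pa.total_allocated_bytes()
        for _ in range(iterations):
            action()
        assert pa.total_allocated_bytes() - baseline < threshold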
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_builder.py
ADDED
@@ -0,0 +1,86 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import weakref

import numpy as np

import pyarrow as pa
from pyarrow.lib import StringBuilder, StringViewBuilder


def test_weakref():
    sbuilder = StringBuilder()
    wr = weakref.ref(sbuilder)
    assert wr() is not None
    del sbuilder
    assert wr() is None


def test_string_builder_append():
    sbuilder = StringBuilder()
    sbuilder.append(b"a byte string")
    sbuilder.append("a string")
    sbuilder.append(np.nan)
    sbuilder.append(None)
    assert len(sbuilder) == 4
    assert sbuilder.null_count == 2
    arr = sbuilder.finish()
    assert len(sbuilder) == 0
    assert isinstance(arr, pa.Array)
    assert arr.null_count == 2
    assert arr.type == 'str'
    expected = ["a byte string", "a string", None, None]
    assert arr.to_pylist() == expected


def test_string_builder_append_values():
    sbuilder = StringBuilder()
    sbuilder.append_values([np.nan, None, "text", None, "other text"])
    assert sbuilder.null_count == 3
    arr = sbuilder.finish()
    assert arr.null_count == 3
    expected = [None, None, "text", None, "other text"]
    assert arr.to_pylist() == expected


def test_string_builder_append_after_finish():
    sbuilder = StringBuilder()
    sbuilder.append_values([np.nan, None, "text", None, "other text"])
    arr = sbuilder.finish()
    sbuilder.append("No effect")
    expected = [None, None, "text", None, "other text"]
    assert arr.to_pylist() == expected


def test_string_view_builder():
    builder = StringViewBuilder()
    builder.append(b"a byte string")
    builder.append("a string")
    builder.append("a longer not-inlined string")
    builder.append(np.nan)
    builder.append_values([None, "text"])
    assert len(builder) == 6
    assert builder.null_count == 2
    arr = builder.finish()
    assert isinstance(arr, pa.Array)
    assert arr.null_count == 2
    assert arr.type == 'string_view'
    expected = [
        "a byte string", "a string", "a longer not-inlined string", None, None, "text"
    ]
    assert arr.to_pylist() == expected
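Builders exist for incremental construction: when values arrive one at a time, appending to a builder avoids materializing an intermediate Python list for pa.array(). A small usage sketch based only on the behavior asserted above:

    import pyarrow as pa
    from pyarrow.lib import StringBuilder

    builder = StringBuilder()
    for token in ["alpha", None, "gamma"]:
        builder.append(token)  # None (and NaN) become null slots
    arr = builder.finish()     # finish() also resets the builder
    assert arr.to_pylist() == ["alpha", None, "gamma"]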
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_cffi.py
ADDED
@@ -0,0 +1,707 @@
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import contextlib
import ctypes
import gc

import pyarrow as pa
try:
    from pyarrow.cffi import ffi
except ImportError:
    ffi = None

import pytest

try:
    import pandas as pd
    import pandas.testing as tm
except ImportError:
    pd = tm = None


needs_cffi = pytest.mark.skipif(ffi is None,
                                reason="test needs cffi package installed")

assert_schema_released = pytest.raises(
    ValueError, match="Cannot import released ArrowSchema")

assert_array_released = pytest.raises(
    ValueError, match="Cannot import released ArrowArray")

assert_stream_released = pytest.raises(
    ValueError, match="Cannot import released ArrowArrayStream")


def PyCapsule_IsValid(capsule, name):
    return ctypes.pythonapi.PyCapsule_IsValid(ctypes.py_object(capsule), name) == 1


@contextlib.contextmanager
def registered_extension_type(ext_type):
    pa.register_extension_type(ext_type)
    try:
        yield
    finally:
        pa.unregister_extension_type(ext_type.extension_name)


class ParamExtType(pa.ExtensionType):

    def __init__(self, width):
        self._width = width
        super().__init__(pa.binary(width),
                         "pyarrow.tests.test_cffi.ParamExtType")

    @property
    def width(self):
        return self._width

    def __arrow_ext_serialize__(self):
        return str(self.width).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        width = int(serialized.decode())
        return cls(width)


def make_schema():
    return pa.schema([('ints', pa.list_(pa.int32()))],
                     metadata={b'key1': b'value1'})


def make_extension_schema():
    return pa.schema([('ext', ParamExtType(3))],
                     metadata={b'key1': b'value1'})


def make_extension_storage_schema():
    # Should be kept in sync with make_extension_schema
    return pa.schema([('ext', ParamExtType(3).storage_type)],
                     metadata={b'key1': b'value1'})


def make_batch():
    return pa.record_batch([[[1], [2, 42]]], make_schema())


def make_extension_batch():
    schema = make_extension_schema()
    ext_col = schema[0].type.wrap_array(pa.array([b"foo", b"bar"],
                                                 type=pa.binary(3)))
    return pa.record_batch([ext_col], schema)


def make_batches():
    schema = make_schema()
    return [
        pa.record_batch([[[1], [2, 42]]], schema),
        pa.record_batch([[None, [], [5, 6]]], schema),
    ]


def make_serialized(schema, batches):
    with pa.BufferOutputStream() as sink:
        with pa.ipc.new_stream(sink, schema) as out:
            for batch in batches:
                out.write(batch)
        return sink.getvalue()
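The export/import tests that follow all perform the same dance: allocate a C struct with cffi, pass its address as an integer to an _export_to_c method, then rebuild the Python object from that address. A minimal standalone round trip using exactly the calls exercised below (note the struct is released after one import):

    import pyarrow as pa
    from pyarrow.cffi import ffi

    c_schema = ffi.new("struct ArrowSchema*")
    ptr = int(ffi.cast("uintptr_t", c_schema))
    pa.int32()._export_to_c(ptr)  # export writes into the struct
    assert pa.DataType._import_from_c(ptr) == pa.int32()  # import consumes it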
@needs_cffi
def test_export_import_type():
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    typ = pa.list_(pa.int32())
    typ._export_to_c(ptr_schema)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    del typ
    assert pa.total_allocated_bytes() > old_allocated
    typ_new = pa.DataType._import_from_c(ptr_schema)
    assert typ_new == pa.list_(pa.int32())
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        pa.DataType._import_from_c(ptr_schema)

    # Invalid format string
    pa.int32()._export_to_c(ptr_schema)
    bad_format = ffi.new("char[]", b"zzz")
    c_schema.format = bad_format
    with pytest.raises(ValueError,
                       match="Invalid or unsupported format string"):
        pa.DataType._import_from_c(ptr_schema)
    # Now released
    with assert_schema_released:
        pa.DataType._import_from_c(ptr_schema)


@needs_cffi
def test_export_import_field():
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    field = pa.field("test", pa.list_(pa.int32()), nullable=True)
    field._export_to_c(ptr_schema)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    del field
    assert pa.total_allocated_bytes() > old_allocated

    field_new = pa.Field._import_from_c(ptr_schema)
    assert field_new == pa.field("test", pa.list_(pa.int32()), nullable=True)
    assert pa.total_allocated_bytes() == old_allocated

    # Now released
    with assert_schema_released:
        pa.Field._import_from_c(ptr_schema)


def check_export_import_array(array_type, exporter, importer):
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))
    c_array = ffi.new(f"struct {array_type}*")
    ptr_array = int(ffi.cast("uintptr_t", c_array))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    # Type is known up front
    typ = pa.list_(pa.int32())
    arr = pa.array([[1], [2, 42]], type=typ)
    py_value = arr.to_pylist()
    exporter(arr, ptr_array)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete recreate C++ object from exported pointer
    del arr
    arr_new = importer(ptr_array, typ)
    assert arr_new.to_pylist() == py_value
    assert arr_new.type == pa.list_(pa.int32())
    assert pa.total_allocated_bytes() > old_allocated
    del arr_new, typ
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_array_released:
        importer(ptr_array, pa.list_(pa.int32()))

    # Type is exported and imported at the same time
    arr = pa.array([[1], [2, 42]], type=pa.list_(pa.int32()))
    py_value = arr.to_pylist()
    exporter(arr, ptr_array, ptr_schema)
    # Delete and recreate C++ objects from exported pointers
    del arr
    arr_new = importer(ptr_array, ptr_schema)
    assert arr_new.to_pylist() == py_value
    assert arr_new.type == pa.list_(pa.int32())
    assert pa.total_allocated_bytes() > old_allocated
    del arr_new
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        importer(ptr_array, ptr_schema)


@needs_cffi
def test_export_import_array():
    check_export_import_array(
        "ArrowArray",
        pa.Array._export_to_c,
        pa.Array._import_from_c,
    )


@needs_cffi
def test_export_import_device_array():
    check_export_import_array(
        "ArrowDeviceArray",
        pa.Array._export_to_c_device,
        pa.Array._import_from_c_device,
    )

    # verify exported struct
    c_array = ffi.new("struct ArrowDeviceArray*")
    ptr_array = int(ffi.cast("uintptr_t", c_array))
    arr = pa.array([[1], [2, 42]], type=pa.list_(pa.int32()))
    arr._export_to_c_device(ptr_array)

    assert c_array.device_type == 1  # ARROW_DEVICE_CPU 1
    assert c_array.device_id == -1
    assert c_array.array.length == 2


def check_export_import_schema(schema_factory, expected_schema_factory=None):
    if expected_schema_factory is None:
        expected_schema_factory = schema_factory

    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    schema_factory()._export_to_c(ptr_schema)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    schema_new = pa.Schema._import_from_c(ptr_schema)
    assert schema_new == expected_schema_factory()
    assert pa.total_allocated_bytes() == old_allocated
    del schema_new
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        pa.Schema._import_from_c(ptr_schema)

    # Not a struct type
    pa.int32()._export_to_c(ptr_schema)
    with pytest.raises(ValueError,
                       match="ArrowSchema describes non-struct type"):
        pa.Schema._import_from_c(ptr_schema)
    # Now released
    with assert_schema_released:
        pa.Schema._import_from_c(ptr_schema)


@needs_cffi
def test_export_import_schema():
    check_export_import_schema(make_schema)


@needs_cffi
def test_export_import_schema_with_extension():
    # Extension type is unregistered => the storage type is imported
    check_export_import_schema(make_extension_schema,
                               make_extension_storage_schema)

    # Extension type is registered => the extension type is imported
    with registered_extension_type(ParamExtType(1)):
        check_export_import_schema(make_extension_schema)


@needs_cffi
def test_export_import_schema_float_pointer():
    # Previous versions of the R Arrow library used to pass pointer
    # values as a double.
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    match = "Passing a pointer value as a float is unsafe"
    with pytest.warns(UserWarning, match=match):
        make_schema()._export_to_c(float(ptr_schema))
    with pytest.warns(UserWarning, match=match):
        schema_new = pa.Schema._import_from_c(float(ptr_schema))
    assert schema_new == make_schema()


def check_export_import_batch(array_type, exporter, importer, batch_factory):
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))
    c_array = ffi.new(f"struct {array_type}*")
    ptr_array = int(ffi.cast("uintptr_t", c_array))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    # Schema is known up front
    batch = batch_factory()
    schema = batch.schema
    py_value = batch.to_pydict()
    exporter(batch, ptr_array)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    del batch
    batch_new = importer(ptr_array, schema)
    assert batch_new.to_pydict() == py_value
    assert batch_new.schema == schema
    assert pa.total_allocated_bytes() > old_allocated
    del batch_new, schema
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_array_released:
        importer(ptr_array, make_schema())

    # Type is exported and imported at the same time
    batch = batch_factory()
    py_value = batch.to_pydict()
    batch._export_to_c(ptr_array, ptr_schema)
    # Delete and recreate C++ objects from exported pointers
    del batch
    batch_new = importer(ptr_array, ptr_schema)
    assert batch_new.to_pydict() == py_value
    assert batch_new.schema == batch_factory().schema
    assert pa.total_allocated_bytes() > old_allocated
    del batch_new
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        importer(ptr_array, ptr_schema)

    # Not a struct type
    pa.int32()._export_to_c(ptr_schema)
    batch_factory()._export_to_c(ptr_array)
    with pytest.raises(ValueError,
                       match="ArrowSchema describes non-struct type"):
        importer(ptr_array, ptr_schema)
    # Now released
    with assert_schema_released:
        importer(ptr_array, ptr_schema)


@needs_cffi
def test_export_import_batch():
    check_export_import_batch(
        "ArrowArray",
        pa.RecordBatch._export_to_c,
        pa.RecordBatch._import_from_c,
        make_batch,
    )


@needs_cffi
def test_export_import_batch_with_extension():
    with registered_extension_type(ParamExtType(1)):
        check_export_import_batch(
            "ArrowArray",
            pa.RecordBatch._export_to_c,
            pa.RecordBatch._import_from_c,
            make_extension_batch,
        )


@needs_cffi
def test_export_import_device_batch():
    check_export_import_batch(
        "ArrowDeviceArray",
        pa.RecordBatch._export_to_c_device,
        pa.RecordBatch._import_from_c_device,
        make_batch,
    )

    # verify exported struct
    c_array = ffi.new("struct ArrowDeviceArray*")
    ptr_array = int(ffi.cast("uintptr_t", c_array))
    batch = make_batch()
    batch._export_to_c_device(ptr_array)
    assert c_array.device_type == 1  # ARROW_DEVICE_CPU 1
    assert c_array.device_id == -1
    assert c_array.array.length == 2


def _export_import_batch_reader(ptr_stream, reader_factory):
    # Prepare input
    batches = make_batches()
    schema = batches[0].schema

    reader = reader_factory(schema, batches)
    reader._export_to_c(ptr_stream)
    # Delete and recreate C++ object from exported pointer
    del reader, batches

    reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
    assert reader_new.schema == schema
    got_batches = list(reader_new)
    del reader_new
    assert got_batches == make_batches()

    # Test read_pandas()
    if pd is not None:
        batches = make_batches()
        schema = batches[0].schema
        expected_df = pa.Table.from_batches(batches).to_pandas()

        reader = reader_factory(schema, batches)
        reader._export_to_c(ptr_stream)
        del reader, batches

        reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
        got_df = reader_new.read_pandas()
        del reader_new
        tm.assert_frame_equal(expected_df, got_df)


def make_ipc_stream_reader(schema, batches):
    return pa.ipc.open_stream(make_serialized(schema, batches))


def make_py_record_batch_reader(schema, batches):
    return pa.RecordBatchReader.from_batches(schema, batches)


@needs_cffi
@pytest.mark.parametrize('reader_factory',
                         [make_ipc_stream_reader,
                          make_py_record_batch_reader])
def test_export_import_batch_reader(reader_factory):
    c_stream = ffi.new("struct ArrowArrayStream*")
    ptr_stream = int(ffi.cast("uintptr_t", c_stream))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    _export_import_batch_reader(ptr_stream, reader_factory)

    assert pa.total_allocated_bytes() == old_allocated

    # Now released
    with assert_stream_released:
        pa.RecordBatchReader._import_from_c(ptr_stream)


@needs_cffi
def test_export_import_exception_reader():
    # See: https://github.com/apache/arrow/issues/37164
    c_stream = ffi.new("struct ArrowArrayStream*")
    ptr_stream = int(ffi.cast("uintptr_t", c_stream))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    def gen():
        if True:
            try:
                raise ValueError('foo')
            except ValueError as e:
                raise NotImplementedError('bar') from e
        else:
            yield from make_batches()

    original = pa.RecordBatchReader.from_batches(make_schema(), gen())
    original._export_to_c(ptr_stream)

    reader = pa.RecordBatchReader._import_from_c(ptr_stream)
    with pytest.raises(OSError) as exc_info:
        reader.read_next_batch()

    # inner *and* outer exception should be present
    assert 'ValueError: foo' in str(exc_info.value)
    assert 'NotImplementedError: bar' in str(exc_info.value)
    # Stacktrace containing line of the raise statement
    assert 'raise ValueError(\'foo\')' in str(exc_info.value)

    assert pa.total_allocated_bytes() == old_allocated


@needs_cffi
def test_imported_batch_reader_error():
    c_stream = ffi.new("struct ArrowArrayStream*")
    ptr_stream = int(ffi.cast("uintptr_t", c_stream))

    schema = pa.schema([('foo', pa.int32())])
    batches = [pa.record_batch([[1, 2, 3]], schema=schema),
               pa.record_batch([[4, 5, 6]], schema=schema)]
    buf = make_serialized(schema, batches)

    # Open a corrupt/incomplete stream and export it
    reader = pa.ipc.open_stream(buf[:-16])
    reader._export_to_c(ptr_stream)
    del reader

    reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
    batch = reader_new.read_next_batch()
    assert batch == batches[0]
    with pytest.raises(OSError,
                       match="Expected to be able to read 16 bytes "
                             "for message body, got 8"):
        reader_new.read_next_batch()

    # Again, but call read_all()
    reader = pa.ipc.open_stream(buf[:-16])
    reader._export_to_c(ptr_stream)
    del reader

    reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
    with pytest.raises(OSError,
                       match="Expected to be able to read 16 bytes "
                             "for message body, got 8"):
        reader_new.read_all()


@pytest.mark.parametrize('obj', [pa.int32(), pa.field('foo', pa.int32()),
                                 pa.schema({'foo': pa.int32()})],
                         ids=['type', 'field', 'schema'])
def test_roundtrip_schema_capsule(obj):
    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    capsule = obj.__arrow_c_schema__()
    assert PyCapsule_IsValid(capsule, b"arrow_schema") == 1
    assert pa.total_allocated_bytes() > old_allocated
    obj_out = type(obj)._import_from_c_capsule(capsule)
    assert obj_out == obj

    assert pa.total_allocated_bytes() == old_allocated

    capsule = obj.__arrow_c_schema__()

    assert pa.total_allocated_bytes() > old_allocated
    del capsule
    assert pa.total_allocated_bytes() == old_allocated


@pytest.mark.parametrize('arr,schema_accessor,bad_type,good_type', [
    (pa.array(['a', 'b', 'c']), lambda x: x.type, pa.int32(), pa.string()),
    (
        pa.record_batch([pa.array(['a', 'b', 'c'])], names=['x']),
        lambda x: x.schema,
        pa.schema({'x': pa.int32()}),
        pa.schema({'x': pa.string()})
    ),
], ids=['array', 'record_batch'])
def test_roundtrip_array_capsule(arr, schema_accessor, bad_type, good_type):
    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    import_array = type(arr)._import_from_c_capsule

    schema_capsule, capsule = arr.__arrow_c_array__()
    assert PyCapsule_IsValid(schema_capsule, b"arrow_schema") == 1
    assert PyCapsule_IsValid(capsule, b"arrow_array") == 1
    arr_out = import_array(schema_capsule, capsule)
    assert arr_out.equals(arr)

    assert pa.total_allocated_bytes() > old_allocated
    del arr_out

    assert pa.total_allocated_bytes() == old_allocated

    capsule = arr.__arrow_c_array__()

    assert pa.total_allocated_bytes() > old_allocated
    del capsule
    assert pa.total_allocated_bytes() == old_allocated

    with pytest.raises(ValueError,
                       match=r"Could not cast.* string to requested .* int32"):
        arr.__arrow_c_array__(bad_type.__arrow_c_schema__())

    schema_capsule, array_capsule = arr.__arrow_c_array__(
        good_type.__arrow_c_schema__())
    arr_out = import_array(schema_capsule, array_capsule)
    assert schema_accessor(arr_out) == good_type


# TODO: implement requested_schema for stream
@pytest.mark.parametrize('constructor', [
    pa.RecordBatchReader.from_batches,
    # Use a lambda because we need to re-order the parameters
    lambda schema, batches: pa.Table.from_batches(batches, schema),
], ids=['recordbatchreader', 'table'])
def test_roundtrip_reader_capsule(constructor):
    batches = make_batches()
    schema = batches[0].schema

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    obj = constructor(schema, batches)

    capsule = obj.__arrow_c_stream__()
    assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1
    imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
    assert imported_reader.schema == schema
    imported_batches = list(imported_reader)
    assert len(imported_batches) == len(batches)
    for batch, expected in zip(imported_batches, batches):
        assert batch.equals(expected)

    del obj, imported_reader, batch, expected, imported_batches

    assert pa.total_allocated_bytes() == old_allocated

    obj = constructor(schema, batches)

    bad_schema = pa.schema({'ints': pa.int32()})
    with pytest.raises(pa.lib.ArrowTypeError, match="Field 0 cannot be cast"):
        obj.__arrow_c_stream__(bad_schema.__arrow_c_schema__())

    # Can work with matching schema
    matching_schema = pa.schema({'ints': pa.list_(pa.int32())})
    capsule = obj.__arrow_c_stream__(matching_schema.__arrow_c_schema__())
    imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
    assert imported_reader.schema == matching_schema
    for batch, expected in zip(imported_reader, batches):
        assert batch.equals(expected)


def test_roundtrip_batch_reader_capsule_requested_schema():
    batch = make_batch()
    requested_schema = pa.schema([('ints', pa.list_(pa.int64()))])
    requested_capsule = requested_schema.__arrow_c_schema__()
|
653 |
+
batch_as_requested = batch.cast(requested_schema)
|
654 |
+
|
655 |
+
capsule = batch.__arrow_c_stream__(requested_capsule)
|
656 |
+
assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1
|
657 |
+
imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
|
658 |
+
assert imported_reader.schema == requested_schema
|
659 |
+
assert imported_reader.read_next_batch().equals(batch_as_requested)
|
660 |
+
with pytest.raises(StopIteration):
|
661 |
+
imported_reader.read_next_batch()
|
662 |
+
|
663 |
+
|
664 |
+
def test_roundtrip_batch_reader_capsule():
|
665 |
+
batch = make_batch()
|
666 |
+
|
667 |
+
capsule = batch.__arrow_c_stream__()
|
668 |
+
assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1
|
669 |
+
imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
|
670 |
+
assert imported_reader.schema == batch.schema
|
671 |
+
assert imported_reader.read_next_batch().equals(batch)
|
672 |
+
with pytest.raises(StopIteration):
|
673 |
+
imported_reader.read_next_batch()
|
674 |
+
|
675 |
+
|
676 |
+
def test_roundtrip_chunked_array_capsule():
|
677 |
+
chunked = pa.chunked_array([pa.array(["a", "b", "c"])])
|
678 |
+
|
679 |
+
capsule = chunked.__arrow_c_stream__()
|
680 |
+
assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1
|
681 |
+
imported_chunked = pa.ChunkedArray._import_from_c_capsule(capsule)
|
682 |
+
assert imported_chunked.type == chunked.type
|
683 |
+
assert imported_chunked == chunked
|
684 |
+
|
685 |
+
|
686 |
+
def test_roundtrip_chunked_array_capsule_requested_schema():
|
687 |
+
chunked = pa.chunked_array([pa.array(["a", "b", "c"])])
|
688 |
+
|
689 |
+
# Requesting the same type should work
|
690 |
+
requested_capsule = chunked.type.__arrow_c_schema__()
|
691 |
+
capsule = chunked.__arrow_c_stream__(requested_capsule)
|
692 |
+
imported_chunked = pa.ChunkedArray._import_from_c_capsule(capsule)
|
693 |
+
assert imported_chunked == chunked
|
694 |
+
|
695 |
+
# Casting to something else should error if not possible
|
696 |
+
requested_type = pa.binary()
|
697 |
+
requested_capsule = requested_type.__arrow_c_schema__()
|
698 |
+
capsule = chunked.__arrow_c_stream__(requested_capsule)
|
699 |
+
imported_chunked = pa.ChunkedArray._import_from_c_capsule(capsule)
|
700 |
+
assert imported_chunked == chunked.cast(pa.binary())
|
701 |
+
|
702 |
+
requested_type = pa.int64()
|
703 |
+
requested_capsule = requested_type.__arrow_c_schema__()
|
704 |
+
with pytest.raises(
|
705 |
+
ValueError, match="Could not cast string to requested type int64"
|
706 |
+
):
|
707 |
+
chunked.__arrow_c_stream__(requested_capsule)
|
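
The capsule round trips above exercise the Arrow PyCapsule protocol end to
end. As a minimal sketch of the same flow outside pytest (an illustration
only: it assumes a pyarrow version whose tables expose __arrow_c_stream__,
which the tests above already rely on, and it reuses the private
_import_from_c_capsule hook those tests call):

    import pyarrow as pa

    table = pa.table({"ints": [1, 2, 3]})
    capsule = table.__arrow_c_stream__()  # PyCapsule named "arrow_array_stream"
    reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
    assert reader.read_all().equals(table)  # the import consumes the capsule
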
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_compute.py
ADDED
The diff for this file is too large to render.
See raw diff
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_convert_builtin.py
ADDED
@@ -0,0 +1,2536 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import collections
import datetime
import decimal
import itertools
import math
import re
import sys

import hypothesis as h
import numpy as np
import pytest

from pyarrow.pandas_compat import _pandas_api  # noqa
import pyarrow as pa
from pyarrow.tests import util
import pyarrow.tests.strategies as past


int_type_pairs = [
    (np.int8, pa.int8()),
    (np.int16, pa.int16()),
    (np.int32, pa.int32()),
    (np.int64, pa.int64()),
    (np.uint8, pa.uint8()),
    (np.uint16, pa.uint16()),
    (np.uint32, pa.uint32()),
    (np.uint64, pa.uint64())]


np_int_types, pa_int_types = zip(*int_type_pairs)


class StrangeIterable:
    def __init__(self, lst):
        self.lst = lst

    def __iter__(self):
        return self.lst.__iter__()


class MyInt:
    def __init__(self, value):
        self.value = value

    def __int__(self):
        return self.value


class MyBrokenInt:
    def __int__(self):
        1/0  # MARKER


def check_struct_type(ty, expected):
    """
    Check a struct type is as expected, but not taking order into account.
    """
    assert pa.types.is_struct(ty)
    assert set(ty) == set(expected)


def test_iterable_types():
    arr1 = pa.array(StrangeIterable([0, 1, 2, 3]))
    arr2 = pa.array((0, 1, 2, 3))

    assert arr1.equals(arr2)


def test_empty_iterable():
    arr = pa.array(StrangeIterable([]))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []


def test_limited_iterator_types():
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=3)
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)


def test_limited_iterator_size_overflow():
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=2)
    arr2 = pa.array((0, 1))
    assert arr1.equals(arr2)


def test_limited_iterator_size_underflow():
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=10)
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)


def test_iterator_without_size():
    expected = pa.array((0, 1, 2))
    arr1 = pa.array(iter(range(3)))
    assert arr1.equals(expected)
    # Same with explicit type
    arr1 = pa.array(iter(range(3)), type=pa.int64())
    assert arr1.equals(expected)


def test_infinite_iterator():
    expected = pa.array((0, 1, 2))
    arr1 = pa.array(itertools.count(0), size=3)
    assert arr1.equals(expected)
    # Same with explicit type
    arr1 = pa.array(itertools.count(0), type=pa.int64(), size=3)
    assert arr1.equals(expected)


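# Illustrative sketch (an editorial addition, not one of the upstream
# tests): pa.array() consumes iterators eagerly, so for an unbounded
# generator the `size` argument is what stops the draw, as
# test_infinite_iterator above relies on.
def example_infinite_iterator_capped():
    arr = pa.array(itertools.count(10), size=2)
    assert arr.to_pylist() == [10, 11]

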
def test_failing_iterator():
    with pytest.raises(ZeroDivisionError):
        pa.array((1 // 0 for x in range(10)))
    # ARROW-17253
    with pytest.raises(ZeroDivisionError):
        pa.array((1 // 0 for x in range(10)), size=10)


class ObjectWithOnlyGetitem:
    def __getitem__(self, key):
        return 3


def test_object_with_getitem():
    # https://github.com/apache/arrow/issues/34944
    # considered as sequence because of __getitem__, but has no length
    with pytest.raises(TypeError, match="has no len()"):
        pa.array(ObjectWithOnlyGetitem())


def _as_list(xs):
    return xs


def _as_tuple(xs):
    return tuple(xs)


def _as_deque(xs):
    # deque is a sequence while neither tuple nor list
    return collections.deque(xs)


def _as_dict_values(xs):
    # a dict values object is not a sequence, just a regular iterable
    dct = {k: v for k, v in enumerate(xs)}
    return dct.values()


def _as_numpy_array(xs):
    arr = np.empty(len(xs), dtype=object)
    arr[:] = xs
    return arr


def _as_set(xs):
    return set(xs)


SEQUENCE_TYPES = [_as_list, _as_tuple, _as_numpy_array]
ITERABLE_TYPES = [_as_set, _as_dict_values] + SEQUENCE_TYPES
COLLECTIONS_TYPES = [_as_deque] + ITERABLE_TYPES

parametrize_with_iterable_types = pytest.mark.parametrize(
    "seq", ITERABLE_TYPES
)

parametrize_with_sequence_types = pytest.mark.parametrize(
    "seq", SEQUENCE_TYPES
)

parametrize_with_collections_types = pytest.mark.parametrize(
    "seq", COLLECTIONS_TYPES
)


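# Illustrative sketch (an addition, not one of the upstream fixtures): the
# distinction the helpers above encode, stated via the stdlib ABCs. A deque
# registers as a Sequence, while dict values are only an iterable view.
def example_sequence_vs_iterable():
    from collections.abc import Sequence
    assert isinstance(collections.deque([1]), Sequence)
    assert not isinstance({0: 1}.values(), Sequence)

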
@parametrize_with_collections_types
def test_sequence_types(seq):
    arr1 = pa.array(seq([1, 2, 3]))
    arr2 = pa.array([1, 2, 3])

    assert arr1.equals(arr2)


@parametrize_with_iterable_types
def test_nested_sequence_types(seq):
    arr1 = pa.array([seq([1, 2, 3])])
    arr2 = pa.array([[1, 2, 3]])

    assert arr1.equals(arr2)


@parametrize_with_sequence_types
def test_sequence_boolean(seq):
    expected = [True, None, False, None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
def test_sequence_numpy_boolean(seq):
    expected = [np.bool_(True), None, np.bool_(False), None]
    arr = pa.array(seq(expected))
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == [True, None, False, None]


@parametrize_with_sequence_types
def test_sequence_mixed_numpy_python_bools(seq):
    values = np.array([True, False])
    arr = pa.array(seq([values[0], None, values[1], True, False]))
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == [True, None, False, True, False]


@parametrize_with_collections_types
def test_empty_list(seq):
    arr = pa.array(seq([]))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []


@parametrize_with_sequence_types
def test_nested_lists(seq):
    data = [[], [1, 2], None]
    arr = pa.array(seq(data))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data


@parametrize_with_sequence_types
@pytest.mark.parametrize("factory", [
    pa.list_, pa.large_list, pa.list_view, pa.large_list_view])
def test_nested_lists_with_explicit_type(seq, factory):
    data = [[], [1, 2], None]
    arr = pa.array(seq(data), type=factory(pa.int16()))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == factory(pa.int16())
    assert arr.to_pylist() == data


@parametrize_with_collections_types
def test_list_with_non_list(seq):
    # List types don't accept non-sequences
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.list_(pa.int64()))
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.large_list(pa.int64()))
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.list_view(pa.int64()))
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.large_list_view(pa.int64()))


@parametrize_with_sequence_types
@pytest.mark.parametrize("factory", [
    pa.list_, pa.large_list, pa.list_view, pa.large_list_view])
def test_nested_arrays(seq, factory):
    arr = pa.array(seq([np.array([], dtype=np.int64),
                        np.array([1, 2], dtype=np.int64), None]),
                   type=factory(pa.int64()))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == factory(pa.int64())
    assert arr.to_pylist() == [[], [1, 2], None]


@parametrize_with_sequence_types
def test_nested_fixed_size_list(seq):
    # sequence of lists
    data = [[1, 2], [3, None], None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 2)
    assert arr.to_pylist() == data

    # sequence of numpy arrays
    data = [np.array([1, 2], dtype='int64'), np.array([3, 4], dtype='int64'),
            None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 2)
    assert arr.to_pylist() == [[1, 2], [3, 4], None]

    # incorrect length of the lists or arrays
    data = [[1, 2, 4], [3, None], None]
    for data in [[[1, 2, 3]], [np.array([1, 2, 4], dtype='int64')]]:
        with pytest.raises(
                ValueError, match="Length of item not correct: expected 2"):
            pa.array(seq(data), type=pa.list_(pa.int64(), 2))

    # with list size of 0
    data = [[], [], None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 0))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 0)
    assert arr.to_pylist() == [[], [], None]


@parametrize_with_sequence_types
def test_sequence_all_none(seq):
    arr = pa.array(seq([None, None]))
    assert len(arr) == 2
    assert arr.null_count == 2
    assert arr.type == pa.null()
    assert arr.to_pylist() == [None, None]


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer(seq, np_scalar_pa_type):
    np_scalar, pa_type = np_scalar_pa_type
    expected = [1, None, 3, None,
                np.iinfo(np_scalar).min, np.iinfo(np_scalar).max]
    arr = pa.array(seq(expected), type=pa_type)
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_collections_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_np_nan(seq, np_scalar_pa_type):
    # ARROW-2806: numpy.nan is a double value and thus should produce
    # a double array.
    _, pa_type = np_scalar_pa_type
    with pytest.raises(ValueError):
        pa.array(seq([np.nan]), type=pa_type, from_pandas=False)

    arr = pa.array(seq([np.nan]), type=pa_type, from_pandas=True)
    expected = [None]
    assert len(arr) == 1
    assert arr.null_count == 1
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_nested_np_nan(seq, np_scalar_pa_type):
    # ARROW-2806: numpy.nan is a double value and thus should produce
    # a double array.
    _, pa_type = np_scalar_pa_type
    with pytest.raises(ValueError):
        pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=False)

    arr = pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=True)
    expected = [[None]]
    assert len(arr) == 1
    assert arr.null_count == 0
    assert arr.type == pa.list_(pa_type)
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
def test_sequence_integer_inferred(seq):
    expected = [1, None, 3, None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.int64()
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer(seq, np_scalar_pa_type):
    np_scalar, pa_type = np_scalar_pa_type
    expected = [np_scalar(1), None, np_scalar(3), None,
                np_scalar(np.iinfo(np_scalar).min),
                np_scalar(np.iinfo(np_scalar).max)]
    arr = pa.array(seq(expected), type=pa_type)
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer_inferred(seq, np_scalar_pa_type):
    np_scalar, pa_type = np_scalar_pa_type
    expected = [np_scalar(1), None, np_scalar(3), None]
    expected += [np_scalar(np.iinfo(np_scalar).min),
                 np_scalar(np.iinfo(np_scalar).max)]
    arr = pa.array(seq(expected))
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
def test_sequence_custom_integers(seq):
    expected = [0, 42, 2**33 + 1, -2**63]
    data = list(map(MyInt, expected))
    arr = pa.array(seq(data), type=pa.int64())
    assert arr.to_pylist() == expected


@parametrize_with_collections_types
def test_broken_integers(seq):
    data = [MyBrokenInt()]
    with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"):
        pa.array(seq(data), type=pa.int64())


def test_numpy_scalars_mixed_type():
    # ARROW-4324
    data = [np.int32(10), np.float32(0.5)]
    arr = pa.array(data)
    expected = pa.array([10, 0.5], type="float64")
    assert arr.equals(expected)

    # ARROW-9490
    data = [np.int8(10), np.float32(0.5)]
    arr = pa.array(data)
    expected = pa.array([10, 0.5], type="float32")
    assert arr.equals(expected)


@pytest.mark.xfail(reason="Type inference for uint64 not implemented",
                   raises=OverflowError)
def test_uint64_max_convert():
    data = [0, np.iinfo(np.uint64).max]

    arr = pa.array(data, type=pa.uint64())
    expected = pa.array(np.array(data, dtype='uint64'))
    assert arr.equals(expected)

    arr_inferred = pa.array(data)
    assert arr_inferred.equals(expected)


@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_signed_integer_overflow(bits):
    ty = getattr(pa, "int%d" % bits)()
    # XXX ideally would always raise OverflowError
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([2 ** (bits - 1)], ty)
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([-2 ** (bits - 1) - 1], ty)


@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_unsigned_integer_overflow(bits):
    ty = getattr(pa, "uint%d" % bits)()
    # XXX ideally would always raise OverflowError
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([2 ** bits], ty)
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([-1], ty)


@parametrize_with_collections_types
@pytest.mark.parametrize("typ", pa_int_types)
def test_integer_from_string_error(seq, typ):
    # ARROW-9451: pa.array(['1'], type=pa.uint32()) should not succeed
    with pytest.raises(pa.ArrowInvalid):
        pa.array(seq(['1']), type=typ)


def test_convert_with_mask():
    data = [1, 2, 3, 4, 5]
    mask = np.array([False, True, False, False, True])

    result = pa.array(data, mask=mask)
    expected = pa.array([1, None, 3, 4, None])

    assert result.equals(expected)

    # Mask wrong length
    with pytest.raises(ValueError):
        pa.array(data, mask=mask[1:])


def test_garbage_collection():
    import gc

    # Force the cyclic garbage collector to run
    gc.collect()

    bytes_before = pa.total_allocated_bytes()
    pa.array([1, None, 3, None])
    gc.collect()
    assert pa.total_allocated_bytes() == bytes_before


def test_sequence_double():
    data = [1.5, 1., None, 2.5, None, None]
    arr = pa.array(data)
    assert len(arr) == 6
    assert arr.null_count == 3
    assert arr.type == pa.float64()
    assert arr.to_pylist() == data


def test_double_auto_coerce_from_integer():
    # Done as part of ARROW-2814
    data = [1.5, 1., None, 2.5, None, None]
    arr = pa.array(data)

    data2 = [1.5, 1, None, 2.5, None, None]
    arr2 = pa.array(data2)

    assert arr.equals(arr2)

    data3 = [1, 1.5, None, 2.5, None, None]
    arr3 = pa.array(data3)

    data4 = [1., 1.5, None, 2.5, None, None]
    arr4 = pa.array(data4)

    assert arr3.equals(arr4)


def test_double_integer_coerce_representable_range():
    valid_values = [1.5, 1, 2, None, 1 << 53, -(1 << 53)]
    invalid_values = [1.5, 1, 2, None, (1 << 53) + 1]
    invalid_values2 = [1.5, 1, 2, None, -((1 << 53) + 1)]

    # it works
    pa.array(valid_values)

    # it fails
    with pytest.raises(ValueError):
        pa.array(invalid_values)

    with pytest.raises(ValueError):
        pa.array(invalid_values2)


def test_float32_integer_coerce_representable_range():
    f32 = np.float32
    valid_values = [f32(1.5), 1 << 24, -(1 << 24)]
    invalid_values = [f32(1.5), (1 << 24) + 1]
    invalid_values2 = [f32(1.5), -((1 << 24) + 1)]

    # it works
    pa.array(valid_values, type=pa.float32())

    # it fails
    with pytest.raises(ValueError):
        pa.array(invalid_values, type=pa.float32())

    with pytest.raises(ValueError):
        pa.array(invalid_values2, type=pa.float32())


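# Illustrative sketch (an addition, not one of the upstream tests): the
# 2**53 and 2**24 cut-offs above are the IEEE-754 significand widths of
# float64 and float32. One past either bound no longer round-trips, which
# is why the conversions above are rejected.
def example_float_significand_bounds():
    assert float(2**53) == 2**53
    assert float(2**53 + 1) == float(2**53)  # rounds back down (half-to-even)
    assert np.float32(2**24) == 2**24
    assert np.float32(2**24 + 1) == np.float32(2**24)

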
def test_mixed_sequence_errors():
|
582 |
+
with pytest.raises(ValueError, match="tried to convert to boolean"):
|
583 |
+
pa.array([True, 'foo'], type=pa.bool_())
|
584 |
+
|
585 |
+
with pytest.raises(ValueError, match="tried to convert to float32"):
|
586 |
+
pa.array([1.5, 'foo'], type=pa.float32())
|
587 |
+
|
588 |
+
with pytest.raises(ValueError, match="tried to convert to double"):
|
589 |
+
pa.array([1.5, 'foo'])
|
590 |
+
|
591 |
+
|
592 |
+
@parametrize_with_sequence_types
|
593 |
+
@pytest.mark.parametrize("np_scalar,pa_type", [
|
594 |
+
(np.float16, pa.float16()),
|
595 |
+
(np.float32, pa.float32()),
|
596 |
+
(np.float64, pa.float64())
|
597 |
+
])
|
598 |
+
@pytest.mark.parametrize("from_pandas", [True, False])
|
599 |
+
def test_sequence_numpy_double(seq, np_scalar, pa_type, from_pandas):
|
600 |
+
data = [np_scalar(1.5), np_scalar(1), None, np_scalar(2.5), None, np.nan]
|
601 |
+
arr = pa.array(seq(data), from_pandas=from_pandas)
|
602 |
+
assert len(arr) == 6
|
603 |
+
if from_pandas:
|
604 |
+
assert arr.null_count == 3
|
605 |
+
else:
|
606 |
+
assert arr.null_count == 2
|
607 |
+
if from_pandas:
|
608 |
+
# The NaN is skipped in type inference, otherwise it forces a
|
609 |
+
# float64 promotion
|
610 |
+
assert arr.type == pa_type
|
611 |
+
else:
|
612 |
+
assert arr.type == pa.float64()
|
613 |
+
|
614 |
+
assert arr.to_pylist()[:4] == data[:4]
|
615 |
+
if from_pandas:
|
616 |
+
assert arr.to_pylist()[5] is None
|
617 |
+
else:
|
618 |
+
assert np.isnan(arr.to_pylist()[5])
|
619 |
+
|
620 |
+
|
621 |
+
@pytest.mark.parametrize("from_pandas", [True, False])
|
622 |
+
@pytest.mark.parametrize("inner_seq", [np.array, list])
|
623 |
+
def test_ndarray_nested_numpy_double(from_pandas, inner_seq):
|
624 |
+
# ARROW-2806
|
625 |
+
data = np.array([
|
626 |
+
inner_seq([1., 2.]),
|
627 |
+
inner_seq([1., 2., 3.]),
|
628 |
+
inner_seq([np.nan]),
|
629 |
+
None
|
630 |
+
], dtype=object)
|
631 |
+
arr = pa.array(data, from_pandas=from_pandas)
|
632 |
+
assert len(arr) == 4
|
633 |
+
assert arr.null_count == 1
|
634 |
+
assert arr.type == pa.list_(pa.float64())
|
635 |
+
if from_pandas:
|
636 |
+
assert arr.to_pylist() == [[1.0, 2.0], [1.0, 2.0, 3.0], [None], None]
|
637 |
+
else:
|
638 |
+
np.testing.assert_equal(arr.to_pylist(),
|
639 |
+
[[1., 2.], [1., 2., 3.], [np.nan], None])
|
640 |
+
|
641 |
+
|
642 |
+
def test_nested_ndarray_in_object_array():
|
643 |
+
# ARROW-4350
|
644 |
+
arr = np.empty(2, dtype=object)
|
645 |
+
arr[:] = [np.array([1, 2], dtype=np.int64),
|
646 |
+
np.array([2, 3], dtype=np.int64)]
|
647 |
+
|
648 |
+
arr2 = np.empty(2, dtype=object)
|
649 |
+
arr2[0] = [3, 4]
|
650 |
+
arr2[1] = [5, 6]
|
651 |
+
|
652 |
+
expected_type = pa.list_(pa.list_(pa.int64()))
|
653 |
+
assert pa.infer_type([arr]) == expected_type
|
654 |
+
|
655 |
+
result = pa.array([arr, arr2])
|
656 |
+
expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
|
657 |
+
type=expected_type)
|
658 |
+
|
659 |
+
assert result.equals(expected)
|
660 |
+
|
661 |
+
# test case for len-1 arrays to ensure they are interpreted as
|
662 |
+
# sublists and not scalars
|
663 |
+
arr = np.empty(2, dtype=object)
|
664 |
+
arr[:] = [np.array([1]), np.array([2])]
|
665 |
+
result = pa.array([arr, arr])
|
666 |
+
assert result.to_pylist() == [[[1], [2]], [[1], [2]]]
|
667 |
+
|
668 |
+
|
669 |
+
@pytest.mark.xfail(reason=("Type inference for multidimensional ndarray "
|
670 |
+
"not yet implemented"),
|
671 |
+
raises=AssertionError)
|
672 |
+
def test_multidimensional_ndarray_as_nested_list():
|
673 |
+
# TODO(wesm): see ARROW-5645
|
674 |
+
arr = np.array([[1, 2], [2, 3]], dtype=np.int64)
|
675 |
+
arr2 = np.array([[3, 4], [5, 6]], dtype=np.int64)
|
676 |
+
|
677 |
+
expected_type = pa.list_(pa.list_(pa.int64()))
|
678 |
+
assert pa.infer_type([arr]) == expected_type
|
679 |
+
|
680 |
+
result = pa.array([arr, arr2])
|
681 |
+
expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
|
682 |
+
type=expected_type)
|
683 |
+
|
684 |
+
assert result.equals(expected)
|
685 |
+
|
686 |
+
|
687 |
+
@pytest.mark.parametrize(('data', 'value_type'), [
|
688 |
+
([True, False], pa.bool_()),
|
689 |
+
([None, None], pa.null()),
|
690 |
+
([1, 2, None], pa.int8()),
|
691 |
+
([1, 2., 3., None], pa.float32()),
|
692 |
+
([datetime.date.today(), None], pa.date32()),
|
693 |
+
([None, datetime.date.today()], pa.date64()),
|
694 |
+
([datetime.time(1, 1, 1), None], pa.time32('s')),
|
695 |
+
([None, datetime.time(2, 2, 2)], pa.time64('us')),
|
696 |
+
([datetime.datetime.now(), None], pa.timestamp('us')),
|
697 |
+
([datetime.timedelta(seconds=10)], pa.duration('s')),
|
698 |
+
([b"a", b"b"], pa.binary()),
|
699 |
+
([b"aaa", b"bbb", b"ccc"], pa.binary(3)),
|
700 |
+
([b"a", b"b", b"c"], pa.large_binary()),
|
701 |
+
(["a", "b", "c"], pa.string()),
|
702 |
+
(["a", "b", "c"], pa.large_string()),
|
703 |
+
(
|
704 |
+
[{"a": 1, "b": 2}, None, {"a": 5, "b": None}],
|
705 |
+
pa.struct([('a', pa.int8()), ('b', pa.int16())])
|
706 |
+
)
|
707 |
+
])
|
708 |
+
def test_list_array_from_object_ndarray(data, value_type):
|
709 |
+
ty = pa.list_(value_type)
|
710 |
+
ndarray = np.array(data, dtype=object)
|
711 |
+
arr = pa.array([ndarray], type=ty)
|
712 |
+
assert arr.type.equals(ty)
|
713 |
+
assert arr.to_pylist() == [data]
|
714 |
+
|
715 |
+
|
716 |
+
@pytest.mark.parametrize(('data', 'value_type'), [
|
717 |
+
([[1, 2], [3]], pa.list_(pa.int64())),
|
718 |
+
([[1, 2], [3, 4]], pa.list_(pa.int64(), 2)),
|
719 |
+
([[1], [2, 3]], pa.large_list(pa.int64()))
|
720 |
+
])
|
721 |
+
def test_nested_list_array_from_object_ndarray(data, value_type):
|
722 |
+
ndarray = np.empty(len(data), dtype=object)
|
723 |
+
ndarray[:] = [np.array(item, dtype=object) for item in data]
|
724 |
+
|
725 |
+
ty = pa.list_(value_type)
|
726 |
+
arr = pa.array([ndarray], type=ty)
|
727 |
+
assert arr.type.equals(ty)
|
728 |
+
assert arr.to_pylist() == [data]
|
729 |
+
|
730 |
+
|
731 |
+
def test_array_ignore_nan_from_pandas():
|
732 |
+
# See ARROW-4324, this reverts logic that was introduced in
|
733 |
+
# ARROW-2240
|
734 |
+
with pytest.raises(ValueError):
|
735 |
+
pa.array([np.nan, 'str'])
|
736 |
+
|
737 |
+
arr = pa.array([np.nan, 'str'], from_pandas=True)
|
738 |
+
expected = pa.array([None, 'str'])
|
739 |
+
assert arr.equals(expected)
|
740 |
+
|
741 |
+
|
742 |
+
def test_nested_ndarray_different_dtypes():
|
743 |
+
data = [
|
744 |
+
np.array([1, 2, 3], dtype='int64'),
|
745 |
+
None,
|
746 |
+
np.array([4, 5, 6], dtype='uint32')
|
747 |
+
]
|
748 |
+
|
749 |
+
arr = pa.array(data)
|
750 |
+
expected = pa.array([[1, 2, 3], None, [4, 5, 6]],
|
751 |
+
type=pa.list_(pa.int64()))
|
752 |
+
assert arr.equals(expected)
|
753 |
+
|
754 |
+
t2 = pa.list_(pa.uint32())
|
755 |
+
arr2 = pa.array(data, type=t2)
|
756 |
+
expected2 = expected.cast(t2)
|
757 |
+
assert arr2.equals(expected2)
|
758 |
+
|
759 |
+
|
760 |
+
def test_sequence_unicode():
|
761 |
+
data = ['foo', 'bar', None, 'mañana']
|
762 |
+
arr = pa.array(data)
|
763 |
+
assert len(arr) == 4
|
764 |
+
assert arr.null_count == 1
|
765 |
+
assert arr.type == pa.string()
|
766 |
+
assert arr.to_pylist() == data
|
767 |
+
|
768 |
+
|
769 |
+
@pytest.mark.parametrize("ty", [pa.string(), pa.large_string(), pa.string_view()])
|
770 |
+
def test_sequence_unicode_explicit_type(ty):
|
771 |
+
data = ['foo', 'bar', None, 'mañana']
|
772 |
+
arr = pa.array(data, type=ty)
|
773 |
+
assert len(arr) == 4
|
774 |
+
assert arr.null_count == 1
|
775 |
+
assert arr.type == ty
|
776 |
+
assert arr.to_pylist() == data
|
777 |
+
|
778 |
+
|
779 |
+
def check_array_mixed_unicode_bytes(binary_type, string_type):
|
780 |
+
values = ['qux', b'foo', bytearray(b'barz')]
|
781 |
+
b_values = [b'qux', b'foo', b'barz']
|
782 |
+
u_values = ['qux', 'foo', 'barz']
|
783 |
+
|
784 |
+
arr = pa.array(values)
|
785 |
+
expected = pa.array(b_values, type=pa.binary())
|
786 |
+
assert arr.type == pa.binary()
|
787 |
+
assert arr.equals(expected)
|
788 |
+
|
789 |
+
arr = pa.array(values, type=binary_type)
|
790 |
+
expected = pa.array(b_values, type=binary_type)
|
791 |
+
assert arr.type == binary_type
|
792 |
+
assert arr.equals(expected)
|
793 |
+
|
794 |
+
arr = pa.array(values, type=string_type)
|
795 |
+
expected = pa.array(u_values, type=string_type)
|
796 |
+
assert arr.type == string_type
|
797 |
+
assert arr.equals(expected)
|
798 |
+
|
799 |
+
|
800 |
+
def test_array_mixed_unicode_bytes():
|
801 |
+
check_array_mixed_unicode_bytes(pa.binary(), pa.string())
|
802 |
+
check_array_mixed_unicode_bytes(pa.large_binary(), pa.large_string())
|
803 |
+
check_array_mixed_unicode_bytes(pa.binary_view(), pa.string_view())
|
804 |
+
|
805 |
+
|
806 |
+
@pytest.mark.large_memory
|
807 |
+
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
|
808 |
+
def test_large_binary_array(ty):
|
809 |
+
# Construct a large binary array with more than 4GB of data
|
810 |
+
s = b"0123456789abcdefghijklmnopqrstuvwxyz" * 10
|
811 |
+
nrepeats = math.ceil((2**32 + 5) / len(s))
|
812 |
+
data = [s] * nrepeats
|
813 |
+
arr = pa.array(data, type=ty)
|
814 |
+
assert isinstance(arr, pa.Array)
|
815 |
+
assert arr.type == ty
|
816 |
+
assert len(arr) == nrepeats
|
817 |
+
|
818 |
+
|
819 |
+
@pytest.mark.slow
|
820 |
+
@pytest.mark.large_memory
|
821 |
+
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
|
822 |
+
def test_large_binary_value(ty):
|
823 |
+
# Construct a large binary array with a single value larger than 4GB
|
824 |
+
s = b"0123456789abcdefghijklmnopqrstuvwxyz"
|
825 |
+
nrepeats = math.ceil((2**32 + 5) / len(s))
|
826 |
+
arr = pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty)
|
827 |
+
assert isinstance(arr, pa.Array)
|
828 |
+
assert arr.type == ty
|
829 |
+
assert len(arr) == 4
|
830 |
+
buf = arr[1].as_buffer()
|
831 |
+
assert len(buf) == len(s) * nrepeats
|
832 |
+
|
833 |
+
|
834 |
+
@pytest.mark.large_memory
|
835 |
+
@pytest.mark.parametrize("ty", [pa.binary(), pa.string(), pa.string_view()])
|
836 |
+
def test_string_too_large(ty):
|
837 |
+
# Construct a binary array with a single value larger than 4GB
|
838 |
+
s = b"0123456789abcdefghijklmnopqrstuvwxyz"
|
839 |
+
nrepeats = math.ceil((2**32 + 5) / len(s))
|
840 |
+
with pytest.raises(pa.ArrowCapacityError):
|
841 |
+
pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty)
|
842 |
+
|
843 |
+
|
844 |
+
def test_sequence_bytes():
|
845 |
+
u1 = b'ma\xc3\xb1ana'
|
846 |
+
|
847 |
+
data = [b'foo',
|
848 |
+
memoryview(b'dada'),
|
849 |
+
memoryview(b'd-a-t-a')[::2], # non-contiguous is made contiguous
|
850 |
+
u1.decode('utf-8'), # unicode gets encoded,
|
851 |
+
bytearray(b'bar'),
|
852 |
+
None]
|
853 |
+
for ty in [None, pa.binary(), pa.large_binary(), pa.binary_view()]:
|
854 |
+
arr = pa.array(data, type=ty)
|
855 |
+
assert len(arr) == 6
|
856 |
+
assert arr.null_count == 1
|
857 |
+
assert arr.type == ty or pa.binary()
|
858 |
+
assert arr.to_pylist() == [b'foo', b'dada', b'data', u1, b'bar', None]
|
859 |
+
|
860 |
+
|
861 |
+
@pytest.mark.parametrize("ty", [pa.string(), pa.large_string(), pa.string_view()])
|
862 |
+
def test_sequence_utf8_to_unicode(ty):
|
863 |
+
# ARROW-1225
|
864 |
+
data = [b'foo', None, b'bar']
|
865 |
+
arr = pa.array(data, type=ty)
|
866 |
+
assert arr.type == ty
|
867 |
+
assert arr[0].as_py() == 'foo'
|
868 |
+
|
869 |
+
# test a non-utf8 unicode string
|
870 |
+
val = ('mañana').encode('utf-16-le')
|
871 |
+
with pytest.raises(pa.ArrowInvalid):
|
872 |
+
pa.array([val], type=ty)
|
873 |
+
|
874 |
+
|
875 |
+
def test_sequence_fixed_size_bytes():
|
876 |
+
data = [b'foof', None, bytearray(b'barb'), b'2346']
|
877 |
+
arr = pa.array(data, type=pa.binary(4))
|
878 |
+
assert len(arr) == 4
|
879 |
+
assert arr.null_count == 1
|
880 |
+
assert arr.type == pa.binary(4)
|
881 |
+
assert arr.to_pylist() == [b'foof', None, b'barb', b'2346']
|
882 |
+
|
883 |
+
|
884 |
+
def test_fixed_size_bytes_does_not_accept_varying_lengths():
|
885 |
+
data = [b'foo', None, b'barb', b'2346']
|
886 |
+
with pytest.raises(pa.ArrowInvalid):
|
887 |
+
pa.array(data, type=pa.binary(4))
|
888 |
+
|
889 |
+
|
890 |
+
def test_fixed_size_binary_length_check():
|
891 |
+
# ARROW-10193
|
892 |
+
data = [b'\x19h\r\x9e\x00\x00\x00\x00\x01\x9b\x9fA']
|
893 |
+
assert len(data[0]) == 12
|
894 |
+
ty = pa.binary(12)
|
895 |
+
arr = pa.array(data, type=ty)
|
896 |
+
assert arr.to_pylist() == data
|
897 |
+
|
898 |
+
|
899 |
+
def test_sequence_date():
|
900 |
+
data = [datetime.date(2000, 1, 1), None, datetime.date(1970, 1, 1),
|
901 |
+
datetime.date(2040, 2, 26)]
|
902 |
+
arr = pa.array(data)
|
903 |
+
assert len(arr) == 4
|
904 |
+
assert arr.type == pa.date32()
|
905 |
+
assert arr.null_count == 1
|
906 |
+
assert arr[0].as_py() == datetime.date(2000, 1, 1)
|
907 |
+
assert arr[1].as_py() is None
|
908 |
+
assert arr[2].as_py() == datetime.date(1970, 1, 1)
|
909 |
+
assert arr[3].as_py() == datetime.date(2040, 2, 26)
|
910 |
+
|
911 |
+
|
912 |
+
@pytest.mark.parametrize('input',
|
913 |
+
[(pa.date32(), [10957, None]),
|
914 |
+
(pa.date64(), [10957 * 86400000, None])])
|
915 |
+
def test_sequence_explicit_types(input):
|
916 |
+
t, ex_values = input
|
917 |
+
data = [datetime.date(2000, 1, 1), None]
|
918 |
+
arr = pa.array(data, type=t)
|
919 |
+
arr2 = pa.array(ex_values, type=t)
|
920 |
+
|
921 |
+
for x in [arr, arr2]:
|
922 |
+
assert len(x) == 2
|
923 |
+
assert x.type == t
|
924 |
+
assert x.null_count == 1
|
925 |
+
assert x[0].as_py() == datetime.date(2000, 1, 1)
|
926 |
+
assert x[1].as_py() is None
|
927 |
+
|
928 |
+
|
929 |
+
def test_date32_overflow():
|
930 |
+
# Overflow
|
931 |
+
data3 = [2**32, None]
|
932 |
+
with pytest.raises((OverflowError, pa.ArrowException)):
|
933 |
+
pa.array(data3, type=pa.date32())
|
934 |
+
|
935 |
+
|
936 |
+
@pytest.mark.parametrize(('time_type', 'unit', 'int_type'), [
|
937 |
+
(pa.time32, 's', 'int32'),
|
938 |
+
(pa.time32, 'ms', 'int32'),
|
939 |
+
(pa.time64, 'us', 'int64'),
|
940 |
+
(pa.time64, 'ns', 'int64'),
|
941 |
+
])
|
942 |
+
def test_sequence_time_with_timezone(time_type, unit, int_type):
|
943 |
+
def expected_integer_value(t):
|
944 |
+
# only use with utc time object because it doesn't adjust with the
|
945 |
+
# offset
|
946 |
+
units = ['s', 'ms', 'us', 'ns']
|
947 |
+
multiplier = 10**(units.index(unit) * 3)
|
948 |
+
if t is None:
|
949 |
+
return None
|
950 |
+
seconds = (
|
951 |
+
t.hour * 3600 +
|
952 |
+
t.minute * 60 +
|
953 |
+
t.second +
|
954 |
+
t.microsecond * 10**-6
|
955 |
+
)
|
956 |
+
return int(seconds * multiplier)
|
957 |
+
|
958 |
+
def expected_time_value(t):
|
959 |
+
# only use with utc time object because it doesn't adjust with the
|
960 |
+
# time objects tzdata
|
961 |
+
if unit == 's':
|
962 |
+
return t.replace(microsecond=0)
|
963 |
+
elif unit == 'ms':
|
964 |
+
return t.replace(microsecond=(t.microsecond // 1000) * 1000)
|
965 |
+
else:
|
966 |
+
return t
|
967 |
+
|
968 |
+
# only timezone naive times are supported in arrow
|
969 |
+
data = [
|
970 |
+
datetime.time(8, 23, 34, 123456),
|
971 |
+
datetime.time(5, 0, 0, 1000),
|
972 |
+
None,
|
973 |
+
datetime.time(1, 11, 56, 432539),
|
974 |
+
datetime.time(23, 10, 0, 437699)
|
975 |
+
]
|
976 |
+
|
977 |
+
ty = time_type(unit)
|
978 |
+
arr = pa.array(data, type=ty)
|
979 |
+
assert len(arr) == 5
|
980 |
+
assert arr.type == ty
|
981 |
+
assert arr.null_count == 1
|
982 |
+
|
983 |
+
# test that the underlying integers are UTC values
|
984 |
+
values = arr.cast(int_type)
|
985 |
+
expected = list(map(expected_integer_value, data))
|
986 |
+
assert values.to_pylist() == expected
|
987 |
+
|
988 |
+
# test that the scalars are datetime.time objects with UTC timezone
|
989 |
+
assert arr[0].as_py() == expected_time_value(data[0])
|
990 |
+
assert arr[1].as_py() == expected_time_value(data[1])
|
991 |
+
assert arr[2].as_py() is None
|
992 |
+
assert arr[3].as_py() == expected_time_value(data[3])
|
993 |
+
assert arr[4].as_py() == expected_time_value(data[4])
|
994 |
+
|
995 |
+
def tz(hours, minutes=0):
|
996 |
+
offset = datetime.timedelta(hours=hours, minutes=minutes)
|
997 |
+
return datetime.timezone(offset)
|
998 |
+
|
999 |
+
|
1000 |
+
def test_sequence_timestamp():
|
1001 |
+
data = [
|
1002 |
+
datetime.datetime(2007, 7, 13, 1, 23, 34, 123456),
|
1003 |
+
None,
|
1004 |
+
datetime.datetime(2006, 1, 13, 12, 34, 56, 432539),
|
1005 |
+
datetime.datetime(2010, 8, 13, 5, 46, 57, 437699)
|
1006 |
+
]
|
1007 |
+
arr = pa.array(data)
|
1008 |
+
assert len(arr) == 4
|
1009 |
+
assert arr.type == pa.timestamp('us')
|
1010 |
+
assert arr.null_count == 1
|
1011 |
+
assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
|
1012 |
+
23, 34, 123456)
|
1013 |
+
assert arr[1].as_py() is None
|
1014 |
+
assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
|
1015 |
+
34, 56, 432539)
|
1016 |
+
assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
|
1017 |
+
46, 57, 437699)
|
1018 |
+
|
1019 |
+
|
1020 |
+
@pytest.mark.parametrize('timezone', [
    None,
    'UTC',
    'Etc/GMT-1',
    'Europe/Budapest',
])
@pytest.mark.parametrize('unit', [
    's',
    'ms',
    'us',
    'ns'
])
def test_sequence_timestamp_with_timezone(timezone, unit):
    pytz = pytest.importorskip("pytz")

    def expected_integer_value(dt):
        units = ['s', 'ms', 'us', 'ns']
        multiplier = 10**(units.index(unit) * 3)
        if dt is None:
            return None
        else:
            # avoid float precision issues
            ts = decimal.Decimal(str(dt.timestamp()))
            return int(ts * multiplier)

    def expected_datetime_value(dt):
        if dt is None:
            return None

        if unit == 's':
            dt = dt.replace(microsecond=0)
        elif unit == 'ms':
            dt = dt.replace(microsecond=(dt.microsecond // 1000) * 1000)

        # adjust the timezone
        if timezone is None:
            # make datetime timezone unaware
            return dt.replace(tzinfo=None)
        else:
            # convert to the expected timezone
            return dt.astimezone(pytz.timezone(timezone))

    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        None,
        pytz.timezone('US/Eastern').localize(
            datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
        ),
        pytz.timezone('Europe/Moscow').localize(
            datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
        ),
    ]
    utcdata = [
        pytz.utc.localize(data[0]),
        data[1],
        None,
        data[3].astimezone(pytz.utc),
        data[4].astimezone(pytz.utc),
    ]

    ty = pa.timestamp(unit, tz=timezone)
    arr = pa.array(data, type=ty)
    assert len(arr) == 5
    assert arr.type == ty
    assert arr.null_count == 1

    # test that the underlying integers are UTC values
    values = arr.cast('int64')
    expected = list(map(expected_integer_value, utcdata))
    assert values.to_pylist() == expected

    # test that the scalars are datetimes with the correct timezone
    for i in range(len(arr)):
        assert arr[i].as_py() == expected_datetime_value(utcdata[i])
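

# Illustrative sketch (not part of the diff above): the "underlying
# integers are UTC values" assertion means two datetimes denoting the
# same instant in different zones store the same int64. Assumes only
# stdlib datetime.timezone offsets, not the pytz zones used above.
def _demo_timestamps_store_utc_integers():
    utc = datetime.timezone.utc
    plus1 = datetime.timezone(datetime.timedelta(hours=1))
    # 12:00 UTC and 13:00 at UTC+1 are the same instant
    a = pa.array([datetime.datetime(2020, 1, 1, 12, tzinfo=utc)],
                 type=pa.timestamp('us', tz='UTC'))
    b = pa.array([datetime.datetime(2020, 1, 1, 13, tzinfo=plus1)],
                 type=pa.timestamp('us', tz='UTC'))
    assert a.cast('int64').to_pylist() == b.cast('int64').to_pylist()
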

@pytest.mark.parametrize('timezone', [
    None,
    'UTC',
    'Etc/GMT-1',
    'Europe/Budapest',
])
def test_pyarrow_ignore_timezone_environment_variable(monkeypatch, timezone):
    # note that any non-empty value will evaluate to true
    pytest.importorskip("pytz")
    import pytz

    monkeypatch.setenv("PYARROW_IGNORE_TIMEZONE", "1")
    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        pytz.timezone('US/Eastern').localize(
            datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
        ),
        pytz.timezone('Europe/Moscow').localize(
            datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
        ),
    ]

    expected = [dt.replace(tzinfo=None) for dt in data]
    if timezone is not None:
        tzinfo = pytz.timezone(timezone)
        expected = [tzinfo.fromutc(dt) for dt in expected]

    ty = pa.timestamp('us', tz=timezone)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected
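

# Sketch of the env-var behavior exercised above (an assumption drawn
# from the expected values, not a statement of the full API): with
# PYARROW_IGNORE_TIMEZONE set, tzinfo on the inputs is ignored and the
# wall-clock value is taken as-is, rather than converting to UTC first.
def _demo_pyarrow_ignore_timezone(monkeypatch):
    monkeypatch.setenv("PYARROW_IGNORE_TIMEZONE", "1")
    plus2 = datetime.timezone(datetime.timedelta(hours=2))
    dt = datetime.datetime(2007, 7, 13, 8, 23, 34, tzinfo=plus2)
    arr = pa.array([dt], type=pa.timestamp('us'))
    # wall-clock 08:23:34 is kept instead of the 06:23:34 UTC instant
    assert arr.to_pylist() == [dt.replace(tzinfo=None)]
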

def test_sequence_timestamp_with_timezone_inference():
    pytest.importorskip("pytz")
    import pytz

    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        None,
        pytz.timezone('US/Eastern').localize(
            datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
        ),
        pytz.timezone('Europe/Moscow').localize(
            datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
        ),
    ]
    expected = [
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='UTC'),
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='US/Eastern'),
        pa.timestamp('us', tz='Europe/Moscow')
    ]
    for dt, expected_type in zip(data, expected):
        prepended = [dt] + data
        arr = pa.array(prepended)
        assert arr.type == expected_type


def test_sequence_timestamp_with_zoneinfo_timezone_inference():
    pytest.importorskip("zoneinfo")
    import zoneinfo

    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        datetime.datetime(2008, 1, 5, 5, 0, 0, 1000,
                          tzinfo=datetime.timezone.utc),
        None,
        datetime.datetime(2006, 1, 13, 12, 34, 56, 432539,
                          tzinfo=zoneinfo.ZoneInfo(key='US/Eastern')),
        datetime.datetime(2010, 8, 13, 5, 0, 0, 437699,
                          tzinfo=zoneinfo.ZoneInfo(key='Europe/Moscow')),
    ]
    expected = [
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='UTC'),
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='US/Eastern'),
        pa.timestamp('us', tz='Europe/Moscow')
    ]
    for dt, expected_type in zip(data, expected):
        prepended = [dt] + data
        arr = pa.array(prepended)
        assert arr.type == expected_type


@pytest.mark.pandas
def test_sequence_timestamp_from_mixed_builtin_and_pandas_datetimes():
    pytest.importorskip("pytz")
    import pytz
    import pandas as pd

    data = [
        pd.Timestamp(1184307814123456123, tz=pytz.timezone('US/Eastern'),
                     unit='ns'),
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        None,
    ]
    utcdata = [
        data[0].astimezone(pytz.utc),
        pytz.utc.localize(data[1]),
        data[2].astimezone(pytz.utc),
        None,
    ]

    arr = pa.array(data)
    assert arr.type == pa.timestamp('us', tz='US/Eastern')

    values = arr.cast('int64')
    expected = [int(dt.timestamp() * 10**6) if dt else None for dt in utcdata]
    assert values.to_pylist() == expected


def test_sequence_timestamp_out_of_bounds_nanosecond():
    # https://issues.apache.org/jira/browse/ARROW-9768
    # datetime outside of range supported for nanosecond resolution
    data = [datetime.datetime(2262, 4, 12)]
    with pytest.raises(ValueError, match="out of bounds"):
        pa.array(data, type=pa.timestamp('ns'))

    # with microsecond resolution it works fine
    arr = pa.array(data, type=pa.timestamp('us'))
    assert arr.to_pylist() == data

    # case where the naive is within bounds, but converted to UTC not
    tz = datetime.timezone(datetime.timedelta(hours=-1))
    data = [datetime.datetime(2262, 4, 11, 23, tzinfo=tz)]
    with pytest.raises(ValueError, match="out of bounds"):
        pa.array(data, type=pa.timestamp('ns'))

    arr = pa.array(data, type=pa.timestamp('us'))
    assert arr.to_pylist()[0] == datetime.datetime(2262, 4, 12)
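

# Context for the bounds above: timestamp('ns') counts signed 64-bit
# nanoseconds since the epoch, which runs out on 2262-04-11, so
# 2262-04-12 overflows while coarser units are fine. A stdlib-only
# sketch recovering that date:
def _demo_nanosecond_upper_bound():
    max_us = (2**63 - 1) // 1000  # whole microseconds representable in ns
    limit = datetime.datetime(1970, 1, 1) + datetime.timedelta(
        microseconds=max_us)
    assert limit.date() == datetime.date(2262, 4, 11)
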

def test_sequence_numpy_timestamp():
    data = [
        np.datetime64(datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)),
        None,
        np.datetime64(datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)),
        np.datetime64(datetime.datetime(2010, 8, 13, 5, 46, 57, 437699))
    ]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.timestamp('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                               23, 34, 123456)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
                                               34, 56, 432539)
    assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
                                               46, 57, 437699)


class MyDate(datetime.date):
    pass


class MyDatetime(datetime.datetime):
    pass


class MyTimedelta(datetime.timedelta):
    pass


def test_datetime_subclassing():
    data = [
        MyDate(2007, 7, 13),
    ]
    date_type = pa.date32()
    arr_date = pa.array(data, type=date_type)
    assert len(arr_date) == 1
    assert arr_date.type == date_type
    assert arr_date[0].as_py() == datetime.date(2007, 7, 13)

    data = [
        MyDatetime(2007, 7, 13, 1, 23, 34, 123456),
    ]

    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')

    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                 23, 34, 0)

    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123000)

    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123456)

    data = [
        MyTimedelta(123, 456, 1002),
    ]

    s = pa.duration('s')
    ms = pa.duration('ms')
    us = pa.duration('us')

    arr_s = pa.array(data)
    assert len(arr_s) == 1
    assert arr_s.type == us
    assert arr_s[0].as_py() == datetime.timedelta(123, 456, 1002)

    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.timedelta(123, 456)

    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.timedelta(123, 456, 1000)

    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.timedelta(123, 456, 1002)


@pytest.mark.xfail(not _pandas_api.have_pandas,
                   reason="pandas required for nanosecond conversion")
def test_sequence_timestamp_nanoseconds():
    inputs = [
        [datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)],
        [MyDatetime(2007, 7, 13, 1, 23, 34, 123456)]
    ]

    for data in inputs:
        ns = pa.timestamp('ns')
        arr_ns = pa.array(data, type=ns)
        assert len(arr_ns) == 1
        assert arr_ns.type == ns
        assert arr_ns[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                      23, 34, 123456)


@pytest.mark.pandas
@pytest.mark.skipif(sys.platform == "win32" and not util.windows_has_tzdata(),
                    reason="Timezone database is not installed on Windows")
def test_sequence_timestamp_from_int_with_unit():
    # TODO(wesm): This test might be rewritten to assert the actual behavior
    # when pandas is not installed

    data = [1]

    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')
    ns = pa.timestamp('ns')

    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert repr(arr_s[0]) == (
        "<pyarrow.TimestampScalar: '1970-01-01T00:00:01'>"
    )
    assert str(arr_s[0]) == "1970-01-01 00:00:01"

    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert repr(arr_ms[0].as_py()) == (
        "datetime.datetime(1970, 1, 1, 0, 0, 0, 1000)"
    )
    assert str(arr_ms[0]) == "1970-01-01 00:00:00.001000"

    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert repr(arr_us[0].as_py()) == (
        "datetime.datetime(1970, 1, 1, 0, 0, 0, 1)"
    )
    assert str(arr_us[0]) == "1970-01-01 00:00:00.000001"

    arr_ns = pa.array(data, type=ns)
    assert len(arr_ns) == 1
    assert arr_ns.type == ns
    assert repr(arr_ns[0].as_py()) == (
        "Timestamp('1970-01-01 00:00:00.000000001')"
    )
    assert str(arr_ns[0]) == "1970-01-01 00:00:00.000000001"

    expected_exc = TypeError

    class CustomClass():
        pass

    for ty in [ns, pa.date32(), pa.date64()]:
        with pytest.raises(expected_exc):
            pa.array([1, CustomClass()], type=ty)


@pytest.mark.parametrize('np_scalar', [True, False])
def test_sequence_duration(np_scalar):
    td1 = datetime.timedelta(2, 3601, 1)
    td2 = datetime.timedelta(1, 100, 1000)
    if np_scalar:
        data = [np.timedelta64(td1), None, np.timedelta64(td2)]
    else:
        data = [td1, None, td2]

    arr = pa.array(data)
    assert len(arr) == 3
    assert arr.type == pa.duration('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == td1
    assert arr[1].as_py() is None
    assert arr[2].as_py() == td2


@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_with_unit(unit):
    data = [
        datetime.timedelta(3, 22, 1001),
    ]
    expected = {'s': datetime.timedelta(3, 22),
                'ms': datetime.timedelta(3, 22, 1000),
                'us': datetime.timedelta(3, 22, 1001),
                'ns': datetime.timedelta(3, 22, 1001)}

    ty = pa.duration(unit)

    arr_s = pa.array(data, type=ty)
    assert len(arr_s) == 1
    assert arr_s.type == ty
    assert arr_s[0].as_py() == expected[unit]


@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_from_int_with_unit(unit):
    data = [5]

    ty = pa.duration(unit)
    arr = pa.array(data, type=ty)
    assert len(arr) == 1
    assert arr.type == ty
    assert arr[0].value == 5


def test_sequence_duration_nested_lists():
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)

    data = [[td1, None], [td1, td2]]

    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == data


@pytest.mark.parametrize("factory", [
    pa.list_, pa.large_list, pa.list_view, pa.large_list_view])
def test_sequence_duration_nested_lists_with_explicit_type(factory):
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)

    data = [[td1, None], [td1, td2]]

    arr = pa.array(data, type=factory(pa.duration('ms')))
    assert len(arr) == 2
    assert arr.type == factory(pa.duration('ms'))
    assert arr.to_pylist() == data


def test_sequence_duration_nested_lists_numpy():
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)

    data = [[np.timedelta64(td1), None],
            [np.timedelta64(td1), np.timedelta64(td2)]]

    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == [[td1, None], [td1, td2]]

    data = [np.array([np.timedelta64(td1), None], dtype='timedelta64[us]'),
            np.array([np.timedelta64(td1), np.timedelta64(td2)])]

    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == [[td1, None], [td1, td2]]


def test_sequence_nesting_levels():
    data = [1, 2, None]
    arr = pa.array(data)
    assert arr.type == pa.int64()
    assert arr.to_pylist() == data

    data = [[1], [2], None]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data

    data = [[1], [2, 3, 4], [None]]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data

    data = [None, [[None, 1]], [[2, 3, 4], None], [None]]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.list_(pa.int64()))
    assert arr.to_pylist() == data

    exceptions = (pa.ArrowInvalid, pa.ArrowTypeError)

    # Mixed nesting levels are rejected
    with pytest.raises(exceptions):
        pa.array([1, 2, [1]])

    with pytest.raises(exceptions):
        pa.array([1, 2, []])

    with pytest.raises(exceptions):
        pa.array([[1], [2], [None, [1]]])


def test_sequence_mixed_types_fails():
    data = ['a', 1, 2.0]
    with pytest.raises(pa.ArrowTypeError):
        pa.array(data)


def test_sequence_mixed_types_with_specified_type_fails():
    data = ['-10', '-5', {'a': 1}, '0', '5', '10']

    type = pa.string()
    with pytest.raises(TypeError):
        pa.array(data, type=type)


def test_sequence_decimal():
    data = [decimal.Decimal('1234.183'), decimal.Decimal('8094.234')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=7, scale=3))
        assert arr.to_pylist() == data


def test_sequence_decimal_different_precisions():
    data = [
        decimal.Decimal('1234234983.183'), decimal.Decimal('80943244.234')
    ]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=13, scale=3))
        assert arr.to_pylist() == data


def test_sequence_decimal_no_scale():
    data = [decimal.Decimal('1234234983'), decimal.Decimal('8094324')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=10))
        assert arr.to_pylist() == data


def test_sequence_decimal_negative():
    data = [decimal.Decimal('-1234.234983'), decimal.Decimal('-8.094324')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=10, scale=6))
        assert arr.to_pylist() == data


def test_sequence_decimal_no_whole_part():
    data = [decimal.Decimal('-.4234983'), decimal.Decimal('.0103943')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=7, scale=7))
        assert arr.to_pylist() == data


def test_sequence_decimal_large_integer():
    data = [decimal.Decimal('-394029506937548693.42983'),
            decimal.Decimal('32358695912932.01033')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=23, scale=5))
        assert arr.to_pylist() == data


def test_sequence_decimal_from_integers():
    data = [0, 1, -39402950693754869342983]
    expected = [decimal.Decimal(x) for x in data]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=28, scale=5))
        assert arr.to_pylist() == expected


def test_sequence_decimal_too_high_precision():
    # ARROW-6989 python decimal has too high precision
    with pytest.raises(ValueError, match="precision out of range"):
        pa.array([decimal.Decimal('1' * 80)])


def test_sequence_decimal_infer():
    for data, typ in [
        # simple case
        (decimal.Decimal('1.234'), pa.decimal128(4, 3)),
        # trailing zeros
        (decimal.Decimal('12300'), pa.decimal128(5, 0)),
        (decimal.Decimal('12300.0'), pa.decimal128(6, 1)),
        # scientific power notation
        (decimal.Decimal('1.23E+4'), pa.decimal128(5, 0)),
        (decimal.Decimal('123E+2'), pa.decimal128(5, 0)),
        (decimal.Decimal('123E+4'), pa.decimal128(7, 0)),
        # leading zeros
        (decimal.Decimal('0.0123'), pa.decimal128(4, 4)),
        (decimal.Decimal('0.01230'), pa.decimal128(5, 5)),
        (decimal.Decimal('1.230E-2'), pa.decimal128(5, 5)),
    ]:
        assert pa.infer_type([data]) == typ
        arr = pa.array([data])
        assert arr.type == typ
        assert arr.to_pylist()[0] == data
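

# Sketch of the inference rule exercised above: the scale comes from the
# Decimal exponent and the precision from the coefficient digits (but at
# least the scale), which is why '0.01230' keeps its trailing zero.
def _demo_decimal_inference_rule():
    d = decimal.Decimal('0.01230')
    sign, digits, exponent = d.as_tuple()
    assert (len(digits), -exponent) == (4, 5)  # 4 digits, scale 5
    assert pa.infer_type([d]) == pa.decimal128(5, 5)
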

def test_sequence_decimal_infer_mixed():
    # ARROW-12150 - ensure mixed precision gets correctly inferred to
    # common type that can hold all input values
    cases = [
        ([decimal.Decimal('1.234'), decimal.Decimal('3.456')],
         pa.decimal128(4, 3)),
        ([decimal.Decimal('1.234'), decimal.Decimal('456.7')],
         pa.decimal128(6, 3)),
        ([decimal.Decimal('123.4'), decimal.Decimal('4.567')],
         pa.decimal128(6, 3)),
        ([decimal.Decimal('123e2'), decimal.Decimal('4567e3')],
         pa.decimal128(7, 0)),
        ([decimal.Decimal('123e4'), decimal.Decimal('4567e2')],
         pa.decimal128(7, 0)),
        ([decimal.Decimal('0.123'), decimal.Decimal('0.04567')],
         pa.decimal128(5, 5)),
        ([decimal.Decimal('0.001'), decimal.Decimal('1.01E5')],
         pa.decimal128(9, 3)),
    ]
    for data, typ in cases:
        assert pa.infer_type(data) == typ
        arr = pa.array(data)
        assert arr.type == typ
        assert arr.to_pylist() == data


def test_sequence_decimal_given_type():
    for data, typs, wrong_typs in [
        # simple case
        (
            decimal.Decimal('1.234'),
            [pa.decimal128(4, 3), pa.decimal128(5, 3), pa.decimal128(5, 4)],
            [pa.decimal128(4, 2), pa.decimal128(4, 4)]
        ),
        # trailing zeros
        (
            decimal.Decimal('12300'),
            [pa.decimal128(5, 0), pa.decimal128(6, 0), pa.decimal128(3, -2)],
            [pa.decimal128(4, 0), pa.decimal128(3, -3)]
        ),
        # scientific power notation
        (
            decimal.Decimal('1.23E+4'),
            [pa.decimal128(5, 0), pa.decimal128(6, 0), pa.decimal128(3, -2)],
            [pa.decimal128(4, 0), pa.decimal128(3, -3)]
        ),
    ]:
        for typ in typs:
            arr = pa.array([data], type=typ)
            assert arr.type == typ
            assert arr.to_pylist()[0] == data
        for typ in wrong_typs:
            with pytest.raises(ValueError):
                pa.array([data], type=typ)


def test_range_types():
    arr1 = pa.array(range(3))
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)


def test_empty_range():
    arr = pa.array(range(0))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []


def test_structarray():
    arr = pa.StructArray.from_arrays([], names=[])
    assert arr.type == pa.struct([])
    assert len(arr) == 0
    assert arr.to_pylist() == []

    ints = pa.array([None, 2, 3], type=pa.int64())
    strs = pa.array(['a', None, 'c'], type=pa.string())
    bools = pa.array([True, False, None], type=pa.bool_())
    arr = pa.StructArray.from_arrays(
        [ints, strs, bools],
        ['ints', 'strs', 'bools'])

    expected = [
        {'ints': None, 'strs': 'a', 'bools': True},
        {'ints': 2, 'strs': None, 'bools': False},
        {'ints': 3, 'strs': 'c', 'bools': None},
    ]

    pylist = arr.to_pylist()
    assert pylist == expected, (pylist, expected)

    # len(names) != len(arrays)
    with pytest.raises(ValueError):
        pa.StructArray.from_arrays([ints], ['ints', 'strs'])


def test_struct_from_dicts():
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    arr = pa.array([], type=ty)
    assert arr.to_pylist() == []

    data = [{'a': 5, 'b': 'foo', 'c': True},
            {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data

    # With omitted values
    data = [{'a': 5, 'c': True},
            None,
            {},
            {'a': None, 'b': 'bar'}]
    arr = pa.array(data, type=ty)
    expected = [{'a': 5, 'b': None, 'c': True},
                None,
                {'a': None, 'b': None, 'c': None},
                {'a': None, 'b': 'bar', 'c': None}]
    assert arr.to_pylist() == expected


def test_struct_from_dicts_bytes_keys():
    # ARROW-6878
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    arr = pa.array([], type=ty)
    assert arr.to_pylist() == []

    data = [{b'a': 5, b'b': 'foo'},
            {b'a': 6, b'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == [
        {'a': 5, 'b': 'foo', 'c': None},
        {'a': 6, 'b': None, 'c': False},
    ]


def test_struct_from_tuples():
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])

    data = [(5, 'foo', True),
            (6, 'bar', False)]
    expected = [{'a': 5, 'b': 'foo', 'c': True},
                {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data, type=ty)

    data_as_ndarray = np.empty(len(data), dtype=object)
    data_as_ndarray[:] = data
    arr2 = pa.array(data_as_ndarray, type=ty)
    assert arr.to_pylist() == expected

    assert arr.equals(arr2)

    # With omitted values
    data = [(5, 'foo', None),
            None,
            (6, None, False)]
    expected = [{'a': 5, 'b': 'foo', 'c': None},
                None,
                {'a': 6, 'b': None, 'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected

    # Invalid tuple size
    for tup in [(5, 'foo'), (), ('5', 'foo', True, None)]:
        with pytest.raises(ValueError, match="(?i)tuple size"):
            pa.array([tup], type=ty)


def test_struct_from_list_of_pairs():
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('b', pa.string()),
        pa.field('c', pa.bool_())
    ])
    data = [
        [('a', 5), ('b', 'foo'), ('c', True)],
        [('a', 6), ('b', 'bar'), ('c', False)],
        None
    ]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == [
        {'a': 5, 'b': 'foo', 'c': True},
        {'a': 6, 'b': 'bar', 'c': False},
        None
    ]

    # test with duplicated field names
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('a', pa.string()),
        pa.field('b', pa.bool_())
    ])
    data = [
        [('a', 5), ('a', 'foo'), ('b', True)],
        [('a', 6), ('a', 'bar'), ('b', False)],
    ]
    arr = pa.array(data, type=ty)
    with pytest.raises(ValueError):
        # TODO(kszucs): ARROW-9997
        arr.to_pylist()

    # test with empty elements
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('b', pa.string()),
        pa.field('c', pa.bool_())
    ])
    data = [
        [],
        [('a', 5), ('b', 'foo'), ('c', True)],
        [('a', 2), ('b', 'baz')],
        [('a', 1), ('b', 'bar'), ('c', False), ('d', 'julia')],
    ]
    expected = [
        {'a': None, 'b': None, 'c': None},
        {'a': 5, 'b': 'foo', 'c': True},
        {'a': 2, 'b': 'baz', 'c': None},
        {'a': 1, 'b': 'bar', 'c': False},
    ]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected


def test_struct_from_list_of_pairs_errors():
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('b', pa.string()),
        pa.field('c', pa.bool_())
    ])

    # test that it raises if the key doesn't match the expected field name
    data = [
        [],
        [('a', 5), ('c', True), ('b', None)],
    ]
    msg = "The expected field name is `b` but `c` was given"
    with pytest.raises(ValueError, match=msg):
        pa.array(data, type=ty)

    # test various errors both at the first position and after because of key
    # type inference
    template = (
        r"Could not convert {} with type {}: was expecting tuple of "
        r"(key, value) pair"
    )
    cases = [
        tuple(),  # empty key-value pair
        tuple('a',),  # missing value
        tuple('unknown-key',),  # not known field name
        'string',  # not a tuple
    ]
    for key_value_pair in cases:
        msg = re.escape(template.format(
            repr(key_value_pair), type(key_value_pair).__name__
        ))

        with pytest.raises(TypeError, match=msg):
            pa.array([
                [key_value_pair],
                [('a', 5), ('b', 'foo'), ('c', None)],
            ], type=ty)

        with pytest.raises(TypeError, match=msg):
            pa.array([
                [('a', 5), ('b', 'foo'), ('c', None)],
                [key_value_pair],
            ], type=ty)


def test_struct_from_mixed_sequence():
    # It is forbidden to mix dicts and tuples when initializing a struct array
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    data = [(5, 'foo', True),
            {'a': 6, 'b': 'bar', 'c': False}]
    with pytest.raises(TypeError):
        pa.array(data, type=ty)


def test_struct_from_dicts_inference():
    expected_type = pa.struct([pa.field('a', pa.int64()),
                               pa.field('b', pa.string()),
                               pa.field('c', pa.bool_())])
    data = [{'a': 5, 'b': 'foo', 'c': True},
            {'a': 6, 'b': 'bar', 'c': False}]

    arr = pa.array(data)
    check_struct_type(arr.type, expected_type)
    assert arr.to_pylist() == data

    # With omitted values
    data = [{'a': 5, 'c': True},
            None,
            {},
            {'a': None, 'b': 'bar'}]
    expected = [{'a': 5, 'b': None, 'c': True},
                None,
                {'a': None, 'b': None, 'c': None},
                {'a': None, 'b': 'bar', 'c': None}]

    arr = pa.array(data)
    data_as_ndarray = np.empty(len(data), dtype=object)
    data_as_ndarray[:] = data
    arr2 = pa.array(data)

    check_struct_type(arr.type, expected_type)
    assert arr.to_pylist() == expected
    assert arr.equals(arr2)

    # Nested
    expected_type = pa.struct([
        pa.field('a', pa.struct([pa.field('aa', pa.list_(pa.int64())),
                                 pa.field('ab', pa.bool_())])),
        pa.field('b', pa.string())])
    data = [{'a': {'aa': [5, 6], 'ab': True}, 'b': 'foo'},
            {'a': {'aa': None, 'ab': False}, 'b': None},
            {'a': None, 'b': 'bar'}]
    arr = pa.array(data)

    assert arr.to_pylist() == data

    # Edge cases
    arr = pa.array([{}])
    assert arr.type == pa.struct([])
    assert arr.to_pylist() == [{}]

    # Mixing structs and scalars is rejected
    with pytest.raises((pa.ArrowInvalid, pa.ArrowTypeError)):
        pa.array([1, {'a': 2}])


def test_structarray_from_arrays_coerce():
    # ARROW-1706
    ints = [None, 2, 3]
    strs = ['a', None, 'c']
    bools = [True, False, None]
    ints_nonnull = [1, 2, 3]

    arrays = [ints, strs, bools, ints_nonnull]
    result = pa.StructArray.from_arrays(arrays,
                                        ['ints', 'strs', 'bools',
                                         'int_nonnull'])
    expected = pa.StructArray.from_arrays(
        [pa.array(ints, type='int64'),
         pa.array(strs, type='utf8'),
         pa.array(bools),
         pa.array(ints_nonnull, type='int64')],
        ['ints', 'strs', 'bools', 'int_nonnull'])

    with pytest.raises(ValueError):
        pa.StructArray.from_arrays(arrays)

    assert result.equals(expected)


def test_decimal_array_with_none_and_nan():
    values = [decimal.Decimal('1.234'), None, np.nan, decimal.Decimal('nan')]

    with pytest.raises(TypeError):
        # ARROW-6227: Without from_pandas=True, NaN is considered a float
        array = pa.array(values)

    array = pa.array(values, from_pandas=True)
    assert array.type == pa.decimal128(4, 3)
    assert array.to_pylist() == values[:2] + [None, None]

    array = pa.array(values, type=pa.decimal128(10, 4), from_pandas=True)
    assert array.to_pylist() == [decimal.Decimal('1.2340'), None, None, None]


def test_map_from_dicts():
    data = [[{'key': b'a', 'value': 1}, {'key': b'b', 'value': 2}],
            [{'key': b'c', 'value': 3}],
            [{'key': b'd', 'value': 4}, {'key': b'e', 'value': 5},
             {'key': b'f', 'value': None}],
            [{'key': b'g', 'value': 7}]]
    expected = [[(d['key'], d['value']) for d in entry] for entry in data]

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # With omitted values
    data[1] = None
    expected[1] = None

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # Invalid dictionary
    for entry in [[{'value': 5}], [{}], [{'k': 1, 'v': 2}]]:
        with pytest.raises(ValueError, match="Invalid Map"):
            pa.array([entry], type=pa.map_('i4', 'i4'))

    # Invalid dictionary types
    for entry in [[{'key': '1', 'value': 5}], [{'key': {'value': 2}}]]:
        with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"):
            pa.array([entry], type=pa.map_('i4', 'i4'))


def test_map_from_tuples():
    expected = [[(b'a', 1), (b'b', 2)],
                [(b'c', 3)],
                [(b'd', 4), (b'e', 5), (b'f', None)],
                [(b'g', 7)]]

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # With omitted values
    expected[1] = None

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # Invalid tuple size
    for entry in [[(5,)], [()], [('5', 'foo', True)]]:
        with pytest.raises(ValueError, match="(?i)tuple size"):
            pa.array([entry], type=pa.map_('i4', 'i4'))


def test_dictionary_from_boolean():
    typ = pa.dictionary(pa.int8(), value_type=pa.bool_())
    a = pa.array([False, False, True, False, True], type=typ)
    assert isinstance(a.type, pa.DictionaryType)
    assert a.type.equals(typ)

    expected_indices = pa.array([0, 0, 1, 0, 1], type=pa.int8())
    expected_dictionary = pa.array([False, True], type=pa.bool_())
    assert a.indices.equals(expected_indices)
    assert a.dictionary.equals(expected_dictionary)


@pytest.mark.parametrize('value_type', [
    pa.int8(),
    pa.int16(),
    pa.int32(),
    pa.int64(),
    pa.uint8(),
    pa.uint16(),
    pa.uint32(),
    pa.uint64(),
    pa.float32(),
    pa.float64(),
])
def test_dictionary_from_integers(value_type):
    typ = pa.dictionary(pa.int8(), value_type=value_type)
    a = pa.array([1, 2, 1, 1, 2, 3], type=typ)
    assert isinstance(a.type, pa.DictionaryType)
    assert a.type.equals(typ)

    expected_indices = pa.array([0, 1, 0, 0, 1, 2], type=pa.int8())
    expected_dictionary = pa.array([1, 2, 3], type=value_type)
    assert a.indices.equals(expected_indices)
    assert a.dictionary.equals(expected_dictionary)


@pytest.mark.parametrize('input_index_type', [
    pa.int8(),
    pa.int16(),
    pa.int32(),
    pa.int64()
])
def test_dictionary_index_type(input_index_type):
    # dictionary array is constructed using adaptive index type builder,
    # but the input index type is considered as the minimal width type to use

    typ = pa.dictionary(input_index_type, value_type=pa.int64())
    arr = pa.array(range(10), type=typ)
    assert arr.type.equals(typ)


def test_dictionary_is_always_adaptive():
    # dictionary array is constructed using adaptive index type builder,
    # meaning that the output index type may be wider than the given index
    # type since it depends on the input data
    typ = pa.dictionary(pa.int8(), value_type=pa.int64())

    a = pa.array(range(2**7), type=typ)
    expected = pa.dictionary(pa.int8(), pa.int64())
    assert a.type.equals(expected)

    a = pa.array(range(2**7 + 1), type=typ)
    expected = pa.dictionary(pa.int16(), pa.int64())
    assert a.type.equals(expected)
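

# Minimal illustration of the adaptive index builder described above:
# 128 distinct values still fit int8 indices, one more widens to int16.
def _demo_adaptive_index_widening():
    typ = pa.dictionary(pa.int8(), pa.int64())
    assert pa.array(range(128), type=typ).type.index_type == pa.int8()
    assert pa.array(range(129), type=typ).type.index_type == pa.int16()
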

def test_dictionary_from_strings():
    for value_type in [pa.binary(), pa.string()]:
        typ = pa.dictionary(pa.int8(), value_type)
        a = pa.array(["", "a", "bb", "a", "bb", "ccc"], type=typ)

        assert isinstance(a.type, pa.DictionaryType)

        expected_indices = pa.array([0, 1, 2, 1, 2, 3], type=pa.int8())
        expected_dictionary = pa.array(["", "a", "bb", "ccc"],
                                       type=value_type)
        assert a.indices.equals(expected_indices)
        assert a.dictionary.equals(expected_dictionary)

    # fixed size binary type
    typ = pa.dictionary(pa.int8(), pa.binary(3))
    a = pa.array(["aaa", "aaa", "bbb", "ccc", "bbb"], type=typ)
    assert isinstance(a.type, pa.DictionaryType)

    expected_indices = pa.array([0, 0, 1, 2, 1], type=pa.int8())
    expected_dictionary = pa.array(["aaa", "bbb", "ccc"], type=pa.binary(3))
    assert a.indices.equals(expected_indices)
    assert a.dictionary.equals(expected_dictionary)


@pytest.mark.parametrize(('unit', 'expected'), [
    ('s', datetime.timedelta(seconds=-2147483000)),
    ('ms', datetime.timedelta(milliseconds=-2147483000)),
    ('us', datetime.timedelta(microseconds=-2147483000)),
    ('ns', datetime.timedelta(microseconds=-2147483))
])
def test_duration_array_roundtrip_corner_cases(unit, expected):
    # Corner case discovered by hypothesis: there were implicit conversions
    # to unsigned values resulting in wrong values with wrong signs.
    ty = pa.duration(unit)
    arr = pa.array([-2147483000], type=ty)
    restored = pa.array(arr.to_pylist(), type=ty)
    assert arr.equals(restored)

    expected_list = [expected]
    if unit == 'ns':
        # if pandas is available then a pandas Timedelta is returned
        try:
            import pandas as pd
        except ImportError:
            pass
        else:
            expected_list = [pd.Timedelta(-2147483000, unit='ns')]

    assert restored.to_pylist() == expected_list


@pytest.mark.pandas
def test_roundtrip_nanosecond_resolution_pandas_temporal_objects():
    # corner case discovered by hypothesis: preserving the nanoseconds on
    # conversion from a list of Timedelta and Timestamp objects
    import pandas as pd

    ty = pa.duration('ns')
    arr = pa.array([9223371273709551616], type=ty)
    data = arr.to_pylist()
    assert isinstance(data[0], pd.Timedelta)
    restored = pa.array(data, type=ty)
    assert arr.equals(restored)
    assert restored.to_pylist() == [
        pd.Timedelta(9223371273709551616, unit='ns')
    ]

    ty = pa.timestamp('ns')
    arr = pa.array([9223371273709551616], type=ty)
    data = arr.to_pylist()
    assert isinstance(data[0], pd.Timestamp)
    restored = pa.array(data, type=ty)
    assert arr.equals(restored)
    assert restored.to_pylist() == [
        pd.Timestamp(9223371273709551616, unit='ns')
    ]

    ty = pa.timestamp('ns', tz='US/Eastern')
    value = 1604119893000000000
    arr = pa.array([value], type=ty)
    data = arr.to_pylist()
    assert isinstance(data[0], pd.Timestamp)
    restored = pa.array(data, type=ty)
    assert arr.equals(restored)
    assert restored.to_pylist() == [
        pd.Timestamp(value, unit='ns').tz_localize(
            "UTC").tz_convert('US/Eastern')
    ]


@h.given(past.all_arrays)
def test_array_to_pylist_roundtrip(arr):
    seq = arr.to_pylist()
    restored = pa.array(seq, type=arr.type)
    assert restored.equals(arr)


@pytest.mark.large_memory
def test_auto_chunking_binary_like():
    # single chunk
    v1 = b'x' * 100000000
    v2 = b'x' * 147483646

    # single chunk
    one_chunk_data = [v1] * 20 + [b'', None, v2]
    arr = pa.array(one_chunk_data, type=pa.binary())
    assert isinstance(arr, pa.Array)
    assert len(arr) == 23
    assert arr[20].as_py() == b''
    assert arr[21].as_py() is None
    assert arr[22].as_py() == v2

    # two chunks
    two_chunk_data = one_chunk_data + [b'two']
    arr = pa.array(two_chunk_data, type=pa.binary())
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 23
    assert len(arr.chunk(1)) == 1
    assert arr.chunk(0)[20].as_py() == b''
    assert arr.chunk(0)[21].as_py() is None
    assert arr.chunk(0)[22].as_py() == v2
    assert arr.chunk(1).to_pylist() == [b'two']

    # three chunks
    three_chunk_data = one_chunk_data * 2 + [b'three', b'three']
    arr = pa.array(three_chunk_data, type=pa.binary())
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 3
    assert len(arr.chunk(0)) == 23
    assert len(arr.chunk(1)) == 23
    assert len(arr.chunk(2)) == 2
    for i in range(2):
        assert arr.chunk(i)[20].as_py() == b''
        assert arr.chunk(i)[21].as_py() is None
        assert arr.chunk(i)[22].as_py() == v2
    assert arr.chunk(2).to_pylist() == [b'three', b'three']
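

# Why the chunking above happens: binary()/string() arrays carry 32-bit
# offsets, capping one chunk at roughly 2 GiB of values; large_binary()
# uses 64-bit offsets and would typically keep such data in one Array.
# Small stand-in values below, since the real threshold needs large
# memory to demonstrate.
def _demo_large_binary_offsets():
    data = [b'x' * 8] * 4
    small = pa.array(data, type=pa.binary())
    large = pa.array(data, type=pa.large_binary())
    # same values, but the large variant carries 64-bit offsets
    assert small.to_pylist() == large.to_pylist()
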

@pytest.mark.large_memory
def test_auto_chunking_list_of_binary():
    # ARROW-6281
    vals = [['x' * 1024]] * ((2 << 20) + 1)
    arr = pa.array(vals)
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 2**21 - 1
    assert len(arr.chunk(1)) == 2
    assert arr.chunk(1).to_pylist() == [['x' * 1024]] * 2


@pytest.mark.large_memory
def test_auto_chunking_list_like():
    item = np.ones((2**28,), dtype='uint8')
    data = [item] * (2**3 - 1)
    arr = pa.array(data, type=pa.list_(pa.uint8()))
    assert isinstance(arr, pa.Array)
    assert len(arr) == 7

    item = np.ones((2**28,), dtype='uint8')
    data = [item] * 2**3
    arr = pa.array(data, type=pa.list_(pa.uint8()))
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 7
    assert len(arr.chunk(1)) == 1
    chunk = arr.chunk(1)
    scalar = chunk[0]
    assert isinstance(scalar, pa.ListScalar)
    expected = pa.array(item, type=pa.uint8())
    assert scalar.values == expected


@pytest.mark.slow
@pytest.mark.large_memory
def test_auto_chunking_map_type():
    # takes ~20 minutes locally
    ty = pa.map_(pa.int8(), pa.int8())
    item = [(1, 1)] * 2**28
    data = [item] * 2**3
    arr = pa.array(data, type=ty)
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr.chunk(0)) == 7
    assert len(arr.chunk(1)) == 1


@pytest.mark.large_memory
@pytest.mark.parametrize(('ty', 'char'), [
    (pa.string(), 'x'),
    (pa.binary(), b'x'),
])
def test_nested_auto_chunking(ty, char):
    v1 = char * 100000000
    v2 = char * 147483646

    struct_type = pa.struct([
        pa.field('bool', pa.bool_()),
        pa.field('integer', pa.int64()),
        pa.field('string-like', ty),
    ])

    data = [{'bool': True, 'integer': 1, 'string-like': v1}] * 20
    data.append({'bool': True, 'integer': 1, 'string-like': v2})
    arr = pa.array(data, type=struct_type)
    assert isinstance(arr, pa.Array)

    data.append({'bool': True, 'integer': 1, 'string-like': char})
    arr = pa.array(data, type=struct_type)
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 21
    assert len(arr.chunk(1)) == 1
    assert arr.chunk(1)[0].as_py() == {
        'bool': True,
        'integer': 1,
        'string-like': char
    }


@pytest.mark.large_memory
def test_array_from_pylist_data_overflow():
    # Regression test for ARROW-12983
    # Data buffer overflow - should result in chunked array
    items = [b'a' * 4096] * (2 ** 19)
    arr = pa.array(items, type=pa.string())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**19
    assert len(arr.chunks) > 1

    mask = np.zeros(2**19, bool)
    arr = pa.array(items, mask=mask, type=pa.string())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**19
    assert len(arr.chunks) > 1

    arr = pa.array(items, type=pa.binary())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**19
    assert len(arr.chunks) > 1


@pytest.mark.slow
@pytest.mark.large_memory
def test_array_from_pylist_offset_overflow():
    # Regression test for ARROW-12983
    # Offset buffer overflow - should result in chunked array
    # Note this doesn't apply to primitive arrays
    items = [b'a'] * (2 ** 31)
    arr = pa.array(items, type=pa.string())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**31
    assert len(arr.chunks) > 1

    mask = np.zeros(2**31, bool)
    arr = pa.array(items, mask=mask, type=pa.string())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**31
    assert len(arr.chunks) > 1

    arr = pa.array(items, type=pa.binary())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**31
    assert len(arr.chunks) > 1


@parametrize_with_collections_types
@pytest.mark.parametrize(('data', 'scalar_data', 'value_type'), [
    ([True, False, None],
     [pa.scalar(True), pa.scalar(False), None],
     pa.bool_()),
    (
        [1, 2, None],
        [pa.scalar(1), pa.scalar(2), pa.scalar(None, pa.int64())],
        pa.int64()
    ),
    ([1, None, None],
     [pa.scalar(1), None, pa.scalar(None, pa.int64())],
     pa.int64()),
    ([None, None], [pa.scalar(None), pa.scalar(None)], pa.null()),
    ([1., 2., None], [pa.scalar(1.), pa.scalar(2.), None], pa.float64()),
    (
        [None, datetime.date.today()],
        [None, pa.scalar(datetime.date.today())],
        pa.date32()
    ),
    (
        [None, datetime.date.today()],
        [None, pa.scalar(datetime.date.today(), pa.date64())],
        pa.date64()
    ),
    (
        [datetime.time(1, 1, 1), None],
        [pa.scalar(datetime.time(1, 1, 1)), None],
        pa.time64('us')
    ),
    (
        [datetime.timedelta(seconds=10)],
        [pa.scalar(datetime.timedelta(seconds=10))],
        pa.duration('us')
    ),
    (
        [None, datetime.datetime(2014, 1, 1)],
        [None, pa.scalar(datetime.datetime(2014, 1, 1))],
        pa.timestamp('us')
    ),
    (
        [pa.MonthDayNano([1, -1, -10100])],
        [pa.scalar(pa.MonthDayNano([1, -1, -10100]))],
        pa.month_day_nano_interval()
    ),
    (["a", "b"], [pa.scalar("a"), pa.scalar("b")], pa.string()),
    ([b"a", b"b"], [pa.scalar(b"a"), pa.scalar(b"b")], pa.binary()),
    (
        [b"a", b"b"],
        [pa.scalar(b"a", pa.binary(1)), pa.scalar(b"b", pa.binary(1))],
        pa.binary(1)
    ),
    ([[1, 2, 3]], [pa.scalar([1, 2, 3])], pa.list_(pa.int64())),
    ([["a", "b"]], [pa.scalar(["a", "b"])], pa.list_(pa.string())),
    ([[1, 2, 3]], [pa.scalar([1, 2, 3], type=pa.list_view(pa.int64()))],
     pa.list_view(pa.int64())),
    ([["a", "b"]], [pa.scalar(["a", "b"], type=pa.list_view(pa.string()))],
     pa.list_view(pa.string())),
    (
        [1, 2, None],
        [pa.scalar(1, type=pa.int8()), pa.scalar(2, type=pa.int8()), None],
        pa.int8()
    ),
    ([1, None], [pa.scalar(1.0, type=pa.int32()), None], pa.int32()),
    (
        ["aaa", "bbb"],
        [pa.scalar("aaa", type=pa.binary(3)),
         pa.scalar("bbb", type=pa.binary(3))],
        pa.binary(3)),
    ([b"a"], [pa.scalar("a", type=pa.large_binary())], pa.large_binary()),
    (["a"], [pa.scalar("a", type=pa.large_string())], pa.large_string()),
    ([b"a"], [pa.scalar("a", type=pa.binary_view())], pa.binary_view()),
    (["a"], [pa.scalar("a", type=pa.string_view())], pa.string_view()),
    (
        ["a"],
        [pa.scalar("a", type=pa.dictionary(pa.int64(), pa.string()))],
        pa.dictionary(pa.int64(), pa.string())
    ),
    (
        ["a", "b"],
        [pa.scalar("a", pa.dictionary(pa.int64(), pa.string())),
         pa.scalar("b", pa.dictionary(pa.int64(), pa.string()))],
        pa.dictionary(pa.int64(), pa.string())
    ),
    (
        [1],
        [pa.scalar(1, type=pa.dictionary(pa.int64(), pa.int32()))],
        pa.dictionary(pa.int64(), pa.int32())
    ),
    (
        [(1, 2)],
        [pa.scalar([('a', 1), ('b', 2)], type=pa.struct(
            [('a', pa.int8()), ('b', pa.int8())]))],
        pa.struct([('a', pa.int8()), ('b', pa.int8())])
    ),
    (
        [(1, 'bar')],
        [pa.scalar([('a', 1), ('b', 'bar')], type=pa.struct(
            [('a', pa.int8()), ('b', pa.string())]))],
        pa.struct([('a', pa.int8()), ('b', pa.string())])
    )
])
def test_array_accepts_pyarrow_scalar(seq, data, scalar_data, value_type):
    if type(seq(scalar_data)) == set:
        pytest.skip("The elements in the set get reordered.")
    expect = pa.array(data, type=value_type)
    result = pa.array(seq(scalar_data))
    assert expect.equals(result)

    result = pa.array(seq(scalar_data), type=value_type)
    assert expect.equals(result)


@parametrize_with_collections_types
def test_array_accepts_pyarrow_scalar_errors(seq):
    sequence = seq([pa.scalar(1), pa.scalar("a"), pa.scalar(3.0)])
    with pytest.raises(pa.ArrowInvalid,
                       match="cannot mix scalars with different types"):
        pa.array(sequence)

    sequence = seq([1, pa.scalar("a"), None])
    with pytest.raises(pa.ArrowInvalid,
                       match="pyarrow scalars cannot be mixed with other "
                             "Python scalar values currently"):
        pa.array(sequence)

    sequence = seq([np.float16("0.1"), pa.scalar("a"), None])
    with pytest.raises(pa.ArrowInvalid,
                       match="pyarrow scalars cannot be mixed with other "
                             "Python scalar values currently"):
        pa.array(sequence)

    sequence = seq([pa.scalar("a"), np.float16("0.1"), None])
    with pytest.raises(pa.ArrowInvalid,
                       match="pyarrow scalars cannot be mixed with other "
                             "Python scalar values currently"):
        pa.array(sequence)

    with pytest.raises(pa.ArrowInvalid,
                       match="Cannot append scalar of type string "
                             "to builder for type int32"):
        pa.array([pa.scalar("a")], type=pa.int32())

    with pytest.raises(pa.ArrowInvalid,
                       match="Cannot append scalar of type int64 "
                             "to builder for type null"):
        pa.array([pa.scalar(1)], type=pa.null())
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_cpp_internals.py
ADDED
@@ -0,0 +1,50 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import os.path
from os.path import join as pjoin

from pyarrow._pyarrow_cpp_tests import get_cpp_tests


def inject_cpp_tests(ns):
    """
    Inject C++ tests as Python functions into namespace `ns` (a dict).
    """
    for case in get_cpp_tests():
        def wrapper(case=case):
            case()
        wrapper.__name__ = wrapper.__qualname__ = case.name
        wrapper.__module__ = ns['__name__']
        ns[case.name] = wrapper


inject_cpp_tests(globals())


def test_pyarrow_include():
    # We need to make sure that pyarrow/include is always
    # created. Either with PyArrow C++ header files or with
    # Arrow C++ and PyArrow C++ header files together

    source = os.path.dirname(os.path.abspath(__file__))
    pyarrow_dir = pjoin(source, '..')
    pyarrow_include = pjoin(pyarrow_dir, 'include')
    pyarrow_cpp_include = pjoin(pyarrow_include, 'arrow', 'python')

    assert os.path.exists(pyarrow_include)
    assert os.path.exists(pyarrow_cpp_include)
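The injection pattern above can be sketched standalone. In this sketch the stand-in get_cpp_tests and the case names are hypothetical; the real one comes from pyarrow._pyarrow_cpp_tests and returns C++ test cases:

    # Hypothetical stand-in: each case is a callable with a .name attribute.
    class FakeCase:
        def __init__(self, name):
            self.name = name

        def __call__(self):
            pass  # the real cases run C++ assertions

    def get_cpp_tests():
        return [FakeCase("test_case_a"), FakeCase("test_case_b")]

    def inject_cpp_tests(ns):
        for case in get_cpp_tests():
            # Bind `case` as a default argument so each wrapper closes
            # over its own case rather than the loop variable.
            def wrapper(case=case):
                case()
            wrapper.__name__ = wrapper.__qualname__ = case.name
            wrapper.__module__ = ns['__name__']
            ns[case.name] = wrapper

    inject_cpp_tests(globals())  # pytest then collects test_case_a, test_case_b

The `case=case` default-argument binding matters: without it, every generated wrapper would call the last case produced by the loop.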
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_csv.py
ADDED
@@ -0,0 +1,2018 @@
1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
# or more contributor license agreements. See the NOTICE file
|
3 |
+
# distributed with this work for additional information
|
4 |
+
# regarding copyright ownership. The ASF licenses this file
|
5 |
+
# to you under the Apache License, Version 2.0 (the
|
6 |
+
# "License"); you may not use this file except in compliance
|
7 |
+
# with the License. You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing,
|
12 |
+
# software distributed under the License is distributed on an
|
13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
# KIND, either express or implied. See the License for the
|
15 |
+
# specific language governing permissions and limitations
|
16 |
+
# under the License.
|
17 |
+
|
18 |
+
import abc
|
19 |
+
import bz2
|
20 |
+
from datetime import date, datetime
|
21 |
+
from decimal import Decimal
|
22 |
+
import gc
|
23 |
+
import gzip
|
24 |
+
import io
|
25 |
+
import itertools
|
26 |
+
import os
|
27 |
+
import select
|
28 |
+
import shutil
|
29 |
+
import signal
|
30 |
+
import string
|
31 |
+
import tempfile
|
32 |
+
import threading
|
33 |
+
import time
|
34 |
+
import unittest
|
35 |
+
import weakref
|
36 |
+
|
37 |
+
import pytest
|
38 |
+
|
39 |
+
import numpy as np
|
40 |
+
|
41 |
+
import pyarrow as pa
|
42 |
+
from pyarrow.csv import (
|
43 |
+
open_csv, read_csv, ReadOptions, ParseOptions, ConvertOptions, ISO8601,
|
44 |
+
write_csv, WriteOptions, CSVWriter, InvalidRow)
|
45 |
+
from pyarrow.tests import util
|
46 |
+
|
47 |
+
|
48 |
+
def generate_col_names():
|
49 |
+
# 'a', 'b'... 'z', then 'aa', 'ab'...
|
50 |
+
letters = string.ascii_lowercase
|
51 |
+
yield from letters
|
52 |
+
for first in letters:
|
53 |
+
for second in letters:
|
54 |
+
yield first + second
|
55 |
+
|
56 |
+
|
57 |
+
def make_random_csv(num_cols=2, num_rows=10, linesep='\r\n', write_names=True):
|
58 |
+
arr = np.random.RandomState(42).randint(0, 1000, size=(num_cols, num_rows))
|
59 |
+
csv = io.StringIO()
|
60 |
+
col_names = list(itertools.islice(generate_col_names(), num_cols))
|
61 |
+
if write_names:
|
62 |
+
csv.write(",".join(col_names))
|
63 |
+
csv.write(linesep)
|
64 |
+
for row in arr.T:
|
65 |
+
csv.write(",".join(map(str, row)))
|
66 |
+
csv.write(linesep)
|
67 |
+
csv = csv.getvalue().encode()
|
68 |
+
columns = [pa.array(a, type=pa.int64()) for a in arr]
|
69 |
+
expected = pa.Table.from_arrays(columns, col_names)
|
70 |
+
return csv, expected
|
71 |
+
|
72 |
+
|
73 |
+
def make_empty_csv(column_names):
|
74 |
+
csv = io.StringIO()
|
75 |
+
csv.write(",".join(column_names))
|
76 |
+
csv.write("\n")
|
77 |
+
return csv.getvalue().encode()
|
78 |
+
|
79 |
+
|
80 |
+
def check_options_class(cls, **attr_values):
|
81 |
+
"""
|
82 |
+
Check setting and getting attributes of an *Options class.
|
83 |
+
"""
|
84 |
+
opts = cls()
|
85 |
+
|
86 |
+
for name, values in attr_values.items():
|
87 |
+
assert getattr(opts, name) == values[0], \
|
88 |
+
"incorrect default value for " + name
|
89 |
+
for v in values:
|
90 |
+
setattr(opts, name, v)
|
91 |
+
assert getattr(opts, name) == v, "failed setting value"
|
92 |
+
|
93 |
+
with pytest.raises(AttributeError):
|
94 |
+
opts.zzz_non_existent = True
|
95 |
+
|
96 |
+
# Check constructor named arguments
|
97 |
+
non_defaults = {name: values[1] for name, values in attr_values.items()}
|
98 |
+
opts = cls(**non_defaults)
|
99 |
+
for name, value in non_defaults.items():
|
100 |
+
assert getattr(opts, name) == value
|
101 |
+
|
102 |
+
|
103 |
+
# The various options classes need to be picklable for dataset
|
104 |
+
def check_options_class_pickling(cls, pickler, **attr_values):
|
105 |
+
opts = cls(**attr_values)
|
106 |
+
new_opts = pickler.loads(pickler.dumps(opts,
|
107 |
+
protocol=pickler.HIGHEST_PROTOCOL))
|
108 |
+
for name, value in attr_values.items():
|
109 |
+
assert getattr(new_opts, name) == value
|
110 |
+
|
111 |
+
|
112 |
+
class InvalidRowHandler:
|
113 |
+
def __init__(self, result):
|
114 |
+
self.result = result
|
115 |
+
self.rows = []
|
116 |
+
|
117 |
+
def __call__(self, row):
|
118 |
+
self.rows.append(row)
|
119 |
+
return self.result
|
120 |
+
|
121 |
+
def __eq__(self, other):
|
122 |
+
return (isinstance(other, InvalidRowHandler) and
|
123 |
+
other.result == self.result)
|
124 |
+
|
125 |
+
def __ne__(self, other):
|
126 |
+
return (not isinstance(other, InvalidRowHandler) or
|
127 |
+
other.result != self.result)
|
128 |
+
|
129 |
+
|
130 |
+
def test_read_options(pickle_module):
|
131 |
+
cls = ReadOptions
|
132 |
+
opts = cls()
|
133 |
+
|
134 |
+
check_options_class(cls, use_threads=[True, False],
|
135 |
+
skip_rows=[0, 3],
|
136 |
+
column_names=[[], ["ab", "cd"]],
|
137 |
+
autogenerate_column_names=[False, True],
|
138 |
+
encoding=['utf8', 'utf16'],
|
139 |
+
skip_rows_after_names=[0, 27])
|
140 |
+
|
141 |
+
check_options_class_pickling(cls, pickler=pickle_module,
|
142 |
+
use_threads=True,
|
143 |
+
skip_rows=3,
|
144 |
+
column_names=["ab", "cd"],
|
145 |
+
autogenerate_column_names=False,
|
146 |
+
encoding='utf16',
|
147 |
+
skip_rows_after_names=27)
|
148 |
+
|
149 |
+
assert opts.block_size > 0
|
150 |
+
opts.block_size = 12345
|
151 |
+
assert opts.block_size == 12345
|
152 |
+
|
153 |
+
opts = cls(block_size=1234)
|
154 |
+
assert opts.block_size == 1234
|
155 |
+
|
156 |
+
opts.validate()
|
157 |
+
|
158 |
+
match = "ReadOptions: block_size must be at least 1: 0"
|
159 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
160 |
+
opts = cls()
|
161 |
+
opts.block_size = 0
|
162 |
+
opts.validate()
|
163 |
+
|
164 |
+
match = "ReadOptions: skip_rows cannot be negative: -1"
|
165 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
166 |
+
opts = cls()
|
167 |
+
opts.skip_rows = -1
|
168 |
+
opts.validate()
|
169 |
+
|
170 |
+
match = "ReadOptions: skip_rows_after_names cannot be negative: -1"
|
171 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
172 |
+
opts = cls()
|
173 |
+
opts.skip_rows_after_names = -1
|
174 |
+
opts.validate()
|
175 |
+
|
176 |
+
match = "ReadOptions: autogenerate_column_names cannot be true when" \
|
177 |
+
" column_names are provided"
|
178 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
179 |
+
opts = cls()
|
180 |
+
opts.autogenerate_column_names = True
|
181 |
+
opts.column_names = ('a', 'b')
|
182 |
+
opts.validate()
|
183 |
+
|
184 |
+
|
185 |
+
def test_parse_options(pickle_module):
|
186 |
+
cls = ParseOptions
|
187 |
+
skip_handler = InvalidRowHandler('skip')
|
188 |
+
|
189 |
+
check_options_class(cls, delimiter=[',', 'x'],
|
190 |
+
escape_char=[False, 'y'],
|
191 |
+
quote_char=['"', 'z', False],
|
192 |
+
double_quote=[True, False],
|
193 |
+
newlines_in_values=[False, True],
|
194 |
+
ignore_empty_lines=[True, False],
|
195 |
+
invalid_row_handler=[None, skip_handler])
|
196 |
+
|
197 |
+
check_options_class_pickling(cls, pickler=pickle_module,
|
198 |
+
delimiter='x',
|
199 |
+
escape_char='y',
|
200 |
+
quote_char=False,
|
201 |
+
double_quote=False,
|
202 |
+
newlines_in_values=True,
|
203 |
+
ignore_empty_lines=False,
|
204 |
+
invalid_row_handler=skip_handler)
|
205 |
+
|
206 |
+
cls().validate()
|
207 |
+
opts = cls()
|
208 |
+
opts.delimiter = "\t"
|
209 |
+
opts.validate()
|
210 |
+
|
211 |
+
match = "ParseOptions: delimiter cannot be \\\\r or \\\\n"
|
212 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
213 |
+
opts = cls()
|
214 |
+
opts.delimiter = "\n"
|
215 |
+
opts.validate()
|
216 |
+
|
217 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
218 |
+
opts = cls()
|
219 |
+
opts.delimiter = "\r"
|
220 |
+
opts.validate()
|
221 |
+
|
222 |
+
match = "ParseOptions: quote_char cannot be \\\\r or \\\\n"
|
223 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
224 |
+
opts = cls()
|
225 |
+
opts.quote_char = "\n"
|
226 |
+
opts.validate()
|
227 |
+
|
228 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
229 |
+
opts = cls()
|
230 |
+
opts.quote_char = "\r"
|
231 |
+
opts.validate()
|
232 |
+
|
233 |
+
match = "ParseOptions: escape_char cannot be \\\\r or \\\\n"
|
234 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
235 |
+
opts = cls()
|
236 |
+
opts.escape_char = "\n"
|
237 |
+
opts.validate()
|
238 |
+
|
239 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
240 |
+
opts = cls()
|
241 |
+
opts.escape_char = "\r"
|
242 |
+
opts.validate()
|
243 |
+
|
244 |
+
|
245 |
+
def test_convert_options(pickle_module):
|
246 |
+
cls = ConvertOptions
|
247 |
+
opts = cls()
|
248 |
+
|
249 |
+
check_options_class(
|
250 |
+
cls, check_utf8=[True, False],
|
251 |
+
strings_can_be_null=[False, True],
|
252 |
+
quoted_strings_can_be_null=[True, False],
|
253 |
+
decimal_point=['.', ','],
|
254 |
+
include_columns=[[], ['def', 'abc']],
|
255 |
+
include_missing_columns=[False, True],
|
256 |
+
auto_dict_encode=[False, True],
|
257 |
+
timestamp_parsers=[[], [ISO8601, '%y-%m']])
|
258 |
+
|
259 |
+
check_options_class_pickling(
|
260 |
+
cls, pickler=pickle_module,
|
261 |
+
check_utf8=False,
|
262 |
+
strings_can_be_null=True,
|
263 |
+
quoted_strings_can_be_null=False,
|
264 |
+
decimal_point=',',
|
265 |
+
include_columns=['def', 'abc'],
|
266 |
+
include_missing_columns=False,
|
267 |
+
auto_dict_encode=True,
|
268 |
+
timestamp_parsers=[ISO8601, '%y-%m'])
|
269 |
+
|
270 |
+
with pytest.raises(ValueError):
|
271 |
+
opts.decimal_point = '..'
|
272 |
+
|
273 |
+
assert opts.auto_dict_max_cardinality > 0
|
274 |
+
opts.auto_dict_max_cardinality = 99999
|
275 |
+
assert opts.auto_dict_max_cardinality == 99999
|
276 |
+
|
277 |
+
assert opts.column_types == {}
|
278 |
+
# Pass column_types as mapping
|
279 |
+
opts.column_types = {'b': pa.int16(), 'c': pa.float32()}
|
280 |
+
assert opts.column_types == {'b': pa.int16(), 'c': pa.float32()}
|
281 |
+
opts.column_types = {'v': 'int16', 'w': 'null'}
|
282 |
+
assert opts.column_types == {'v': pa.int16(), 'w': pa.null()}
|
283 |
+
# Pass column_types as schema
|
284 |
+
schema = pa.schema([('a', pa.int32()), ('b', pa.string())])
|
285 |
+
opts.column_types = schema
|
286 |
+
assert opts.column_types == {'a': pa.int32(), 'b': pa.string()}
|
287 |
+
# Pass column_types as sequence
|
288 |
+
opts.column_types = [('x', pa.binary())]
|
289 |
+
assert opts.column_types == {'x': pa.binary()}
|
290 |
+
|
291 |
+
with pytest.raises(TypeError, match='DataType expected'):
|
292 |
+
opts.column_types = {'a': None}
|
293 |
+
with pytest.raises(TypeError):
|
294 |
+
opts.column_types = 0
|
295 |
+
|
296 |
+
assert isinstance(opts.null_values, list)
|
297 |
+
assert '' in opts.null_values
|
298 |
+
assert 'N/A' in opts.null_values
|
299 |
+
opts.null_values = ['xxx', 'yyy']
|
300 |
+
assert opts.null_values == ['xxx', 'yyy']
|
301 |
+
|
302 |
+
assert isinstance(opts.true_values, list)
|
303 |
+
opts.true_values = ['xxx', 'yyy']
|
304 |
+
assert opts.true_values == ['xxx', 'yyy']
|
305 |
+
|
306 |
+
assert isinstance(opts.false_values, list)
|
307 |
+
opts.false_values = ['xxx', 'yyy']
|
308 |
+
assert opts.false_values == ['xxx', 'yyy']
|
309 |
+
|
310 |
+
assert opts.timestamp_parsers == []
|
311 |
+
opts.timestamp_parsers = [ISO8601]
|
312 |
+
assert opts.timestamp_parsers == [ISO8601]
|
313 |
+
|
314 |
+
opts = cls(column_types={'a': pa.null()},
|
315 |
+
null_values=['N', 'nn'], true_values=['T', 'tt'],
|
316 |
+
false_values=['F', 'ff'], auto_dict_max_cardinality=999,
|
317 |
+
timestamp_parsers=[ISO8601, '%Y-%m-%d'])
|
318 |
+
assert opts.column_types == {'a': pa.null()}
|
319 |
+
assert opts.null_values == ['N', 'nn']
|
320 |
+
assert opts.false_values == ['F', 'ff']
|
321 |
+
assert opts.true_values == ['T', 'tt']
|
322 |
+
assert opts.auto_dict_max_cardinality == 999
|
323 |
+
assert opts.timestamp_parsers == [ISO8601, '%Y-%m-%d']
|
324 |
+
|
325 |
+
|
326 |
+
def test_write_options():
|
327 |
+
cls = WriteOptions
|
328 |
+
opts = cls()
|
329 |
+
|
330 |
+
check_options_class(
|
331 |
+
cls, include_header=[True, False], delimiter=[',', '\t', '|'],
|
332 |
+
quoting_style=['needed', 'none', 'all_valid'])
|
333 |
+
|
334 |
+
assert opts.batch_size > 0
|
335 |
+
opts.batch_size = 12345
|
336 |
+
assert opts.batch_size == 12345
|
337 |
+
|
338 |
+
opts = cls(batch_size=9876)
|
339 |
+
assert opts.batch_size == 9876
|
340 |
+
|
341 |
+
opts.validate()
|
342 |
+
|
343 |
+
match = "WriteOptions: batch_size must be at least 1: 0"
|
344 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
345 |
+
opts = cls()
|
346 |
+
opts.batch_size = 0
|
347 |
+
opts.validate()
|
348 |
+
|
349 |
+
|
350 |
+
class BaseTestCSV(abc.ABC):
|
351 |
+
"""Common tests which are shared by streaming and non streaming readers"""
|
352 |
+
|
353 |
+
@abc.abstractmethod
|
354 |
+
def read_bytes(self, b, **kwargs):
|
355 |
+
"""
|
356 |
+
:param b: bytes to be parsed
|
357 |
+
:param kwargs: arguments passed on to open the csv file
|
358 |
+
:return: b parsed as a single RecordBatch
|
359 |
+
"""
|
360 |
+
raise NotImplementedError
|
361 |
+
|
362 |
+
@property
|
363 |
+
@abc.abstractmethod
|
364 |
+
def use_threads(self):
|
365 |
+
"""Whether this test is multi-threaded"""
|
366 |
+
raise NotImplementedError
|
367 |
+
|
368 |
+
@staticmethod
|
369 |
+
def check_names(table, names):
|
370 |
+
assert table.num_columns == len(names)
|
371 |
+
assert table.column_names == names
|
372 |
+
|
373 |
+
def test_header_skip_rows(self):
|
374 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
375 |
+
|
376 |
+
opts = ReadOptions()
|
377 |
+
opts.skip_rows = 1
|
378 |
+
table = self.read_bytes(rows, read_options=opts)
|
379 |
+
self.check_names(table, ["ef", "gh"])
|
380 |
+
assert table.to_pydict() == {
|
381 |
+
"ef": ["ij", "mn"],
|
382 |
+
"gh": ["kl", "op"],
|
383 |
+
}
|
384 |
+
|
385 |
+
opts.skip_rows = 3
|
386 |
+
table = self.read_bytes(rows, read_options=opts)
|
387 |
+
self.check_names(table, ["mn", "op"])
|
388 |
+
assert table.to_pydict() == {
|
389 |
+
"mn": [],
|
390 |
+
"op": [],
|
391 |
+
}
|
392 |
+
|
393 |
+
opts.skip_rows = 4
|
394 |
+
with pytest.raises(pa.ArrowInvalid):
|
395 |
+
# Not enough rows
|
396 |
+
table = self.read_bytes(rows, read_options=opts)
|
397 |
+
|
398 |
+
# Can skip rows with a different number of columns
|
399 |
+
rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
|
400 |
+
opts.skip_rows = 2
|
401 |
+
table = self.read_bytes(rows, read_options=opts)
|
402 |
+
self.check_names(table, ["ij", "kl"])
|
403 |
+
assert table.to_pydict() == {
|
404 |
+
"ij": ["mn"],
|
405 |
+
"kl": ["op"],
|
406 |
+
}
|
407 |
+
|
408 |
+
# Can skip all rows exactly when columns are given
|
409 |
+
opts.skip_rows = 4
|
410 |
+
opts.column_names = ['ij', 'kl']
|
411 |
+
table = self.read_bytes(rows, read_options=opts)
|
412 |
+
self.check_names(table, ["ij", "kl"])
|
413 |
+
assert table.to_pydict() == {
|
414 |
+
"ij": [],
|
415 |
+
"kl": [],
|
416 |
+
}
|
417 |
+
|
418 |
+
def test_skip_rows_after_names(self):
|
419 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
420 |
+
|
421 |
+
opts = ReadOptions()
|
422 |
+
opts.skip_rows_after_names = 1
|
423 |
+
table = self.read_bytes(rows, read_options=opts)
|
424 |
+
self.check_names(table, ["ab", "cd"])
|
425 |
+
assert table.to_pydict() == {
|
426 |
+
"ab": ["ij", "mn"],
|
427 |
+
"cd": ["kl", "op"],
|
428 |
+
}
|
429 |
+
|
430 |
+
# Can skip exact number of rows
|
431 |
+
opts.skip_rows_after_names = 3
|
432 |
+
table = self.read_bytes(rows, read_options=opts)
|
433 |
+
self.check_names(table, ["ab", "cd"])
|
434 |
+
assert table.to_pydict() == {
|
435 |
+
"ab": [],
|
436 |
+
"cd": [],
|
437 |
+
}
|
438 |
+
|
439 |
+
# Can skip beyond all rows
|
440 |
+
opts.skip_rows_after_names = 4
|
441 |
+
table = self.read_bytes(rows, read_options=opts)
|
442 |
+
self.check_names(table, ["ab", "cd"])
|
443 |
+
assert table.to_pydict() == {
|
444 |
+
"ab": [],
|
445 |
+
"cd": [],
|
446 |
+
}
|
447 |
+
|
448 |
+
# Can skip rows with a different number of columns
|
449 |
+
rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
|
450 |
+
opts.skip_rows_after_names = 2
|
451 |
+
opts.column_names = ["f0", "f1"]
|
452 |
+
table = self.read_bytes(rows, read_options=opts)
|
453 |
+
self.check_names(table, ["f0", "f1"])
|
454 |
+
assert table.to_pydict() == {
|
455 |
+
"f0": ["ij", "mn"],
|
456 |
+
"f1": ["kl", "op"],
|
457 |
+
}
|
458 |
+
opts = ReadOptions()
|
459 |
+
|
460 |
+
# Can skip rows with new lines in the value
|
461 |
+
rows = b'ab,cd\n"e\nf","g\n\nh"\n"ij","k\nl"\nmn,op'
|
462 |
+
opts.skip_rows_after_names = 2
|
463 |
+
parse_opts = ParseOptions()
|
464 |
+
parse_opts.newlines_in_values = True
|
465 |
+
table = self.read_bytes(rows, read_options=opts,
|
466 |
+
parse_options=parse_opts)
|
467 |
+
self.check_names(table, ["ab", "cd"])
|
468 |
+
assert table.to_pydict() == {
|
469 |
+
"ab": ["mn"],
|
470 |
+
"cd": ["op"],
|
471 |
+
}
|
472 |
+
|
473 |
+
# Can skip rows when block ends in middle of quoted value
|
474 |
+
opts.skip_rows_after_names = 2
|
475 |
+
opts.block_size = 26
|
476 |
+
table = self.read_bytes(rows, read_options=opts,
|
477 |
+
parse_options=parse_opts)
|
478 |
+
self.check_names(table, ["ab", "cd"])
|
479 |
+
assert table.to_pydict() == {
|
480 |
+
"ab": ["mn"],
|
481 |
+
"cd": ["op"],
|
482 |
+
}
|
483 |
+
opts = ReadOptions()
|
484 |
+
|
485 |
+
# Can skip rows that are beyond the first block without lexer
|
486 |
+
rows, expected = make_random_csv(num_cols=5, num_rows=1000)
|
487 |
+
opts.skip_rows_after_names = 900
|
488 |
+
opts.block_size = len(rows) / 11
|
489 |
+
table = self.read_bytes(rows, read_options=opts)
|
490 |
+
assert table.schema == expected.schema
|
491 |
+
assert table.num_rows == 100
|
492 |
+
table_dict = table.to_pydict()
|
493 |
+
for name, values in expected.to_pydict().items():
|
494 |
+
assert values[900:] == table_dict[name]
|
495 |
+
|
496 |
+
# Can skip rows that are beyond the first block with lexer
|
497 |
+
table = self.read_bytes(rows, read_options=opts,
|
498 |
+
parse_options=parse_opts)
|
499 |
+
assert table.schema == expected.schema
|
500 |
+
assert table.num_rows == 100
|
501 |
+
table_dict = table.to_pydict()
|
502 |
+
for name, values in expected.to_pydict().items():
|
503 |
+
assert values[900:] == table_dict[name]
|
504 |
+
|
505 |
+
# Skip rows and skip rows after names
|
506 |
+
rows, expected = make_random_csv(num_cols=5, num_rows=200,
|
507 |
+
write_names=False)
|
508 |
+
opts = ReadOptions()
|
509 |
+
opts.skip_rows = 37
|
510 |
+
opts.skip_rows_after_names = 41
|
511 |
+
opts.column_names = expected.schema.names
|
512 |
+
table = self.read_bytes(rows, read_options=opts,
|
513 |
+
parse_options=parse_opts)
|
514 |
+
assert table.schema == expected.schema
|
515 |
+
assert (table.num_rows ==
|
516 |
+
expected.num_rows - opts.skip_rows -
|
517 |
+
opts.skip_rows_after_names)
|
518 |
+
table_dict = table.to_pydict()
|
519 |
+
for name, values in expected.to_pydict().items():
|
520 |
+
assert (values[opts.skip_rows + opts.skip_rows_after_names:] ==
|
521 |
+
table_dict[name])
|
522 |
+
|
523 |
+
def test_row_number_offset_in_errors(self):
|
524 |
+
# Row numbers are only correctly counted in serial reads
|
525 |
+
def format_msg(msg_format, row, *args):
|
526 |
+
if self.use_threads:
|
527 |
+
row_info = ""
|
528 |
+
else:
|
529 |
+
row_info = "Row #{}: ".format(row)
|
530 |
+
return msg_format.format(row_info, *args)
|
531 |
+
|
532 |
+
csv, _ = make_random_csv(4, 100, write_names=True)
|
533 |
+
|
534 |
+
read_options = ReadOptions()
|
535 |
+
read_options.block_size = len(csv) / 3
|
536 |
+
convert_options = ConvertOptions()
|
537 |
+
convert_options.column_types = {"a": pa.int32()}
|
538 |
+
|
539 |
+
# Test without skip_rows and column names in the csv
|
540 |
+
csv_bad_columns = csv + b"1,2\r\n"
|
541 |
+
message_columns = format_msg("{}Expected 4 columns, got 2", 102)
|
542 |
+
with pytest.raises(pa.ArrowInvalid, match=message_columns):
|
543 |
+
self.read_bytes(csv_bad_columns,
|
544 |
+
read_options=read_options,
|
545 |
+
convert_options=convert_options)
|
546 |
+
|
547 |
+
csv_bad_type = csv + b"a,b,c,d\r\n"
|
548 |
+
message_value = format_msg(
|
549 |
+
"In CSV column #0: {}"
|
550 |
+
"CSV conversion error to int32: invalid value 'a'",
|
551 |
+
102, csv)
|
552 |
+
with pytest.raises(pa.ArrowInvalid, match=message_value):
|
553 |
+
self.read_bytes(csv_bad_type,
|
554 |
+
read_options=read_options,
|
555 |
+
convert_options=convert_options)
|
556 |
+
|
557 |
+
long_row = (b"this is a long row" * 15) + b",3\r\n"
|
558 |
+
csv_bad_columns_long = csv + long_row
|
559 |
+
message_long = format_msg("{}Expected 4 columns, got 2: {} ...", 102,
|
560 |
+
long_row[0:96].decode("utf-8"))
|
561 |
+
with pytest.raises(pa.ArrowInvalid, match=message_long):
|
562 |
+
self.read_bytes(csv_bad_columns_long,
|
563 |
+
read_options=read_options,
|
564 |
+
convert_options=convert_options)
|
565 |
+
|
566 |
+
# Test skipping rows after the names
|
567 |
+
read_options.skip_rows_after_names = 47
|
568 |
+
|
569 |
+
with pytest.raises(pa.ArrowInvalid, match=message_columns):
|
570 |
+
self.read_bytes(csv_bad_columns,
|
571 |
+
read_options=read_options,
|
572 |
+
convert_options=convert_options)
|
573 |
+
|
574 |
+
with pytest.raises(pa.ArrowInvalid, match=message_value):
|
575 |
+
self.read_bytes(csv_bad_type,
|
576 |
+
read_options=read_options,
|
577 |
+
convert_options=convert_options)
|
578 |
+
|
579 |
+
with pytest.raises(pa.ArrowInvalid, match=message_long):
|
580 |
+
self.read_bytes(csv_bad_columns_long,
|
581 |
+
read_options=read_options,
|
582 |
+
convert_options=convert_options)
|
583 |
+
|
584 |
+
read_options.skip_rows_after_names = 0
|
585 |
+
|
586 |
+
# Test without skip_rows and column names not in the csv
|
587 |
+
csv, _ = make_random_csv(4, 100, write_names=False)
|
588 |
+
read_options.column_names = ["a", "b", "c", "d"]
|
589 |
+
csv_bad_columns = csv + b"1,2\r\n"
|
590 |
+
message_columns = format_msg("{}Expected 4 columns, got 2", 101)
|
591 |
+
with pytest.raises(pa.ArrowInvalid, match=message_columns):
|
592 |
+
self.read_bytes(csv_bad_columns,
|
593 |
+
read_options=read_options,
|
594 |
+
convert_options=convert_options)
|
595 |
+
|
596 |
+
csv_bad_columns_long = csv + long_row
|
597 |
+
message_long = format_msg("{}Expected 4 columns, got 2: {} ...", 101,
|
598 |
+
long_row[0:96].decode("utf-8"))
|
599 |
+
with pytest.raises(pa.ArrowInvalid, match=message_long):
|
600 |
+
self.read_bytes(csv_bad_columns_long,
|
601 |
+
read_options=read_options,
|
602 |
+
convert_options=convert_options)
|
603 |
+
|
604 |
+
csv_bad_type = csv + b"a,b,c,d\r\n"
|
605 |
+
message_value = format_msg(
|
606 |
+
"In CSV column #0: {}"
|
607 |
+
"CSV conversion error to int32: invalid value 'a'",
|
608 |
+
101)
|
609 |
+
message_value = message_value.format(len(csv))
|
610 |
+
with pytest.raises(pa.ArrowInvalid, match=message_value):
|
611 |
+
self.read_bytes(csv_bad_type,
|
612 |
+
read_options=read_options,
|
613 |
+
convert_options=convert_options)
|
614 |
+
|
615 |
+
# Test with skip_rows and column names not in the csv
|
616 |
+
read_options.skip_rows = 23
|
617 |
+
with pytest.raises(pa.ArrowInvalid, match=message_columns):
|
618 |
+
self.read_bytes(csv_bad_columns,
|
619 |
+
read_options=read_options,
|
620 |
+
convert_options=convert_options)
|
621 |
+
|
622 |
+
with pytest.raises(pa.ArrowInvalid, match=message_value):
|
623 |
+
self.read_bytes(csv_bad_type,
|
624 |
+
read_options=read_options,
|
625 |
+
convert_options=convert_options)
|
626 |
+
|
627 |
+
def test_invalid_row_handler(self, pickle_module):
|
628 |
+
rows = b"a,b\nc\nd,e\nf,g,h\ni,j\n"
|
629 |
+
parse_opts = ParseOptions()
|
630 |
+
with pytest.raises(
|
631 |
+
ValueError,
|
632 |
+
match="Expected 2 columns, got 1: c"):
|
633 |
+
self.read_bytes(rows, parse_options=parse_opts)
|
634 |
+
|
635 |
+
# Skip requested
|
636 |
+
parse_opts.invalid_row_handler = InvalidRowHandler('skip')
|
637 |
+
table = self.read_bytes(rows, parse_options=parse_opts)
|
638 |
+
assert table.to_pydict() == {
|
639 |
+
'a': ["d", "i"],
|
640 |
+
'b': ["e", "j"],
|
641 |
+
}
|
642 |
+
|
643 |
+
def row_num(x):
|
644 |
+
return None if self.use_threads else x
|
645 |
+
expected_rows = [
|
646 |
+
InvalidRow(2, 1, row_num(2), "c"),
|
647 |
+
InvalidRow(2, 3, row_num(4), "f,g,h"),
|
648 |
+
]
|
649 |
+
assert parse_opts.invalid_row_handler.rows == expected_rows
|
650 |
+
|
651 |
+
# Error requested
|
652 |
+
parse_opts.invalid_row_handler = InvalidRowHandler('error')
|
653 |
+
with pytest.raises(
|
654 |
+
ValueError,
|
655 |
+
match="Expected 2 columns, got 1: c"):
|
656 |
+
self.read_bytes(rows, parse_options=parse_opts)
|
657 |
+
expected_rows = [InvalidRow(2, 1, row_num(2), "c")]
|
658 |
+
assert parse_opts.invalid_row_handler.rows == expected_rows
|
659 |
+
|
660 |
+
# Test ser/de
|
661 |
+
parse_opts.invalid_row_handler = InvalidRowHandler('skip')
|
662 |
+
parse_opts = pickle_module.loads(pickle_module.dumps(parse_opts))
|
663 |
+
|
664 |
+
table = self.read_bytes(rows, parse_options=parse_opts)
|
665 |
+
assert table.to_pydict() == {
|
666 |
+
'a': ["d", "i"],
|
667 |
+
'b': ["e", "j"],
|
668 |
+
}
|
669 |
+
|
670 |
+
def test_chunker_out_of_sync(self):
|
671 |
+
# GH-39892: if there are newlines in values, the parser may become
|
672 |
+
# out of sync with the chunker. In this case, we try to produce an
|
673 |
+
# informative error message.
|
674 |
+
rows = b"""a,b,c\nd,e,"f\n"\ng,h,i\n"""
|
675 |
+
expected = {
|
676 |
+
'a': ["d", "g"],
|
677 |
+
'b': ["e", "h"],
|
678 |
+
'c': ["f\n", "i"],
|
679 |
+
}
|
680 |
+
for block_size in range(8, 15):
|
681 |
+
# Sanity check: parsing works with newlines_in_values=True
|
682 |
+
d = self.read_bytes(
|
683 |
+
rows, parse_options=ParseOptions(newlines_in_values=True),
|
684 |
+
read_options=ReadOptions(block_size=block_size)).to_pydict()
|
685 |
+
assert d == expected
|
686 |
+
# With these block sizes, a block would end on the physical newline
|
687 |
+
# inside the quoted cell value, leading to a mismatch between
|
688 |
+
# CSV chunker and parser.
|
689 |
+
for block_size in range(8, 11):
|
690 |
+
with pytest.raises(ValueError,
|
691 |
+
match="cell values spanning multiple lines"):
|
692 |
+
self.read_bytes(
|
693 |
+
rows, read_options=ReadOptions(block_size=block_size))
|
694 |
+
|
695 |
+
|
696 |
+
class BaseCSVTableRead(BaseTestCSV):
|
697 |
+
|
698 |
+
def read_csv(self, csv, *args, validate_full=True, **kwargs):
|
699 |
+
"""
|
700 |
+
Reads the CSV file into memory using pyarrow's read_csv
|
701 |
+
csv The CSV bytes
|
702 |
+
args Positional arguments to be forwarded to pyarrow's read_csv
|
703 |
+
validate_full Whether or not to fully validate the resulting table
|
704 |
+
kwargs Keyword arguments to be forwarded to pyarrow's read_csv
|
705 |
+
"""
|
706 |
+
assert isinstance(self.use_threads, bool) # sanity check
|
707 |
+
read_options = kwargs.setdefault('read_options', ReadOptions())
|
708 |
+
read_options.use_threads = self.use_threads
|
709 |
+
table = read_csv(csv, *args, **kwargs)
|
710 |
+
table.validate(full=validate_full)
|
711 |
+
return table
|
712 |
+
|
713 |
+
def read_bytes(self, b, **kwargs):
|
714 |
+
return self.read_csv(pa.py_buffer(b), **kwargs)
|
715 |
+
|
716 |
+
def test_file_object(self):
|
717 |
+
data = b"a,b\n1,2\n"
|
718 |
+
expected_data = {'a': [1], 'b': [2]}
|
719 |
+
bio = io.BytesIO(data)
|
720 |
+
table = self.read_csv(bio)
|
721 |
+
assert table.to_pydict() == expected_data
|
722 |
+
# Text files not allowed
|
723 |
+
sio = io.StringIO(data.decode())
|
724 |
+
with pytest.raises(TypeError):
|
725 |
+
self.read_csv(sio)
|
726 |
+
|
727 |
+
def test_header(self):
|
728 |
+
rows = b"abc,def,gh\n"
|
729 |
+
table = self.read_bytes(rows)
|
730 |
+
assert isinstance(table, pa.Table)
|
731 |
+
self.check_names(table, ["abc", "def", "gh"])
|
732 |
+
assert table.num_rows == 0
|
733 |
+
|
734 |
+
def test_bom(self):
|
735 |
+
rows = b"\xef\xbb\xbfa,b\n1,2\n"
|
736 |
+
expected_data = {'a': [1], 'b': [2]}
|
737 |
+
table = self.read_bytes(rows)
|
738 |
+
assert table.to_pydict() == expected_data
|
739 |
+
|
740 |
+
def test_one_chunk(self):
|
741 |
+
# ARROW-7661: lack of newline at end of file should not produce
|
742 |
+
# an additional chunk.
|
743 |
+
rows = [b"a,b", b"1,2", b"3,4", b"56,78"]
|
744 |
+
for line_ending in [b'\n', b'\r', b'\r\n']:
|
745 |
+
for file_ending in [b'', line_ending]:
|
746 |
+
data = line_ending.join(rows) + file_ending
|
747 |
+
table = self.read_bytes(data)
|
748 |
+
assert len(table.to_batches()) == 1
|
749 |
+
assert table.to_pydict() == {
|
750 |
+
"a": [1, 3, 56],
|
751 |
+
"b": [2, 4, 78],
|
752 |
+
}
|
753 |
+
|
754 |
+
def test_header_column_names(self):
|
755 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
756 |
+
|
757 |
+
opts = ReadOptions()
|
758 |
+
opts.column_names = ["x", "y"]
|
759 |
+
table = self.read_bytes(rows, read_options=opts)
|
760 |
+
self.check_names(table, ["x", "y"])
|
761 |
+
assert table.to_pydict() == {
|
762 |
+
"x": ["ab", "ef", "ij", "mn"],
|
763 |
+
"y": ["cd", "gh", "kl", "op"],
|
764 |
+
}
|
765 |
+
|
766 |
+
opts.skip_rows = 3
|
767 |
+
table = self.read_bytes(rows, read_options=opts)
|
768 |
+
self.check_names(table, ["x", "y"])
|
769 |
+
assert table.to_pydict() == {
|
770 |
+
"x": ["mn"],
|
771 |
+
"y": ["op"],
|
772 |
+
}
|
773 |
+
|
774 |
+
opts.skip_rows = 4
|
775 |
+
table = self.read_bytes(rows, read_options=opts)
|
776 |
+
self.check_names(table, ["x", "y"])
|
777 |
+
assert table.to_pydict() == {
|
778 |
+
"x": [],
|
779 |
+
"y": [],
|
780 |
+
}
|
781 |
+
|
782 |
+
opts.skip_rows = 5
|
783 |
+
with pytest.raises(pa.ArrowInvalid):
|
784 |
+
# Not enough rows
|
785 |
+
table = self.read_bytes(rows, read_options=opts)
|
786 |
+
|
787 |
+
# Unexpected number of columns
|
788 |
+
opts.skip_rows = 0
|
789 |
+
opts.column_names = ["x", "y", "z"]
|
790 |
+
with pytest.raises(pa.ArrowInvalid,
|
791 |
+
match="Expected 3 columns, got 2"):
|
792 |
+
table = self.read_bytes(rows, read_options=opts)
|
793 |
+
|
794 |
+
# Can skip rows with a different number of columns
|
795 |
+
rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
|
796 |
+
opts.skip_rows = 2
|
797 |
+
opts.column_names = ["x", "y"]
|
798 |
+
table = self.read_bytes(rows, read_options=opts)
|
799 |
+
self.check_names(table, ["x", "y"])
|
800 |
+
assert table.to_pydict() == {
|
801 |
+
"x": ["ij", "mn"],
|
802 |
+
"y": ["kl", "op"],
|
803 |
+
}
|
804 |
+
|
805 |
+
def test_header_autogenerate_column_names(self):
|
806 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
807 |
+
|
808 |
+
opts = ReadOptions()
|
809 |
+
opts.autogenerate_column_names = True
|
810 |
+
table = self.read_bytes(rows, read_options=opts)
|
811 |
+
self.check_names(table, ["f0", "f1"])
|
812 |
+
assert table.to_pydict() == {
|
813 |
+
"f0": ["ab", "ef", "ij", "mn"],
|
814 |
+
"f1": ["cd", "gh", "kl", "op"],
|
815 |
+
}
|
816 |
+
|
817 |
+
opts.skip_rows = 3
|
818 |
+
table = self.read_bytes(rows, read_options=opts)
|
819 |
+
self.check_names(table, ["f0", "f1"])
|
820 |
+
assert table.to_pydict() == {
|
821 |
+
"f0": ["mn"],
|
822 |
+
"f1": ["op"],
|
823 |
+
}
|
824 |
+
|
825 |
+
# Not enough rows, impossible to infer number of columns
|
826 |
+
opts.skip_rows = 4
|
827 |
+
with pytest.raises(pa.ArrowInvalid):
|
828 |
+
table = self.read_bytes(rows, read_options=opts)
|
829 |
+
|
830 |
+
def test_include_columns(self):
|
831 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
832 |
+
|
833 |
+
convert_options = ConvertOptions()
|
834 |
+
convert_options.include_columns = ['ab']
|
835 |
+
table = self.read_bytes(rows, convert_options=convert_options)
|
836 |
+
self.check_names(table, ["ab"])
|
837 |
+
assert table.to_pydict() == {
|
838 |
+
"ab": ["ef", "ij", "mn"],
|
839 |
+
}
|
840 |
+
|
841 |
+
# Order of include_columns is respected, regardless of CSV order
|
842 |
+
convert_options.include_columns = ['cd', 'ab']
|
843 |
+
table = self.read_bytes(rows, convert_options=convert_options)
|
844 |
+
schema = pa.schema([('cd', pa.string()),
|
845 |
+
('ab', pa.string())])
|
846 |
+
assert table.schema == schema
|
847 |
+
assert table.to_pydict() == {
|
848 |
+
"cd": ["gh", "kl", "op"],
|
849 |
+
"ab": ["ef", "ij", "mn"],
|
850 |
+
}
|
851 |
+
|
852 |
+
# Include a column not in the CSV file => raises by default
|
853 |
+
convert_options.include_columns = ['xx', 'ab', 'yy']
|
854 |
+
with pytest.raises(KeyError,
|
855 |
+
match="Column 'xx' in include_columns "
|
856 |
+
"does not exist in CSV file"):
|
857 |
+
self.read_bytes(rows, convert_options=convert_options)
|
858 |
+
|
859 |
+
def test_include_missing_columns(self):
|
860 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
861 |
+
|
862 |
+
read_options = ReadOptions()
|
863 |
+
convert_options = ConvertOptions()
|
864 |
+
convert_options.include_columns = ['xx', 'ab', 'yy']
|
865 |
+
convert_options.include_missing_columns = True
|
866 |
+
table = self.read_bytes(rows, read_options=read_options,
|
867 |
+
convert_options=convert_options)
|
868 |
+
schema = pa.schema([('xx', pa.null()),
|
869 |
+
('ab', pa.string()),
|
870 |
+
('yy', pa.null())])
|
871 |
+
assert table.schema == schema
|
872 |
+
assert table.to_pydict() == {
|
873 |
+
"xx": [None, None, None],
|
874 |
+
"ab": ["ef", "ij", "mn"],
|
875 |
+
"yy": [None, None, None],
|
876 |
+
}
|
877 |
+
|
878 |
+
# Combining with `column_names`
|
879 |
+
read_options.column_names = ["xx", "yy"]
|
880 |
+
convert_options.include_columns = ["yy", "cd"]
|
881 |
+
table = self.read_bytes(rows, read_options=read_options,
|
882 |
+
convert_options=convert_options)
|
883 |
+
schema = pa.schema([('yy', pa.string()),
|
884 |
+
('cd', pa.null())])
|
885 |
+
assert table.schema == schema
|
886 |
+
assert table.to_pydict() == {
|
887 |
+
"yy": ["cd", "gh", "kl", "op"],
|
888 |
+
"cd": [None, None, None, None],
|
889 |
+
}
|
890 |
+
|
891 |
+
# And with `column_types` as well
|
892 |
+
convert_options.column_types = {"yy": pa.binary(),
|
893 |
+
"cd": pa.int32()}
|
894 |
+
table = self.read_bytes(rows, read_options=read_options,
|
895 |
+
convert_options=convert_options)
|
896 |
+
schema = pa.schema([('yy', pa.binary()),
|
897 |
+
('cd', pa.int32())])
|
898 |
+
assert table.schema == schema
|
899 |
+
assert table.to_pydict() == {
|
900 |
+
"yy": [b"cd", b"gh", b"kl", b"op"],
|
901 |
+
"cd": [None, None, None, None],
|
902 |
+
}
|
903 |
+
|
904 |
+
def test_simple_ints(self):
|
905 |
+
# Infer integer columns
|
906 |
+
rows = b"a,b,c\n1,2,3\n4,5,6\n"
|
907 |
+
table = self.read_bytes(rows)
|
908 |
+
schema = pa.schema([('a', pa.int64()),
|
909 |
+
('b', pa.int64()),
|
910 |
+
('c', pa.int64())])
|
911 |
+
assert table.schema == schema
|
912 |
+
assert table.to_pydict() == {
|
913 |
+
'a': [1, 4],
|
914 |
+
'b': [2, 5],
|
915 |
+
'c': [3, 6],
|
916 |
+
}
|
917 |
+
|
918 |
+
def test_simple_varied(self):
|
919 |
+
# Infer various kinds of data
|
920 |
+
rows = b"a,b,c,d\n1,2,3,0\n4.0,-5,foo,True\n"
|
921 |
+
table = self.read_bytes(rows)
|
922 |
+
schema = pa.schema([('a', pa.float64()),
|
923 |
+
('b', pa.int64()),
|
924 |
+
('c', pa.string()),
|
925 |
+
('d', pa.bool_())])
|
926 |
+
assert table.schema == schema
|
927 |
+
assert table.to_pydict() == {
|
928 |
+
'a': [1.0, 4.0],
|
929 |
+
'b': [2, -5],
|
930 |
+
'c': ["3", "foo"],
|
931 |
+
'd': [False, True],
|
932 |
+
}
|
933 |
+
|
934 |
+
def test_simple_nulls(self):
|
935 |
+
# Infer various kinds of data, with nulls
|
936 |
+
rows = (b"a,b,c,d,e,f\n"
|
937 |
+
b"1,2,,,3,N/A\n"
|
938 |
+
b"nan,-5,foo,,nan,TRUE\n"
|
939 |
+
b"4.5,#N/A,nan,,\xff,false\n")
|
940 |
+
table = self.read_bytes(rows)
|
941 |
+
schema = pa.schema([('a', pa.float64()),
|
942 |
+
('b', pa.int64()),
|
943 |
+
('c', pa.string()),
|
944 |
+
('d', pa.null()),
|
945 |
+
('e', pa.binary()),
|
946 |
+
('f', pa.bool_())])
|
947 |
+
assert table.schema == schema
|
948 |
+
assert table.to_pydict() == {
|
949 |
+
'a': [1.0, None, 4.5],
|
950 |
+
'b': [2, -5, None],
|
951 |
+
'c': ["", "foo", "nan"],
|
952 |
+
'd': [None, None, None],
|
953 |
+
'e': [b"3", b"nan", b"\xff"],
|
954 |
+
'f': [None, True, False],
|
955 |
+
}
|
956 |
+
|
957 |
+
def test_decimal_point(self):
|
958 |
+
# Infer floats with a custom decimal point
|
959 |
+
parse_options = ParseOptions(delimiter=';')
|
960 |
+
rows = b"a;b\n1.25;2,5\nNA;-3\n-4;NA"
|
961 |
+
|
962 |
+
table = self.read_bytes(rows, parse_options=parse_options)
|
963 |
+
schema = pa.schema([('a', pa.float64()),
|
964 |
+
('b', pa.string())])
|
965 |
+
assert table.schema == schema
|
966 |
+
assert table.to_pydict() == {
|
967 |
+
'a': [1.25, None, -4.0],
|
968 |
+
'b': ["2,5", "-3", "NA"],
|
969 |
+
}
|
970 |
+
|
971 |
+
convert_options = ConvertOptions(decimal_point=',')
|
972 |
+
table = self.read_bytes(rows, parse_options=parse_options,
|
973 |
+
convert_options=convert_options)
|
974 |
+
schema = pa.schema([('a', pa.string()),
|
975 |
+
('b', pa.float64())])
|
976 |
+
assert table.schema == schema
|
977 |
+
assert table.to_pydict() == {
|
978 |
+
'a': ["1.25", "NA", "-4"],
|
979 |
+
'b': [2.5, -3.0, None],
|
980 |
+
}
|
981 |
+
|
982 |
+
def test_simple_timestamps(self):
|
983 |
+
# Infer a timestamp column
|
984 |
+
rows = (b"a,b,c\n"
|
985 |
+
b"1970,1970-01-01 00:00:00,1970-01-01 00:00:00.123\n"
|
986 |
+
b"1989,1989-07-14 01:00:00,1989-07-14 01:00:00.123456\n")
|
987 |
+
table = self.read_bytes(rows)
|
988 |
+
schema = pa.schema([('a', pa.int64()),
|
989 |
+
('b', pa.timestamp('s')),
|
990 |
+
('c', pa.timestamp('ns'))])
|
991 |
+
assert table.schema == schema
|
992 |
+
assert table.to_pydict() == {
|
993 |
+
'a': [1970, 1989],
|
994 |
+
'b': [datetime(1970, 1, 1), datetime(1989, 7, 14, 1)],
|
995 |
+
'c': [datetime(1970, 1, 1, 0, 0, 0, 123000),
|
996 |
+
datetime(1989, 7, 14, 1, 0, 0, 123456)],
|
997 |
+
}
|
998 |
+
|
999 |
+
def test_timestamp_parsers(self):
|
1000 |
+
# Infer timestamps with custom parsers
|
1001 |
+
rows = b"a,b\n1970/01/01,1980-01-01 00\n1970/01/02,1980-01-02 00\n"
|
1002 |
+
opts = ConvertOptions()
|
1003 |
+
|
1004 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1005 |
+
schema = pa.schema([('a', pa.string()),
|
1006 |
+
('b', pa.timestamp('s'))])
|
1007 |
+
assert table.schema == schema
|
1008 |
+
assert table.to_pydict() == {
|
1009 |
+
'a': ['1970/01/01', '1970/01/02'],
|
1010 |
+
'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
|
1011 |
+
}
|
1012 |
+
|
1013 |
+
opts.timestamp_parsers = ['%Y/%m/%d']
|
1014 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1015 |
+
schema = pa.schema([('a', pa.timestamp('s')),
|
1016 |
+
('b', pa.string())])
|
1017 |
+
assert table.schema == schema
|
1018 |
+
assert table.to_pydict() == {
|
1019 |
+
'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
|
1020 |
+
'b': ['1980-01-01 00', '1980-01-02 00'],
|
1021 |
+
}
|
1022 |
+
|
1023 |
+
opts.timestamp_parsers = ['%Y/%m/%d', ISO8601]
|
1024 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1025 |
+
schema = pa.schema([('a', pa.timestamp('s')),
|
1026 |
+
('b', pa.timestamp('s'))])
|
1027 |
+
assert table.schema == schema
|
1028 |
+
assert table.to_pydict() == {
|
1029 |
+
'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
|
1030 |
+
'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
|
1031 |
+
}
|
1032 |
+
|
1033 |
+
def test_dates(self):
|
1034 |
+
# Dates are inferred as date32 by default
|
1035 |
+
rows = b"a,b\n1970-01-01,1970-01-02\n1971-01-01,1971-01-02\n"
|
1036 |
+
table = self.read_bytes(rows)
|
1037 |
+
schema = pa.schema([('a', pa.date32()),
|
1038 |
+
('b', pa.date32())])
|
1039 |
+
assert table.schema == schema
|
1040 |
+
assert table.to_pydict() == {
|
1041 |
+
'a': [date(1970, 1, 1), date(1971, 1, 1)],
|
1042 |
+
'b': [date(1970, 1, 2), date(1971, 1, 2)],
|
1043 |
+
}
|
1044 |
+
|
1045 |
+
# Can ask for date types explicitly
|
1046 |
+
opts = ConvertOptions()
|
1047 |
+
opts.column_types = {'a': pa.date32(), 'b': pa.date64()}
|
1048 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1049 |
+
schema = pa.schema([('a', pa.date32()),
|
1050 |
+
('b', pa.date64())])
|
1051 |
+
assert table.schema == schema
|
1052 |
+
assert table.to_pydict() == {
|
1053 |
+
'a': [date(1970, 1, 1), date(1971, 1, 1)],
|
1054 |
+
'b': [date(1970, 1, 2), date(1971, 1, 2)],
|
1055 |
+
}
|
1056 |
+
|
1057 |
+
# Can ask for timestamp types explicitly
|
1058 |
+
opts = ConvertOptions()
|
1059 |
+
opts.column_types = {'a': pa.timestamp('s'), 'b': pa.timestamp('ms')}
|
1060 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1061 |
+
schema = pa.schema([('a', pa.timestamp('s')),
|
1062 |
+
('b', pa.timestamp('ms'))])
|
1063 |
+
assert table.schema == schema
|
1064 |
+
assert table.to_pydict() == {
|
1065 |
+
'a': [datetime(1970, 1, 1), datetime(1971, 1, 1)],
|
1066 |
+
'b': [datetime(1970, 1, 2), datetime(1971, 1, 2)],
|
1067 |
+
}
|
1068 |
+
|
1069 |
+
def test_times(self):
|
1070 |
+
# Times are inferred as time32[s] by default
|
1071 |
+
from datetime import time
|
1072 |
+
|
1073 |
+
rows = b"a,b\n12:34:56,12:34:56.789\n23:59:59,23:59:59.999\n"
|
1074 |
+
table = self.read_bytes(rows)
|
1075 |
+
# Column 'b' has subseconds, so cannot be inferred as time32[s]
|
1076 |
+
schema = pa.schema([('a', pa.time32('s')),
|
1077 |
+
('b', pa.string())])
|
1078 |
+
assert table.schema == schema
|
1079 |
+
assert table.to_pydict() == {
|
1080 |
+
'a': [time(12, 34, 56), time(23, 59, 59)],
|
1081 |
+
'b': ["12:34:56.789", "23:59:59.999"],
|
1082 |
+
}
|
1083 |
+
|
1084 |
+
# Can ask for time types explicitly
|
1085 |
+
opts = ConvertOptions()
|
1086 |
+
opts.column_types = {'a': pa.time64('us'), 'b': pa.time32('ms')}
|
1087 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1088 |
+
schema = pa.schema([('a', pa.time64('us')),
|
1089 |
+
('b', pa.time32('ms'))])
|
1090 |
+
assert table.schema == schema
|
1091 |
+
assert table.to_pydict() == {
|
1092 |
+
'a': [time(12, 34, 56), time(23, 59, 59)],
|
1093 |
+
'b': [time(12, 34, 56, 789000), time(23, 59, 59, 999000)],
|
1094 |
+
}
|
1095 |
+
|
1096 |
+
def test_auto_dict_encode(self):
|
1097 |
+
opts = ConvertOptions(auto_dict_encode=True)
|
1098 |
+
rows = "a,b\nab,1\ncdé,2\ncdé,3\nab,4".encode()
|
1099 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1100 |
+
schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.string())),
|
1101 |
+
('b', pa.int64())])
|
1102 |
+
expected = {
|
1103 |
+
'a': ["ab", "cdé", "cdé", "ab"],
|
1104 |
+
'b': [1, 2, 3, 4],
|
1105 |
+
}
|
1106 |
+
assert table.schema == schema
|
1107 |
+
assert table.to_pydict() == expected
|
1108 |
+
|
1109 |
+
opts.auto_dict_max_cardinality = 2
|
1110 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1111 |
+
assert table.schema == schema
|
1112 |
+
assert table.to_pydict() == expected
|
1113 |
+
|
1114 |
+
# Cardinality above max => plain-encoded
|
1115 |
+
opts.auto_dict_max_cardinality = 1
|
1116 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1117 |
+
assert table.schema == pa.schema([('a', pa.string()),
|
1118 |
+
('b', pa.int64())])
|
1119 |
+
assert table.to_pydict() == expected
|
1120 |
+
|
1121 |
+
# With invalid UTF8, not checked
|
1122 |
+
opts.auto_dict_max_cardinality = 50
|
1123 |
+
opts.check_utf8 = False
|
1124 |
+
rows = b"a,b\nab,1\ncd\xff,2\nab,3"
|
1125 |
+
table = self.read_bytes(rows, convert_options=opts,
|
1126 |
+
validate_full=False)
|
1127 |
+
assert table.schema == schema
|
1128 |
+
dict_values = table['a'].chunk(0).dictionary
|
1129 |
+
assert len(dict_values) == 2
|
1130 |
+
assert dict_values[0].as_py() == "ab"
|
1131 |
+
assert dict_values[1].as_buffer() == b"cd\xff"
|
1132 |
+
|
1133 |
+
# With invalid UTF8, checked
|
1134 |
+
opts.check_utf8 = True
|
1135 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1136 |
+
schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.binary())),
|
1137 |
+
('b', pa.int64())])
|
1138 |
+
expected = {
|
1139 |
+
'a': [b"ab", b"cd\xff", b"ab"],
|
1140 |
+
'b': [1, 2, 3],
|
1141 |
+
}
|
1142 |
+
assert table.schema == schema
|
1143 |
+
assert table.to_pydict() == expected
|
1144 |
+
|
1145 |
+
def test_custom_nulls(self):
|
1146 |
+
# Infer nulls with custom values
|
1147 |
+
opts = ConvertOptions(null_values=['Xxx', 'Zzz'])
|
1148 |
+
rows = b"""a,b,c,d\nZzz,"Xxx",1,2\nXxx,#N/A,,Zzz\n"""
|
1149 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1150 |
+
schema = pa.schema([('a', pa.null()),
|
1151 |
+
('b', pa.string()),
|
1152 |
+
('c', pa.string()),
|
1153 |
+
('d', pa.int64())])
|
1154 |
+
assert table.schema == schema
|
1155 |
+
assert table.to_pydict() == {
|
1156 |
+
'a': [None, None],
|
1157 |
+
'b': ["Xxx", "#N/A"],
|
1158 |
+
'c': ["1", ""],
|
1159 |
+
'd': [2, None],
|
1160 |
+
}
|
1161 |
+
|
1162 |
+
opts = ConvertOptions(null_values=['Xxx', 'Zzz'],
|
1163 |
+
strings_can_be_null=True)
|
1164 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1165 |
+
assert table.to_pydict() == {
|
1166 |
+
'a': [None, None],
|
1167 |
+
'b': [None, "#N/A"],
|
1168 |
+
'c': ["1", ""],
|
1169 |
+
'd': [2, None],
|
1170 |
+
}
|
1171 |
+
opts.quoted_strings_can_be_null = False
|
1172 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1173 |
+
assert table.to_pydict() == {
|
1174 |
+
'a': [None, None],
|
1175 |
+
'b': ["Xxx", "#N/A"],
|
1176 |
+
'c': ["1", ""],
|
1177 |
+
'd': [2, None],
|
1178 |
+
}
|
1179 |
+
|
1180 |
+
opts = ConvertOptions(null_values=[])
|
1181 |
+
rows = b"a,b\n#N/A,\n"
|
1182 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1183 |
+
schema = pa.schema([('a', pa.string()),
|
1184 |
+
('b', pa.string())])
|
1185 |
+
assert table.schema == schema
|
1186 |
+
assert table.to_pydict() == {
|
1187 |
+
'a': ["#N/A"],
|
1188 |
+
'b': [""],
|
1189 |
+
}
|
1190 |
+
|
1191 |
+
def test_custom_bools(self):
|
1192 |
+
# Infer booleans with custom values
|
1193 |
+
opts = ConvertOptions(true_values=['T', 'yes'],
|
1194 |
+
false_values=['F', 'no'])
|
1195 |
+
rows = (b"a,b,c\n"
|
1196 |
+
b"True,T,t\n"
|
1197 |
+
b"False,F,f\n"
|
1198 |
+
b"True,yes,yes\n"
|
1199 |
+
b"False,no,no\n"
|
1200 |
+
b"N/A,N/A,N/A\n")
|
1201 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1202 |
+
schema = pa.schema([('a', pa.string()),
|
1203 |
+
('b', pa.bool_()),
|
1204 |
+
('c', pa.string())])
|
1205 |
+
assert table.schema == schema
|
1206 |
+
assert table.to_pydict() == {
|
1207 |
+
'a': ["True", "False", "True", "False", "N/A"],
|
1208 |
+
'b': [True, False, True, False, None],
|
1209 |
+
'c': ["t", "f", "yes", "no", "N/A"],
|
1210 |
+
}
|
1211 |
+
|
1212 |
+
def test_column_types(self):
|
1213 |
+
# Ask for specific column types in ConvertOptions
|
1214 |
+
opts = ConvertOptions(column_types={'b': 'float32',
|
1215 |
+
'c': 'string',
|
1216 |
+
'd': 'boolean',
|
1217 |
+
'e': pa.decimal128(11, 2),
|
1218 |
+
'zz': 'null'})
|
1219 |
+
rows = b"a,b,c,d,e\n1,2,3,true,1.0\n4,-5,6,false,0\n"
|
1220 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1221 |
+
schema = pa.schema([('a', pa.int64()),
|
1222 |
+
('b', pa.float32()),
|
1223 |
+
('c', pa.string()),
|
1224 |
+
('d', pa.bool_()),
|
1225 |
+
('e', pa.decimal128(11, 2))])
|
1226 |
+
expected = {
|
1227 |
+
'a': [1, 4],
|
1228 |
+
'b': [2.0, -5.0],
|
1229 |
+
'c': ["3", "6"],
|
1230 |
+
'd': [True, False],
|
1231 |
+
'e': [Decimal("1.00"), Decimal("0.00")]
|
1232 |
+
}
|
1233 |
+
assert table.schema == schema
|
1234 |
+
assert table.to_pydict() == expected
|
1235 |
+
# Pass column_types as schema
|
1236 |
+
opts = ConvertOptions(
|
1237 |
+
column_types=pa.schema([('b', pa.float32()),
|
1238 |
+
('c', pa.string()),
|
1239 |
+
('d', pa.bool_()),
|
1240 |
+
('e', pa.decimal128(11, 2)),
|
1241 |
+
('zz', pa.bool_())]))
|
1242 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1243 |
+
assert table.schema == schema
|
1244 |
+
assert table.to_pydict() == expected
|
1245 |
+
# One of the columns in column_types fails converting
|
1246 |
+
rows = b"a,b,c,d,e\n1,XXX,3,true,5\n4,-5,6,false,7\n"
|
1247 |
+
with pytest.raises(pa.ArrowInvalid) as exc:
|
1248 |
+
self.read_bytes(rows, convert_options=opts)
|
1249 |
+
err = str(exc.value)
|
1250 |
+
assert "In CSV column #1: " in err
|
1251 |
+
assert "CSV conversion error to float: invalid value 'XXX'" in err
|
1252 |
+
|
+    def test_column_types_dict(self):
+        # Ask for dict-encoded column types in ConvertOptions
+        column_types = [
+            ('a', pa.dictionary(pa.int32(), pa.utf8())),
+            ('b', pa.dictionary(pa.int32(), pa.int64())),
+            ('c', pa.dictionary(pa.int32(), pa.decimal128(11, 2))),
+            ('d', pa.dictionary(pa.int32(), pa.large_utf8()))]
+
+        opts = ConvertOptions(column_types=dict(column_types))
+        rows = (b"a,b,c,d\n"
+                b"abc,123456,1.0,zz\n"
+                b"defg,123456,0.5,xx\n"
+                b"abc,N/A,1.0,xx\n")
+        table = self.read_bytes(rows, convert_options=opts)
+
+        schema = pa.schema(column_types)
+        expected = {
+            'a': ["abc", "defg", "abc"],
+            'b': [123456, 123456, None],
+            'c': [Decimal("1.00"), Decimal("0.50"), Decimal("1.00")],
+            'd': ["zz", "xx", "xx"],
+        }
+        assert table.schema == schema
+        assert table.to_pydict() == expected
+
+        # Unsupported index type
+        column_types[0] = ('a', pa.dictionary(pa.int8(), pa.utf8()))
+
+        opts = ConvertOptions(column_types=dict(column_types))
+        with pytest.raises(NotImplementedError):
+            table = self.read_bytes(rows, convert_options=opts)
+
+    def test_column_types_with_column_names(self):
+        # When both `column_names` and `column_types` are given, names
+        # in `column_types` should refer to names in `column_names`
+        rows = b"a,b\nc,d\ne,f\n"
+        read_options = ReadOptions(column_names=['x', 'y'])
+        convert_options = ConvertOptions(column_types={'x': pa.binary()})
+        table = self.read_bytes(rows, read_options=read_options,
+                                convert_options=convert_options)
+        schema = pa.schema([('x', pa.binary()),
+                            ('y', pa.string())])
+        assert table.schema == schema
+        assert table.to_pydict() == {
+            'x': [b'a', b'c', b'e'],
+            'y': ['b', 'd', 'f'],
+        }
+
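Dictionary-encoded columns can be requested the same way in regular code; note that the test above expects int32 indices and an int8 index type raises NotImplementedError. A minimal sketch:

    import io
    import pyarrow as pa
    from pyarrow import csv

    opts = csv.ConvertOptions(
        column_types={'cat': pa.dictionary(pa.int32(), pa.utf8())})
    table = csv.read_csv(io.BytesIO(b"cat\nx\ny\nx\n"), convert_options=opts)
    assert table.column('cat').type == pa.dictionary(pa.int32(), pa.utf8())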
+    def test_no_ending_newline(self):
+        # No \n after last line
+        rows = b"a,b,c\n1,2,3\n4,5,6"
+        table = self.read_bytes(rows)
+        assert table.to_pydict() == {
+            'a': [1, 4],
+            'b': [2, 5],
+            'c': [3, 6],
+        }
+
+    def test_trivial(self):
+        # A bit pointless, but at least it shouldn't crash
+        rows = b",\n\n"
+        table = self.read_bytes(rows)
+        assert table.to_pydict() == {'': []}
+
+    def test_empty_lines(self):
+        rows = b"a,b\n\r1,2\r\n\r\n3,4\r\n"
+        table = self.read_bytes(rows)
+        assert table.to_pydict() == {
+            'a': [1, 3],
+            'b': [2, 4],
+        }
+        parse_options = ParseOptions(ignore_empty_lines=False)
+        table = self.read_bytes(rows, parse_options=parse_options)
+        assert table.to_pydict() == {
+            'a': [None, 1, None, 3],
+            'b': [None, 2, None, 4],
+        }
+        read_options = ReadOptions(skip_rows=2)
+        table = self.read_bytes(rows, parse_options=parse_options,
+                                read_options=read_options)
+        assert table.to_pydict() == {
+            '1': [None, 3],
+            '2': [None, 4],
+        }
+
+    def test_invalid_csv(self):
+        # Various CSV errors
+        rows = b"a,b,c\n1,2\n4,5,6\n"
+        with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 2"):
+            self.read_bytes(rows)
+        rows = b"a,b,c\n1,2,3\n4"
+        with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 1"):
+            self.read_bytes(rows)
+        for rows in [b"", b"\n", b"\r\n", b"\r", b"\n\n"]:
+            with pytest.raises(pa.ArrowInvalid, match="Empty CSV file"):
+                self.read_bytes(rows)
+
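The empty-line behaviour checked above is controlled by ParseOptions.ignore_empty_lines; a minimal sketch with illustrative input:

    import io
    from pyarrow import csv

    rows = b"a,b\n\n1,2\n"
    assert csv.read_csv(io.BytesIO(rows)).num_rows == 1  # empty line skipped
    opts = csv.ParseOptions(ignore_empty_lines=False)
    table = csv.read_csv(io.BytesIO(rows), parse_options=opts)
    assert table.num_rows == 2  # empty line becomes a row of nulls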
+    def test_options_delimiter(self):
+        rows = b"a;b,c\nde,fg;eh\n"
+        table = self.read_bytes(rows)
+        assert table.to_pydict() == {
+            'a;b': ['de'],
+            'c': ['fg;eh'],
+        }
+        opts = ParseOptions(delimiter=';')
+        table = self.read_bytes(rows, parse_options=opts)
+        assert table.to_pydict() == {
+            'a': ['de,fg'],
+            'b,c': ['eh'],
+        }
+
+    def test_small_random_csv(self):
+        csv, expected = make_random_csv(num_cols=2, num_rows=10)
+        table = self.read_bytes(csv)
+        assert table.schema == expected.schema
+        assert table.equals(expected)
+        assert table.to_pydict() == expected.to_pydict()
+
+    def test_stress_block_sizes(self):
+        # Test a number of small block sizes to stress block stitching
+        csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
+        block_sizes = [11, 12, 13, 17, 37, 111]
+        csvs = [csv_base, csv_base.rstrip(b'\r\n')]
+        for csv in csvs:
+            for block_size in block_sizes:
+                read_options = ReadOptions(block_size=block_size)
+                table = self.read_bytes(csv, read_options=read_options)
+                assert table.schema == expected.schema
+                if not table.equals(expected):
+                    # Better error output
+                    assert table.to_pydict() == expected.to_pydict()
+
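The block-size stress test relies on the guarantee that parsed results do not depend on ReadOptions.block_size; a minimal sketch:

    import io
    from pyarrow import csv

    data = b"a,b\n1,2\n3,4\n5,6\n"
    small = csv.read_csv(io.BytesIO(data),
                         read_options=csv.ReadOptions(block_size=16))
    large = csv.read_csv(io.BytesIO(data))
    assert small.equals(large)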
+    def test_stress_convert_options_blowup(self):
+        # ARROW-6481: A convert_options with a very large number of columns
+        # should not blow memory and CPU time.
+        try:
+            clock = time.thread_time
+        except AttributeError:
+            clock = time.time
+        num_columns = 10000
+        col_names = ["K{}".format(i) for i in range(num_columns)]
+        csv = make_empty_csv(col_names)
+        t1 = clock()
+        convert_options = ConvertOptions(
+            column_types={k: pa.string() for k in col_names[::2]})
+        table = self.read_bytes(csv, convert_options=convert_options)
+        dt = clock() - t1
+        # Check that processing time didn't blow up.
+        # This is a conservative check (it takes less than 300 ms
+        # in debug mode on my local machine).
+        assert dt <= 10.0
+        # Check result
+        assert table.num_columns == num_columns
+        assert table.num_rows == 0
+        assert table.column_names == col_names
+
+    def test_cancellation(self):
+        if (threading.current_thread().ident !=
+                threading.main_thread().ident):
+            pytest.skip("test only works from main Python thread")
+        # Skips test if not available
+        raise_signal = util.get_raise_signal()
+        signum = signal.SIGINT
+
+        def signal_from_thread():
+            # Give our workload a chance to start up
+            time.sleep(0.2)
+            raise_signal(signum)
+
+        # We start with a small CSV reading workload and increase its size
+        # until it's large enough to get an interruption during it, even in
+        # release mode on fast machines.
+        last_duration = 0.0
+        workload_size = 100_000
+        attempts = 0
+
+        while last_duration < 5.0 and attempts < 10:
+            print("workload size:", workload_size)
+            large_csv = b"a,b,c\n" + b"1,2,3\n" * workload_size
+            exc_info = None
+
+            try:
+                # We use a signal fd to reliably ensure that the signal
+                # has been delivered to Python, regardless of how exactly
+                # it was caught.
+                with util.signal_wakeup_fd() as sigfd:
+                    try:
+                        t = threading.Thread(target=signal_from_thread)
+                        t.start()
+                        t1 = time.time()
+                        try:
+                            self.read_bytes(large_csv)
+                        except KeyboardInterrupt as e:
+                            exc_info = e
+                            last_duration = time.time() - t1
+                    finally:
+                        # Wait for signal to arrive if it didn't already,
+                        # to avoid getting a KeyboardInterrupt after the
+                        # `except` block below.
+                        select.select([sigfd], [], [sigfd], 10.0)
+
+            except KeyboardInterrupt:
+                # KeyboardInterrupt didn't interrupt `read_bytes` above.
+                pass
+
+            if exc_info is not None:
+                # We managed to get `self.read_bytes` interrupted, see if it
+                # was actually interrupted inside Arrow C++ or in the Python
+                # scaffolding.
+                if exc_info.__context__ is not None:
+                    # Interrupted inside Arrow C++, we're satisfied now
+                    break
+
+            # Increase workload size to get a better chance
+            workload_size = workload_size * 3
+
+        if exc_info is None:
+            pytest.fail("Failed to get an interruption during CSV reading")
+
+        # Interruption should have arrived timely
+        assert last_duration <= 1.0
+        e = exc_info.__context__
+        assert isinstance(e, pa.ArrowCancelled)
+        assert e.signum == signum
+
+    def test_cancellation_disabled(self):
+        # ARROW-12622: reader would segfault when the cancelling signal
+        # handler was not enabled (e.g. if disabled, or if not on the
+        # main thread)
+        t = threading.Thread(
+            target=lambda: self.read_bytes(b"f64\n0.1"))
+        t.start()
+        t.join()
+
+
+class TestSerialCSVTableRead(BaseCSVTableRead):
+    @property
+    def use_threads(self):
+        return False
+
+
+class TestThreadedCSVTableRead(BaseCSVTableRead):
+    @property
+    def use_threads(self):
+        return True
+
+
+class BaseStreamingCSVRead(BaseTestCSV):
+
+    def open_csv(self, csv, *args, **kwargs):
+        """
+        Reads the CSV file into memory using pyarrow's open_csv
+        csv The CSV bytes
+        args Positional arguments to be forwarded to pyarrow's open_csv
+        kwargs Keyword arguments to be forwarded to pyarrow's open_csv
+        """
+        read_options = kwargs.setdefault('read_options', ReadOptions())
+        read_options.use_threads = self.use_threads
+        return open_csv(csv, *args, **kwargs)
+
+    def open_bytes(self, b, **kwargs):
+        return self.open_csv(pa.py_buffer(b), **kwargs)
+
+    def check_reader(self, reader, expected_schema, expected_data):
+        assert reader.schema == expected_schema
+        batches = list(reader)
+        assert len(batches) == len(expected_data)
+        for batch, expected_batch in zip(batches, expected_data):
+            batch.validate(full=True)
+            assert batch.schema == expected_schema
+            assert batch.to_pydict() == expected_batch
+
+    def read_bytes(self, b, **kwargs):
+        return self.open_bytes(b, **kwargs).read_all()
+
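The streaming entry point wrapped by open_csv above yields record batches incrementally instead of materializing a whole table; a minimal sketch:

    import io
    from pyarrow import csv

    reader = csv.open_csv(io.BytesIO(b"a,b\n1,2\n3,4\n"))
    for batch in reader:
        print(batch.to_pydict())  # one or more RecordBatch objects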
+    def test_file_object(self):
+        data = b"a,b\n1,2\n3,4\n"
+        expected_data = {'a': [1, 3], 'b': [2, 4]}
+        bio = io.BytesIO(data)
+        reader = self.open_csv(bio)
+        expected_schema = pa.schema([('a', pa.int64()),
+                                     ('b', pa.int64())])
+        self.check_reader(reader, expected_schema, [expected_data])
+
+    def test_header(self):
+        rows = b"abc,def,gh\n"
+        reader = self.open_bytes(rows)
+        expected_schema = pa.schema([('abc', pa.null()),
+                                     ('def', pa.null()),
+                                     ('gh', pa.null())])
+        self.check_reader(reader, expected_schema, [])
+
+    def test_inference(self):
+        # Inference is done on first block
+        rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
+        expected_schema = pa.schema([('a', pa.string()),
+                                     ('b', pa.binary())])
+
+        read_options = ReadOptions()
+        read_options.block_size = len(rows)
+        reader = self.open_bytes(rows, read_options=read_options)
+        self.check_reader(reader, expected_schema,
+                          [{'a': ['123', 'abc', 'gh'],
+                            'b': [b'456', b'de\xff', b'ij']}])
+
+        read_options.block_size = len(rows) - 1
+        reader = self.open_bytes(rows, read_options=read_options)
+        self.check_reader(reader, expected_schema,
+                          [{'a': ['123', 'abc'],
+                            'b': [b'456', b'de\xff']},
+                           {'a': ['gh'],
+                            'b': [b'ij']}])
+
+    def test_inference_failure(self):
+        # Inference on first block, then conversion failure on second block
+        rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
+        read_options = ReadOptions()
+        read_options.block_size = len(rows) - 7
+        reader = self.open_bytes(rows, read_options=read_options)
+        expected_schema = pa.schema([('a', pa.int64()),
+                                     ('b', pa.int64())])
+        assert reader.schema == expected_schema
+        assert reader.read_next_batch().to_pydict() == {
+            'a': [123], 'b': [456]
+        }
+        # Second block
+        with pytest.raises(ValueError,
+                           match="CSV conversion error to int64"):
+            reader.read_next_batch()
+        # EOF
+        with pytest.raises(StopIteration):
+            reader.read_next_batch()
+
+    def test_invalid_csv(self):
+        # CSV errors on first block
+        rows = b"a,b\n1,2,3\n4,5\n6,7\n"
+        read_options = ReadOptions()
+        read_options.block_size = 10
+        with pytest.raises(pa.ArrowInvalid,
+                           match="Expected 2 columns, got 3"):
+            reader = self.open_bytes(
+                rows, read_options=read_options)
+
+        # CSV errors on second block
+        rows = b"a,b\n1,2\n3,4,5\n6,7\n"
+        read_options.block_size = 8
+        reader = self.open_bytes(rows, read_options=read_options)
+        assert reader.read_next_batch().to_pydict() == {'a': [1], 'b': [2]}
+        with pytest.raises(pa.ArrowInvalid,
+                           match="Expected 2 columns, got 3"):
+            reader.read_next_batch()
+        # Cannot continue after a parse error
+        with pytest.raises(StopIteration):
+            reader.read_next_batch()
+
+    def test_options_delimiter(self):
+        rows = b"a;b,c\nde,fg;eh\n"
+        reader = self.open_bytes(rows)
+        expected_schema = pa.schema([('a;b', pa.string()),
+                                     ('c', pa.string())])
+        self.check_reader(reader, expected_schema,
+                          [{'a;b': ['de'],
+                            'c': ['fg;eh']}])
+
+        opts = ParseOptions(delimiter=';')
+        reader = self.open_bytes(rows, parse_options=opts)
+        expected_schema = pa.schema([('a', pa.string()),
+                                     ('b,c', pa.string())])
+        self.check_reader(reader, expected_schema,
+                          [{'a': ['de,fg'],
+                            'b,c': ['eh']}])
+
+    def test_no_ending_newline(self):
+        # No \n after last line
+        rows = b"a,b,c\n1,2,3\n4,5,6"
+        reader = self.open_bytes(rows)
+        expected_schema = pa.schema([('a', pa.int64()),
+                                     ('b', pa.int64()),
+                                     ('c', pa.int64())])
+        self.check_reader(reader, expected_schema,
+                          [{'a': [1, 4],
+                            'b': [2, 5],
+                            'c': [3, 6]}])
+
+    def test_empty_file(self):
+        with pytest.raises(ValueError, match="Empty CSV file"):
+            self.open_bytes(b"")
+
+    def test_column_options(self):
+        # With column_names
+        rows = b"1,2,3\n4,5,6"
+        read_options = ReadOptions()
+        read_options.column_names = ['d', 'e', 'f']
+        reader = self.open_bytes(rows, read_options=read_options)
+        expected_schema = pa.schema([('d', pa.int64()),
+                                     ('e', pa.int64()),
+                                     ('f', pa.int64())])
+        self.check_reader(reader, expected_schema,
+                          [{'d': [1, 4],
+                            'e': [2, 5],
+                            'f': [3, 6]}])
+
+        # With include_columns
+        convert_options = ConvertOptions()
+        convert_options.include_columns = ['f', 'e']
+        reader = self.open_bytes(rows, read_options=read_options,
+                                 convert_options=convert_options)
+        expected_schema = pa.schema([('f', pa.int64()),
+                                     ('e', pa.int64())])
+        self.check_reader(reader, expected_schema,
+                          [{'e': [2, 5],
+                            'f': [3, 6]}])
+
+        # With column_types
+        convert_options.column_types = {'e': pa.string()}
+        reader = self.open_bytes(rows, read_options=read_options,
+                                 convert_options=convert_options)
+        expected_schema = pa.schema([('f', pa.int64()),
+                                     ('e', pa.string())])
+        self.check_reader(reader, expected_schema,
+                          [{'e': ["2", "5"],
+                            'f': [3, 6]}])
+
+        # Missing columns in include_columns
+        convert_options.include_columns = ['g', 'f', 'e']
+        with pytest.raises(
+                KeyError,
+                match="Column 'g' in include_columns does not exist"):
+            reader = self.open_bytes(rows, read_options=read_options,
+                                     convert_options=convert_options)
+
+        convert_options.include_missing_columns = True
+        reader = self.open_bytes(rows, read_options=read_options,
+                                 convert_options=convert_options)
+        expected_schema = pa.schema([('g', pa.null()),
+                                     ('f', pa.int64()),
+                                     ('e', pa.string())])
+        self.check_reader(reader, expected_schema,
+                          [{'g': [None, None],
+                            'e': ["2", "5"],
+                            'f': [3, 6]}])
+
+        convert_options.column_types = {'e': pa.string(), 'g': pa.float64()}
+        reader = self.open_bytes(rows, read_options=read_options,
+                                 convert_options=convert_options)
+        expected_schema = pa.schema([('g', pa.float64()),
+                                     ('f', pa.int64()),
+                                     ('e', pa.string())])
+        self.check_reader(reader, expected_schema,
+                          [{'g': [None, None],
+                            'e': ["2", "5"],
+                            'f': [3, 6]}])
+
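As exercised above, include_columns both selects and orders the output columns, and include_missing_columns=True materializes absent names as null columns; a minimal sketch:

    import io
    from pyarrow import csv

    co = csv.ConvertOptions(include_columns=['c', 'a'])
    table = csv.read_csv(io.BytesIO(b"a,b,c\n1,2,3\n"), convert_options=co)
    assert table.column_names == ['c', 'a']  # output order follows include_columns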
+    def test_encoding(self):
+        # latin-1 (invalid utf-8)
+        rows = b"a,b\nun,\xe9l\xe9phant"
+        read_options = ReadOptions()
+        reader = self.open_bytes(rows, read_options=read_options)
+        expected_schema = pa.schema([('a', pa.string()),
+                                     ('b', pa.binary())])
+        self.check_reader(reader, expected_schema,
+                          [{'a': ["un"],
+                            'b': [b"\xe9l\xe9phant"]}])
+
+        read_options.encoding = 'latin1'
+        reader = self.open_bytes(rows, read_options=read_options)
+        expected_schema = pa.schema([('a', pa.string()),
+                                     ('b', pa.string())])
+        self.check_reader(reader, expected_schema,
+                          [{'a': ["un"],
+                            'b': ["éléphant"]}])
+
+        # utf-16
+        rows = (b'\xff\xfea\x00,\x00b\x00\n\x00u\x00n\x00,'
+                b'\x00\xe9\x00l\x00\xe9\x00p\x00h\x00a\x00n\x00t\x00')
+        read_options.encoding = 'utf16'
+        reader = self.open_bytes(rows, read_options=read_options)
+        expected_schema = pa.schema([('a', pa.string()),
+                                     ('b', pa.string())])
+        self.check_reader(reader, expected_schema,
+                          [{'a': ["un"],
+                            'b': ["éléphant"]}])
+
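ReadOptions.encoding transcodes the input before parsing, which is what the latin-1 and utf-16 cases above verify; a minimal sketch:

    import io
    from pyarrow import csv

    data = "col\néléphant\n".encode('latin1')
    ro = csv.ReadOptions(encoding='latin1')
    table = csv.read_csv(io.BytesIO(data), read_options=ro)
    assert table.to_pydict() == {'col': ['éléphant']}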
+    def test_small_random_csv(self):
+        csv, expected = make_random_csv(num_cols=2, num_rows=10)
+        reader = self.open_bytes(csv)
+        table = reader.read_all()
+        assert table.schema == expected.schema
+        assert table.equals(expected)
+        assert table.to_pydict() == expected.to_pydict()
+
+    def test_stress_block_sizes(self):
+        # Test a number of small block sizes to stress block stitching
+        csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
+        block_sizes = [19, 21, 23, 26, 37, 111]
+        csvs = [csv_base, csv_base.rstrip(b'\r\n')]
+        for csv in csvs:
+            for block_size in block_sizes:
+                # Need at least two lines for type inference
+                assert csv[:block_size].count(b'\n') >= 2
+                read_options = ReadOptions(block_size=block_size)
+                reader = self.open_bytes(
+                    csv, read_options=read_options)
+                table = reader.read_all()
+                assert table.schema == expected.schema
+                if not table.equals(expected):
+                    # Better error output
+                    assert table.to_pydict() == expected.to_pydict()
+
+    def test_batch_lifetime(self):
+        gc.collect()
+        old_allocated = pa.total_allocated_bytes()
+
+        # Memory occupation should not grow with CSV file size
+        def check_one_batch(reader, expected):
+            batch = reader.read_next_batch()
+            assert batch.to_pydict() == expected
+
+        rows = b"10,11\n12,13\n14,15\n16,17\n"
+        read_options = ReadOptions()
+        read_options.column_names = ['a', 'b']
+        read_options.block_size = 6
+        reader = self.open_bytes(rows, read_options=read_options)
+        check_one_batch(reader, {'a': [10], 'b': [11]})
+        allocated_after_first_batch = pa.total_allocated_bytes()
+        check_one_batch(reader, {'a': [12], 'b': [13]})
+        assert pa.total_allocated_bytes() <= allocated_after_first_batch
+        check_one_batch(reader, {'a': [14], 'b': [15]})
+        assert pa.total_allocated_bytes() <= allocated_after_first_batch
+        check_one_batch(reader, {'a': [16], 'b': [17]})
+        assert pa.total_allocated_bytes() <= allocated_after_first_batch
+        with pytest.raises(StopIteration):
+            reader.read_next_batch()
+        assert pa.total_allocated_bytes() == old_allocated
+        reader = None
+        assert pa.total_allocated_bytes() == old_allocated
+
+    def test_header_skip_rows(self):
+        super().test_header_skip_rows()
+
+        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
+
+        # Skipping all rows immediately results in end of iteration
+        opts = ReadOptions()
+        opts.skip_rows = 4
+        opts.column_names = ['ab', 'cd']
+        reader = self.open_bytes(rows, read_options=opts)
+        with pytest.raises(StopIteration):
+            assert reader.read_next_batch()
+
+    def test_skip_rows_after_names(self):
+        super().test_skip_rows_after_names()
+
+        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
+
+        # Skipping all rows immediately results in end of iteration
+        opts = ReadOptions()
+        opts.skip_rows_after_names = 3
+        reader = self.open_bytes(rows, read_options=opts)
+        with pytest.raises(StopIteration):
+            assert reader.read_next_batch()
+
+        # Skipping beyond all rows immediately results in end of iteration
+        opts.skip_rows_after_names = 99999
+        reader = self.open_bytes(rows, read_options=opts)
+        with pytest.raises(StopIteration):
+            assert reader.read_next_batch()
+
+
+class TestSerialStreamingCSVRead(BaseStreamingCSVRead):
+    @property
+    def use_threads(self):
+        return False
+
+
+class TestThreadedStreamingCSVRead(BaseStreamingCSVRead):
+    @property
+    def use_threads(self):
+        return True
+
+
+class BaseTestCompressedCSVRead:
+
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp(prefix='arrow-csv-test-')
+
+    def tearDown(self):
+        shutil.rmtree(self.tmpdir)
+
+    def read_csv(self, csv_path):
+        try:
+            return read_csv(csv_path)
+        except pa.ArrowNotImplementedError as e:
+            pytest.skip(str(e))
+
+    def test_random_csv(self):
+        csv, expected = make_random_csv(num_cols=2, num_rows=100)
+        csv_path = os.path.join(self.tmpdir, self.csv_filename)
+        self.write_file(csv_path, csv)
+        table = self.read_csv(csv_path)
+        table.validate(full=True)
+        assert table.schema == expected.schema
+        assert table.equals(expected)
+        assert table.to_pydict() == expected.to_pydict()
+
+
+class TestGZipCSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
+    csv_filename = "compressed.csv.gz"
+
+    def write_file(self, path, contents):
+        with gzip.open(path, 'wb', 3) as f:
+            f.write(contents)
+
+    def test_concatenated(self):
+        # ARROW-5974
+        csv_path = os.path.join(self.tmpdir, self.csv_filename)
+        with gzip.open(csv_path, 'wb', 3) as f:
+            f.write(b"ab,cd\nef,gh\n")
+        with gzip.open(csv_path, 'ab', 3) as f:
+            f.write(b"ij,kl\nmn,op\n")
+        table = self.read_csv(csv_path)
+        assert table.to_pydict() == {
+            'ab': ['ef', 'ij', 'mn'],
+            'cd': ['gh', 'kl', 'op'],
+        }
+
+
+class TestBZ2CSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
+    csv_filename = "compressed.csv.bz2"
+
+    def write_file(self, path, contents):
+        with bz2.BZ2File(path, 'w') as f:
+            f.write(contents)
+
+
+def test_read_csv_does_not_close_passed_file_handles():
+    # ARROW-4823
+    buf = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
+    read_csv(buf)
+    assert not buf.closed
+
+
+def test_write_read_round_trip():
+    t = pa.Table.from_arrays([[1, 2, 3], ["a", "b", "c"]], ["c1", "c2"])
+    record_batch = t.to_batches(max_chunksize=4)[0]
+    for data in [t, record_batch]:
+        # Test with header
+        buf = io.BytesIO()
+        write_csv(data, buf, WriteOptions(include_header=True))
+        buf.seek(0)
+        assert t == read_csv(buf)
+
+        # Test without header
+        buf = io.BytesIO()
+        write_csv(data, buf, WriteOptions(include_header=False))
+        buf.seek(0)
+
+        read_options = ReadOptions(column_names=t.column_names)
+        assert t == read_csv(buf, read_options=read_options)
+
+    # Test with writer
+    for read_options, parse_options, write_options in [
+        (None, None, WriteOptions(include_header=True)),
+        (ReadOptions(column_names=t.column_names), None,
+         WriteOptions(include_header=False)),
+        (None, ParseOptions(delimiter='|'),
+         WriteOptions(include_header=True, delimiter='|')),
+        (ReadOptions(column_names=t.column_names),
+         ParseOptions(delimiter='\t'),
+         WriteOptions(include_header=False, delimiter='\t')),
+    ]:
+        buf = io.BytesIO()
+        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
+            writer.write_table(t)
+        buf.seek(0)
+        assert t == read_csv(buf, read_options=read_options,
+                             parse_options=parse_options)
+        buf = io.BytesIO()
+        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
+            for batch in t.to_batches(max_chunksize=1):
+                writer.write_batch(batch)
+        buf.seek(0)
+        assert t == read_csv(buf, read_options=read_options,
+                             parse_options=parse_options)
+
+
+def test_write_quoting_style():
+    t = pa.Table.from_arrays([[1, 2, None], ["a", None, "c"]], ["c1", "c2"])
+    buf = io.BytesIO()
+    for write_options, res in [
+        (WriteOptions(quoting_style='none'), b'"c1","c2"\n1,a\n2,\n,c\n'),
+        (WriteOptions(), b'"c1","c2"\n1,"a"\n2,\n,"c"\n'),
+        (WriteOptions(quoting_style='all_valid'),
+         b'"c1","c2"\n"1","a"\n"2",\n,"c"\n'),
+    ]:
+        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
+            writer.write_table(t)
+        assert buf.getvalue() == res
+        buf.seek(0)
+
+    # Test writing special characters with different quoting styles
+    t = pa.Table.from_arrays([[",", "\""]], ["c1"])
+    buf = io.BytesIO()
+    for write_options, res in [
+        (WriteOptions(quoting_style='needed'), b'"c1"\n","\n""""\n'),
+        (WriteOptions(quoting_style='none'), pa.lib.ArrowInvalid),
+    ]:
+        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
+            try:
+                writer.write_table(t)
+            except Exception as e:
+                # This will trigger when we try to write a comma (,)
+                # without quotes, which is invalid
+                assert isinstance(e, res)
+                break
+        assert buf.getvalue() == res
+        buf.seek(0)
+
+
+def test_read_csv_reference_cycle():
+    # ARROW-13187
+    def inner():
+        buf = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
+        table = read_csv(buf)
+        return weakref.ref(table)
+
+    with util.disabled_gc():
+        wr = inner()
+        assert wr() is None
+
+
+@pytest.mark.parametrize("type_factory", (
+    lambda: pa.decimal128(20, 1),
+    lambda: pa.decimal128(38, 15),
+    lambda: pa.decimal256(20, 1),
+    lambda: pa.decimal256(76, 10),
+))
+def test_write_csv_decimal(tmpdir, type_factory):
+    type = type_factory()
+    table = pa.table({"col": pa.array([1, 2]).cast(type)})
+
+    write_csv(table, tmpdir / "out.csv")
+    out = read_csv(tmpdir / "out.csv")
+
+    assert out.column('col').cast(type) == table.column('col')
+
+
+def test_read_csv_gil_deadlock():
+    # GH-38676
+    # This test depends on several preconditions:
+    # - the CSV input is a Python file object
+    # - reading the CSV file produces an error
+    data = b"a,b,c"
+
+    class MyBytesIO(io.BytesIO):
+        def read(self, *args):
+            time.sleep(0.001)
+            return super().read(*args)
+
+        def readinto(self, *args):
+            time.sleep(0.001)
+            return super().readinto(*args)
+
+    for i in range(20):
+        with pytest.raises(pa.ArrowInvalid):
+            read_csv(MyBytesIO(data))
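Taken together, the write/read tests above amount to the usual round-trip pattern; a minimal standalone sketch:

    import io
    import pyarrow as pa
    from pyarrow import csv

    t = pa.table({'c1': [1, 2, 3], 'c2': ['a', 'b', 'c']})
    buf = io.BytesIO()
    csv.write_csv(t, buf, csv.WriteOptions(include_header=True))
    buf.seek(0)
    assert t == csv.read_csv(buf)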
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_cuda.py
ADDED
@@ -0,0 +1,794 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+UNTESTED:
+read_message
+"""
+
+import sys
+import sysconfig
+
+import pytest
+
+import pyarrow as pa
+import numpy as np
+
+
+cuda = pytest.importorskip("pyarrow.cuda")
+
+platform = sysconfig.get_platform()
+# TODO: enable ppc64 when Arrow C++ supports IPC in ppc64 systems:
+has_ipc_support = platform == 'linux-x86_64'  # or 'ppc64' in platform
+
+cuda_ipc = pytest.mark.skipif(
+    not has_ipc_support,
+    reason='CUDA IPC not supported in platform `%s`' % (platform))
+
+global_context = None  # for flake8
+global_context1 = None  # for flake8
+
+
+def setup_module(module):
+    module.global_context = cuda.Context(0)
+    module.global_context1 = cuda.Context(cuda.Context.get_num_devices() - 1)
+
+
+def teardown_module(module):
+    del module.global_context
+
+
+def test_Context():
+    assert cuda.Context.get_num_devices() > 0
+    assert global_context.device_number == 0
+    assert global_context1.device_number == cuda.Context.get_num_devices() - 1
+
+    with pytest.raises(ValueError,
+                       match=("device_number argument must "
+                              "be non-negative less than")):
+        cuda.Context(cuda.Context.get_num_devices())
+
+
+@pytest.mark.parametrize("size", [0, 1, 1000])
+def test_manage_allocate_free_host(size):
+    buf = cuda.new_host_buffer(size)
+    arr = np.frombuffer(buf, dtype=np.uint8)
+    arr[size//4:3*size//4] = 1
+    arr_cp = arr.copy()
+    arr2 = np.frombuffer(buf, dtype=np.uint8)
+    np.testing.assert_equal(arr2, arr_cp)
+    assert buf.size == size
+
+
+def test_context_allocate_del():
+    bytes_allocated = global_context.bytes_allocated
+    cudabuf = global_context.new_buffer(128)
+    assert global_context.bytes_allocated == bytes_allocated + 128
+    del cudabuf
+    assert global_context.bytes_allocated == bytes_allocated
+
+
+def make_random_buffer(size, target='host'):
+    """Return a host or device buffer with random data.
+    """
+    if target == 'host':
+        assert size >= 0
+        buf = pa.allocate_buffer(size)
+        assert buf.size == size
+        arr = np.frombuffer(buf, dtype=np.uint8)
+        assert arr.size == size
+        arr[:] = np.random.randint(low=1, high=255, size=size, dtype=np.uint8)
+        assert arr.sum() > 0 or size == 0
+        arr_ = np.frombuffer(buf, dtype=np.uint8)
+        np.testing.assert_equal(arr, arr_)
+        return arr, buf
+    elif target == 'device':
+        arr, buf = make_random_buffer(size, target='host')
+        dbuf = global_context.new_buffer(size)
+        assert dbuf.size == size
+        dbuf.copy_from_host(buf, position=0, nbytes=size)
+        return arr, dbuf
+    raise ValueError('invalid target value')
+
+
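The helper above is the basic host-to-device round trip; the same calls work standalone on a CUDA-enabled pyarrow build. A minimal sketch (requires a GPU):

    import numpy as np
    from pyarrow import cuda

    ctx = cuda.Context(0)                       # first CUDA device
    arr = np.arange(16, dtype=np.uint8)
    cbuf = ctx.buffer_from_data(arr)            # copies host data to device
    back = np.frombuffer(cbuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr, back)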
+@pytest.mark.parametrize("size", [0, 1, 1000])
+def test_context_device_buffer(size):
+    # Creating device buffer from host buffer;
+    arr, buf = make_random_buffer(size)
+    cudabuf = global_context.buffer_from_data(buf)
+    assert cudabuf.size == size
+    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr, arr2)
+
+    # CudaBuffer does not support buffer protocol
+    with pytest.raises(BufferError):
+        memoryview(cudabuf)
+
+    # Creating device buffer from array:
+    cudabuf = global_context.buffer_from_data(arr)
+    assert cudabuf.size == size
+    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr, arr2)
+
+    # Creating device buffer from bytes:
+    cudabuf = global_context.buffer_from_data(arr.tobytes())
+    assert cudabuf.size == size
+    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr, arr2)
+
+    # Creating a device buffer from another device buffer, view:
+    cudabuf2 = cudabuf.slice(0, cudabuf.size)
+    assert cudabuf2.size == size
+    arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr, arr2)
+
+    if size > 1:
+        cudabuf2.copy_from_host(arr[size//2:])
+        arr3 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+        np.testing.assert_equal(np.concatenate((arr[size//2:], arr[size//2:])),
+                                arr3)
+        cudabuf2.copy_from_host(arr[:size//2])  # restoring arr
+
+    # Creating a device buffer from another device buffer, copy:
+    cudabuf2 = global_context.buffer_from_data(cudabuf)
+    assert cudabuf2.size == size
+    arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr, arr2)
+
+    cudabuf2.copy_from_host(arr[size//2:])
+    arr3 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr, arr3)
+
+    # Slice of a device buffer
+    cudabuf2 = cudabuf.slice(0, cudabuf.size+10)
+    assert cudabuf2.size == size
+    arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr, arr2)
+
+    cudabuf2 = cudabuf.slice(size//4, size+10)
+    assert cudabuf2.size == size - size//4
+    arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr[size//4:], arr2)
+
+    # Creating a device buffer from a slice of host buffer
+    soffset = size//4
+    ssize = 2*size//4
+    cudabuf = global_context.buffer_from_data(buf, offset=soffset,
+                                              size=ssize)
+    assert cudabuf.size == ssize
+    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)
+
+    cudabuf = global_context.buffer_from_data(buf.slice(offset=soffset,
+                                                        length=ssize))
+    assert cudabuf.size == ssize
+    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)
+
+    # Creating a device buffer from a slice of an array
+    cudabuf = global_context.buffer_from_data(arr, offset=soffset, size=ssize)
+    assert cudabuf.size == ssize
+    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)
+
+    cudabuf = global_context.buffer_from_data(arr[soffset:soffset+ssize])
+    assert cudabuf.size == ssize
+    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)
+
+    # Creating a device buffer from a slice of bytes
+    cudabuf = global_context.buffer_from_data(arr.tobytes(),
+                                              offset=soffset,
+                                              size=ssize)
+    assert cudabuf.size == ssize
+    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)
+
+    # Creating a device buffer from size
+    cudabuf = global_context.new_buffer(size)
+    assert cudabuf.size == size
+
+    # Creating device buffer from a slice of another device buffer:
+    cudabuf = global_context.buffer_from_data(arr)
+    cudabuf2 = cudabuf.slice(soffset, ssize)
+    assert cudabuf2.size == ssize
+    arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr[soffset:soffset+ssize], arr2)
+
+    # Creating device buffer from HostBuffer
+
+    buf = cuda.new_host_buffer(size)
+    arr_ = np.frombuffer(buf, dtype=np.uint8)
+    arr_[:] = arr
+    cudabuf = global_context.buffer_from_data(buf)
+    assert cudabuf.size == size
+    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr, arr2)
+
+    # Creating device buffer from HostBuffer slice
+
+    cudabuf = global_context.buffer_from_data(buf, offset=soffset, size=ssize)
+    assert cudabuf.size == ssize
+    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr[soffset:soffset+ssize], arr2)
+
+    cudabuf = global_context.buffer_from_data(
+        buf.slice(offset=soffset, length=ssize))
+    assert cudabuf.size == ssize
+    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr[soffset:soffset+ssize], arr2)
+
+
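Device buffer slices are zero-copy views into the same device memory, which is why writing through a slice above is visible through the parent buffer. A minimal sketch (requires a GPU):

    import numpy as np
    from pyarrow import cuda

    ctx = cuda.Context(0)
    cbuf = ctx.buffer_from_data(np.arange(8, dtype=np.uint8))
    view = cbuf.slice(2, 4)  # a view, not a copy
    part = np.frombuffer(view.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(part, np.arange(2, 6, dtype=np.uint8))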
+@pytest.mark.parametrize("size", [0, 1, 1000])
+def test_context_from_object(size):
+    ctx = global_context
+    arr, cbuf = make_random_buffer(size, target='device')
+    dtype = arr.dtype
+
+    # Creating device buffer from a CUDA host buffer
+    hbuf = cuda.new_host_buffer(size * arr.dtype.itemsize)
+    np.frombuffer(hbuf, dtype=dtype)[:] = arr
+    cbuf2 = ctx.buffer_from_object(hbuf)
+    assert cbuf2.size == cbuf.size
+    arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
+    np.testing.assert_equal(arr, arr2)
+
+    # Creating device buffer from a device buffer
+    cbuf2 = ctx.buffer_from_object(cbuf2)
+    assert cbuf2.size == cbuf.size
+    arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
+    np.testing.assert_equal(arr, arr2)
+
+    # Trying to create a device buffer from a Buffer
+    with pytest.raises(pa.ArrowTypeError,
+                       match=('buffer is not backed by a CudaBuffer')):
+        ctx.buffer_from_object(pa.py_buffer(b"123"))
+
+    # Trying to create a device buffer from numpy.array
+    with pytest.raises(pa.ArrowTypeError,
+                       match=("cannot create device buffer view from "
+                              ".* \'numpy.ndarray\'")):
+        ctx.buffer_from_object(np.array([1, 2, 3]))
+
+
+def test_foreign_buffer():
+    ctx = global_context
+    dtype = np.dtype(np.uint8)
+    size = 10
+    hbuf = cuda.new_host_buffer(size * dtype.itemsize)
+
+    # test host buffer memory reference counting
+    rc = sys.getrefcount(hbuf)
+    fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size, hbuf)
+    assert sys.getrefcount(hbuf) == rc + 1
+    del fbuf
+    assert sys.getrefcount(hbuf) == rc
+
+    # test postponed deallocation of host buffer memory
+    fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size, hbuf)
+    del hbuf
+    fbuf.copy_to_host()
+
+    # test deallocating the host buffer memory making it inaccessible
+    hbuf = cuda.new_host_buffer(size * dtype.itemsize)
+    fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size)
+    del hbuf
+    with pytest.raises(pa.ArrowIOError,
+                       match=('Cuda error ')):
+        fbuf.copy_to_host()
+
+
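foreign_buffer wraps memory that Arrow did not allocate; passing the owning object as the base argument keeps that memory alive, which is exactly what the refcount checks above verify. A minimal sketch (requires a GPU):

    from pyarrow import cuda

    ctx = cuda.Context(0)
    hbuf = cuda.new_host_buffer(16)
    fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size, hbuf)  # hbuf is the base
    del hbuf            # memory stays valid while fbuf exists
    fbuf.copy_to_host()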
+@pytest.mark.parametrize("size", [0, 1, 1000])
+def test_CudaBuffer(size):
+    arr, buf = make_random_buffer(size)
+    assert arr.tobytes() == buf.to_pybytes()
+    cbuf = global_context.buffer_from_data(buf)
+    assert cbuf.size == size
+    assert not cbuf.is_cpu
+    assert arr.tobytes() == cbuf.to_pybytes()
+    if size > 0:
+        assert cbuf.address > 0
+
+    for i in range(size):
+        assert cbuf[i] == arr[i]
+
+    for s in [
+            slice(None),
+            slice(size//4, size//2),
+    ]:
+        assert cbuf[s].to_pybytes() == arr[s].tobytes()
+
+    sbuf = cbuf.slice(size//4, size//2)
+    assert sbuf.parent == cbuf
+
+    with pytest.raises(TypeError,
+                       match="Do not call CudaBuffer's constructor directly"):
+        cuda.CudaBuffer()
+
+
+@pytest.mark.parametrize("size", [0, 1, 1000])
+def test_HostBuffer(size):
+    arr, buf = make_random_buffer(size)
+    assert arr.tobytes() == buf.to_pybytes()
+    hbuf = cuda.new_host_buffer(size)
+    np.frombuffer(hbuf, dtype=np.uint8)[:] = arr
+    assert hbuf.size == size
+    assert hbuf.is_cpu
+    assert arr.tobytes() == hbuf.to_pybytes()
+    for i in range(size):
+        assert hbuf[i] == arr[i]
+    for s in [
+            slice(None),
+            slice(size//4, size//2),
+    ]:
+        assert hbuf[s].to_pybytes() == arr[s].tobytes()
+
+    sbuf = hbuf.slice(size//4, size//2)
+    assert sbuf.parent == hbuf
+
+    del hbuf
+
+    with pytest.raises(TypeError,
+                       match="Do not call HostBuffer's constructor directly"):
+        cuda.HostBuffer()
+
+
+@pytest.mark.parametrize("size", [0, 1, 1000])
+def test_copy_from_to_host(size):
+    # Create a buffer in host containing range(size)
+    dt = np.dtype('uint16')
+    nbytes = size * dt.itemsize
+    buf = pa.allocate_buffer(nbytes, resizable=True)  # in host
+    assert isinstance(buf, pa.Buffer)
+    assert not isinstance(buf, cuda.CudaBuffer)
+    arr = np.frombuffer(buf, dtype=dt)
+    assert arr.size == size
+    arr[:] = range(size)
+    arr_ = np.frombuffer(buf, dtype=dt)
+    np.testing.assert_equal(arr, arr_)
+
+    # Create a device buffer of the same size and copy from host
+    device_buffer = global_context.new_buffer(nbytes)
+    assert isinstance(device_buffer, cuda.CudaBuffer)
+    assert isinstance(device_buffer, pa.Buffer)
+    assert device_buffer.size == nbytes
+    assert not device_buffer.is_cpu
+    device_buffer.copy_from_host(buf, position=0, nbytes=nbytes)
+
+    # Copy back to host and compare contents
+    buf2 = device_buffer.copy_to_host(position=0, nbytes=nbytes)
+    arr2 = np.frombuffer(buf2, dtype=dt)
+    np.testing.assert_equal(arr, arr2)
+
+
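new_host_buffer allocates CUDA-pinned host memory which, unlike a device buffer, supports the Python buffer protocol, as the is_cpu assertions above show. A minimal sketch (requires a GPU; assumes the CUDA runtime initializes implicitly as in the tests):

    import numpy as np
    from pyarrow import cuda

    hbuf = cuda.new_host_buffer(4)
    np.frombuffer(hbuf, dtype=np.uint8)[:] = [1, 2, 3, 4]
    assert hbuf.is_cpu
    assert hbuf.to_pybytes() == b'\x01\x02\x03\x04'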
+@pytest.mark.parametrize("size", [0, 1, 1000])
+def test_copy_to_host(size):
+    arr, dbuf = make_random_buffer(size, target='device')
+
+    buf = dbuf.copy_to_host()
+    assert buf.is_cpu
+    np.testing.assert_equal(arr, np.frombuffer(buf, dtype=np.uint8))
+
+    buf = dbuf.copy_to_host(position=size//4)
+    assert buf.is_cpu
+    np.testing.assert_equal(arr[size//4:], np.frombuffer(buf, dtype=np.uint8))
+
+    buf = dbuf.copy_to_host(position=size//4, nbytes=size//8)
+    assert buf.is_cpu
+    np.testing.assert_equal(arr[size//4:size//4+size//8],
+                            np.frombuffer(buf, dtype=np.uint8))
+
+    buf = dbuf.copy_to_host(position=size//4, nbytes=0)
+    assert buf.is_cpu
+    assert buf.size == 0
+
+    for (position, nbytes) in [
+        (size+2, -1), (-2, -1), (size+1, 0), (-3, 0),
+    ]:
+        with pytest.raises(ValueError,
+                           match='position argument is out-of-range'):
+            dbuf.copy_to_host(position=position, nbytes=nbytes)
+
+    for (position, nbytes) in [
+        (0, size+1), (size//2, (size+1)//2+1), (size, 1)
+    ]:
+        with pytest.raises(ValueError,
+                           match=('requested more to copy than'
+                                  ' available from device buffer')):
+            dbuf.copy_to_host(position=position, nbytes=nbytes)
+
+    buf = pa.allocate_buffer(size//4)
+    dbuf.copy_to_host(buf=buf)
+    np.testing.assert_equal(arr[:size//4], np.frombuffer(buf, dtype=np.uint8))
+
+    if size < 12:
+        return
+
+    dbuf.copy_to_host(buf=buf, position=12)
+    np.testing.assert_equal(arr[12:12+size//4],
+                            np.frombuffer(buf, dtype=np.uint8))
+
+    dbuf.copy_to_host(buf=buf, nbytes=12)
+    np.testing.assert_equal(arr[:12], np.frombuffer(buf, dtype=np.uint8)[:12])
+
+    dbuf.copy_to_host(buf=buf, nbytes=12, position=6)
+    np.testing.assert_equal(arr[6:6+12],
+                            np.frombuffer(buf, dtype=np.uint8)[:12])
+
+    for (position, nbytes) in [
+        (0, size+10), (10, size-5),
+        (0, size//2), (size//4, size//4+1)
+    ]:
+        with pytest.raises(ValueError,
+                           match=('requested copy does not '
+                                  'fit into host buffer')):
+            dbuf.copy_to_host(buf=buf, position=position, nbytes=nbytes)
+
+
+@pytest.mark.parametrize("dest_ctx", ['same', 'another'])
+@pytest.mark.parametrize("size", [0, 1, 1000])
+def test_copy_from_device(dest_ctx, size):
+    arr, buf = make_random_buffer(size=size, target='device')
+    lst = arr.tolist()
+    if dest_ctx == 'another':
+        dest_ctx = global_context1
+        if buf.context.device_number == dest_ctx.device_number:
+            pytest.skip("not a multi-GPU system")
+    else:
+        dest_ctx = buf.context
+    dbuf = dest_ctx.new_buffer(size)
+
+    def put(*args, **kwargs):
+        dbuf.copy_from_device(buf, *args, **kwargs)
+        rbuf = dbuf.copy_to_host()
+        return np.frombuffer(rbuf, dtype=np.uint8).tolist()
+    assert put() == lst
+    if size > 4:
+        assert put(position=size//4) == lst[:size//4]+lst[:-size//4]
+        assert put() == lst
+        assert put(position=1, nbytes=size//2) == \
+            lst[:1] + lst[:size//2] + lst[-(size-size//2-1):]
+
+    for (position, nbytes) in [
+        (size+2, -1), (-2, -1), (size+1, 0), (-3, 0),
+    ]:
+        with pytest.raises(ValueError,
+                           match='position argument is out-of-range'):
+            put(position=position, nbytes=nbytes)
+
+    for (position, nbytes) in [
+        (0, size+1),
+    ]:
+        with pytest.raises(ValueError,
+                           match=('requested more to copy than'
+                                  ' available from device buffer')):
+            put(position=position, nbytes=nbytes)
+
+    if size < 4:
+        return
+
+    for (position, nbytes) in [
+        (size//2, (size+1)//2+1)
+    ]:
+        with pytest.raises(ValueError,
+                           match=('requested more to copy than'
+                                  ' available in device buffer')):
+            put(position=position, nbytes=nbytes)
+
+
+@pytest.mark.parametrize("size", [0, 1, 1000])
+def test_copy_from_host(size):
+    arr, buf = make_random_buffer(size=size, target='host')
+    lst = arr.tolist()
+    dbuf = global_context.new_buffer(size)
+
+    def put(*args, **kwargs):
+        dbuf.copy_from_host(buf, *args, **kwargs)
+        rbuf = dbuf.copy_to_host()
+        return np.frombuffer(rbuf, dtype=np.uint8).tolist()
+    assert put() == lst
+    if size > 4:
+        assert put(position=size//4) == lst[:size//4]+lst[:-size//4]
+        assert put() == lst
+        assert put(position=1, nbytes=size//2) == \
+            lst[:1] + lst[:size//2] + lst[-(size-size//2-1):]
+
+    for (position, nbytes) in [
+        (size+2, -1), (-2, -1), (size+1, 0), (-3, 0),
+    ]:
+        with pytest.raises(ValueError,
+                           match='position argument is out-of-range'):
+            put(position=position, nbytes=nbytes)
+
+    for (position, nbytes) in [
+        (0, size+1),
+    ]:
+        with pytest.raises(ValueError,
+                           match=('requested more to copy than'
+                                  ' available from host buffer')):
+            put(position=position, nbytes=nbytes)
+
+    if size < 4:
+        return
+
+    for (position, nbytes) in [
+        (size//2, (size+1)//2+1)
+    ]:
+        with pytest.raises(ValueError,
+                           match=('requested more to copy than'
+                                  ' available in device buffer')):
+            put(position=position, nbytes=nbytes)
+
+
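The position/nbytes arguments validated at length above allow partial copies in both directions; a minimal sketch (requires a GPU):

    import numpy as np
    from pyarrow import cuda

    ctx = cuda.Context(0)
    dbuf = ctx.buffer_from_data(np.arange(8, dtype=np.uint8))
    part = dbuf.copy_to_host(position=2, nbytes=4)   # bytes 2..5 only
    assert part.to_pybytes() == bytes([2, 3, 4, 5])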
+def test_BufferWriter():
+    def allocate(size):
+        cbuf = global_context.new_buffer(size)
+        writer = cuda.BufferWriter(cbuf)
+        return cbuf, writer
+
+    def test_writes(total_size, chunksize, buffer_size=0):
+        cbuf, writer = allocate(total_size)
+        arr, buf = make_random_buffer(size=total_size, target='host')
+
+        if buffer_size > 0:
+            writer.buffer_size = buffer_size
+
+        position = writer.tell()
+        assert position == 0
+        writer.write(buf.slice(length=chunksize))
+        assert writer.tell() == chunksize
+        writer.seek(0)
+        position = writer.tell()
+        assert position == 0
+
+        while position < total_size:
+            bytes_to_write = min(chunksize, total_size - position)
+            writer.write(buf.slice(offset=position, length=bytes_to_write))
+            position += bytes_to_write
+
+        writer.flush()
+        assert cbuf.size == total_size
+        cbuf.context.synchronize()
+        buf2 = cbuf.copy_to_host()
+        cbuf.context.synchronize()
+        assert buf2.size == total_size
+        arr2 = np.frombuffer(buf2, dtype=np.uint8)
+        np.testing.assert_equal(arr, arr2)
+
+    total_size, chunk_size = 1 << 16, 1000
+    test_writes(total_size, chunk_size)
+    test_writes(total_size, chunk_size, total_size // 16)
+
+    cbuf, writer = allocate(100)
+    writer.write(np.arange(100, dtype=np.uint8))
+    writer.writeat(50, np.arange(25, dtype=np.uint8))
+    writer.write(np.arange(25, dtype=np.uint8))
+    writer.flush()
+
+    arr = np.frombuffer(cbuf.copy_to_host(), np.uint8)
+    np.testing.assert_equal(arr[:50], np.arange(50, dtype=np.uint8))
+    np.testing.assert_equal(arr[50:75], np.arange(25, dtype=np.uint8))
+    np.testing.assert_equal(arr[75:], np.arange(25, dtype=np.uint8))
+
+
+def test_BufferWriter_edge_cases():
+    # edge cases, see cuda-test.cc for more information:
+    size = 1000
+    cbuf = global_context.new_buffer(size)
+    writer = cuda.BufferWriter(cbuf)
+    arr, buf = make_random_buffer(size=size, target='host')
+
+    assert writer.buffer_size == 0
+    writer.buffer_size = 100
+    assert writer.buffer_size == 100
+
+    writer.write(buf.slice(length=0))
+    assert writer.tell() == 0
+
+    writer.write(buf.slice(length=10))
+    writer.buffer_size = 200
+    assert writer.buffer_size == 200
+    assert writer.num_bytes_buffered == 0
+
+    writer.write(buf.slice(offset=10, length=300))
+    assert writer.num_bytes_buffered == 0
+
+    writer.write(buf.slice(offset=310, length=200))
+    assert writer.num_bytes_buffered == 0
+
+    writer.write(buf.slice(offset=510, length=390))
+    writer.write(buf.slice(offset=900, length=100))
+
+    writer.flush()
+
+    buf2 = cbuf.copy_to_host()
+    assert buf2.size == size
+    arr2 = np.frombuffer(buf2, dtype=np.uint8)
+    np.testing.assert_equal(arr, arr2)
+
+
+def test_BufferReader():
+    size = 1000
+    arr, cbuf = make_random_buffer(size=size, target='device')
+
+    reader = cuda.BufferReader(cbuf)
+    reader.seek(950)
+    assert reader.tell() == 950
+
+    data = reader.read(100)
+    assert len(data) == 50
+    assert reader.tell() == 1000
+
+    reader.seek(925)
+    arr2 = np.zeros(100, dtype=np.uint8)
+    n = reader.readinto(arr2)
+    assert n == 75
+    assert reader.tell() == 1000
+    np.testing.assert_equal(arr[925:], arr2[:75])
+
+    reader.seek(0)
+    assert reader.tell() == 0
+    buf2 = reader.read_buffer()
+    arr2 = np.frombuffer(buf2.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr, arr2)
+
+
+def test_BufferReader_zero_size():
+    arr, cbuf = make_random_buffer(size=0, target='device')
+    reader = cuda.BufferReader(cbuf)
+    reader.seek(0)
+    data = reader.read()
+    assert len(data) == 0
+    assert reader.tell() == 0
+    buf2 = reader.read_buffer()
+    arr2 = np.frombuffer(buf2.copy_to_host(), dtype=np.uint8)
+    np.testing.assert_equal(arr, arr2)
+
+
+def make_recordbatch(length):
+    schema = pa.schema([pa.field('f0', pa.int16()),
+                        pa.field('f1', pa.int16())])
+    a0 = pa.array(np.random.randint(0, 255, size=length, dtype=np.int16))
+    a1 = pa.array(np.random.randint(0, 255, size=length, dtype=np.int16))
+    batch = pa.record_batch([a0, a1], schema=schema)
+    return batch
+
+
+def test_batch_serialize():
+    batch = make_recordbatch(10)
|
673 |
+
hbuf = batch.serialize()
|
674 |
+
cbuf = cuda.serialize_record_batch(batch, global_context)
|
675 |
+
|
676 |
+
# Test that read_record_batch works properly
|
677 |
+
cbatch = cuda.read_record_batch(cbuf, batch.schema)
|
678 |
+
assert isinstance(cbatch, pa.RecordBatch)
|
679 |
+
assert batch.schema == cbatch.schema
|
680 |
+
assert batch.num_columns == cbatch.num_columns
|
681 |
+
assert batch.num_rows == cbatch.num_rows
|
682 |
+
|
683 |
+
# Deserialize CUDA-serialized batch on host
|
684 |
+
buf = cbuf.copy_to_host()
|
685 |
+
assert hbuf.equals(buf)
|
686 |
+
batch2 = pa.ipc.read_record_batch(buf, batch.schema)
|
687 |
+
assert hbuf.equals(batch2.serialize())
|
688 |
+
|
689 |
+
assert batch.num_columns == batch2.num_columns
|
690 |
+
assert batch.num_rows == batch2.num_rows
|
691 |
+
assert batch.column(0).equals(batch2.column(0))
|
692 |
+
assert batch.equals(batch2)
|
693 |
+
|
694 |
+
|
695 |
+
def make_table():
|
696 |
+
a0 = pa.array([0, 1, 42, None], type=pa.int16())
|
697 |
+
a1 = pa.array([[0, 1], [2], [], None], type=pa.list_(pa.int32()))
|
698 |
+
a2 = pa.array([("ab", True), ("cde", False), (None, None), None],
|
699 |
+
type=pa.struct([("strs", pa.utf8()),
|
700 |
+
("bools", pa.bool_())]))
|
701 |
+
# Dictionaries are validated on the IPC read path, but that can produce
|
702 |
+
# issues for GPU-located dictionaries. Check that they work fine.
|
703 |
+
a3 = pa.DictionaryArray.from_arrays(
|
704 |
+
indices=[0, 1, 1, None],
|
705 |
+
dictionary=pa.array(['foo', 'bar']))
|
706 |
+
a4 = pa.DictionaryArray.from_arrays(
|
707 |
+
indices=[2, 1, 2, None],
|
708 |
+
dictionary=a1)
|
709 |
+
a5 = pa.DictionaryArray.from_arrays(
|
710 |
+
indices=[2, 1, 0, None],
|
711 |
+
dictionary=a2)
|
712 |
+
|
713 |
+
arrays = [a0, a1, a2, a3, a4, a5]
|
714 |
+
schema = pa.schema([('f{}'.format(i), arr.type)
|
715 |
+
for i, arr in enumerate(arrays)])
|
716 |
+
batch = pa.record_batch(arrays, schema=schema)
|
717 |
+
table = pa.Table.from_batches([batch])
|
718 |
+
return table
|
719 |
+
|
720 |
+
|
721 |
+
def make_table_cuda():
|
722 |
+
htable = make_table()
|
723 |
+
# Serialize the host table to bytes
|
724 |
+
sink = pa.BufferOutputStream()
|
725 |
+
with pa.ipc.new_stream(sink, htable.schema) as out:
|
726 |
+
out.write_table(htable)
|
727 |
+
hbuf = pa.py_buffer(sink.getvalue().to_pybytes())
|
728 |
+
|
729 |
+
# Copy the host bytes to a device buffer
|
730 |
+
dbuf = global_context.new_buffer(len(hbuf))
|
731 |
+
dbuf.copy_from_host(hbuf, nbytes=len(hbuf))
|
732 |
+
# Deserialize the device buffer into a Table
|
733 |
+
dtable = pa.ipc.open_stream(cuda.BufferReader(dbuf)).read_all()
|
734 |
+
return hbuf, htable, dbuf, dtable
|
735 |
+
|
736 |
+
|
737 |
+
def test_table_deserialize():
|
738 |
+
# ARROW-9659: make sure that we can deserialize a GPU-located table
|
739 |
+
# without crashing when initializing or validating the underlying arrays.
|
740 |
+
hbuf, htable, dbuf, dtable = make_table_cuda()
|
741 |
+
# Assert basic fields the same between host and device tables
|
742 |
+
assert htable.schema == dtable.schema
|
743 |
+
assert htable.num_rows == dtable.num_rows
|
744 |
+
assert htable.num_columns == dtable.num_columns
|
745 |
+
# Assert byte-level equality
|
746 |
+
assert hbuf.equals(dbuf.copy_to_host())
|
747 |
+
# Copy DtoH and assert the tables are still equivalent
|
748 |
+
assert htable.equals(pa.ipc.open_stream(
|
749 |
+
dbuf.copy_to_host()
|
750 |
+
).read_all())
|
751 |
+
|
752 |
+
|
753 |
+
def test_create_table_with_device_buffers():
|
754 |
+
# ARROW-11872: make sure that we can create an Arrow Table from
|
755 |
+
# GPU-located Arrays without crashing.
|
756 |
+
hbuf, htable, dbuf, dtable = make_table_cuda()
|
757 |
+
# Construct a new Table from the device Table
|
758 |
+
dtable2 = pa.Table.from_arrays(dtable.columns, dtable.column_names)
|
759 |
+
# Assert basic fields the same between host and device tables
|
760 |
+
assert htable.schema == dtable2.schema
|
761 |
+
assert htable.num_rows == dtable2.num_rows
|
762 |
+
assert htable.num_columns == dtable2.num_columns
|
763 |
+
# Assert byte-level equality
|
764 |
+
assert hbuf.equals(dbuf.copy_to_host())
|
765 |
+
# Copy DtoH and assert the tables are still equivalent
|
766 |
+
assert htable.equals(pa.ipc.open_stream(
|
767 |
+
dbuf.copy_to_host()
|
768 |
+
).read_all())
|
769 |
+
|
770 |
+
|
771 |
+
def other_process_for_test_IPC(handle_buffer, expected_arr):
|
772 |
+
other_context = pa.cuda.Context(0)
|
773 |
+
ipc_handle = pa.cuda.IpcMemHandle.from_buffer(handle_buffer)
|
774 |
+
ipc_buf = other_context.open_ipc_buffer(ipc_handle)
|
775 |
+
ipc_buf.context.synchronize()
|
776 |
+
buf = ipc_buf.copy_to_host()
|
777 |
+
assert buf.size == expected_arr.size, repr((buf.size, expected_arr.size))
|
778 |
+
arr = np.frombuffer(buf, dtype=expected_arr.dtype)
|
779 |
+
np.testing.assert_equal(arr, expected_arr)
|
780 |
+
|
781 |
+
|
782 |
+
@cuda_ipc
|
783 |
+
@pytest.mark.parametrize("size", [0, 1, 1000])
|
784 |
+
def test_IPC(size):
|
785 |
+
import multiprocessing
|
786 |
+
ctx = multiprocessing.get_context('spawn')
|
787 |
+
arr, cbuf = make_random_buffer(size=size, target='device')
|
788 |
+
ipc_handle = cbuf.export_for_ipc()
|
789 |
+
handle_buffer = ipc_handle.serialize()
|
790 |
+
p = ctx.Process(target=other_process_for_test_IPC,
|
791 |
+
args=(handle_buffer, arr))
|
792 |
+
p.start()
|
793 |
+
p.join()
|
794 |
+
assert p.exitcode == 0
|
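
For orientation, a minimal sketch of the host-to-device round-trip these tests exercise, assuming a CUDA-capable GPU and a pyarrow build with CUDA enabled (variable names are illustrative):

import numpy as np
import pyarrow as pa
from pyarrow import cuda

ctx = cuda.Context(0)                                 # first GPU
host = pa.py_buffer(np.arange(16, dtype=np.uint8).tobytes())
dbuf = ctx.new_buffer(host.size)                      # device allocation
dbuf.copy_from_host(host, position=0, nbytes=host.size)
back = np.frombuffer(dbuf.copy_to_host(), dtype=np.uint8)
assert back.tolist() == list(range(16))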
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_cuda_numba_interop.py
ADDED
@@ -0,0 +1,235 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import pytest
+import pyarrow as pa
+import numpy as np
+
+dtypes = ['uint8', 'int16', 'float32']
+cuda = pytest.importorskip("pyarrow.cuda")
+nb_cuda = pytest.importorskip("numba.cuda")
+
+from numba.cuda.cudadrv.devicearray import DeviceNDArray  # noqa: E402
+
+
+context_choices = None
+context_choice_ids = ['pyarrow.cuda', 'numba.cuda']
+
+
+def setup_module(module):
+    np.random.seed(1234)
+    ctx1 = cuda.Context()
+    nb_ctx1 = ctx1.to_numba()
+    nb_ctx2 = nb_cuda.current_context()
+    ctx2 = cuda.Context.from_numba(nb_ctx2)
+    module.context_choices = [(ctx1, nb_ctx1), (ctx2, nb_ctx2)]
+
+
+def teardown_module(module):
+    del module.context_choices
+
+
+@pytest.mark.parametrize("c", range(len(context_choice_ids)),
+                         ids=context_choice_ids)
+def test_context(c):
+    ctx, nb_ctx = context_choices[c]
+    assert ctx.handle == nb_ctx.handle.value
+    assert ctx.handle == ctx.to_numba().handle.value
+    ctx2 = cuda.Context.from_numba(nb_ctx)
+    assert ctx.handle == ctx2.handle
+    size = 10
+    buf = ctx.new_buffer(size)
+    assert ctx.handle == buf.context.handle
+
+
+def make_random_buffer(size, target='host', dtype='uint8', ctx=None):
+    """Return a host or device buffer with random data.
+    """
+    dtype = np.dtype(dtype)
+    if target == 'host':
+        assert size >= 0
+        buf = pa.allocate_buffer(size*dtype.itemsize)
+        arr = np.frombuffer(buf, dtype=dtype)
+        arr[:] = np.random.randint(low=0, high=255, size=size,
+                                   dtype=np.uint8)
+        return arr, buf
+    elif target == 'device':
+        arr, buf = make_random_buffer(size, target='host', dtype=dtype)
+        dbuf = ctx.new_buffer(size * dtype.itemsize)
+        dbuf.copy_from_host(buf, position=0, nbytes=buf.size)
+        return arr, dbuf
+    raise ValueError('invalid target value')
+
+
+@pytest.mark.parametrize("c", range(len(context_choice_ids)),
+                         ids=context_choice_ids)
+@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
+@pytest.mark.parametrize("size", [0, 1, 8, 1000])
+def test_from_object(c, dtype, size):
+    ctx, nb_ctx = context_choices[c]
+    arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)
+
+    # Creating device buffer from numba DeviceNDArray:
+    darr = nb_cuda.to_device(arr)
+    cbuf2 = ctx.buffer_from_object(darr)
+    assert cbuf2.size == cbuf.size
+    arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
+    np.testing.assert_equal(arr, arr2)
+
+    # Creating device buffer from a slice of numba DeviceNDArray:
+    if size >= 8:
+        # 1-D arrays
+        for s in [slice(size//4, None, None),
+                  slice(size//4, -(size//4), None)]:
+            cbuf2 = ctx.buffer_from_object(darr[s])
+            arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
+            np.testing.assert_equal(arr[s], arr2)
+
+        # cannot test negative strides due to numba bug, see its issue 3705
+        if 0:
+            rdarr = darr[::-1]
+            cbuf2 = ctx.buffer_from_object(rdarr)
+            assert cbuf2.size == cbuf.size
+            arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
+            np.testing.assert_equal(arr, arr2)
+
+        with pytest.raises(ValueError,
+                           match=('array data is non-contiguous')):
+            ctx.buffer_from_object(darr[::2])
+
+        # a rectangular 2-D array
+        s1 = size//4
+        s2 = size//s1
+        assert s1 * s2 == size
+        cbuf2 = ctx.buffer_from_object(darr.reshape(s1, s2))
+        assert cbuf2.size == cbuf.size
+        arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
+        np.testing.assert_equal(arr, arr2)
+
+        with pytest.raises(ValueError,
+                           match=('array data is non-contiguous')):
+            ctx.buffer_from_object(darr.reshape(s1, s2)[:, ::2])
+
+        # a 3-D array
+        s1 = 4
+        s2 = size//8
+        s3 = size//(s1*s2)
+        assert s1 * s2 * s3 == size
+        cbuf2 = ctx.buffer_from_object(darr.reshape(s1, s2, s3))
+        assert cbuf2.size == cbuf.size
+        arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
+        np.testing.assert_equal(arr, arr2)
+
+        with pytest.raises(ValueError,
+                           match=('array data is non-contiguous')):
+            ctx.buffer_from_object(darr.reshape(s1, s2, s3)[::2])
+
+    # Creating device buffer from an object implementing the CUDA array
+    # interface:
+    class MyObj:
+        def __init__(self, darr):
+            self.darr = darr
+
+        @property
+        def __cuda_array_interface__(self):
+            return self.darr.__cuda_array_interface__
+
+    cbuf2 = ctx.buffer_from_object(MyObj(darr))
+    assert cbuf2.size == cbuf.size
+    arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
+    np.testing.assert_equal(arr, arr2)
+
+
+@pytest.mark.parametrize("c", range(len(context_choice_ids)),
+                         ids=context_choice_ids)
+@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
+def test_numba_memalloc(c, dtype):
+    ctx, nb_ctx = context_choices[c]
+    dtype = np.dtype(dtype)
+    # Allocate memory using numba context
+    # Warning: this will not be reflected in the pyarrow context manager
+    # (e.g. bytes_allocated does not change)
+    size = 10
+    mem = nb_ctx.memalloc(size * dtype.itemsize)
+    darr = DeviceNDArray((size,), (dtype.itemsize,), dtype, gpu_data=mem)
+    darr[:5] = 99
+    darr[5:] = 88
+    np.testing.assert_equal(darr.copy_to_host()[:5], 99)
+    np.testing.assert_equal(darr.copy_to_host()[5:], 88)
+
+    # wrap numba allocated memory with CudaBuffer
+    cbuf = cuda.CudaBuffer.from_numba(mem)
+    arr2 = np.frombuffer(cbuf.copy_to_host(), dtype=dtype)
+    np.testing.assert_equal(arr2, darr.copy_to_host())
+
+
+@pytest.mark.parametrize("c", range(len(context_choice_ids)),
+                         ids=context_choice_ids)
+@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
+def test_pyarrow_memalloc(c, dtype):
+    ctx, nb_ctx = context_choices[c]
+    size = 10
+    arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)
+
+    # wrap CudaBuffer with numba device array
+    mem = cbuf.to_numba()
+    darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
+    np.testing.assert_equal(darr.copy_to_host(), arr)
+
+
+@pytest.mark.parametrize("c", range(len(context_choice_ids)),
+                         ids=context_choice_ids)
+@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
+def test_numba_context(c, dtype):
+    ctx, nb_ctx = context_choices[c]
+    size = 10
+    with nb_cuda.gpus[0]:
+        arr, cbuf = make_random_buffer(size, target='device',
+                                       dtype=dtype, ctx=ctx)
+        assert cbuf.context.handle == nb_ctx.handle.value
+        mem = cbuf.to_numba()
+        darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
+        np.testing.assert_equal(darr.copy_to_host(), arr)
+        darr[0] = 99
+        cbuf.context.synchronize()
+        arr2 = np.frombuffer(cbuf.copy_to_host(), dtype=dtype)
+        assert arr2[0] == 99
+
+
+@pytest.mark.parametrize("c", range(len(context_choice_ids)),
+                         ids=context_choice_ids)
+@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
+def test_pyarrow_jit(c, dtype):
+    ctx, nb_ctx = context_choices[c]
+
+    @nb_cuda.jit
+    def increment_by_one(an_array):
+        pos = nb_cuda.grid(1)
+        if pos < an_array.size:
+            an_array[pos] += 1
+
+    # applying a numba.cuda kernel to memory held by a CudaBuffer
+    size = 10
+    arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)
+    threadsperblock = 32
+    blockspergrid = (arr.size + (threadsperblock - 1)) // threadsperblock
+    mem = cbuf.to_numba()
+    darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
+    increment_by_one[blockspergrid, threadsperblock](darr)
+    cbuf.context.synchronize()
+    arr1 = np.frombuffer(cbuf.copy_to_host(), dtype=arr.dtype)
+    np.testing.assert_equal(arr1, arr + 1)
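
A condensed sketch of the zero-copy wrapping pattern used throughout this file, under the same GPU/numba prerequisites as the tests (toy data is illustrative):

import numpy as np
import pyarrow as pa
from pyarrow import cuda
from numba.cuda.cudadrv.devicearray import DeviceNDArray

ctx = cuda.Context()
arr = np.arange(10, dtype=np.uint8)
cbuf = ctx.new_buffer(arr.nbytes)                     # pyarrow-owned device memory
cbuf.copy_from_host(pa.py_buffer(arr.tobytes()), nbytes=arr.nbytes)
# Wrap the same device memory as a numba array, without copying:
darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=cbuf.to_numba())
np.testing.assert_equal(darr.copy_to_host(), arr)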
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_cython.py
ADDED
@@ -0,0 +1,200 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import subprocess
+import sys
+
+import pytest
+
+import pyarrow as pa
+import pyarrow.tests.util as test_util
+
+here = os.path.dirname(os.path.abspath(__file__))
+test_ld_path = os.environ.get('PYARROW_TEST_LD_PATH', '')
+if os.name == 'posix':
+    compiler_opts = ['-std=c++17']
+elif os.name == 'nt':
+    compiler_opts = ['-D_ENABLE_EXTENDED_ALIGNED_STORAGE', '/std:c++17']
+else:
+    compiler_opts = []
+
+setup_template = """if 1:
+    from setuptools import setup
+    from Cython.Build import cythonize
+
+    import numpy as np
+
+    import pyarrow as pa
+
+    ext_modules = cythonize({pyx_file!r})
+    compiler_opts = {compiler_opts!r}
+    custom_ld_path = {test_ld_path!r}
+
+    for ext in ext_modules:
+        # XXX required for numpy/numpyconfig.h,
+        # included from arrow/python/api.h
+        ext.include_dirs.append(np.get_include())
+        ext.include_dirs.append(pa.get_include())
+        ext.libraries.extend(pa.get_libraries())
+        ext.library_dirs.extend(pa.get_library_dirs())
+        if custom_ld_path:
+            ext.library_dirs.append(custom_ld_path)
+        ext.extra_compile_args.extend(compiler_opts)
+        print("Extension module:",
+              ext, ext.include_dirs, ext.libraries, ext.library_dirs)
+
+    setup(
+        ext_modules=ext_modules,
+    )
+"""
+
+
+def check_cython_example_module(mod):
+    arr = pa.array([1, 2, 3])
+    assert mod.get_array_length(arr) == 3
+    with pytest.raises(TypeError, match="not an array"):
+        mod.get_array_length(None)
+
+    scal = pa.scalar(123)
+    cast_scal = mod.cast_scalar(scal, pa.utf8())
+    assert cast_scal == pa.scalar("123")
+    with pytest.raises(NotImplementedError,
+                       match="Unsupported cast from int64 to list using function "
+                             "cast_list"):
+        mod.cast_scalar(scal, pa.list_(pa.int64()))
+
+
+@pytest.mark.cython
+def test_cython_api(tmpdir):
+    """
+    Basic test for the Cython API.
+    """
+    # Fail early if cython is not found
+    import cython  # noqa
+
+    with tmpdir.as_cwd():
+        # Set up temporary workspace
+        pyx_file = 'pyarrow_cython_example.pyx'
+        shutil.copyfile(os.path.join(here, pyx_file),
+                        os.path.join(str(tmpdir), pyx_file))
+        # Create setup.py file
+        setup_code = setup_template.format(pyx_file=pyx_file,
+                                           compiler_opts=compiler_opts,
+                                           test_ld_path=test_ld_path)
+        with open('setup.py', 'w') as f:
+            f.write(setup_code)
+
+        # ARROW-2263: Make environment with this pyarrow/ package first on the
+        # PYTHONPATH, for local dev environments
+        subprocess_env = test_util.get_modified_env_with_pythonpath()
+
+        # Compile extension module
+        subprocess.check_call([sys.executable, 'setup.py',
+                               'build_ext', '--inplace'],
+                              env=subprocess_env)
+
+        # Check basic functionality
+        orig_path = sys.path[:]
+        sys.path.insert(0, str(tmpdir))
+        try:
+            mod = __import__('pyarrow_cython_example')
+            check_cython_example_module(mod)
+        finally:
+            sys.path = orig_path
+
+        # Check the extension module is loadable from a subprocess without
+        # pyarrow imported first.
+        code = """if 1:
+            import sys
+            import os
+
+            try:
+                # os.add_dll_directory was added in Python 3.8
+                # and is required in order to find extra DLLs
+                # (win32 only)
+                for dir in {library_dirs}:
+                    os.add_dll_directory(dir)
+            except AttributeError:
+                pass
+
+            mod = __import__({mod_name!r})
+            arr = mod.make_null_array(5)
+            assert mod.get_array_length(arr) == 5
+            assert arr.null_count == 5
+            """.format(mod_name='pyarrow_cython_example',
+                       library_dirs=pa.get_library_dirs())
+
+        path_var = None
+        if sys.platform == 'win32':
+            if not hasattr(os, 'add_dll_directory'):
+                # Python 3.8 onwards doesn't check PATH for extension module
+                # DLLs; we have to use os.add_dll_directory instead.
+                delim, path_var = ';', 'PATH'
+        elif sys.platform == 'darwin':
+            delim, path_var = ':', 'DYLD_LIBRARY_PATH'
+        else:
+            delim, path_var = ':', 'LD_LIBRARY_PATH'
+
+        if path_var:
+            paths = sys.path
+            paths += pa.get_library_dirs()
+            paths += [subprocess_env.get(path_var, '')]
+            paths = [path for path in paths if path]
+            subprocess_env[path_var] = delim.join(paths)
+        subprocess.check_call([sys.executable, '-c', code],
+                              stdout=subprocess.PIPE,
+                              env=subprocess_env)
+
+
+@pytest.mark.cython
+def test_visit_strings(tmpdir):
+    with tmpdir.as_cwd():
+        # Set up temporary workspace
+        pyx_file = 'bound_function_visit_strings.pyx'
+        shutil.copyfile(os.path.join(here, pyx_file),
+                        os.path.join(str(tmpdir), pyx_file))
+        # Create setup.py file
+        setup_code = setup_template.format(pyx_file=pyx_file,
+                                           compiler_opts=compiler_opts,
+                                           test_ld_path=test_ld_path)
+        with open('setup.py', 'w') as f:
+            f.write(setup_code)
+
+        subprocess_env = test_util.get_modified_env_with_pythonpath()
+
+        # Compile extension module
+        subprocess.check_call([sys.executable, 'setup.py',
+                               'build_ext', '--inplace'],
+                              env=subprocess_env)
+
+    sys.path.insert(0, str(tmpdir))
+    mod = __import__('bound_function_visit_strings')
+
+    strings = ['a', 'b', 'c']
+    visited = []
+    mod._visit_strings(strings, visited.append)
+
+    assert visited == strings
+
+    with pytest.raises(ValueError, match="wtf"):
+        def raise_on_b(s):
+            if s == 'b':
+                raise ValueError('wtf')
+
+        mod._visit_strings(strings, raise_on_b)
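
The setup_template above reduces to the following standalone setup.py pattern for building a Cython extension against pyarrow (a sketch; 'example.pyx' is a placeholder file name):

from setuptools import setup
from Cython.Build import cythonize
import numpy as np
import pyarrow as pa

ext_modules = cythonize("example.pyx")
for ext in ext_modules:
    ext.include_dirs.append(np.get_include())         # numpy headers
    ext.include_dirs.append(pa.get_include())         # arrow/python/api.h
    ext.libraries.extend(pa.get_libraries())          # link against libarrow etc.
    ext.library_dirs.extend(pa.get_library_dirs())

setup(ext_modules=ext_modules)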
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_dataset.py
ADDED
The diff for this file is too large to render.
See raw diff
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_dataset_encryption.py
ADDED
@@ -0,0 +1,217 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import base64
+from datetime import timedelta
+import numpy as np
+import pyarrow.fs as fs
+import pyarrow as pa
+
+import pytest
+
+encryption_unavailable = False
+
+try:
+    import pyarrow.parquet as pq
+    import pyarrow.dataset as ds
+except ImportError:
+    pq = None
+    ds = None
+
+try:
+    from pyarrow.tests.parquet.encryption import InMemoryKmsClient
+    import pyarrow.parquet.encryption as pe
+except ImportError:
+    encryption_unavailable = True
+
+
+# Marks all of the tests in this module
+pytestmark = pytest.mark.dataset
+
+
+FOOTER_KEY = b"0123456789112345"
+FOOTER_KEY_NAME = "footer_key"
+COL_KEY = b"1234567890123450"
+COL_KEY_NAME = "col_key"
+
+
+def create_sample_table():
+    return pa.table(
+        {
+            "year": [2020, 2022, 2021, 2022, 2019, 2021],
+            "n_legs": [2, 2, 4, 4, 5, 100],
+            "animal": [
+                "Flamingo",
+                "Parrot",
+                "Dog",
+                "Horse",
+                "Brittle stars",
+                "Centipede",
+            ],
+        }
+    )
+
+
+def create_encryption_config():
+    return pe.EncryptionConfiguration(
+        footer_key=FOOTER_KEY_NAME,
+        plaintext_footer=False,
+        column_keys={COL_KEY_NAME: ["n_legs", "animal"]},
+        encryption_algorithm="AES_GCM_V1",
+        # requires timedelta or an assertion is raised
+        cache_lifetime=timedelta(minutes=5.0),
+        data_key_length_bits=256,
+    )
+
+
+def create_decryption_config():
+    return pe.DecryptionConfiguration(cache_lifetime=300)
+
+
+def create_kms_connection_config():
+    return pe.KmsConnectionConfig(
+        custom_kms_conf={
+            FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
+            COL_KEY_NAME: COL_KEY.decode("UTF-8"),
+        }
+    )
+
+
+def kms_factory(kms_connection_configuration):
+    return InMemoryKmsClient(kms_connection_configuration)
+
+
+@pytest.mark.skipif(
+    encryption_unavailable, reason="Parquet Encryption is not currently enabled"
+)
+def test_dataset_encryption_decryption():
+    table = create_sample_table()
+
+    encryption_config = create_encryption_config()
+    decryption_config = create_decryption_config()
+    kms_connection_config = create_kms_connection_config()
+
+    crypto_factory = pe.CryptoFactory(kms_factory)
+    parquet_encryption_cfg = ds.ParquetEncryptionConfig(
+        crypto_factory, kms_connection_config, encryption_config
+    )
+    parquet_decryption_cfg = ds.ParquetDecryptionConfig(
+        crypto_factory, kms_connection_config, decryption_config
+    )
+
+    # create write_options with dataset encryption config
+    pformat = pa.dataset.ParquetFileFormat()
+    write_options = pformat.make_write_options(encryption_config=parquet_encryption_cfg)
+
+    mockfs = fs._MockFileSystem()
+    mockfs.create_dir("/")
+
+    ds.write_dataset(
+        data=table,
+        base_dir="sample_dataset",
+        format=pformat,
+        file_options=write_options,
+        filesystem=mockfs,
+    )
+
+    # read without decryption config -> should error if the dataset was
+    # properly encrypted
+    pformat = pa.dataset.ParquetFileFormat()
+    with pytest.raises(IOError, match=r"no decryption"):
+        ds.dataset("sample_dataset", format=pformat, filesystem=mockfs)
+
+    # set decryption config for parquet fragment scan options
+    pq_scan_opts = ds.ParquetFragmentScanOptions(
+        decryption_config=parquet_decryption_cfg
+    )
+    pformat = pa.dataset.ParquetFileFormat(default_fragment_scan_options=pq_scan_opts)
+    dataset = ds.dataset("sample_dataset", format=pformat, filesystem=mockfs)
+
+    assert table.equals(dataset.to_table())
+
+
+@pytest.mark.skipif(
+    not encryption_unavailable, reason="Parquet Encryption is currently enabled"
+)
+def test_write_dataset_parquet_without_encryption():
+    """Test write_dataset with ParquetFileFormat and test if an exception is thrown
+    if you try to set encryption_config using make_write_options"""
+
+    # Set the encryption configuration using ParquetFileFormat
+    # and make_write_options
+    pformat = pa.dataset.ParquetFileFormat()
+
+    with pytest.raises(NotImplementedError):
+        _ = pformat.make_write_options(encryption_config="some value")
+
+
+@pytest.mark.skipif(
+    encryption_unavailable, reason="Parquet Encryption is not currently enabled"
+)
+def test_large_row_encryption_decryption():
+    """Test encryption and decryption of a large number of rows."""
+
+    class NoOpKmsClient(pe.KmsClient):
+        def wrap_key(self, key_bytes: bytes, _: str) -> bytes:
+            b = base64.b64encode(key_bytes)
+            return b
+
+        def unwrap_key(self, wrapped_key: bytes, _: str) -> bytes:
+            b = base64.b64decode(wrapped_key)
+            return b
+
+    row_count = 2**15 + 1
+    table = pa.Table.from_arrays(
+        [pa.array(np.random.rand(row_count), type=pa.float32())], names=["foo"]
+    )
+
+    kms_config = pe.KmsConnectionConfig()
+    crypto_factory = pe.CryptoFactory(lambda _: NoOpKmsClient())
+    encryption_config = pe.EncryptionConfiguration(
+        footer_key="UNIMPORTANT_KEY",
+        column_keys={"UNIMPORTANT_KEY": ["foo"]},
+        double_wrapping=True,
+        plaintext_footer=False,
+        data_key_length_bits=128,
+    )
+    pqe_config = ds.ParquetEncryptionConfig(
+        crypto_factory, kms_config, encryption_config
+    )
+    pqd_config = ds.ParquetDecryptionConfig(
+        crypto_factory, kms_config, pe.DecryptionConfiguration()
+    )
+    scan_options = ds.ParquetFragmentScanOptions(decryption_config=pqd_config)
+    file_format = ds.ParquetFileFormat(default_fragment_scan_options=scan_options)
+    write_options = file_format.make_write_options(encryption_config=pqe_config)
+    file_decryption_properties = crypto_factory.file_decryption_properties(kms_config)
+
+    mockfs = fs._MockFileSystem()
+    mockfs.create_dir("/")
+
+    path = "large-row-test-dataset"
+    ds.write_dataset(table, path, format=file_format,
+                     file_options=write_options, filesystem=mockfs)
+
+    file_path = path + "/part-0.parquet"
+    new_table = pq.ParquetFile(
+        file_path, decryption_properties=file_decryption_properties,
+        filesystem=mockfs
+    ).read()
+    assert table == new_table
+
+    dataset = ds.dataset(path, format=file_format, filesystem=mockfs)
+    new_table = dataset.to_table()
+    assert table == new_table
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_deprecations.py
ADDED
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Check that various deprecation warnings are raised
+
+# flake8: noqa
+
+import pyarrow as pa
+import pytest
llmeval-env/lib/python3.10/site-packages/pyarrow/tests/test_dlpack.py
ADDED
@@ -0,0 +1,142 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import ctypes
+from functools import wraps
+import pytest
+
+import numpy as np
+
+import pyarrow as pa
+from pyarrow.vendored.version import Version
+
+
+def PyCapsule_IsValid(capsule, name):
+    return ctypes.pythonapi.PyCapsule_IsValid(ctypes.py_object(capsule), name) == 1
+
+
+def check_dlpack_export(arr, expected_arr):
+    DLTensor = arr.__dlpack__()
+    assert PyCapsule_IsValid(DLTensor, b"dltensor") is True
+
+    result = np.from_dlpack(arr)
+    np.testing.assert_array_equal(result, expected_arr, strict=True)
+
+    assert arr.__dlpack_device__() == (1, 0)
+
+
+def check_bytes_allocated(f):
+    @wraps(f)
+    def wrapper(*args, **kwargs):
+        allocated_bytes = pa.total_allocated_bytes()
+        try:
+            return f(*args, **kwargs)
+        finally:
+            assert pa.total_allocated_bytes() == allocated_bytes
+    return wrapper
+
+
+@check_bytes_allocated
+@pytest.mark.parametrize(
+    ('value_type', 'np_type'),
+    [
+        (pa.uint8(), np.uint8),
+        (pa.uint16(), np.uint16),
+        (pa.uint32(), np.uint32),
+        (pa.uint64(), np.uint64),
+        (pa.int8(), np.int8),
+        (pa.int16(), np.int16),
+        (pa.int32(), np.int32),
+        (pa.int64(), np.int64),
+        (pa.float16(), np.float16),
+        (pa.float32(), np.float32),
+        (pa.float64(), np.float64),
+    ]
+)
+def test_dlpack(value_type, np_type):
+    if Version(np.__version__) < Version("1.24.0"):
+        pytest.skip("No dlpack support in numpy versions older than 1.22.0, "
+                    "strict keyword in assert_array_equal added in numpy version "
+                    "1.24.0")
+
+    expected = np.array([1, 2, 3], dtype=np_type)
+    arr = pa.array(expected, type=value_type)
+    check_dlpack_export(arr, expected)
+
+    arr_sliced = arr.slice(1, 1)
+    expected = np.array([2], dtype=np_type)
+    check_dlpack_export(arr_sliced, expected)
+
+    arr_sliced = arr.slice(0, 1)
+    expected = np.array([1], dtype=np_type)
+    check_dlpack_export(arr_sliced, expected)
+
+    arr_sliced = arr.slice(1)
+    expected = np.array([2, 3], dtype=np_type)
+    check_dlpack_export(arr_sliced, expected)
+
+    arr_zero = pa.array([], type=value_type)
+    expected = np.array([], dtype=np_type)
+    check_dlpack_export(arr_zero, expected)
+
+
+def test_dlpack_not_supported():
+    if Version(np.__version__) < Version("1.22.0"):
+        pytest.skip("No dlpack support in numpy versions older than 1.22.0.")
+
+    arr = pa.array([1, None, 3])
+    with pytest.raises(TypeError, match="Can only use DLPack "
+                                        "on arrays with no nulls."):
+        np.from_dlpack(arr)
+
+    arr = pa.array(
+        [[0, 1], [3, 4]],
+        type=pa.list_(pa.int32())
+    )
+    with pytest.raises(TypeError, match="DataType is not compatible with DLPack spec"):
+        np.from_dlpack(arr)
+
+    arr = pa.array([])
+    with pytest.raises(TypeError, match="DataType is not compatible with DLPack spec"):
+        np.from_dlpack(arr)
+
+    # DLPack doesn't support bit-packed boolean values
+    arr = pa.array([True, False, True])
+    with pytest.raises(TypeError, match="Bit-packed boolean data type "
+                                        "not supported by DLPack."):
+        np.from_dlpack(arr)
+
+
+def test_dlpack_cuda_not_supported():
+    cuda = pytest.importorskip("pyarrow.cuda")
+
+    schema = pa.schema([pa.field('f0', pa.int16())])
+    a0 = pa.array([1, 2, 3], type=pa.int16())
+    batch = pa.record_batch([a0], schema=schema)
+
+    cbuf = cuda.serialize_record_batch(batch, cuda.Context(0))
+    cbatch = cuda.read_record_batch(cbuf, batch.schema)
+    carr = cbatch["f0"]
+
+    # CudaBuffers not yet supported
+    with pytest.raises(NotImplementedError, match="DLPack support is implemented "
+                                                  "only for buffers on CPU device."):
+        np.from_dlpack(carr)
+
+    with pytest.raises(NotImplementedError, match="DLPack support is implemented "
+                                                  "only for buffers on CPU device."):
+        carr.__dlpack_device__()
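
For reference, the happy path that check_dlpack_export verifies is simply this (a sketch; requires numpy >= 1.22 for np.from_dlpack):

import numpy as np
import pyarrow as pa

arr = pa.array([1, 2, 3], type=pa.int64())    # primitive type, no nulls
capsule = arr.__dlpack__()                    # PyCapsule named "dltensor"
assert arr.__dlpack_device__() == (1, 0)      # CPU device
result = np.from_dlpack(arr)                  # zero-copy numpy view
np.testing.assert_array_equal(result, np.array([1, 2, 3], dtype=np.int64))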