Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the complete change set.
- ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/fp32.pt +3 -0
- ckpts/universal/global_step80/zero/20.attention.dense.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step80/zero/20.attention.dense.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
- ckpts/universal/global_step80/zero/25.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
- venv/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/pyarrow/_azurefs.pyx +134 -0
- venv/lib/python3.10/site-packages/pyarrow/_csv.pyx +1542 -0
- venv/lib/python3.10/site-packages/pyarrow/_cuda.pyx +1058 -0
- venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/pyarrow/_flight.pyx +0 -0
- venv/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/pyarrow/gandiva.pyx +760 -0
- venv/lib/python3.10/site-packages/pyarrow/parquet/__init__.py +20 -0
- venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pyarrow/parquet/core.py +2341 -0
- venv/lib/python3.10/site-packages/pyarrow/parquet/encryption.py +23 -0
- venv/lib/python3.10/site-packages/pyarrow/scalar.pxi +1220 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/__init__.py +0 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/arrow_39313.py +47 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/arrow_7980.py +30 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/bound_function_visit_strings.pyx +67 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/conftest.py +312 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/pandas_threaded_import.py +44 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/pyarrow_cython_example.pyx +61 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/read_record_batch.py +25 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/strategies.py +457 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_acero.py +413 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_adhoc_memory_leak.py +43 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_array.py +0 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_builder.py +86 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_cffi.py +707 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_compute.py +0 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_convert_builtin.py +2536 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_cpp_internals.py +50 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_csv.py +2018 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_cuda.py +794 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_cuda_numba_interop.py +235 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_cython.py +200 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_dataset.py +0 -0
- venv/lib/python3.10/site-packages/pyarrow/tests/test_dataset_encryption.py +217 -0
ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:034eb6229e324eae8b6ba583cef0515886e17efad4ecb6622d7f4edcf4bda173
+size 9372

ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aba2f8eafba5fc5f06752423ad2672ff0d38bd5064e8b6362fd056b265c9d2c9
+size 9387

ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4eaebbcd702fced689bf99534651a846025526e6fc6a69bf4f3be730352a4ac7
+size 9293

ckpts/universal/global_step80/zero/20.attention.dense.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea969c245f6822c4445ec535599b5b48779f685530f767f52d3b3cb4dc6e0976
+size 16778396

ckpts/universal/global_step80/zero/20.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ddaffb0f2b394af82c16248ec2d29dd3e47bf951f2f99441b2c42141f04f5bb
+size 16778411

ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24b99a47225182c23b15b88365a94e9c7a413b05942e83b6511947d202d9c60c
+size 33555612

ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27d345015bef2e0f42c7750c3a883b57cf4bbf71f86176a14e9093ed13c3ef1a
+size 33555627

ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c97edf776f663b8ddaac4a76e4950e2886468499d3980f37cea6ac01f433b0f
+size 33555533

ckpts/universal/global_step80/zero/25.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f561df8938128ed96348c642a340b36d54ec20b3f24e1f8682e5ed27d014d7d
+size 33555627
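The .pt entries above are Git LFS pointer files: the repository stores only an oid and a byte size, and the tensor payload lives in LFS storage. The exp_avg/exp_avg_sq/fp32 triple per parameter is consistent with a ZeRO "universal" checkpoint for an Adam-style optimizer (first moment, second moment, fp32 master weights). A minimal sketch of inspecting one shard after `git lfs pull` (assumptions: PyTorch is installed, and each file was written with torch.save; whether it holds a bare tensor or a small dict depends on the checkpointing code):

import torch

# Sketch under the assumptions above; the path comes from the listing.
base = "ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight"
fp32 = torch.load(f"{base}/fp32.pt", map_location="cpu")              # master weights
exp_avg = torch.load(f"{base}/exp_avg.pt", map_location="cpu")        # Adam first moment
exp_avg_sq = torch.load(f"{base}/exp_avg_sq.pt", map_location="cpu")  # Adam second moment
print(type(fp32), getattr(fp32, "shape", None))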
venv/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (321 kB).
venv/lib/python3.10/site-packages/pyarrow/_azurefs.pyx
ADDED
@@ -0,0 +1,134 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# cython: language_level = 3
+
+from cython cimport binding
+
+
+from pyarrow.lib import frombytes, tobytes
+from pyarrow.includes.libarrow_fs cimport *
+from pyarrow._fs cimport FileSystem
+
+
+cdef class AzureFileSystem(FileSystem):
+    """
+    Azure Blob Storage backed FileSystem implementation
+
+    This implementation supports flat namespace and hierarchical namespace (HNS) a.k.a.
+    Data Lake Gen2 storage accounts. HNS will be automatically detected and HNS specific
+    features will be used when they provide a performance advantage. Azurite emulator is
+    also supported. Note: `/` is the only supported delimiter.
+
+    The storage account is considered the root of the filesystem. When enabled, containers
+    will be created or deleted during relevant directory operations. Obviously, this also
+    requires authentication with the additional permissions.
+
+    By default `DefaultAzureCredential <https://github.com/Azure/azure-sdk-for-cpp/blob/main/sdk/identity/azure-identity/README.md#defaultazurecredential>`__
+    is used for authentication. This means it will try several types of authentication
+    and go with the first one that works. If any authentication parameters are provided when
+    initialising the FileSystem, they will be used instead of the default credential.
+
+    Parameters
+    ----------
+    account_name : str
+        Azure Blob Storage account name. This is the globally unique identifier for the
+        storage account.
+    account_key : str, default None
+        Account key of the storage account. Pass None to use default credential.
+    blob_storage_authority : str, default None
+        hostname[:port] of the Blob Service. Defaults to `.blob.core.windows.net`. Useful
+        for connecting to a local emulator, like Azurite.
+    dfs_storage_authority : str, default None
+        hostname[:port] of the Data Lake Gen 2 Service. Defaults to
+        `.dfs.core.windows.net`. Useful for connecting to a local emulator, like Azurite.
+    blob_storage_scheme : str, default None
+        Either `http` or `https`. Defaults to `https`. Useful for connecting to a local
+        emulator, like Azurite.
+    dfs_storage_scheme : str, default None
+        Either `http` or `https`. Defaults to `https`. Useful for connecting to a local
+        emulator, like Azurite.
+
+    Examples
+    --------
+    >>> from pyarrow import fs
+    >>> azure_fs = fs.AzureFileSystem(account_name='myaccount')
+    >>> azurite_fs = fs.AzureFileSystem(
+    ...     account_name='devstoreaccount1',
+    ...     account_key='Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==',
+    ...     blob_storage_authority='127.0.0.1:10000',
+    ...     dfs_storage_authority='127.0.0.1:10000',
+    ...     blob_storage_scheme='http',
+    ...     dfs_storage_scheme='http',
+    ... )
+
+    For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`.
+    """
+    cdef:
+        CAzureFileSystem* azurefs
+        c_string account_key
+
+    def __init__(self, account_name, *, account_key=None, blob_storage_authority=None,
+                 dfs_storage_authority=None, blob_storage_scheme=None,
+                 dfs_storage_scheme=None):
+        cdef:
+            CAzureOptions options
+            shared_ptr[CAzureFileSystem] wrapped
+
+        options.account_name = tobytes(account_name)
+        if blob_storage_authority:
+            options.blob_storage_authority = tobytes(blob_storage_authority)
+        if dfs_storage_authority:
+            options.dfs_storage_authority = tobytes(dfs_storage_authority)
+        if blob_storage_scheme:
+            options.blob_storage_scheme = tobytes(blob_storage_scheme)
+        if dfs_storage_scheme:
+            options.dfs_storage_scheme = tobytes(dfs_storage_scheme)
+
+        if account_key:
+            options.ConfigureAccountKeyCredential(tobytes(account_key))
+            self.account_key = tobytes(account_key)
+        else:
+            options.ConfigureDefaultCredential()
+
+        with nogil:
+            wrapped = GetResultValue(CAzureFileSystem.Make(options))
+
+        self.init(<shared_ptr[CFileSystem]> wrapped)
+
+    cdef init(self, const shared_ptr[CFileSystem]& wrapped):
+        FileSystem.init(self, wrapped)
+        self.azurefs = <CAzureFileSystem*> wrapped.get()
+
+    @staticmethod
+    @binding(True)  # Required for cython < 3
+    def _reconstruct(kwargs):
+        # __reduce__ doesn't allow passing named arguments directly to the
+        # reconstructor, hence this wrapper.
+        return AzureFileSystem(**kwargs)
+
+    def __reduce__(self):
+        cdef CAzureOptions opts = self.azurefs.options()
+        return (
+            AzureFileSystem._reconstruct, (dict(
+                account_name=frombytes(opts.account_name),
+                account_key=frombytes(self.account_key),
+                blob_storage_authority=frombytes(opts.blob_storage_authority),
+                dfs_storage_authority=frombytes(opts.dfs_storage_authority),
+                blob_storage_scheme=frombytes(opts.blob_storage_scheme),
+                dfs_storage_scheme=frombytes(opts.dfs_storage_scheme)
+            ),))
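The docstring above covers construction; a short usage sketch for reading data through the filesystem (the account, container, and file names are hypothetical placeholders, and running this requires a reachable storage account or Azurite emulator):

import pyarrow.parquet as pq
from pyarrow import fs

# Hypothetical names; with no account_key, DefaultAzureCredential is tried.
azure_fs = fs.AzureFileSystem(account_name="myaccount")
table = pq.read_table("my-container/data.parquet", filesystem=azure_fs)
print(table.schema)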
venv/lib/python3.10/site-packages/pyarrow/_csv.pyx
ADDED
@@ -0,0 +1,1542 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# cython: profile=False
+# distutils: language = c++
+# cython: language_level = 3
+
+from cython.operator cimport dereference as deref
+
+from collections import namedtuple
+from collections.abc import Mapping
+
+from pyarrow.includes.common cimport *
+from pyarrow.includes.libarrow cimport *
+from pyarrow.includes.libarrow_python cimport *
+from pyarrow.lib cimport (check_status, Field, MemoryPool, Schema,
+                          RecordBatchReader, ensure_type,
+                          maybe_unbox_memory_pool, get_input_stream,
+                          get_writer, native_transcoding_input_stream,
+                          pyarrow_unwrap_batch, pyarrow_unwrap_schema,
+                          pyarrow_unwrap_table, pyarrow_wrap_schema,
+                          pyarrow_wrap_table, pyarrow_wrap_data_type,
+                          pyarrow_unwrap_data_type, Table, RecordBatch,
+                          StopToken, _CRecordBatchWriter)
+from pyarrow.lib import frombytes, tobytes, SignalStopHandler
+
+
+cdef unsigned char _single_char(s) except 0:
+    val = ord(s)
+    if val == 0 or val > 127:
+        raise ValueError("Expecting an ASCII character")
+    return <unsigned char> val
+
+
+_InvalidRow = namedtuple(
+    "_InvalidRow", ("expected_columns", "actual_columns", "number", "text"),
+    module=__name__)
+
+
+class InvalidRow(_InvalidRow):
+    """
+    Description of an invalid row in a CSV file.
+
+    Parameters
+    ----------
+    expected_columns : int
+        The expected number of columns in the row.
+    actual_columns : int
+        The actual number of columns in the row.
+    number : int or None
+        The physical row number if known, otherwise None.
+    text : str
+        The contents of the row.
+    """
+    __slots__ = ()
+
+
+cdef CInvalidRowResult _handle_invalid_row(
+        handler, const CCSVInvalidRow& c_row) except CInvalidRowResult_Error:
+    # A negative row number means undetermined (because of parallel reading)
+    row_number = c_row.number if c_row.number >= 0 else None
+    row = InvalidRow(c_row.expected_columns, c_row.actual_columns,
+                     row_number, frombytes(<c_string> c_row.text))
+    result = handler(row)
+    if result == 'error':
+        return CInvalidRowResult_Error
+    elif result == 'skip':
+        return CInvalidRowResult_Skip
+    else:
+        raise ValueError("Invalid return value for invalid row handler: "
+                         f"expected 'error' or 'skip', got {result!r}")
+
+
+cdef class ReadOptions(_Weakrefable):
+    """
+    Options for reading CSV files.
+
+    Parameters
+    ----------
+    use_threads : bool, optional (default True)
+        Whether to use multiple threads to accelerate reading
+    block_size : int, optional
+        How much bytes to process at a time from the input stream.
+        This will determine multi-threading granularity as well as
+        the size of individual record batches or table chunks.
+        Minimum valid value for block size is 1
+    skip_rows : int, optional (default 0)
+        The number of rows to skip before the column names (if any)
+        and the CSV data.
+    skip_rows_after_names : int, optional (default 0)
+        The number of rows to skip after the column names.
+        This number can be larger than the number of rows in one
+        block, and empty rows are counted.
+        The order of application is as follows:
+        - `skip_rows` is applied (if non-zero);
+        - column names are read (unless `column_names` is set);
+        - `skip_rows_after_names` is applied (if non-zero).
+    column_names : list, optional
+        The column names of the target table. If empty, fall back on
+        `autogenerate_column_names`.
+    autogenerate_column_names : bool, optional (default False)
+        Whether to autogenerate column names if `column_names` is empty.
+        If true, column names will be of the form "f0", "f1"...
+        If false, column names will be read from the first CSV row
+        after `skip_rows`.
+    encoding : str, optional (default 'utf8')
+        The character encoding of the CSV data. Columns that cannot
+        decode using this encoding can still be read as Binary.
+
+    Examples
+    --------
+
+    Defining an example data:
+
+    >>> import io
+    >>> s = "1,2,3\\nFlamingo,2,2022-03-01\\nHorse,4,2022-03-02\\nBrittle stars,5,2022-03-03\\nCentipede,100,2022-03-04"
+    >>> print(s)
+    1,2,3
+    Flamingo,2,2022-03-01
+    Horse,4,2022-03-02
+    Brittle stars,5,2022-03-03
+    Centipede,100,2022-03-04
+
+    Ignore the first numbered row and substitute it with defined
+    or autogenerated column names:
+
+    >>> from pyarrow import csv
+    >>> read_options = csv.ReadOptions(
+    ...     column_names=["animals", "n_legs", "entry"],
+    ...     skip_rows=1)
+    >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
+    pyarrow.Table
+    animals: string
+    n_legs: int64
+    entry: date32[day]
+    ----
+    animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
+    n_legs: [[2,4,5,100]]
+    entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
+
+    >>> read_options = csv.ReadOptions(autogenerate_column_names=True,
+    ...                                skip_rows=1)
+    >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
+    pyarrow.Table
+    f0: string
+    f1: int64
+    f2: date32[day]
+    ----
+    f0: [["Flamingo","Horse","Brittle stars","Centipede"]]
+    f1: [[2,4,5,100]]
+    f2: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
+
+    Remove the first 2 rows of the data:
+
+    >>> read_options = csv.ReadOptions(skip_rows_after_names=2)
+    >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options)
+    pyarrow.Table
+    1: string
+    2: int64
+    3: date32[day]
+    ----
+    1: [["Brittle stars","Centipede"]]
+    2: [[5,100]]
+    3: [[2022-03-03,2022-03-04]]
+    """
+
+    # Avoid mistakenly creating attributes
+    __slots__ = ()
+
+    # __init__() is not called when unpickling, initialize storage here
+    def __cinit__(self, *argw, **kwargs):
+        self.options.reset(new CCSVReadOptions(CCSVReadOptions.Defaults()))
+
+    def __init__(self, *, use_threads=None, block_size=None, skip_rows=None,
+                 skip_rows_after_names=None, column_names=None,
+                 autogenerate_column_names=None, encoding='utf8'):
+        if use_threads is not None:
+            self.use_threads = use_threads
+        if block_size is not None:
+            self.block_size = block_size
+        if skip_rows is not None:
+            self.skip_rows = skip_rows
+        if skip_rows_after_names is not None:
+            self.skip_rows_after_names = skip_rows_after_names
+        if column_names is not None:
+            self.column_names = column_names
+        if autogenerate_column_names is not None:
+            self.autogenerate_column_names = autogenerate_column_names
+        # Python-specific option
+        self.encoding = encoding
+
+    @property
+    def use_threads(self):
+        """
+        Whether to use multiple threads to accelerate reading.
+        """
+        return deref(self.options).use_threads
+
+    @use_threads.setter
+    def use_threads(self, value):
+        deref(self.options).use_threads = value
+
+    @property
+    def block_size(self):
+        """
+        How much bytes to process at a time from the input stream.
+        This will determine multi-threading granularity as well as
+        the size of individual record batches or table chunks.
+        """
+        return deref(self.options).block_size
+
+    @block_size.setter
+    def block_size(self, value):
+        deref(self.options).block_size = value
+
+    @property
+    def skip_rows(self):
+        """
+        The number of rows to skip before the column names (if any)
+        and the CSV data.
+        See `skip_rows_after_names` for interaction description
+        """
+        return deref(self.options).skip_rows
+
+    @skip_rows.setter
+    def skip_rows(self, value):
+        deref(self.options).skip_rows = value
+
+    @property
+    def skip_rows_after_names(self):
+        """
+        The number of rows to skip after the column names.
+        This number can be larger than the number of rows in one
+        block, and empty rows are counted.
+        The order of application is as follows:
+        - `skip_rows` is applied (if non-zero);
+        - column names are read (unless `column_names` is set);
+        - `skip_rows_after_names` is applied (if non-zero).
+        """
+        return deref(self.options).skip_rows_after_names
+
+    @skip_rows_after_names.setter
+    def skip_rows_after_names(self, value):
+        deref(self.options).skip_rows_after_names = value
+
+    @property
+    def column_names(self):
+        """
+        The column names of the target table. If empty, fall back on
+        `autogenerate_column_names`.
+        """
+        return [frombytes(s) for s in deref(self.options).column_names]
+
+    @column_names.setter
+    def column_names(self, value):
+        deref(self.options).column_names.clear()
+        for item in value:
+            deref(self.options).column_names.push_back(tobytes(item))
+
+    @property
+    def autogenerate_column_names(self):
+        """
+        Whether to autogenerate column names if `column_names` is empty.
+        If true, column names will be of the form "f0", "f1"...
+        If false, column names will be read from the first CSV row
+        after `skip_rows`.
+        """
+        return deref(self.options).autogenerate_column_names
+
+    @autogenerate_column_names.setter
+    def autogenerate_column_names(self, value):
+        deref(self.options).autogenerate_column_names = value
+
+    def validate(self):
+        check_status(deref(self.options).Validate())
+
+    def equals(self, ReadOptions other):
+        """
+        Parameters
+        ----------
+        other : pyarrow.csv.ReadOptions
+
+        Returns
+        -------
+        bool
+        """
+        return (
+            self.use_threads == other.use_threads and
+            self.block_size == other.block_size and
+            self.skip_rows == other.skip_rows and
+            self.skip_rows_after_names == other.skip_rows_after_names and
+            self.column_names == other.column_names and
+            self.autogenerate_column_names ==
+            other.autogenerate_column_names and
+            self.encoding == other.encoding
+        )
+
+    @staticmethod
+    cdef ReadOptions wrap(CCSVReadOptions options):
+        out = ReadOptions()
+        out.options.reset(new CCSVReadOptions(move(options)))
+        out.encoding = 'utf8'  # No way to know this
+        return out
+
+    def __getstate__(self):
+        return (self.use_threads, self.block_size, self.skip_rows,
+                self.column_names, self.autogenerate_column_names,
+                self.encoding, self.skip_rows_after_names)
+
+    def __setstate__(self, state):
+        (self.use_threads, self.block_size, self.skip_rows,
+         self.column_names, self.autogenerate_column_names,
+         self.encoding, self.skip_rows_after_names) = state
+
+    def __eq__(self, other):
+        try:
+            return self.equals(other)
+        except TypeError:
+            return False
+
+
+cdef class ParseOptions(_Weakrefable):
+    """
+    Options for parsing CSV files.
+
+    Parameters
+    ----------
+    delimiter : 1-character string, optional (default ',')
+        The character delimiting individual cells in the CSV data.
+    quote_char : 1-character string or False, optional (default '"')
+        The character used optionally for quoting CSV values
+        (False if quoting is not allowed).
+    double_quote : bool, optional (default True)
+        Whether two quotes in a quoted CSV value denote a single quote
+        in the data.
+    escape_char : 1-character string or False, optional (default False)
+        The character used optionally for escaping special characters
+        (False if escaping is not allowed).
+    newlines_in_values : bool, optional (default False)
+        Whether newline characters are allowed in CSV values.
+        Setting this to True reduces the performance of multi-threaded
+        CSV reading.
+    ignore_empty_lines : bool, optional (default True)
+        Whether empty lines are ignored in CSV input.
+        If False, an empty line is interpreted as containing a single empty
+        value (assuming a one-column CSV file).
+    invalid_row_handler : callable, optional (default None)
+        If not None, this object is called for each CSV row that fails
+        parsing (because of a mismatching number of columns).
+        It should accept a single InvalidRow argument and return either
+        "skip" or "error" depending on the desired outcome.
+
+    Examples
+    --------
+
+    Defining an example file from bytes object:
+
+    >>> import io
+    >>> s = (
+    ...     "animals;n_legs;entry\\n"
+    ...     "Flamingo;2;2022-03-01\\n"
+    ...     "# Comment here:\\n"
+    ...     "Horse;4;2022-03-02\\n"
+    ...     "Brittle stars;5;2022-03-03\\n"
+    ...     "Centipede;100;2022-03-04"
+    ... )
+    >>> print(s)
+    animals;n_legs;entry
+    Flamingo;2;2022-03-01
+    # Comment here:
+    Horse;4;2022-03-02
+    Brittle stars;5;2022-03-03
+    Centipede;100;2022-03-04
+    >>> source = io.BytesIO(s.encode())
+
+    Read the data from a file skipping rows with comments
+    and defining the delimiter:
+
+    >>> from pyarrow import csv
+    >>> def skip_comment(row):
+    ...     if row.text.startswith("# "):
+    ...         return 'skip'
+    ...     else:
+    ...         return 'error'
+    ...
+    >>> parse_options = csv.ParseOptions(delimiter=";", invalid_row_handler=skip_comment)
+    >>> csv.read_csv(source, parse_options=parse_options)
+    pyarrow.Table
+    animals: string
+    n_legs: int64
+    entry: date32[day]
+    ----
+    animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
+    n_legs: [[2,4,5,100]]
+    entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
+    """
+    __slots__ = ()
+
+    def __cinit__(self, *argw, **kwargs):
+        self._invalid_row_handler = None
+        self.options.reset(new CCSVParseOptions(CCSVParseOptions.Defaults()))
+
+    def __init__(self, *, delimiter=None, quote_char=None, double_quote=None,
+                 escape_char=None, newlines_in_values=None,
+                 ignore_empty_lines=None, invalid_row_handler=None):
+        if delimiter is not None:
+            self.delimiter = delimiter
+        if quote_char is not None:
+            self.quote_char = quote_char
+        if double_quote is not None:
+            self.double_quote = double_quote
+        if escape_char is not None:
+            self.escape_char = escape_char
+        if newlines_in_values is not None:
+            self.newlines_in_values = newlines_in_values
+        if ignore_empty_lines is not None:
+            self.ignore_empty_lines = ignore_empty_lines
+        if invalid_row_handler is not None:
+            self.invalid_row_handler = invalid_row_handler
+
+    @property
+    def delimiter(self):
+        """
+        The character delimiting individual cells in the CSV data.
+        """
+        return chr(deref(self.options).delimiter)
+
+    @delimiter.setter
+    def delimiter(self, value):
+        deref(self.options).delimiter = _single_char(value)
+
+    @property
+    def quote_char(self):
+        """
+        The character used optionally for quoting CSV values
+        (False if quoting is not allowed).
+        """
+        if deref(self.options).quoting:
+            return chr(deref(self.options).quote_char)
+        else:
+            return False
+
+    @quote_char.setter
+    def quote_char(self, value):
+        if value is False:
+            deref(self.options).quoting = False
+        else:
+            deref(self.options).quote_char = _single_char(value)
+            deref(self.options).quoting = True
+
+    @property
+    def double_quote(self):
+        """
+        Whether two quotes in a quoted CSV value denote a single quote
+        in the data.
+        """
+        return deref(self.options).double_quote
+
+    @double_quote.setter
+    def double_quote(self, value):
+        deref(self.options).double_quote = value
+
+    @property
+    def escape_char(self):
+        """
+        The character used optionally for escaping special characters
+        (False if escaping is not allowed).
+        """
+        if deref(self.options).escaping:
+            return chr(deref(self.options).escape_char)
+        else:
+            return False
+
+    @escape_char.setter
+    def escape_char(self, value):
+        if value is False:
+            deref(self.options).escaping = False
+        else:
+            deref(self.options).escape_char = _single_char(value)
+            deref(self.options).escaping = True
+
+    @property
+    def newlines_in_values(self):
+        """
+        Whether newline characters are allowed in CSV values.
+        Setting this to True reduces the performance of multi-threaded
+        CSV reading.
+        """
+        return deref(self.options).newlines_in_values
+
+    @newlines_in_values.setter
+    def newlines_in_values(self, value):
+        deref(self.options).newlines_in_values = value
+
+    @property
+    def ignore_empty_lines(self):
+        """
+        Whether empty lines are ignored in CSV input.
+        If False, an empty line is interpreted as containing a single empty
+        value (assuming a one-column CSV file).
+        """
+        return deref(self.options).ignore_empty_lines
+
+    @property
+    def invalid_row_handler(self):
+        """
+        Optional handler for invalid rows.
+
+        If not None, this object is called for each CSV row that fails
+        parsing (because of a mismatching number of columns).
+        It should accept a single InvalidRow argument and return either
+        "skip" or "error" depending on the desired outcome.
+        """
+        return self._invalid_row_handler
+
+    @invalid_row_handler.setter
+    def invalid_row_handler(self, value):
+        if value is not None and not callable(value):
+            raise TypeError("Expected callable or None, "
+                            f"got instance of {type(value)!r}")
+        self._invalid_row_handler = value
+        deref(self.options).invalid_row_handler = MakeInvalidRowHandler(
+            <function[PyInvalidRowCallback]> &_handle_invalid_row, value)
+
+    @ignore_empty_lines.setter
+    def ignore_empty_lines(self, value):
+        deref(self.options).ignore_empty_lines = value
+
+    def validate(self):
+        check_status(deref(self.options).Validate())
+
+    def equals(self, ParseOptions other):
+        """
+        Parameters
+        ----------
+        other : pyarrow.csv.ParseOptions
+
+        Returns
+        -------
+        bool
+        """
+        return (
+            self.delimiter == other.delimiter and
+            self.quote_char == other.quote_char and
+            self.double_quote == other.double_quote and
+            self.escape_char == other.escape_char and
+            self.newlines_in_values == other.newlines_in_values and
+            self.ignore_empty_lines == other.ignore_empty_lines and
+            self._invalid_row_handler == other._invalid_row_handler
+        )
+
+    @staticmethod
+    cdef ParseOptions wrap(CCSVParseOptions options):
+        out = ParseOptions()
+        out.options.reset(new CCSVParseOptions(move(options)))
+        return out
+
+    def __getstate__(self):
+        return (self.delimiter, self.quote_char, self.double_quote,
+                self.escape_char, self.newlines_in_values,
+                self.ignore_empty_lines, self.invalid_row_handler)
+
+    def __setstate__(self, state):
+        (self.delimiter, self.quote_char, self.double_quote,
+         self.escape_char, self.newlines_in_values,
+         self.ignore_empty_lines, self.invalid_row_handler) = state
+
+    def __eq__(self, other):
+        try:
+            return self.equals(other)
+        except TypeError:
+            return False
+
+
+cdef class _ISO8601(_Weakrefable):
+    """
+    A special object indicating ISO-8601 parsing.
+    """
+    __slots__ = ()
+
+    def __str__(self):
+        return 'ISO8601'
+
+    def __eq__(self, other):
+        return isinstance(other, _ISO8601)
+
+
+ISO8601 = _ISO8601()
+
+
+cdef class ConvertOptions(_Weakrefable):
+    """
+    Options for converting CSV data.
+
+    Parameters
+    ----------
+    check_utf8 : bool, optional (default True)
+        Whether to check UTF8 validity of string columns.
+    column_types : pyarrow.Schema or dict, optional
+        Explicitly map column names to column types. Passing this argument
+        disables type inference on the defined columns.
+    null_values : list, optional
+        A sequence of strings that denote nulls in the data
+        (defaults are appropriate in most cases). Note that by default,
+        string columns are not checked for null values. To enable
+        null checking for those, specify ``strings_can_be_null=True``.
+    true_values : list, optional
+        A sequence of strings that denote true booleans in the data
+        (defaults are appropriate in most cases).
+    false_values : list, optional
+        A sequence of strings that denote false booleans in the data
+        (defaults are appropriate in most cases).
+    decimal_point : 1-character string, optional (default '.')
+        The character used as decimal point in floating-point and decimal
+        data.
+    strings_can_be_null : bool, optional (default False)
+        Whether string / binary columns can have null values.
+        If true, then strings in null_values are considered null for
+        string columns.
+        If false, then all strings are valid string values.
+    quoted_strings_can_be_null : bool, optional (default True)
+        Whether quoted values can be null.
+        If true, then strings in "null_values" are also considered null
+        when they appear quoted in the CSV file. Otherwise, quoted values
+        are never considered null.
+    include_columns : list, optional
+        The names of columns to include in the Table.
+        If empty, the Table will include all columns from the CSV file.
+        If not empty, only these columns will be included, in this order.
+    include_missing_columns : bool, optional (default False)
+        If false, columns in `include_columns` but not in the CSV file will
+        error out.
+        If true, columns in `include_columns` but not in the CSV file will
+        produce a column of nulls (whose type is selected using
+        `column_types`, or null by default).
+        This option is ignored if `include_columns` is empty.
+    auto_dict_encode : bool, optional (default False)
+        Whether to try to automatically dict-encode string / binary data.
+        If true, then when type inference detects a string or binary column,
+        it is dict-encoded up to `auto_dict_max_cardinality` distinct values
+        (per chunk), after which it switches to regular encoding.
+        This setting is ignored for non-inferred columns (those in
+        `column_types`).
+    auto_dict_max_cardinality : int, optional
+        The maximum dictionary cardinality for `auto_dict_encode`.
+        This value is per chunk.
+    timestamp_parsers : list, optional
+        A sequence of strptime()-compatible format strings, tried in order
+        when attempting to infer or convert timestamp values (the special
+        value ISO8601() can also be given). By default, a fast built-in
+        ISO-8601 parser is used.
+
+    Examples
+    --------
+
+    Defining an example data:
+
+    >>> import io
+    >>> s = (
+    ...     "animals,n_legs,entry,fast\\n"
+    ...     "Flamingo,2,01/03/2022,Yes\\n"
+    ...     "Horse,4,02/03/2022,Yes\\n"
+    ...     "Brittle stars,5,03/03/2022,No\\n"
+    ...     "Centipede,100,04/03/2022,No\\n"
+    ...     ",6,05/03/2022,"
+    ... )
+    >>> print(s)
+    animals,n_legs,entry,fast
+    Flamingo,2,01/03/2022,Yes
+    Horse,4,02/03/2022,Yes
+    Brittle stars,5,03/03/2022,No
+    Centipede,100,04/03/2022,No
+    ,6,05/03/2022,
+
+    Change the type of a column:
+
+    >>> import pyarrow as pa
+    >>> from pyarrow import csv
+    >>> convert_options = csv.ConvertOptions(column_types={"n_legs": pa.float64()})
+    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
+    pyarrow.Table
+    animals: string
+    n_legs: double
+    entry: string
+    fast: string
+    ----
+    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
+    n_legs: [[2,4,5,100,6]]
+    entry: [["01/03/2022","02/03/2022","03/03/2022","04/03/2022","05/03/2022"]]
+    fast: [["Yes","Yes","No","No",""]]
+
+    Define a date parsing format to get a timestamp type column
+    (in case dates are not in ISO format and not converted by default):
+
+    >>> convert_options = csv.ConvertOptions(
+    ...     timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"])
+    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
+    pyarrow.Table
+    animals: string
+    n_legs: int64
+    entry: timestamp[s]
+    fast: string
+    ----
+    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
+    n_legs: [[2,4,5,100,6]]
+    entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]]
+    fast: [["Yes","Yes","No","No",""]]
+
+    Specify a subset of columns to be read:
+
+    >>> convert_options = csv.ConvertOptions(
+    ...     include_columns=["animals", "n_legs"])
+    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
+    pyarrow.Table
+    animals: string
+    n_legs: int64
+    ----
+    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
+    n_legs: [[2,4,5,100,6]]
+
+    List additional column to be included as a null typed column:
+
+    >>> convert_options = csv.ConvertOptions(
+    ...     include_columns=["animals", "n_legs", "location"],
+    ...     include_missing_columns=True)
+    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
+    pyarrow.Table
+    animals: string
+    n_legs: int64
+    location: null
+    ----
+    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
+    n_legs: [[2,4,5,100,6]]
+    location: [5 nulls]
+
+    Define columns as dictionary type (by default only the
+    string/binary columns are dictionary encoded):
+
+    >>> convert_options = csv.ConvertOptions(
+    ...     timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"],
+    ...     auto_dict_encode=True)
+    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
+    pyarrow.Table
+    animals: dictionary<values=string, indices=int32, ordered=0>
+    n_legs: int64
+    entry: timestamp[s]
+    fast: dictionary<values=string, indices=int32, ordered=0>
+    ----
+    animals: [ -- dictionary:
+    ["Flamingo","Horse","Brittle stars","Centipede",""] -- indices:
+    [0,1,2,3,4]]
+    n_legs: [[2,4,5,100,6]]
+    entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]]
+    fast: [ -- dictionary:
+    ["Yes","No",""] -- indices:
+    [0,0,1,1,2]]
+
+    Set upper limit for the number of categories. If the categories
+    is more than the limit, the conversion to dictionary will not
+    happen:
+
+    >>> convert_options = csv.ConvertOptions(
+    ...     include_columns=["animals"],
+    ...     auto_dict_encode=True,
+    ...     auto_dict_max_cardinality=2)
+    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
+    pyarrow.Table
+    animals: string
+    ----
+    animals: [["Flamingo","Horse","Brittle stars","Centipede",""]]
+
+    Set empty strings to missing values:
+
+    >>> convert_options = csv.ConvertOptions(include_columns=["animals", "n_legs"],
+    ...                                      strings_can_be_null=True)
+    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
+    pyarrow.Table
+    animals: string
+    n_legs: int64
+    ----
+    animals: [["Flamingo","Horse","Brittle stars","Centipede",null]]
+    n_legs: [[2,4,5,100,6]]
+
+    Define values to be True and False when converting a column
+    into a bool type:
+
+    >>> convert_options = csv.ConvertOptions(
+    ...     include_columns=["fast"],
+    ...     false_values=["No"],
+    ...     true_values=["Yes"])
+    >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options)
+    pyarrow.Table
+    fast: bool
+    ----
+    fast: [[true,true,false,false,null]]
+    """
+
+    # Avoid mistakenly creating attributes
+    __slots__ = ()
+
+    def __cinit__(self, *argw, **kwargs):
+        self.options.reset(
+            new CCSVConvertOptions(CCSVConvertOptions.Defaults()))
+
+    def __init__(self, *, check_utf8=None, column_types=None, null_values=None,
+                 true_values=None, false_values=None, decimal_point=None,
+                 strings_can_be_null=None, quoted_strings_can_be_null=None,
+                 include_columns=None, include_missing_columns=None,
+                 auto_dict_encode=None, auto_dict_max_cardinality=None,
+                 timestamp_parsers=None):
+        if check_utf8 is not None:
+            self.check_utf8 = check_utf8
+        if column_types is not None:
+            self.column_types = column_types
+        if null_values is not None:
+            self.null_values = null_values
+        if true_values is not None:
+            self.true_values = true_values
+        if false_values is not None:
+            self.false_values = false_values
+        if decimal_point is not None:
+            self.decimal_point = decimal_point
+        if strings_can_be_null is not None:
+            self.strings_can_be_null = strings_can_be_null
+        if quoted_strings_can_be_null is not None:
+            self.quoted_strings_can_be_null = quoted_strings_can_be_null
+        if include_columns is not None:
+            self.include_columns = include_columns
+        if include_missing_columns is not None:
+            self.include_missing_columns = include_missing_columns
+        if auto_dict_encode is not None:
+            self.auto_dict_encode = auto_dict_encode
+        if auto_dict_max_cardinality is not None:
+            self.auto_dict_max_cardinality = auto_dict_max_cardinality
+        if timestamp_parsers is not None:
+            self.timestamp_parsers = timestamp_parsers
+
+    @property
+    def check_utf8(self):
+        """
+        Whether to check UTF8 validity of string columns.
+        """
+        return deref(self.options).check_utf8
+
+    @check_utf8.setter
+    def check_utf8(self, value):
+        deref(self.options).check_utf8 = value
+
+    @property
+    def strings_can_be_null(self):
+        """
+        Whether string / binary columns can have null values.
+        """
+        return deref(self.options).strings_can_be_null
+
+    @strings_can_be_null.setter
+    def strings_can_be_null(self, value):
+        deref(self.options).strings_can_be_null = value
+
+    @property
+    def quoted_strings_can_be_null(self):
+        """
+        Whether quoted values can be null.
+        """
+        return deref(self.options).quoted_strings_can_be_null
+
+    @quoted_strings_can_be_null.setter
+    def quoted_strings_can_be_null(self, value):
+        deref(self.options).quoted_strings_can_be_null = value
+
+    @property
+    def column_types(self):
+        """
+        Explicitly map column names to column types.
+        """
+        d = {frombytes(item.first): pyarrow_wrap_data_type(item.second)
+             for item in deref(self.options).column_types}
+        return d
+
+    @column_types.setter
+    def column_types(self, value):
+        cdef:
+            shared_ptr[CDataType] typ
+
+        if isinstance(value, Mapping):
+            value = value.items()
+
+        deref(self.options).column_types.clear()
+        for item in value:
+            if isinstance(item, Field):
+                k = item.name
+                v = item.type
+            else:
+                k, v = item
+            typ = pyarrow_unwrap_data_type(ensure_type(v))
+            assert typ != NULL
+            deref(self.options).column_types[tobytes(k)] = typ
+
+    @property
+    def null_values(self):
+        """
+        A sequence of strings that denote nulls in the data.
+        """
+        return [frombytes(x) for x in deref(self.options).null_values]
+
+    @null_values.setter
+    def null_values(self, value):
+        deref(self.options).null_values = [tobytes(x) for x in value]
+
+    @property
+    def true_values(self):
+        """
+        A sequence of strings that denote true booleans in the data.
+        """
+        return [frombytes(x) for x in deref(self.options).true_values]
+
+    @true_values.setter
+    def true_values(self, value):
+        deref(self.options).true_values = [tobytes(x) for x in value]
+
+    @property
+    def false_values(self):
+        """
+        A sequence of strings that denote false booleans in the data.
+        """
+        return [frombytes(x) for x in deref(self.options).false_values]
+
+    @false_values.setter
+    def false_values(self, value):
+        deref(self.options).false_values = [tobytes(x) for x in value]
+
+    @property
+    def decimal_point(self):
+        """
+        The character used as decimal point in floating-point and decimal
+        data.
+        """
+        return chr(deref(self.options).decimal_point)
+
+    @decimal_point.setter
+    def decimal_point(self, value):
+        deref(self.options).decimal_point = _single_char(value)
+
+    @property
+    def auto_dict_encode(self):
+        """
+        Whether to try to automatically dict-encode string / binary data.
+        """
+        return deref(self.options).auto_dict_encode
+
+    @auto_dict_encode.setter
+    def auto_dict_encode(self, value):
+        deref(self.options).auto_dict_encode = value
+
+    @property
+    def auto_dict_max_cardinality(self):
+        """
+        The maximum dictionary cardinality for `auto_dict_encode`.
+
+        This value is per chunk.
+        """
+        return deref(self.options).auto_dict_max_cardinality
+
+    @auto_dict_max_cardinality.setter
+    def auto_dict_max_cardinality(self, value):
+        deref(self.options).auto_dict_max_cardinality = value
+
+    @property
+    def include_columns(self):
+        """
+        The names of columns to include in the Table.
+
+        If empty, the Table will include all columns from the CSV file.
+        If not empty, only these columns will be included, in this order.
+        """
+        return [frombytes(s) for s in deref(self.options).include_columns]
+
+    @include_columns.setter
+    def include_columns(self, value):
+        deref(self.options).include_columns.clear()
+        for item in value:
+            deref(self.options).include_columns.push_back(tobytes(item))
+
+    @property
+    def include_missing_columns(self):
+        """
+        If false, columns in `include_columns` but not in the CSV file will
+        error out.
+        If true, columns in `include_columns` but not in the CSV file will
+        produce a null column (whose type is selected using `column_types`,
+        or null by default).
+        This option is ignored if `include_columns` is empty.
+        """
+        return deref(self.options).include_missing_columns
+
+    @include_missing_columns.setter
+    def include_missing_columns(self, value):
+        deref(self.options).include_missing_columns = value
+
+    @property
+    def timestamp_parsers(self):
+        """
+        A sequence of strptime()-compatible format strings, tried in order
+        when attempting to infer or convert timestamp values (the special
+        value ISO8601() can also be given). By default, a fast built-in
+        ISO-8601 parser is used.
+        """
+        cdef:
+            shared_ptr[CTimestampParser] c_parser
+            c_string kind
+
+        parsers = []
+        for c_parser in deref(self.options).timestamp_parsers:
+            kind = deref(c_parser).kind()
+            if kind == b'strptime':
+                parsers.append(frombytes(deref(c_parser).format()))
+            else:
+                assert kind == b'iso8601'
+                parsers.append(ISO8601)
|
1034 |
+
|
1035 |
+
return parsers
|
1036 |
+
|
1037 |
+
@timestamp_parsers.setter
|
1038 |
+
def timestamp_parsers(self, value):
|
1039 |
+
cdef:
|
1040 |
+
vector[shared_ptr[CTimestampParser]] c_parsers
|
1041 |
+
|
1042 |
+
for v in value:
|
1043 |
+
if isinstance(v, str):
|
1044 |
+
c_parsers.push_back(CTimestampParser.MakeStrptime(tobytes(v)))
|
1045 |
+
elif v == ISO8601:
|
1046 |
+
c_parsers.push_back(CTimestampParser.MakeISO8601())
|
1047 |
+
else:
|
1048 |
+
raise TypeError("Expected list of str or ISO8601 objects")
|
1049 |
+
|
1050 |
+
deref(self.options).timestamp_parsers = move(c_parsers)
|
1051 |
+
|
1052 |
+
@staticmethod
|
1053 |
+
cdef ConvertOptions wrap(CCSVConvertOptions options):
|
1054 |
+
out = ConvertOptions()
|
1055 |
+
out.options.reset(new CCSVConvertOptions(move(options)))
|
1056 |
+
return out
|
1057 |
+
|
1058 |
+
def validate(self):
|
1059 |
+
check_status(deref(self.options).Validate())
|
1060 |
+
|
1061 |
+
def equals(self, ConvertOptions other):
|
1062 |
+
"""
|
1063 |
+
Parameters
|
1064 |
+
----------
|
1065 |
+
other : pyarrow.csv.ConvertOptions
|
1066 |
+
|
1067 |
+
Returns
|
1068 |
+
-------
|
1069 |
+
bool
|
1070 |
+
"""
|
1071 |
+
return (
|
1072 |
+
self.check_utf8 == other.check_utf8 and
|
1073 |
+
self.column_types == other.column_types and
|
1074 |
+
self.null_values == other.null_values and
|
1075 |
+
self.true_values == other.true_values and
|
1076 |
+
self.false_values == other.false_values and
|
1077 |
+
self.decimal_point == other.decimal_point and
|
1078 |
+
self.timestamp_parsers == other.timestamp_parsers and
|
1079 |
+
self.strings_can_be_null == other.strings_can_be_null and
|
1080 |
+
self.quoted_strings_can_be_null ==
|
1081 |
+
other.quoted_strings_can_be_null and
|
1082 |
+
self.auto_dict_encode == other.auto_dict_encode and
|
1083 |
+
self.auto_dict_max_cardinality ==
|
1084 |
+
other.auto_dict_max_cardinality and
|
1085 |
+
self.include_columns == other.include_columns and
|
1086 |
+
self.include_missing_columns == other.include_missing_columns
|
1087 |
+
)
|
1088 |
+
|
1089 |
+
def __getstate__(self):
|
1090 |
+
return (self.check_utf8, self.column_types, self.null_values,
|
1091 |
+
self.true_values, self.false_values, self.decimal_point,
|
1092 |
+
self.timestamp_parsers, self.strings_can_be_null,
|
1093 |
+
self.quoted_strings_can_be_null, self.auto_dict_encode,
|
1094 |
+
self.auto_dict_max_cardinality, self.include_columns,
|
1095 |
+
self.include_missing_columns)
|
1096 |
+
|
1097 |
+
def __setstate__(self, state):
|
1098 |
+
(self.check_utf8, self.column_types, self.null_values,
|
1099 |
+
self.true_values, self.false_values, self.decimal_point,
|
1100 |
+
self.timestamp_parsers, self.strings_can_be_null,
|
1101 |
+
self.quoted_strings_can_be_null, self.auto_dict_encode,
|
1102 |
+
self.auto_dict_max_cardinality, self.include_columns,
|
1103 |
+
self.include_missing_columns) = state
|
1104 |
+
|
1105 |
+
def __eq__(self, other):
|
1106 |
+
try:
|
1107 |
+
return self.equals(other)
|
1108 |
+
except TypeError:
|
1109 |
+
return False
|
1110 |
+
|
1111 |
+
|
1112 |
+
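# A minimal usage sketch for ConvertOptions (illustrative only, not part of
# the original module; the column names and "data.csv" path are hypothetical):
#
#   >>> import pyarrow as pa
#   >>> from pyarrow import csv
#   >>> opts = csv.ConvertOptions(
#   ...     column_types={"id": pa.int64(), "price": pa.decimal128(10, 2)},
#   ...     null_values=["", "NA", "null"],
#   ...     strings_can_be_null=True,
#   ...     timestamp_parsers=[csv.ISO8601, "%d/%m/%Y"])
#   >>> table = csv.read_csv("data.csv", convert_options=opts)  # doctest: +SKIP
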
cdef _get_reader(input_file, ReadOptions read_options,
                 shared_ptr[CInputStream]* out):
    use_memory_map = False
    get_input_stream(input_file, use_memory_map, out)
    if read_options is not None:
        out[0] = native_transcoding_input_stream(out[0],
                                                 read_options.encoding,
                                                 'utf8')


cdef _get_read_options(ReadOptions read_options, CCSVReadOptions* out):
    if read_options is None:
        out[0] = CCSVReadOptions.Defaults()
    else:
        out[0] = deref(read_options.options)


cdef _get_parse_options(ParseOptions parse_options, CCSVParseOptions* out):
    if parse_options is None:
        out[0] = CCSVParseOptions.Defaults()
    else:
        out[0] = deref(parse_options.options)


cdef _get_convert_options(ConvertOptions convert_options,
                          CCSVConvertOptions* out):
    if convert_options is None:
        out[0] = CCSVConvertOptions.Defaults()
    else:
        out[0] = deref(convert_options.options)


cdef class CSVStreamingReader(RecordBatchReader):
    """An object that reads record batches incrementally from a CSV file.

    Should not be instantiated directly by user code.
    """
    cdef readonly:
        Schema schema

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, "
                        "use pyarrow.csv.open_csv() instead."
                        .format(self.__class__.__name__))

    # Note about cancellation: we cannot create a SignalStopHandler
    # by default here, as several CSVStreamingReader instances may be
    # created (including by the same thread). Handling cancellation
    # would require having the user pass the SignalStopHandler.
    # (in addition to solving ARROW-11853)

    cdef _open(self, shared_ptr[CInputStream] stream,
               CCSVReadOptions c_read_options,
               CCSVParseOptions c_parse_options,
               CCSVConvertOptions c_convert_options,
               MemoryPool memory_pool):
        cdef:
            shared_ptr[CSchema] c_schema
            CIOContext io_context

        io_context = CIOContext(maybe_unbox_memory_pool(memory_pool))

        with nogil:
            self.reader = <shared_ptr[CRecordBatchReader]> GetResultValue(
                CCSVStreamingReader.Make(
                    io_context, stream,
                    move(c_read_options), move(c_parse_options),
                    move(c_convert_options)))
            c_schema = self.reader.get().schema()

        self.schema = pyarrow_wrap_schema(c_schema)


def read_csv(input_file, read_options=None, parse_options=None,
             convert_options=None, MemoryPool memory_pool=None):
    """
    Read a Table from a stream of CSV data.

    Parameters
    ----------
    input_file : string, path or file-like object
        The location of CSV data. If a string or path, and if it ends
        with a recognized compressed file extension (e.g. ".gz" or ".bz2"),
        the data is automatically decompressed when reading.
    read_options : pyarrow.csv.ReadOptions, optional
        Options for the CSV reader (see pyarrow.csv.ReadOptions constructor
        for defaults)
    parse_options : pyarrow.csv.ParseOptions, optional
        Options for the CSV parser
        (see pyarrow.csv.ParseOptions constructor for defaults)
    convert_options : pyarrow.csv.ConvertOptions, optional
        Options for converting CSV data
        (see pyarrow.csv.ConvertOptions constructor for defaults)
    memory_pool : MemoryPool, optional
        Pool to allocate Table memory from

    Returns
    -------
    :class:`pyarrow.Table`
        Contents of the CSV file as an in-memory table.

    Examples
    --------

    Defining an example file from bytes object:

    >>> import io
    >>> s = (
    ...     "animals,n_legs,entry\\n"
    ...     "Flamingo,2,2022-03-01\\n"
    ...     "Horse,4,2022-03-02\\n"
    ...     "Brittle stars,5,2022-03-03\\n"
    ...     "Centipede,100,2022-03-04"
    ... )
    >>> print(s)
    animals,n_legs,entry
    Flamingo,2,2022-03-01
    Horse,4,2022-03-02
    Brittle stars,5,2022-03-03
    Centipede,100,2022-03-04
    >>> source = io.BytesIO(s.encode())

    Reading from the file

    >>> from pyarrow import csv
    >>> csv.read_csv(source)
    pyarrow.Table
    animals: string
    n_legs: int64
    entry: date32[day]
    ----
    animals: [["Flamingo","Horse","Brittle stars","Centipede"]]
    n_legs: [[2,4,5,100]]
    entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]]
    """
    cdef:
        shared_ptr[CInputStream] stream
        CCSVReadOptions c_read_options
        CCSVParseOptions c_parse_options
        CCSVConvertOptions c_convert_options
        CIOContext io_context
        SharedPtrNoGIL[CCSVReader] reader
        shared_ptr[CTable] table

    _get_reader(input_file, read_options, &stream)
    _get_read_options(read_options, &c_read_options)
    _get_parse_options(parse_options, &c_parse_options)
    _get_convert_options(convert_options, &c_convert_options)

    with SignalStopHandler() as stop_handler:
        io_context = CIOContext(
            maybe_unbox_memory_pool(memory_pool),
            (<StopToken> stop_handler.stop_token).stop_token)
        reader = GetResultValue(CCSVReader.Make(
            io_context, stream,
            c_read_options, c_parse_options, c_convert_options))

        with nogil:
            table = GetResultValue(reader.get().Read())

    return pyarrow_wrap_table(table)


def open_csv(input_file, read_options=None, parse_options=None,
             convert_options=None, MemoryPool memory_pool=None):
    """
    Open a streaming reader of CSV data.

    Reading using this function is always single-threaded.

    Parameters
    ----------
    input_file : string, path or file-like object
        The location of CSV data. If a string or path, and if it ends
        with a recognized compressed file extension (e.g. ".gz" or ".bz2"),
        the data is automatically decompressed when reading.
    read_options : pyarrow.csv.ReadOptions, optional
        Options for the CSV reader (see pyarrow.csv.ReadOptions constructor
        for defaults)
    parse_options : pyarrow.csv.ParseOptions, optional
        Options for the CSV parser
        (see pyarrow.csv.ParseOptions constructor for defaults)
    convert_options : pyarrow.csv.ConvertOptions, optional
        Options for converting CSV data
        (see pyarrow.csv.ConvertOptions constructor for defaults)
    memory_pool : MemoryPool, optional
        Pool to allocate Table memory from

    Returns
    -------
    :class:`pyarrow.csv.CSVStreamingReader`
    """
    cdef:
        shared_ptr[CInputStream] stream
        CCSVReadOptions c_read_options
        CCSVParseOptions c_parse_options
        CCSVConvertOptions c_convert_options
        CSVStreamingReader reader

    _get_reader(input_file, read_options, &stream)
    _get_read_options(read_options, &c_read_options)
    _get_parse_options(parse_options, &c_parse_options)
    _get_convert_options(convert_options, &c_convert_options)

    reader = CSVStreamingReader.__new__(CSVStreamingReader)
    reader._open(stream, move(c_read_options), move(c_parse_options),
                 move(c_convert_options), memory_pool)
    return reader

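# A minimal streaming-read sketch using open_csv (illustrative; "big.csv"
# and the per-batch handler `process` are hypothetical). The returned reader
# yields record batches one at a time, which bounds memory use:
#
#   >>> from pyarrow import csv
#   >>> reader = csv.open_csv("big.csv")        # doctest: +SKIP
#   >>> for batch in reader:
#   ...     process(batch)  # hypothetical per-batch handler
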
def _raise_invalid_function_option(value, description, *,
                                   exception_class=ValueError):
    raise exception_class(f"\"{value}\" is not a valid {description}")


cdef CQuotingStyle unwrap_quoting_style(quoting_style) except *:
    if quoting_style == "needed":
        return CQuotingStyle_Needed
    elif quoting_style == "all_valid":
        return CQuotingStyle_AllValid
    elif quoting_style == "none":
        return CQuotingStyle_None
    _raise_invalid_function_option(quoting_style, "quoting style")


cdef wrap_quoting_style(quoting_style):
    if quoting_style == CQuotingStyle_Needed:
        return 'needed'
    elif quoting_style == CQuotingStyle_AllValid:
        return 'all_valid'
    elif quoting_style == CQuotingStyle_None:
        return 'none'


cdef class WriteOptions(_Weakrefable):
    """
    Options for writing CSV files.

    Parameters
    ----------
    include_header : bool, optional (default True)
        Whether to write an initial header line with column names
    batch_size : int, optional (default 1024)
        How many rows to process together when converting and writing
        CSV data
    delimiter : 1-character string, optional (default ",")
        The character delimiting individual cells in the CSV data.
    quoting_style : str, optional (default "needed")
        Whether to quote values, and if so, which quoting style to use.
        The following values are accepted:

        - "needed" (default): only enclose values in quotes when needed.
        - "all_valid": enclose all valid values in quotes; nulls are not
          quoted.
        - "none": do not enclose any values in quotes; values containing
          special characters (such as quotes, cell delimiters or line
          endings) will raise an error.
    """

    # Avoid mistakenly creating attributes
    __slots__ = ()

    def __init__(self, *, include_header=None, batch_size=None,
                 delimiter=None, quoting_style=None):
        self.options.reset(new CCSVWriteOptions(CCSVWriteOptions.Defaults()))
        if include_header is not None:
            self.include_header = include_header
        if batch_size is not None:
            self.batch_size = batch_size
        if delimiter is not None:
            self.delimiter = delimiter
        if quoting_style is not None:
            self.quoting_style = quoting_style

    @property
    def include_header(self):
        """
        Whether to write an initial header line with column names.
        """
        return deref(self.options).include_header

    @include_header.setter
    def include_header(self, value):
        deref(self.options).include_header = value

    @property
    def batch_size(self):
        """
        How many rows to process together when converting and writing
        CSV data.
        """
        return deref(self.options).batch_size

    @batch_size.setter
    def batch_size(self, value):
        deref(self.options).batch_size = value

    @property
    def delimiter(self):
        """
        The character delimiting individual cells in the CSV data.
        """
        return chr(deref(self.options).delimiter)

    @delimiter.setter
    def delimiter(self, value):
        deref(self.options).delimiter = _single_char(value)

    @property
    def quoting_style(self):
        """
        Whether to quote values, and if so, which quoting style to use.
        The following values are accepted:

        - "needed" (default): only enclose values in quotes when needed.
        - "all_valid": enclose all valid values in quotes; nulls are not
          quoted.
        - "none": do not enclose any values in quotes; values containing
          special characters (such as quotes, cell delimiters or line
          endings) will raise an error.
        """
        return wrap_quoting_style(deref(self.options).quoting_style)

    @quoting_style.setter
    def quoting_style(self, value):
        deref(self.options).quoting_style = unwrap_quoting_style(value)

    @staticmethod
    cdef WriteOptions wrap(CCSVWriteOptions options):
        out = WriteOptions()
        out.options.reset(new CCSVWriteOptions(move(options)))
        return out

    def validate(self):
        check_status(self.options.get().Validate())

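# A quoting-style sketch (illustrative, not part of the original file):
# "needed" quotes only when required, "all_valid" quotes every non-null
# value, and "none" raises if a value would need quoting:
#
#   >>> from pyarrow import csv
#   >>> opts = csv.WriteOptions(delimiter=";", quoting_style="all_valid")
#   >>> opts.validate()
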
cdef _get_write_options(WriteOptions write_options, CCSVWriteOptions* out):
    if write_options is None:
        out[0] = CCSVWriteOptions.Defaults()
    else:
        out[0] = deref(write_options.options)


def write_csv(data, output_file, write_options=None,
              MemoryPool memory_pool=None):
    """
    Write record batch or table to a CSV file.

    Parameters
    ----------
    data : pyarrow.RecordBatch or pyarrow.Table
        The data to write.
    output_file : string, path, pyarrow.NativeFile, or file-like object
        The location where to write the CSV data.
    write_options : pyarrow.csv.WriteOptions
        Options to configure writing the CSV data.
    memory_pool : MemoryPool, optional
        Pool for temporary allocations.

    Examples
    --------

    >>> import pyarrow as pa
    >>> from pyarrow import csv

    >>> legs = pa.array([2, 4, 5, 100])
    >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"])
    >>> entry_date = pa.array(["01/03/2022", "02/03/2022",
    ...                        "03/03/2022", "04/03/2022"])
    >>> table = pa.table([animals, legs, entry_date],
    ...                  names=["animals", "n_legs", "entry"])

    >>> csv.write_csv(table, "animals.csv")

    >>> write_options = csv.WriteOptions(include_header=False)
    >>> csv.write_csv(table, "animals.csv", write_options=write_options)

    >>> write_options = csv.WriteOptions(delimiter=";")
    >>> csv.write_csv(table, "animals.csv", write_options=write_options)
    """
    cdef:
        shared_ptr[COutputStream] stream
        CCSVWriteOptions c_write_options
        CMemoryPool* c_memory_pool
        CRecordBatch* batch
        CTable* table
    _get_write_options(write_options, &c_write_options)

    get_writer(output_file, &stream)
    c_memory_pool = maybe_unbox_memory_pool(memory_pool)
    c_write_options.io_context = CIOContext(c_memory_pool)
    if isinstance(data, RecordBatch):
        batch = pyarrow_unwrap_batch(data).get()
        with nogil:
            check_status(WriteCSV(deref(batch), c_write_options, stream.get()))
    elif isinstance(data, Table):
        table = pyarrow_unwrap_table(data).get()
        with nogil:
            check_status(WriteCSV(deref(table), c_write_options, stream.get()))
    else:
        raise TypeError(f"Expected Table or RecordBatch, got '{type(data)}'")


cdef class CSVWriter(_CRecordBatchWriter):
    """
    Writer to create a CSV file.

    Parameters
    ----------
    sink : str, path, pyarrow.OutputStream or file-like object
        The location where to write the CSV data.
    schema : pyarrow.Schema
        The schema of the data to be written.
    write_options : pyarrow.csv.WriteOptions
        Options to configure writing the CSV data.
    memory_pool : MemoryPool, optional
        Pool for temporary allocations.
    """

    def __init__(self, sink, Schema schema, *,
                 WriteOptions write_options=None, MemoryPool memory_pool=None):
        cdef:
            shared_ptr[COutputStream] c_stream
            shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema)
            CCSVWriteOptions c_write_options
            CMemoryPool* c_memory_pool = maybe_unbox_memory_pool(memory_pool)
        _get_write_options(write_options, &c_write_options)
        c_write_options.io_context = CIOContext(c_memory_pool)
        get_writer(sink, &c_stream)
        with nogil:
            self.writer = GetResultValue(MakeCSVWriter(
                c_stream, c_schema, c_write_options))
venv/lib/python3.10/site-packages/pyarrow/_cuda.pyx
ADDED
@@ -0,0 +1,1058 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.


from pyarrow.lib cimport *
from pyarrow.includes.libarrow_cuda cimport *
from pyarrow.lib import allocate_buffer, as_buffer, ArrowTypeError
from pyarrow.util import get_contiguous_span
cimport cpython as cp


cdef class Context(_Weakrefable):
    """
    CUDA driver context.
    """

    def __init__(self, *args, **kwargs):
        """
        Create a CUDA driver context for a particular device.

        If a CUDA context handle is passed, it is wrapped, otherwise
        a default CUDA context for the given device is requested.

        Parameters
        ----------
        device_number : int (default 0)
            Specify the GPU device for which the CUDA driver context is
            requested.
        handle : int, optional
            Specify CUDA handle for a shared context that has been created
            by another library.
        """
        # This method is exposed because autodoc doesn't pick up __cinit__

    def __cinit__(self, int device_number=0, uintptr_t handle=0):
        cdef CCudaDeviceManager* manager
        manager = GetResultValue(CCudaDeviceManager.Instance())
        cdef int n = manager.num_devices()
        if device_number >= n or device_number < 0:
            self.context.reset()
            raise ValueError('device_number argument must be '
                             'non-negative and less than %s' % (n))
        if handle == 0:
            self.context = GetResultValue(manager.GetContext(device_number))
        else:
            self.context = GetResultValue(manager.GetSharedContext(
                device_number, <void*>handle))
        self.device_number = device_number

    @staticmethod
    def from_numba(context=None):
        """
        Create a Context instance from a Numba CUDA context.

        Parameters
        ----------
        context : {numba.cuda.cudadrv.driver.Context, None}
            A Numba CUDA context instance.
            If None, the current Numba context is used.

        Returns
        -------
        shared_context : pyarrow.cuda.Context
            Context instance.
        """
        if context is None:
            import numba.cuda
            context = numba.cuda.current_context()
        return Context(device_number=context.device.id,
                       handle=context.handle.value)

    def to_numba(self):
        """
        Convert Context to a Numba CUDA context.

        Returns
        -------
        context : numba.cuda.cudadrv.driver.Context
            Numba CUDA context instance.
        """
        import ctypes
        import numba.cuda
        device = numba.cuda.gpus[self.device_number]
        handle = ctypes.c_void_p(self.handle)
        context = numba.cuda.cudadrv.driver.Context(device, handle)

        class DummyPendingDeallocs(object):
            # Context is managed by pyarrow
            def add_item(self, *args, **kwargs):
                pass

        context.deallocations = DummyPendingDeallocs()
        return context

    @staticmethod
    def get_num_devices():
        """ Return the number of GPU devices.
        """
        cdef CCudaDeviceManager* manager
        manager = GetResultValue(CCudaDeviceManager.Instance())
        return manager.num_devices()

    @property
    def device_number(self):
        """ Return context device number.
        """
        return self.device_number

    @property
    def handle(self):
        """ Return pointer to context handle.
        """
        return <uintptr_t>self.context.get().handle()

    cdef void init(self, const shared_ptr[CCudaContext]& ctx):
        self.context = ctx

    def synchronize(self):
        """Blocks until the device has completed all preceding requested
        tasks.
        """
        check_status(self.context.get().Synchronize())

    @property
    def bytes_allocated(self):
        """Return the number of allocated bytes.
        """
        return self.context.get().bytes_allocated()

    def get_device_address(self, uintptr_t address):
        """Return the device address that is reachable from kernels running in
        the context

        Parameters
        ----------
        address : int
            Specify memory address value

        Returns
        -------
        device_address : int
            Device address accessible from device context

        Notes
        -----
        The device address is defined as a memory address accessible
        by the device. It is often a device memory address, but it can
        also be a host memory address, for instance when the memory is
        allocated as host memory (using cudaMallocHost or cudaHostAlloc)
        or as managed memory (using cudaMallocManaged), or when the host
        memory is page-locked (using cudaHostRegister).
        """
        return GetResultValue(self.context.get().GetDeviceAddress(address))

    def new_buffer(self, int64_t nbytes):
        """Return new device buffer.

        Parameters
        ----------
        nbytes : int
            Specify the number of bytes to be allocated.

        Returns
        -------
        buf : CudaBuffer
            Allocated buffer.
        """
        cdef:
            shared_ptr[CCudaBuffer] cudabuf
        with nogil:
            cudabuf = GetResultValue(self.context.get().Allocate(nbytes))
        return pyarrow_wrap_cudabuffer(cudabuf)

    def foreign_buffer(self, address, size, base=None):
        """
        Create device buffer from address and size as a view.

        The caller is responsible for allocating and freeing the
        memory. When `address==size==0` then a new zero-sized buffer
        is returned.

        Parameters
        ----------
        address : int
            Specify the starting address of the buffer. The address can
            refer to both device or host memory but it must be
            accessible from device after mapping it with
            `get_device_address` method.
        size : int
            Specify the size of device buffer in bytes.
        base : {None, object}
            Specify object that owns the referenced memory.

        Returns
        -------
        cbuf : CudaBuffer
            Device buffer as a view of device reachable memory.

        """
        if not address and size == 0:
            return self.new_buffer(0)
        cdef:
            uintptr_t c_addr = self.get_device_address(address)
            int64_t c_size = size
            shared_ptr[CCudaBuffer] cudabuf

        cudabuf = GetResultValue(self.context.get().View(
            <uint8_t*>c_addr, c_size))
        return pyarrow_wrap_cudabuffer_base(cudabuf, base)

    def open_ipc_buffer(self, ipc_handle):
        """ Open existing CUDA IPC memory handle

        Parameters
        ----------
        ipc_handle : IpcMemHandle
            Specify opaque pointer to CUipcMemHandle (driver API).

        Returns
        -------
        buf : CudaBuffer
            referencing device buffer
        """
        handle = pyarrow_unwrap_cudaipcmemhandle(ipc_handle)
        cdef shared_ptr[CCudaBuffer] cudabuf
        with nogil:
            cudabuf = GetResultValue(
                self.context.get().OpenIpcBuffer(handle.get()[0]))
        return pyarrow_wrap_cudabuffer(cudabuf)

    def buffer_from_data(self, object data, int64_t offset=0, int64_t size=-1):
        """Create device buffer and initialize with data.

        Parameters
        ----------
        data : {CudaBuffer, HostBuffer, Buffer, array-like}
            Specify data to be copied to device buffer.
        offset : int
            Specify the offset of input buffer for device data
            buffering. Default: 0.
        size : int
            Specify the size of device buffer in bytes. Default: all
            (starting from input offset)

        Returns
        -------
        cbuf : CudaBuffer
            Device buffer with copied data.
        """
        is_host_data = not pyarrow_is_cudabuffer(data)
        buf = as_buffer(data) if is_host_data else data

        bsize = buf.size
        if offset < 0 or (bsize and offset >= bsize):
            raise ValueError('offset argument is out-of-range')
        if size < 0:
            size = bsize - offset
        elif offset + size > bsize:
            raise ValueError(
                'requested larger slice than available in device buffer')

        if offset != 0 or size != bsize:
            buf = buf.slice(offset, size)

        result = self.new_buffer(size)
        if is_host_data:
            result.copy_from_host(buf, position=0, nbytes=size)
        else:
            result.copy_from_device(buf, position=0, nbytes=size)
        return result

    def buffer_from_object(self, obj):
        """Create device buffer view of arbitrary object that references
        device accessible memory.

        When the object contains a non-contiguous view of device
        accessible memory then the returned device buffer will contain
        a contiguous view of the memory, that is, including the
        intermediate data that is otherwise invisible to the input
        object.

        Parameters
        ----------
        obj : {object, Buffer, HostBuffer, CudaBuffer, ...}
            Specify an object that holds (device or host) address that
            can be accessed from device. This includes objects with
            types defined in pyarrow.cuda as well as arbitrary objects
            that implement the CUDA array interface as defined by numba.

        Returns
        -------
        cbuf : CudaBuffer
            Device buffer as a view of device accessible memory.

        """
        if isinstance(obj, HostBuffer):
            return self.foreign_buffer(obj.address, obj.size, base=obj)
        elif isinstance(obj, Buffer):
            return CudaBuffer.from_buffer(obj)
        elif isinstance(obj, CudaBuffer):
            return obj
        elif hasattr(obj, '__cuda_array_interface__'):
            desc = obj.__cuda_array_interface__
            addr = desc['data'][0]
            if addr is None:
                return self.new_buffer(0)
            import numpy as np
            start, end = get_contiguous_span(
                desc['shape'], desc.get('strides'),
                np.dtype(desc['typestr']).itemsize)
            return self.foreign_buffer(addr + start, end - start, base=obj)
        raise ArrowTypeError('cannot create device buffer view from'
                             ' `%s` object' % (type(obj)))

|
331 |
+
"""A serializable container for a CUDA IPC handle.
|
332 |
+
"""
|
333 |
+
cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h):
|
334 |
+
self.handle = h
|
335 |
+
|
336 |
+
@staticmethod
|
337 |
+
def from_buffer(Buffer opaque_handle):
|
338 |
+
"""Create IpcMemHandle from opaque buffer (e.g. from another
|
339 |
+
process)
|
340 |
+
|
341 |
+
Parameters
|
342 |
+
----------
|
343 |
+
opaque_handle :
|
344 |
+
a CUipcMemHandle as a const void*
|
345 |
+
|
346 |
+
Returns
|
347 |
+
-------
|
348 |
+
ipc_handle : IpcMemHandle
|
349 |
+
"""
|
350 |
+
c_buf = pyarrow_unwrap_buffer(opaque_handle)
|
351 |
+
cdef:
|
352 |
+
shared_ptr[CCudaIpcMemHandle] handle
|
353 |
+
|
354 |
+
handle = GetResultValue(
|
355 |
+
CCudaIpcMemHandle.FromBuffer(c_buf.get().data()))
|
356 |
+
return pyarrow_wrap_cudaipcmemhandle(handle)
|
357 |
+
|
358 |
+
def serialize(self, pool=None):
|
359 |
+
"""Write IpcMemHandle to a Buffer
|
360 |
+
|
361 |
+
Parameters
|
362 |
+
----------
|
363 |
+
pool : {MemoryPool, None}
|
364 |
+
Specify a pool to allocate memory from
|
365 |
+
|
366 |
+
Returns
|
367 |
+
-------
|
368 |
+
buf : Buffer
|
369 |
+
The serialized buffer.
|
370 |
+
"""
|
371 |
+
cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
|
372 |
+
cdef shared_ptr[CBuffer] buf
|
373 |
+
cdef CCudaIpcMemHandle* h = self.handle.get()
|
374 |
+
with nogil:
|
375 |
+
buf = GetResultValue(h.Serialize(pool_))
|
376 |
+
return pyarrow_wrap_buffer(buf)
|
377 |
+
|
378 |
+
|
379 |
+
cdef class CudaBuffer(Buffer):
|
380 |
+
"""An Arrow buffer with data located in a GPU device.
|
381 |
+
|
382 |
+
To create a CudaBuffer instance, use Context.device_buffer().
|
383 |
+
|
384 |
+
The memory allocated in a CudaBuffer is freed when the buffer object
|
385 |
+
is deleted.
|
386 |
+
"""
|
387 |
+
|
388 |
+
def __init__(self):
|
389 |
+
raise TypeError("Do not call CudaBuffer's constructor directly, use "
|
390 |
+
"`<pyarrow.Context instance>.device_buffer`"
|
391 |
+
" method instead.")
|
392 |
+
|
393 |
+
cdef void init_cuda(self,
|
394 |
+
const shared_ptr[CCudaBuffer]& buffer,
|
395 |
+
object base):
|
396 |
+
self.cuda_buffer = buffer
|
397 |
+
self.init(<shared_ptr[CBuffer]> buffer)
|
398 |
+
self.base = base
|
399 |
+
|
400 |
+
@staticmethod
|
401 |
+
def from_buffer(buf):
|
402 |
+
""" Convert back generic buffer into CudaBuffer
|
403 |
+
|
404 |
+
Parameters
|
405 |
+
----------
|
406 |
+
buf : Buffer
|
407 |
+
Specify buffer containing CudaBuffer
|
408 |
+
|
409 |
+
Returns
|
410 |
+
-------
|
411 |
+
dbuf : CudaBuffer
|
412 |
+
Resulting device buffer.
|
413 |
+
"""
|
414 |
+
c_buf = pyarrow_unwrap_buffer(buf)
|
415 |
+
cuda_buffer = GetResultValue(CCudaBuffer.FromBuffer(c_buf))
|
416 |
+
return pyarrow_wrap_cudabuffer(cuda_buffer)
|
417 |
+
|
418 |
+
@staticmethod
|
419 |
+
def from_numba(mem):
|
420 |
+
"""Create a CudaBuffer view from numba MemoryPointer instance.
|
421 |
+
|
422 |
+
Parameters
|
423 |
+
----------
|
424 |
+
mem : numba.cuda.cudadrv.driver.MemoryPointer
|
425 |
+
|
426 |
+
Returns
|
427 |
+
-------
|
428 |
+
cbuf : CudaBuffer
|
429 |
+
Device buffer as a view of numba MemoryPointer.
|
430 |
+
"""
|
431 |
+
ctx = Context.from_numba(mem.context)
|
432 |
+
if mem.device_pointer.value is None and mem.size==0:
|
433 |
+
return ctx.new_buffer(0)
|
434 |
+
return ctx.foreign_buffer(mem.device_pointer.value, mem.size, base=mem)
|
435 |
+
|
436 |
+
def to_numba(self):
|
437 |
+
"""Return numba memory pointer of CudaBuffer instance.
|
438 |
+
"""
|
439 |
+
import ctypes
|
440 |
+
from numba.cuda.cudadrv.driver import MemoryPointer
|
441 |
+
return MemoryPointer(self.context.to_numba(),
|
442 |
+
pointer=ctypes.c_void_p(self.address),
|
443 |
+
size=self.size)
|
444 |
+
|
445 |
+
cdef getitem(self, int64_t i):
|
446 |
+
return self.copy_to_host(position=i, nbytes=1)[0]
|
447 |
+
|
448 |
+
def copy_to_host(self, int64_t position=0, int64_t nbytes=-1,
|
449 |
+
Buffer buf=None,
|
450 |
+
MemoryPool memory_pool=None, c_bool resizable=False):
|
451 |
+
"""Copy memory from GPU device to CPU host
|
452 |
+
|
453 |
+
Caller is responsible for ensuring that all tasks affecting
|
454 |
+
the memory are finished. Use
|
455 |
+
|
456 |
+
`<CudaBuffer instance>.context.synchronize()`
|
457 |
+
|
458 |
+
when needed.
|
459 |
+
|
460 |
+
Parameters
|
461 |
+
----------
|
462 |
+
position : int
|
463 |
+
Specify the starting position of the source data in GPU
|
464 |
+
device buffer. Default: 0.
|
465 |
+
nbytes : int
|
466 |
+
Specify the number of bytes to copy. Default: -1 (all from
|
467 |
+
the position until host buffer is full).
|
468 |
+
buf : Buffer
|
469 |
+
Specify a pre-allocated output buffer in host. Default: None
|
470 |
+
(allocate new output buffer).
|
471 |
+
memory_pool : MemoryPool
|
472 |
+
resizable : bool
|
473 |
+
Specify extra arguments to allocate_buffer. Used only when
|
474 |
+
buf is None.
|
475 |
+
|
476 |
+
Returns
|
477 |
+
-------
|
478 |
+
buf : Buffer
|
479 |
+
Output buffer in host.
|
480 |
+
|
481 |
+
"""
|
482 |
+
if position < 0 or (self.size and position > self.size) \
|
483 |
+
or (self.size == 0 and position != 0):
|
484 |
+
raise ValueError('position argument is out-of-range')
|
485 |
+
cdef:
|
486 |
+
int64_t c_nbytes
|
487 |
+
if buf is None:
|
488 |
+
if nbytes < 0:
|
489 |
+
# copy all starting from position to new host buffer
|
490 |
+
c_nbytes = self.size - position
|
491 |
+
else:
|
492 |
+
if nbytes > self.size - position:
|
493 |
+
raise ValueError(
|
494 |
+
'requested more to copy than available from '
|
495 |
+
'device buffer')
|
496 |
+
# copy nbytes starting from position to new host buffer
|
497 |
+
c_nbytes = nbytes
|
498 |
+
buf = allocate_buffer(c_nbytes, memory_pool=memory_pool,
|
499 |
+
resizable=resizable)
|
500 |
+
else:
|
501 |
+
if nbytes < 0:
|
502 |
+
# copy all from position until given host buffer is full
|
503 |
+
c_nbytes = min(self.size - position, buf.size)
|
504 |
+
else:
|
505 |
+
if nbytes > buf.size:
|
506 |
+
raise ValueError(
|
507 |
+
'requested copy does not fit into host buffer')
|
508 |
+
# copy nbytes from position to given host buffer
|
509 |
+
c_nbytes = nbytes
|
510 |
+
|
511 |
+
cdef:
|
512 |
+
shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf)
|
513 |
+
int64_t c_position = position
|
514 |
+
with nogil:
|
515 |
+
check_status(self.cuda_buffer.get()
|
516 |
+
.CopyToHost(c_position, c_nbytes,
|
517 |
+
c_buf.get().mutable_data()))
|
518 |
+
return buf
|
519 |
+
|
520 |
+
def copy_from_host(self, data, int64_t position=0, int64_t nbytes=-1):
|
521 |
+
"""Copy data from host to device.
|
522 |
+
|
523 |
+
The device buffer must be pre-allocated.
|
524 |
+
|
525 |
+
Parameters
|
526 |
+
----------
|
527 |
+
data : {Buffer, array-like}
|
528 |
+
Specify data in host. It can be array-like that is valid
|
529 |
+
argument to py_buffer
|
530 |
+
position : int
|
531 |
+
Specify the starting position of the copy in device buffer.
|
532 |
+
Default: 0.
|
533 |
+
nbytes : int
|
534 |
+
Specify the number of bytes to copy. Default: -1 (all from
|
535 |
+
source until device buffer, starting from position, is full)
|
536 |
+
|
537 |
+
Returns
|
538 |
+
-------
|
539 |
+
nbytes : int
|
540 |
+
Number of bytes copied.
|
541 |
+
"""
|
542 |
+
if position < 0 or position > self.size:
|
543 |
+
raise ValueError('position argument is out-of-range')
|
544 |
+
cdef:
|
545 |
+
int64_t c_nbytes
|
546 |
+
buf = as_buffer(data)
|
547 |
+
|
548 |
+
if nbytes < 0:
|
549 |
+
# copy from host buffer to device buffer starting from
|
550 |
+
# position until device buffer is full
|
551 |
+
c_nbytes = min(self.size - position, buf.size)
|
552 |
+
else:
|
553 |
+
if nbytes > buf.size:
|
554 |
+
raise ValueError(
|
555 |
+
'requested more to copy than available from host buffer')
|
556 |
+
if nbytes > self.size - position:
|
557 |
+
raise ValueError(
|
558 |
+
'requested more to copy than available in device buffer')
|
559 |
+
# copy nbytes from host buffer to device buffer starting
|
560 |
+
# from position
|
561 |
+
c_nbytes = nbytes
|
562 |
+
|
563 |
+
cdef:
|
564 |
+
shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf)
|
565 |
+
int64_t c_position = position
|
566 |
+
with nogil:
|
567 |
+
check_status(self.cuda_buffer.get().
|
568 |
+
CopyFromHost(c_position, c_buf.get().data(),
|
569 |
+
c_nbytes))
|
570 |
+
return c_nbytes
|
571 |
+
|
572 |
+
def copy_from_device(self, buf, int64_t position=0, int64_t nbytes=-1):
|
573 |
+
"""Copy data from device to device.
|
574 |
+
|
575 |
+
Parameters
|
576 |
+
----------
|
577 |
+
buf : CudaBuffer
|
578 |
+
Specify source device buffer.
|
579 |
+
position : int
|
580 |
+
Specify the starting position of the copy in device buffer.
|
581 |
+
Default: 0.
|
582 |
+
nbytes : int
|
583 |
+
Specify the number of bytes to copy. Default: -1 (all from
|
584 |
+
source until device buffer, starting from position, is full)
|
585 |
+
|
586 |
+
Returns
|
587 |
+
-------
|
588 |
+
nbytes : int
|
589 |
+
Number of bytes copied.
|
590 |
+
|
591 |
+
"""
|
592 |
+
if position < 0 or position > self.size:
|
593 |
+
raise ValueError('position argument is out-of-range')
|
594 |
+
cdef:
|
595 |
+
int64_t c_nbytes
|
596 |
+
|
597 |
+
if nbytes < 0:
|
598 |
+
# copy from source device buffer to device buffer starting
|
599 |
+
# from position until device buffer is full
|
600 |
+
c_nbytes = min(self.size - position, buf.size)
|
601 |
+
else:
|
602 |
+
if nbytes > buf.size:
|
603 |
+
raise ValueError(
|
604 |
+
'requested more to copy than available from device buffer')
|
605 |
+
if nbytes > self.size - position:
|
606 |
+
raise ValueError(
|
607 |
+
'requested more to copy than available in device buffer')
|
608 |
+
# copy nbytes from source device buffer to device buffer
|
609 |
+
# starting from position
|
610 |
+
c_nbytes = nbytes
|
611 |
+
|
612 |
+
cdef:
|
613 |
+
shared_ptr[CCudaBuffer] c_buf = pyarrow_unwrap_cudabuffer(buf)
|
614 |
+
int64_t c_position = position
|
615 |
+
shared_ptr[CCudaContext] c_src_ctx = pyarrow_unwrap_cudacontext(
|
616 |
+
buf.context)
|
617 |
+
void* c_source_data = <void*>(c_buf.get().address())
|
618 |
+
|
619 |
+
if self.context.handle != buf.context.handle:
|
620 |
+
with nogil:
|
621 |
+
check_status(self.cuda_buffer.get().
|
622 |
+
CopyFromAnotherDevice(c_src_ctx, c_position,
|
623 |
+
c_source_data, c_nbytes))
|
624 |
+
else:
|
625 |
+
with nogil:
|
626 |
+
check_status(self.cuda_buffer.get().
|
627 |
+
CopyFromDevice(c_position, c_source_data,
|
628 |
+
c_nbytes))
|
629 |
+
return c_nbytes
|
630 |
+
|
631 |
+
def export_for_ipc(self):
|
632 |
+
"""
|
633 |
+
Expose this device buffer as IPC memory which can be used in other
|
634 |
+
processes.
|
635 |
+
|
636 |
+
After calling this function, this device memory will not be
|
637 |
+
freed when the CudaBuffer is destructed.
|
638 |
+
|
639 |
+
Returns
|
640 |
+
-------
|
641 |
+
ipc_handle : IpcMemHandle
|
642 |
+
The exported IPC handle
|
643 |
+
|
644 |
+
"""
|
645 |
+
cdef shared_ptr[CCudaIpcMemHandle] handle
|
646 |
+
with nogil:
|
647 |
+
handle = GetResultValue(self.cuda_buffer.get().ExportForIpc())
|
648 |
+
return pyarrow_wrap_cudaipcmemhandle(handle)
|
649 |
+
|
650 |
+
@property
|
651 |
+
def context(self):
|
652 |
+
"""Returns the CUDA driver context of this buffer.
|
653 |
+
"""
|
654 |
+
return pyarrow_wrap_cudacontext(self.cuda_buffer.get().context())
|
655 |
+
|
656 |
+
def slice(self, offset=0, length=None):
|
657 |
+
"""Return slice of device buffer
|
658 |
+
|
659 |
+
Parameters
|
660 |
+
----------
|
661 |
+
offset : int, default 0
|
662 |
+
Specify offset from the start of device buffer to slice
|
663 |
+
length : int, default None
|
664 |
+
Specify the length of slice (default is until end of device
|
665 |
+
buffer starting from offset). If the length is larger than
|
666 |
+
the data available, the returned slice will have a size of
|
667 |
+
the available data starting from the offset.
|
668 |
+
|
669 |
+
Returns
|
670 |
+
-------
|
671 |
+
sliced : CudaBuffer
|
672 |
+
Zero-copy slice of device buffer.
|
673 |
+
|
674 |
+
"""
|
675 |
+
if offset < 0 or (self.size and offset >= self.size):
|
676 |
+
raise ValueError('offset argument is out-of-range')
|
677 |
+
cdef int64_t offset_ = offset
|
678 |
+
cdef int64_t size
|
679 |
+
if length is None:
|
680 |
+
size = self.size - offset_
|
681 |
+
elif offset + length <= self.size:
|
682 |
+
size = length
|
683 |
+
else:
|
684 |
+
size = self.size - offset
|
685 |
+
parent = pyarrow_unwrap_cudabuffer(self)
|
686 |
+
return pyarrow_wrap_cudabuffer(make_shared[CCudaBuffer](parent,
|
687 |
+
offset_, size))
|
688 |
+
|
689 |
+
def to_pybytes(self):
|
690 |
+
"""Return device buffer content as Python bytes.
|
691 |
+
"""
|
692 |
+
return self.copy_to_host().to_pybytes()
|
693 |
+
|
694 |
+
def __getbuffer__(self, cp.Py_buffer* buffer, int flags):
|
695 |
+
# Device buffer contains data pointers on the device. Hence,
|
696 |
+
# cannot support buffer protocol PEP-3118 for CudaBuffer.
|
697 |
+
raise BufferError('buffer protocol for device buffer not supported')
|
698 |
+
|
699 |
+
|
700 |
+
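# An IPC hand-off sketch between processes (illustrative only; error handling
# omitted, and `ctx` / `dbuf` refer to the sketch after the Context class).
# The exporter serializes a handle, a second process opens the same device
# memory from it:
#
#   >>> import pyarrow as pa                            # doctest: +SKIP
#   >>> handle = dbuf.export_for_ipc()
#   >>> payload = handle.serialize().to_pybytes()       # send to importer
#   >>> # in the importing process:
#   >>> h = cuda.IpcMemHandle.from_buffer(pa.py_buffer(payload))
#   >>> shared = ctx.open_ipc_buffer(h)
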
cdef class HostBuffer(Buffer):
|
701 |
+
"""Device-accessible CPU memory created using cudaHostAlloc.
|
702 |
+
|
703 |
+
To create a HostBuffer instance, use
|
704 |
+
|
705 |
+
cuda.new_host_buffer(<nbytes>)
|
706 |
+
"""
|
707 |
+
|
708 |
+
def __init__(self):
|
709 |
+
raise TypeError("Do not call HostBuffer's constructor directly,"
|
710 |
+
" use `cuda.new_host_buffer` function instead.")
|
711 |
+
|
712 |
+
cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer):
|
713 |
+
self.host_buffer = buffer
|
714 |
+
self.init(<shared_ptr[CBuffer]> buffer)
|
715 |
+
|
716 |
+
@property
|
717 |
+
def size(self):
|
718 |
+
return self.host_buffer.get().size()
|
719 |
+
|
720 |
+
|
721 |
+
cdef class BufferReader(NativeFile):
    """File interface for zero-copy read from CUDA buffers.

    Note: Read methods return pointers to device memory. This means
    you must be careful using this interface with any Arrow code which
    may expect to be able to do anything other than pointer arithmetic
    on the returned buffers.
    """

    def __cinit__(self, CudaBuffer obj):
        self.buffer = obj
        self.reader = new CCudaBufferReader(self.buffer.buffer)
        self.set_random_access_file(
            shared_ptr[CRandomAccessFile](self.reader))
        self.is_readable = True

    def read_buffer(self, nbytes=None):
        """Return a slice view of the underlying device buffer.

        The slice will start at the current reader position and will
        have specified size in bytes.

        Parameters
        ----------
        nbytes : int, default None
            Specify the number of bytes to read. Default: None (read all
            remaining bytes).

        Returns
        -------
        cbuf : CudaBuffer
            New device buffer.
        """
        cdef:
            int64_t c_nbytes
            shared_ptr[CCudaBuffer] output

        if nbytes is None:
            c_nbytes = self.size() - self.tell()
        else:
            c_nbytes = nbytes

        with nogil:
            output = static_pointer_cast[CCudaBuffer, CBuffer](
                GetResultValue(self.reader.Read(c_nbytes)))

        return pyarrow_wrap_cudabuffer(output)


cdef class BufferWriter(NativeFile):
    """File interface for writing to CUDA buffers.

    By default writes are unbuffered. Use set_buffer_size to enable
    buffering.
    """

    def __cinit__(self, CudaBuffer buffer):
        self.buffer = buffer
        self.writer = new CCudaBufferWriter(self.buffer.cuda_buffer)
        self.set_output_stream(shared_ptr[COutputStream](self.writer))
        self.is_writable = True

    def writeat(self, int64_t position, object data):
        """Write data to buffer starting from position.

        Parameters
        ----------
        position : int
            Specify device buffer position where the data will be
            written.
        data : array-like
            Specify data, the data instance must implement buffer
            protocol.
        """
        cdef:
            Buffer buf = as_buffer(data)
            const uint8_t* c_data = buf.buffer.get().data()
            int64_t c_size = buf.buffer.get().size()

        with nogil:
            check_status(self.writer.WriteAt(position, c_data, c_size))

    def flush(self):
        """ Flush the buffer stream """
        with nogil:
            check_status(self.writer.Flush())

    def seek(self, int64_t position, int whence=0):
        # TODO: remove this method after NativeFile.seek supports
        # writable files.
        cdef int64_t offset

        with nogil:
            if whence == 0:
                offset = position
            elif whence == 1:
                offset = GetResultValue(self.writer.Tell())
                offset = offset + position
            else:
                with gil:
                    raise ValueError("Invalid value of whence: {0}"
                                     .format(whence))
            check_status(self.writer.Seek(offset))
        return self.tell()

    @property
    def buffer_size(self):
        """Returns size of host (CPU) buffer, 0 for unbuffered
        """
        return self.writer.buffer_size()

    @buffer_size.setter
    def buffer_size(self, int64_t buffer_size):
        """Set CPU buffer size to limit calls to cudaMemcpy

        Parameters
        ----------
        buffer_size : int
            Specify the size of CPU buffer to allocate in bytes.
        """
        with nogil:
            check_status(self.writer.SetBufferSize(buffer_size))

    @property
    def num_bytes_buffered(self):
        """Returns number of bytes buffered on host
        """
        return self.writer.num_bytes_buffered()


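# A usage sketch for the reader/writer pair above, assuming a CUDA
# device 0 is available:
#
#   >>> from pyarrow import cuda
#   >>> ctx = cuda.Context(0)
#   >>> dbuf = ctx.new_buffer(64)           # raw device allocation
#   >>> writer = cuda.BufferWriter(dbuf)
#   >>> writer.writeat(0, b'abcd')          # host bytes copied to device
#   >>> reader = cuda.BufferReader(dbuf)
#   >>> reader.read_buffer(4).copy_to_host().to_pybytes()
#   b'abcd'

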
# Functions


def new_host_buffer(const int64_t size, int device=0):
    """Return buffer with CUDA-accessible memory on CPU host

    Parameters
    ----------
    size : int
        Specify the number of bytes to be allocated.
    device : int
        Specify GPU device number.

    Returns
    -------
    dbuf : HostBuffer
        Allocated host buffer
    """
    cdef shared_ptr[CCudaHostBuffer] buffer
    with nogil:
        buffer = GetResultValue(AllocateCudaHostBuffer(device, size))
    return pyarrow_wrap_cudahostbuffer(buffer)


def serialize_record_batch(object batch, object ctx):
    """ Write record batch message to GPU device memory

    Parameters
    ----------
    batch : RecordBatch
        Record batch to write
    ctx : Context
        CUDA Context to allocate device memory from

    Returns
    -------
    dbuf : CudaBuffer
        device buffer which contains the record batch message
    """
    cdef shared_ptr[CCudaBuffer] buffer
    cdef CRecordBatch* batch_ = pyarrow_unwrap_batch(batch).get()
    cdef CCudaContext* ctx_ = pyarrow_unwrap_cudacontext(ctx).get()
    with nogil:
        buffer = GetResultValue(CudaSerializeRecordBatch(batch_[0], ctx_))
    return pyarrow_wrap_cudabuffer(buffer)


def read_message(object source, pool=None):
    """ Read Arrow IPC message located on GPU device

    Parameters
    ----------
    source : {CudaBuffer, cuda.BufferReader}
        Device buffer or reader of device buffer.
    pool : MemoryPool (optional)
        Pool to allocate CPU memory for the metadata

    Returns
    -------
    message : Message
        The deserialized message, body still on device
    """
    cdef:
        Message result = Message.__new__(Message)
        BufferReader reader
    cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
    if not isinstance(source, BufferReader):
        reader = BufferReader(source)
    else:
        # Without this branch, `reader` would be unbound when the caller
        # already passes a BufferReader, contradicting the signature above.
        reader = source
    with nogil:
        result.message = move(
            GetResultValue(ReadMessage(reader.reader, pool_)))
    return result


def read_record_batch(object buffer, object schema, *,
                      DictionaryMemo dictionary_memo=None, pool=None):
    """Construct RecordBatch referencing IPC message located on CUDA device.

    While the metadata is copied to host memory for deserialization,
    the record batch data remains on the device.

    Parameters
    ----------
    buffer : CudaBuffer
        Device buffer containing the complete IPC message
    schema : Schema
        The schema for the record batch
    dictionary_memo : DictionaryMemo, optional
        If message contains dictionaries, must pass a populated
        DictionaryMemo
    pool : MemoryPool (optional)
        Pool to allocate metadata from

    Returns
    -------
    batch : RecordBatch
        Reconstructed record batch, with device pointers
    """
    cdef:
        shared_ptr[CSchema] schema_ = pyarrow_unwrap_schema(schema)
        shared_ptr[CCudaBuffer] buffer_ = pyarrow_unwrap_cudabuffer(buffer)
        CDictionaryMemo temp_memo
        CDictionaryMemo* arg_dict_memo
        CMemoryPool* pool_ = maybe_unbox_memory_pool(pool)
        shared_ptr[CRecordBatch] batch

    if dictionary_memo is not None:
        arg_dict_memo = dictionary_memo.memo
    else:
        arg_dict_memo = &temp_memo

    with nogil:
        batch = GetResultValue(CudaReadRecordBatch(
            schema_, arg_dict_memo, buffer_, pool_))
    return pyarrow_wrap_batch(batch)


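# A round-trip sketch for the IPC helpers above, assuming a CUDA device 0:
#
#   >>> import pyarrow as pa
#   >>> from pyarrow import cuda
#   >>> batch = pa.record_batch([pa.array([1, 2, 3])], names=['x'])
#   >>> ctx = cuda.Context(0)
#   >>> dbuf = cuda.serialize_record_batch(batch, ctx)  # message on device
#   >>> cuda.read_record_batch(dbuf, batch.schema).num_rows
#   3

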
# Public API


cdef public api bint pyarrow_is_buffer(object buffer):
    return isinstance(buffer, Buffer)

# cudabuffer

cdef public api bint pyarrow_is_cudabuffer(object buffer):
    return isinstance(buffer, CudaBuffer)


cdef public api object \
        pyarrow_wrap_cudabuffer_base(const shared_ptr[CCudaBuffer]& buf, base):
    cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer)
    result.init_cuda(buf, base)
    return result


cdef public api object \
        pyarrow_wrap_cudabuffer(const shared_ptr[CCudaBuffer]& buf):
    cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer)
    result.init_cuda(buf, None)
    return result


cdef public api shared_ptr[CCudaBuffer] pyarrow_unwrap_cudabuffer(object obj):
    if pyarrow_is_cudabuffer(obj):
        return (<CudaBuffer>obj).cuda_buffer
    raise TypeError('expected CudaBuffer instance, got %s'
                    % (type(obj).__name__))

# cudahostbuffer

cdef public api bint pyarrow_is_cudahostbuffer(object buffer):
    return isinstance(buffer, HostBuffer)


cdef public api object \
        pyarrow_wrap_cudahostbuffer(const shared_ptr[CCudaHostBuffer]& buf):
    cdef HostBuffer result = HostBuffer.__new__(HostBuffer)
    result.init_host(buf)
    return result


cdef public api shared_ptr[CCudaHostBuffer] \
        pyarrow_unwrap_cudahostbuffer(object obj):
    if pyarrow_is_cudahostbuffer(obj):
        return (<HostBuffer>obj).host_buffer
    raise TypeError('expected HostBuffer instance, got %s'
                    % (type(obj).__name__))

# cudacontext

cdef public api bint pyarrow_is_cudacontext(object ctx):
    return isinstance(ctx, Context)


cdef public api object \
        pyarrow_wrap_cudacontext(const shared_ptr[CCudaContext]& ctx):
    cdef Context result = Context.__new__(Context)
    result.init(ctx)
    return result


cdef public api shared_ptr[CCudaContext] \
        pyarrow_unwrap_cudacontext(object obj):
    if pyarrow_is_cudacontext(obj):
        return (<Context>obj).context
    raise TypeError('expected Context instance, got %s'
                    % (type(obj).__name__))

# cudaipcmemhandle

cdef public api bint pyarrow_is_cudaipcmemhandle(object handle):
    return isinstance(handle, IpcMemHandle)


cdef public api object \
        pyarrow_wrap_cudaipcmemhandle(shared_ptr[CCudaIpcMemHandle]& h):
    cdef IpcMemHandle result = IpcMemHandle.__new__(IpcMemHandle)
    result.init(h)
    return result


cdef public api shared_ptr[CCudaIpcMemHandle] \
        pyarrow_unwrap_cudaipcmemhandle(object obj):
    if pyarrow_is_cudaipcmemhandle(obj):
        return (<IpcMemHandle>obj).handle
    raise TypeError('expected IpcMemHandle instance, got %s'
                    % (type(obj).__name__))

venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (116 kB).

venv/lib/python3.10/site-packages/pyarrow/_flight.pyx
ADDED
The diff for this file is too large to render.

venv/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (496 kB).

venv/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (84.2 kB).

venv/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (226 kB).

venv/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (185 kB).

venv/lib/python3.10/site-packages/pyarrow/gandiva.pyx
ADDED
@@ -0,0 +1,760 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# cython: profile=False
# distutils: language = c++
# cython: language_level = 3

from libcpp.memory cimport shared_ptr
from libcpp.string cimport string as c_string
from libcpp.vector cimport vector as c_vector
from libcpp.unordered_set cimport unordered_set as c_unordered_set
from libc.stdint cimport int64_t, int32_t

from pyarrow.includes.libarrow cimport *
from pyarrow.lib cimport (DataType, Field, MemoryPool, RecordBatch,
                          Schema, check_status, pyarrow_wrap_array,
                          pyarrow_wrap_data_type, ensure_type, _Weakrefable,
                          pyarrow_wrap_field)

from pyarrow.includes.libgandiva cimport (
    CCondition, CGandivaExpression,
    CNode, CProjector, CFilter,
    CSelectionVector,
    _ensure_selection_mode,
    CConfiguration,
    CConfigurationBuilder,
    TreeExprBuilder_MakeExpression,
    TreeExprBuilder_MakeFunction,
    TreeExprBuilder_MakeBoolLiteral,
    TreeExprBuilder_MakeUInt8Literal,
    TreeExprBuilder_MakeUInt16Literal,
    TreeExprBuilder_MakeUInt32Literal,
    TreeExprBuilder_MakeUInt64Literal,
    TreeExprBuilder_MakeInt8Literal,
    TreeExprBuilder_MakeInt16Literal,
    TreeExprBuilder_MakeInt32Literal,
    TreeExprBuilder_MakeInt64Literal,
    TreeExprBuilder_MakeFloatLiteral,
    TreeExprBuilder_MakeDoubleLiteral,
    TreeExprBuilder_MakeStringLiteral,
    TreeExprBuilder_MakeBinaryLiteral,
    TreeExprBuilder_MakeField,
    TreeExprBuilder_MakeIf,
    TreeExprBuilder_MakeAnd,
    TreeExprBuilder_MakeOr,
    TreeExprBuilder_MakeCondition,
    TreeExprBuilder_MakeInExpressionInt32,
    TreeExprBuilder_MakeInExpressionInt64,
    TreeExprBuilder_MakeInExpressionTime32,
    TreeExprBuilder_MakeInExpressionTime64,
    TreeExprBuilder_MakeInExpressionDate32,
    TreeExprBuilder_MakeInExpressionDate64,
    TreeExprBuilder_MakeInExpressionTimeStamp,
    TreeExprBuilder_MakeInExpressionString,
    SelectionVector_MakeInt16,
    SelectionVector_MakeInt32,
    SelectionVector_MakeInt64,
    Projector_Make,
    Filter_Make,
    CFunctionSignature,
    GetRegisteredFunctionSignatures)


cdef class Node(_Weakrefable):
    cdef:
        shared_ptr[CNode] node

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, use the "
                        "TreeExprBuilder API instead"
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CNode] node):
        cdef Node self = Node.__new__(Node)
        self.node = node
        return self

    def __str__(self):
        return self.node.get().ToString().decode()

    def __repr__(self):
        type_format = object.__repr__(self)
        return '{0}\n{1}'.format(type_format, str(self))

    def return_type(self):
        return pyarrow_wrap_data_type(self.node.get().return_type())


cdef class Expression(_Weakrefable):
    cdef:
        shared_ptr[CGandivaExpression] expression

    cdef void init(self, shared_ptr[CGandivaExpression] expression):
        self.expression = expression

    def __str__(self):
        return self.expression.get().ToString().decode()

    def __repr__(self):
        type_format = object.__repr__(self)
        return '{0}\n{1}'.format(type_format, str(self))

    def root(self):
        return Node.create(self.expression.get().root())

    def result(self):
        return pyarrow_wrap_field(self.expression.get().result())


cdef class Condition(_Weakrefable):
    cdef:
        shared_ptr[CCondition] condition

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, use the "
                        "TreeExprBuilder API instead"
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CCondition] condition):
        cdef Condition self = Condition.__new__(Condition)
        self.condition = condition
        return self

    def __str__(self):
        return self.condition.get().ToString().decode()

    def __repr__(self):
        type_format = object.__repr__(self)
        return '{0}\n{1}'.format(type_format, str(self))

    def root(self):
        return Node.create(self.condition.get().root())

    def result(self):
        return pyarrow_wrap_field(self.condition.get().result())


cdef class SelectionVector(_Weakrefable):
    cdef:
        shared_ptr[CSelectionVector] selection_vector

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly."
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CSelectionVector] selection_vector):
        cdef SelectionVector self = SelectionVector.__new__(SelectionVector)
        self.selection_vector = selection_vector
        return self

    def to_array(self):
        cdef shared_ptr[CArray] result = self.selection_vector.get().ToArray()
        return pyarrow_wrap_array(result)


cdef class Projector(_Weakrefable):
    cdef:
        shared_ptr[CProjector] projector
        MemoryPool pool

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, use "
                        "make_projector instead"
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CProjector] projector, MemoryPool pool):
        cdef Projector self = Projector.__new__(Projector)
        self.projector = projector
        self.pool = pool
        return self

    @property
    def llvm_ir(self):
        return self.projector.get().DumpIR().decode()

    def evaluate(self, RecordBatch batch, SelectionVector selection=None):
        """
        Evaluate the specified record batch and return the arrays at the
        filtered positions.

        Parameters
        ----------
        batch : pyarrow.RecordBatch
        selection : pyarrow.gandiva.SelectionVector

        Returns
        -------
        list[pyarrow.Array]
        """
        cdef vector[shared_ptr[CArray]] results
        if selection is None:
            check_status(self.projector.get().Evaluate(
                batch.sp_batch.get()[0], self.pool.pool, &results))
        else:
            check_status(
                self.projector.get().Evaluate(
                    batch.sp_batch.get()[0], selection.selection_vector.get(),
                    self.pool.pool, &results))
        cdef shared_ptr[CArray] result
        arrays = []
        for result in results:
            arrays.append(pyarrow_wrap_array(result))
        return arrays


cdef class Filter(_Weakrefable):
    cdef:
        shared_ptr[CFilter] filter

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, use "
                        "make_filter instead"
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CFilter] filter):
        cdef Filter self = Filter.__new__(Filter)
        self.filter = filter
        return self

    @property
    def llvm_ir(self):
        return self.filter.get().DumpIR().decode()

    def evaluate(self, RecordBatch batch, MemoryPool pool, dtype='int32'):
        """
        Evaluate the specified record batch and return a selection vector.

        Parameters
        ----------
        batch : pyarrow.RecordBatch
        pool : MemoryPool
        dtype : DataType or str, default int32

        Returns
        -------
        pyarrow.gandiva.SelectionVector
        """
        cdef:
            DataType type = ensure_type(dtype)
            shared_ptr[CSelectionVector] selection

        if type.id == _Type_INT16:
            check_status(SelectionVector_MakeInt16(
                batch.num_rows, pool.pool, &selection))
        elif type.id == _Type_INT32:
            check_status(SelectionVector_MakeInt32(
                batch.num_rows, pool.pool, &selection))
        elif type.id == _Type_INT64:
            check_status(SelectionVector_MakeInt64(
                batch.num_rows, pool.pool, &selection))
        else:
            raise ValueError("'dtype' of the selection vector should be "
                             "one of 'int16', 'int32' and 'int64'.")

        check_status(self.filter.get().Evaluate(
            batch.sp_batch.get()[0], selection))
        return SelectionVector.create(selection)


cdef class TreeExprBuilder(_Weakrefable):

    def make_literal(self, value, dtype):
        """
        Create a node on a literal.

        Parameters
        ----------
        value : a literal value
        dtype : DataType

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef:
            DataType type = ensure_type(dtype)
            shared_ptr[CNode] r

        if type.id == _Type_BOOL:
            r = TreeExprBuilder_MakeBoolLiteral(value)
        elif type.id == _Type_UINT8:
            r = TreeExprBuilder_MakeUInt8Literal(value)
        elif type.id == _Type_UINT16:
            r = TreeExprBuilder_MakeUInt16Literal(value)
        elif type.id == _Type_UINT32:
            r = TreeExprBuilder_MakeUInt32Literal(value)
        elif type.id == _Type_UINT64:
            r = TreeExprBuilder_MakeUInt64Literal(value)
        elif type.id == _Type_INT8:
            r = TreeExprBuilder_MakeInt8Literal(value)
        elif type.id == _Type_INT16:
            r = TreeExprBuilder_MakeInt16Literal(value)
        elif type.id == _Type_INT32:
            r = TreeExprBuilder_MakeInt32Literal(value)
        elif type.id == _Type_INT64:
            r = TreeExprBuilder_MakeInt64Literal(value)
        elif type.id == _Type_FLOAT:
            r = TreeExprBuilder_MakeFloatLiteral(value)
        elif type.id == _Type_DOUBLE:
            r = TreeExprBuilder_MakeDoubleLiteral(value)
        elif type.id == _Type_STRING:
            r = TreeExprBuilder_MakeStringLiteral(value.encode('UTF-8'))
        elif type.id == _Type_BINARY:
            r = TreeExprBuilder_MakeBinaryLiteral(value)
        else:
            raise TypeError("Didn't recognize dtype " + str(dtype))

        return Node.create(r)

    def make_expression(self, Node root_node not None,
                        Field return_field not None):
        """
        Create an expression with the specified root_node,
        and the result written to return_field.

        Parameters
        ----------
        root_node : pyarrow.gandiva.Node
        return_field : pyarrow.Field

        Returns
        -------
        pyarrow.gandiva.Expression
        """
        cdef shared_ptr[CGandivaExpression] r = TreeExprBuilder_MakeExpression(
            root_node.node, return_field.sp_field)
        cdef Expression expression = Expression()
        expression.init(r)
        return expression

    def make_function(self, name, children, DataType return_type):
        """
        Create a node with a function.

        Parameters
        ----------
        name : str
        children : pyarrow.gandiva.NodeVector
        return_type : DataType

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef c_vector[shared_ptr[CNode]] c_children
        cdef Node child
        for child in children:
            if child is None:
                raise TypeError("Child nodes must not be None")
            c_children.push_back(child.node)
        cdef shared_ptr[CNode] r = TreeExprBuilder_MakeFunction(
            name.encode(), c_children, return_type.sp_type)
        return Node.create(r)

    def make_field(self, Field field not None):
        """
        Create a node with an Arrow field.

        Parameters
        ----------
        field : pyarrow.Field

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef shared_ptr[CNode] r = TreeExprBuilder_MakeField(field.sp_field)
        return Node.create(r)

    def make_if(self, Node condition not None, Node this_node not None,
                Node else_node not None, DataType return_type not None):
        """
        Create a node with an if-else expression.

        Parameters
        ----------
        condition : pyarrow.gandiva.Node
        this_node : pyarrow.gandiva.Node
        else_node : pyarrow.gandiva.Node
        return_type : DataType

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef shared_ptr[CNode] r = TreeExprBuilder_MakeIf(
            condition.node, this_node.node, else_node.node,
            return_type.sp_type)
        return Node.create(r)

    def make_and(self, children):
        """
        Create a Node with a boolean AND expression.

        Parameters
        ----------
        children : list[pyarrow.gandiva.Node]

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef c_vector[shared_ptr[CNode]] c_children
        cdef Node child
        for child in children:
            if child is None:
                raise TypeError("Child nodes must not be None")
            c_children.push_back(child.node)
        cdef shared_ptr[CNode] r = TreeExprBuilder_MakeAnd(c_children)
        return Node.create(r)

    def make_or(self, children):
        """
        Create a Node with a boolean OR expression.

        Parameters
        ----------
        children : list[pyarrow.gandiva.Node]

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef c_vector[shared_ptr[CNode]] c_children
        cdef Node child
        for child in children:
            if child is None:
                raise TypeError("Child nodes must not be None")
            c_children.push_back(child.node)
        cdef shared_ptr[CNode] r = TreeExprBuilder_MakeOr(c_children)
        return Node.create(r)

    def _make_in_expression_int32(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int32_t] c_values
        cdef int32_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionInt32(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_int64(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int64_t] c_values
        cdef int64_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionInt64(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_time32(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int32_t] c_values
        cdef int32_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionTime32(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_time64(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int64_t] c_values
        cdef int64_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionTime64(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_date32(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int32_t] c_values
        cdef int32_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionDate32(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_date64(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int64_t] c_values
        cdef int64_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionDate64(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_timestamp(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[int64_t] c_values
        cdef int64_t v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionTimeStamp(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_binary(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[c_string] c_values
        cdef c_string v
        for v in values:
            c_values.insert(v)
        r = TreeExprBuilder_MakeInExpressionString(node.node, c_values)
        return Node.create(r)

    def _make_in_expression_string(self, Node node not None, values):
        cdef shared_ptr[CNode] r
        cdef c_unordered_set[c_string] c_values
        cdef c_string _v
        for v in values:
            _v = v.encode('UTF-8')
            c_values.insert(_v)
        r = TreeExprBuilder_MakeInExpressionString(node.node, c_values)
        return Node.create(r)

    def make_in_expression(self, Node node not None, values, dtype):
        """
        Create a Node with an IN expression.

        Parameters
        ----------
        node : pyarrow.gandiva.Node
        values : iterable
        dtype : DataType

        Returns
        -------
        pyarrow.gandiva.Node
        """
        cdef DataType type = ensure_type(dtype)

        if type.id == _Type_INT32:
            return self._make_in_expression_int32(node, values)
        elif type.id == _Type_INT64:
            return self._make_in_expression_int64(node, values)
        elif type.id == _Type_TIME32:
            return self._make_in_expression_time32(node, values)
        elif type.id == _Type_TIME64:
            return self._make_in_expression_time64(node, values)
        elif type.id == _Type_TIMESTAMP:
            return self._make_in_expression_timestamp(node, values)
        elif type.id == _Type_DATE32:
            return self._make_in_expression_date32(node, values)
        elif type.id == _Type_DATE64:
            return self._make_in_expression_date64(node, values)
        elif type.id == _Type_BINARY:
            return self._make_in_expression_binary(node, values)
        elif type.id == _Type_STRING:
            return self._make_in_expression_string(node, values)
        else:
            raise TypeError("Data type " + str(dtype) + " not supported.")

    def make_condition(self, Node condition not None):
        """
        Create a condition with the specified node.

        Parameters
        ----------
        condition : pyarrow.gandiva.Node

        Returns
        -------
        pyarrow.gandiva.Condition
        """
        cdef shared_ptr[CCondition] r = TreeExprBuilder_MakeCondition(
            condition.node)
        return Condition.create(r)


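# A sketch of building an expression tree with TreeExprBuilder, assuming a
# pyarrow build with Gandiva enabled ('less_than' is assumed to be among
# the registered functions):
#
#   >>> import pyarrow as pa
#   >>> import pyarrow.gandiva as gandiva
#   >>> builder = gandiva.TreeExprBuilder()
#   >>> field = builder.make_field(pa.field('a', pa.int32()))
#   >>> ten = builder.make_literal(10, pa.int32())
#   >>> node = builder.make_function('less_than', [field, ten], pa.bool_())
#   >>> condition = builder.make_condition(node)

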
cdef class Configuration(_Weakrefable):
    cdef:
        shared_ptr[CConfiguration] configuration

    def __cinit__(self, bint optimize=True, bint dump_ir=False):
        """
        Initialize the configuration with specified options.

        Parameters
        ----------
        optimize : bool, default True
            Whether to enable optimizations.
        dump_ir : bool, default False
            Whether to dump LLVM IR.
        """
        self.configuration = CConfigurationBuilder().build()
        self.configuration.get().set_optimize(optimize)
        self.configuration.get().set_dump_ir(dump_ir)

    @staticmethod
    cdef create(shared_ptr[CConfiguration] configuration):
        """
        Create a Configuration instance from an existing
        CConfiguration pointer.

        Parameters
        ----------
        configuration : shared_ptr[CConfiguration]
            Existing CConfiguration pointer.

        Returns
        -------
        Configuration instance
        """
        cdef Configuration self = Configuration.__new__(Configuration)
        self.configuration = configuration
        return self


cpdef make_projector(Schema schema, children, MemoryPool pool,
                     str selection_mode="NONE",
                     Configuration configuration=None):
    """
    Construct a projection using expressions.

    A projector is built for a specific schema and vector of expressions.
    Once the projector is built, it can be used to evaluate many row batches.

    Parameters
    ----------
    schema : pyarrow.Schema
        Schema for the record batches, and the expressions.
    children : list[pyarrow.gandiva.Expression]
        List of projectable expression objects.
    pool : pyarrow.MemoryPool
        Memory pool used to allocate output arrays.
    selection_mode : str, default "NONE"
        Possible values are NONE, UINT16, UINT32, UINT64.
    configuration : pyarrow.gandiva.Configuration, default None
        Configuration for the projector.

    Returns
    -------
    Projector instance
    """
    cdef:
        Expression child
        c_vector[shared_ptr[CGandivaExpression]] c_children
        shared_ptr[CProjector] result

    if configuration is None:
        configuration = Configuration()

    for child in children:
        if child is None:
            raise TypeError("Expressions must not be None")
        c_children.push_back(child.expression)

    check_status(
        Projector_Make(schema.sp_schema, c_children,
                       _ensure_selection_mode(selection_mode),
                       configuration.configuration,
                       &result))
    return Projector.create(result, pool)


cpdef make_filter(Schema schema, Condition condition,
                  Configuration configuration=None):
    """
    Construct a filter based on a condition.

    A filter is built for a specific schema and condition. Once the filter is
    built, it can be used to evaluate many row batches.

    Parameters
    ----------
    schema : pyarrow.Schema
        Schema for the record batches, and the condition.
    condition : pyarrow.gandiva.Condition
        Filter condition.
    configuration : pyarrow.gandiva.Configuration, default None
        Configuration for the filter.

    Returns
    -------
    Filter instance
    """
    cdef shared_ptr[CFilter] result
    if condition is None:
        raise TypeError("Condition must not be None")

    if configuration is None:
        configuration = Configuration()

    check_status(
        Filter_Make(schema.sp_schema, condition.condition,
                    configuration.configuration, &result))
    return Filter.create(result)


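# An end-to-end sketch for make_projector, assuming Gandiva is available;
# this mirrors typical test usage of the API defined above:
#
#   >>> import pyarrow as pa
#   >>> import pyarrow.gandiva as gandiva
#   >>> schema = pa.schema([pa.field('a', pa.float64())])
#   >>> builder = gandiva.TreeExprBuilder()
#   >>> node_a = builder.make_field(schema.field('a'))
#   >>> two = builder.make_literal(2.0, pa.float64())
#   >>> expr = builder.make_expression(
#   ...     builder.make_function('multiply', [node_a, two], pa.float64()),
#   ...     pa.field('a_times_2', pa.float64()))
#   >>> projector = gandiva.make_projector(
#   ...     schema, [expr], pa.default_memory_pool())
#   >>> batch = pa.record_batch([pa.array([1.0, 2.0])], schema=schema)
#   >>> projector.evaluate(batch)[0].to_pylist()
#   [2.0, 4.0]

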
cdef class FunctionSignature(_Weakrefable):
    """
    Signature of a Gandiva function including name, parameter types
    and return type.
    """

    cdef:
        shared_ptr[CFunctionSignature] signature

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly."
                        .format(self.__class__.__name__))

    @staticmethod
    cdef create(shared_ptr[CFunctionSignature] signature):
        cdef FunctionSignature self = FunctionSignature.__new__(
            FunctionSignature)
        self.signature = signature
        return self

    def return_type(self):
        return pyarrow_wrap_data_type(self.signature.get().ret_type())

    def param_types(self):
        result = []
        cdef vector[shared_ptr[CDataType]] types = \
            self.signature.get().param_types()
        for t in types:
            result.append(pyarrow_wrap_data_type(t))
        return result

    def name(self):
        return self.signature.get().base_name().decode()

    def __repr__(self):
        signature = self.signature.get().ToString().decode()
        return "FunctionSignature(" + signature + ")"


def get_registered_function_signatures():
    """
    Return the function signatures registered in Gandiva's
    ExpressionRegistry.

    Returns
    -------
    registry : list[FunctionSignature]
        A list of registered function signatures.
    """
    results = []

    cdef vector[shared_ptr[CFunctionSignature]] signatures = \
        GetRegisteredFunctionSignatures()

    for signature in signatures:
        results.append(FunctionSignature.create(signature))

    return results
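
# A quick sketch for introspecting the registry via the helper above:
#
#   >>> import pyarrow.gandiva as gandiva
#   >>> sigs = gandiva.get_registered_function_signatures()
#   >>> names = sorted({sig.name() for sig in sigs})
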
venv/lib/python3.10/site-packages/pyarrow/parquet/__init__.py
ADDED
@@ -0,0 +1,20 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# flake8: noqa

from .core import *

venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (204 Bytes).

venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc
ADDED
Binary file (73.8 kB).

venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc
ADDED
Binary file (368 Bytes).

venv/lib/python3.10/site-packages/pyarrow/parquet/core.py
ADDED
@@ -0,0 +1,2341 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.


from collections import defaultdict
from contextlib import nullcontext
from functools import reduce

import inspect
import json
import os
import re
import operator
import warnings

import pyarrow as pa

try:
    import pyarrow._parquet as _parquet
except ImportError as exc:
    raise ImportError(
        "The pyarrow installation is not built with support "
        f"for the Parquet file format ({str(exc)})"
    ) from None

from pyarrow._parquet import (ParquetReader, Statistics,  # noqa
                              FileMetaData, RowGroupMetaData,
                              ColumnChunkMetaData,
                              ParquetSchema, ColumnSchema,
                              ParquetLogicalType,
                              FileEncryptionProperties,
                              FileDecryptionProperties,
                              SortingColumn)
from pyarrow.fs import (LocalFileSystem, FileSystem, FileType,
                        _resolve_filesystem_and_path, _ensure_filesystem)
from pyarrow.util import guid, _is_path_like, _stringify_path, _deprecate_api


def _check_contains_null(val):
    if isinstance(val, bytes):
        for byte in val:
            if isinstance(byte, bytes):
                compare_to = chr(0)
            else:
                compare_to = 0
            if byte == compare_to:
                return True
    elif isinstance(val, str):
        return '\x00' in val
    return False


def _check_filters(filters, check_null_strings=True):
    """
    Check if filters are well-formed.
    """
    if filters is not None:
        if len(filters) == 0 or any(len(f) == 0 for f in filters):
            raise ValueError("Malformed filters")
        if isinstance(filters[0][0], str):
            # We have encountered the situation where we have one nesting
            # level too few:
            #   We have [(,,), ..] instead of [[(,,), ..]]
            filters = [filters]
        if check_null_strings:
            for conjunction in filters:
                for col, op, val in conjunction:
                    if (
                        isinstance(val, list) and
                        all(_check_contains_null(v) for v in val) or
                        _check_contains_null(val)
                    ):
                        raise NotImplementedError(
                            "Null-terminated binary strings are not "
                            "supported as filter values."
                        )
    return filters


_DNF_filter_doc = """Predicates are expressed using an ``Expression`` or using
|
95 |
+
the disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``.
|
96 |
+
DNF allows arbitrary boolean logical combinations of single column predicates.
|
97 |
+
The innermost tuples each describe a single column predicate. The list of inner
|
98 |
+
predicates is interpreted as a conjunction (AND), forming a more selective and
|
99 |
+
multiple column predicate. Finally, the most outer list combines these filters
|
100 |
+
as a disjunction (OR).
|
101 |
+
|
102 |
+
Predicates may also be passed as List[Tuple]. This form is interpreted
|
103 |
+
as a single conjunction. To express OR in predicates, one must
|
104 |
+
use the (preferred) List[List[Tuple]] notation.
|
105 |
+
|
106 |
+
Each tuple has format: (``key``, ``op``, ``value``) and compares the
|
107 |
+
``key`` with the ``value``.
|
108 |
+
The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
|
109 |
+
``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
|
110 |
+
``value`` must be a collection such as a ``list``, a ``set`` or a
|
111 |
+
``tuple``.
|
112 |
+
|
113 |
+
Examples:
|
114 |
+
|
115 |
+
Using the ``Expression`` API:
|
116 |
+
|
117 |
+
.. code-block:: python
|
118 |
+
|
119 |
+
import pyarrow.compute as pc
|
120 |
+
pc.field('x') = 0
|
121 |
+
pc.field('y').isin(['a', 'b', 'c'])
|
122 |
+
~pc.field('y').isin({'a', 'b'})
|
123 |
+
|
124 |
+
Using the DNF format:
|
125 |
+
|
126 |
+
.. code-block:: python
|
127 |
+
|
128 |
+
('x', '=', 0)
|
129 |
+
('y', 'in', ['a', 'b', 'c'])
|
130 |
+
('z', 'not in', {'a','b'})
|
131 |
+
|
132 |
+
"""


def filters_to_expression(filters):
    """
    Check if filters are well-formed and convert to an ``Expression``.

    Parameters
    ----------
    filters : List[Tuple] or List[List[Tuple]]

    Notes
    -----
    See internal ``pyarrow._DNF_filter_doc`` attribute for more details.

    Examples
    --------

    >>> filters_to_expression([('foo', '==', 'bar')])
    <pyarrow.compute.Expression (foo == "bar")>

    Returns
    -------
    pyarrow.compute.Expression
        An Expression representing the filters
    """
    import pyarrow.dataset as ds

    if isinstance(filters, ds.Expression):
        return filters

    filters = _check_filters(filters, check_null_strings=False)

    def convert_single_predicate(col, op, val):
        field = ds.field(col)

        if op == "=" or op == "==":
            return field == val
        elif op == "!=":
            return field != val
        elif op == '<':
            return field < val
        elif op == '>':
            return field > val
        elif op == '<=':
            return field <= val
        elif op == '>=':
            return field >= val
        elif op == 'in':
            return field.isin(val)
        elif op == 'not in':
            return ~field.isin(val)
        else:
            raise ValueError(
                '"{0}" is not a valid operator in predicates.'.format(
                    (col, op, val)))

    disjunction_members = []

    for conjunction in filters:
        conjunction_members = [
            convert_single_predicate(col, op, val)
            for col, op, val in conjunction
        ]

        disjunction_members.append(reduce(operator.and_, conjunction_members))

    return reduce(operator.or_, disjunction_members)


_filters_to_expression = _deprecate_api(
    "_filters_to_expression", "filters_to_expression",
    filters_to_expression, "10.0.0", DeprecationWarning)


# ----------------------------------------------------------------------
# Reading a single Parquet file


class ParquetFile:
    """
    Reader interface for a single Parquet file.

    Parameters
    ----------
    source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
        Readable source. For passing bytes or buffer-like file containing a
        Parquet file, use pyarrow.BufferReader.
    metadata : FileMetaData, default None
        Use existing metadata object, rather than reading from file.
    common_metadata : FileMetaData, default None
        Will be used in reads for pandas schema metadata if not found in the
        main file's metadata, no other uses at the moment.
    read_dictionary : list
        List of column names to read directly as DictionaryArray.
    memory_map : bool, default False
        If the source is a file path, use a memory map to read file, which can
        improve performance in some environments.
    buffer_size : int, default 0
        If positive, perform read buffering when deserializing individual
        column chunks. Otherwise IO calls are unbuffered.
    pre_buffer : bool, default False
        Coalesce and issue file reads in parallel to improve performance on
        high-latency filesystems (e.g. S3). If True, Arrow will use a
        background I/O thread pool.
    coerce_int96_timestamp_unit : str, default None
        Cast timestamps that are stored in INT96 format to a particular
        resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
        and therefore INT96 timestamps will be inferred as timestamps
        in nanoseconds.
    decryption_properties : FileDecryptionProperties, default None
        File decryption properties for Parquet Modular Encryption.
    thrift_string_size_limit : int, default None
        If not None, override the maximum total string size allocated
        when decoding Thrift structures. The default limit should be
        sufficient for most Parquet files.
    thrift_container_size_limit : int, default None
        If not None, override the maximum total size of containers allocated
        when decoding Thrift structures. The default limit should be
        sufficient for most Parquet files.
    filesystem : FileSystem, default None
        If nothing passed, will be inferred based on path.
        Path will try to be found in the local on-disk filesystem otherwise
        it will be parsed as an URI to determine the filesystem.
    page_checksum_verification : bool, default False
        If True, verify the checksum for each page read from the file.

    Examples
    --------

    Generate an example PyArrow Table and write it to Parquet file:

    >>> import pyarrow as pa
    >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
    ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
    ...                              "Brittle stars", "Centipede"]})

    >>> import pyarrow.parquet as pq
    >>> pq.write_table(table, 'example.parquet')

    Create a ``ParquetFile`` object from the Parquet file:

    >>> parquet_file = pq.ParquetFile('example.parquet')

    Read the data:

    >>> parquet_file.read()
    pyarrow.Table
    n_legs: int64
    animal: string
    ----
    n_legs: [[2,2,4,4,5,100]]
    animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]]

    Create a ParquetFile object with "animal" column as DictionaryArray:

    >>> parquet_file = pq.ParquetFile('example.parquet',
    ...                               read_dictionary=["animal"])
    >>> parquet_file.read()
    pyarrow.Table
    n_legs: int64
    animal: dictionary<values=string, indices=int32, ordered=0>
    ----
    n_legs: [[2,2,4,4,5,100]]
    animal: [  -- dictionary:
    ["Flamingo","Parrot",...,"Brittle stars","Centipede"]  -- indices:
    [0,1,2,3,4,5]]
    """

    def __init__(self, source, *, metadata=None, common_metadata=None,
                 read_dictionary=None, memory_map=False, buffer_size=0,
                 pre_buffer=False, coerce_int96_timestamp_unit=None,
                 decryption_properties=None, thrift_string_size_limit=None,
                 thrift_container_size_limit=None, filesystem=None,
                 page_checksum_verification=False):

        self._close_source = getattr(source, 'closed', True)

        filesystem, source = _resolve_filesystem_and_path(
            source, filesystem, memory_map=memory_map)
        if filesystem is not None:
            source = filesystem.open_input_file(source)
            self._close_source = True  # We opened it here, ensure we close it.

        self.reader = ParquetReader()
        self.reader.open(
            source, use_memory_map=memory_map,
            buffer_size=buffer_size, pre_buffer=pre_buffer,
            read_dictionary=read_dictionary, metadata=metadata,
            coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
            decryption_properties=decryption_properties,
            thrift_string_size_limit=thrift_string_size_limit,
            thrift_container_size_limit=thrift_container_size_limit,
            page_checksum_verification=page_checksum_verification,
        )
        self.common_metadata = common_metadata
        self._nested_paths_by_prefix = self._build_nested_paths()

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()

    def _build_nested_paths(self):
        paths = self.reader.column_paths

        result = defaultdict(list)

        for i, path in enumerate(paths):
            key = path[0]
            rest = path[1:]
            while True:
                result[key].append(i)

                if not rest:
                    break

                key = '.'.join((key, rest[0]))
                rest = rest[1:]

        return result

    @property
    def metadata(self):
        """
        Return the Parquet metadata.
        """
        return self.reader.metadata

    @property
    def schema(self):
        """
        Return the Parquet schema, unconverted to Arrow types
        """
        return self.metadata.schema

    @property
    def schema_arrow(self):
        """
        Return the inferred Arrow schema, converted from the whole Parquet
        file's schema

        Examples
        --------
        Generate an example Parquet file:

        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        Read the Arrow schema:

        >>> parquet_file.schema_arrow
        n_legs: int64
        animal: string
        """
        return self.reader.schema_arrow

    @property
    def num_row_groups(self):
        """
        Return the number of row groups of the Parquet file.

        Examples
        --------
        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        >>> parquet_file.num_row_groups
        1
        """
        return self.reader.num_row_groups

    def close(self, force: bool = False):
        if self._close_source or force:
            self.reader.close()

    @property
    def closed(self) -> bool:
        return self.reader.closed

    def read_row_group(self, i, columns=None, use_threads=True,
                       use_pandas_metadata=False):
        """
        Read a single row group from a Parquet file.

        Parameters
        ----------
        i : int
            Index of the individual row group that we want to read.
        columns : list
            If not None, only these columns will be read from the row group. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'.
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Returns
        -------
        pyarrow.table.Table
            Content of the row group as a table (of columns)

        Examples
        --------
        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        >>> parquet_file.read_row_group(0)
        pyarrow.Table
        n_legs: int64
        animal: string
        ----
        n_legs: [[2,2,4,4,5,100]]
        animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]]
        """
        column_indices = self._get_column_indices(
            columns, use_pandas_metadata=use_pandas_metadata)
        return self.reader.read_row_group(i, column_indices=column_indices,
                                          use_threads=use_threads)

    def read_row_groups(self, row_groups, columns=None, use_threads=True,
                        use_pandas_metadata=False):
        """
        Read multiple row groups from a Parquet file.

        Parameters
        ----------
        row_groups : list
            Only these row groups will be read from the file.
        columns : list
            If not None, only these columns will be read from the row group. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'.
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Returns
        -------
        pyarrow.table.Table
            Content of the row groups as a table (of columns).

        Examples
        --------
        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        >>> parquet_file.read_row_groups([0,0])
        pyarrow.Table
        n_legs: int64
        animal: string
        ----
        n_legs: [[2,2,4,4,5,...,2,4,4,5,100]]
        animal: [["Flamingo","Parrot","Dog",...,"Brittle stars","Centipede"]]
        """
        column_indices = self._get_column_indices(
            columns, use_pandas_metadata=use_pandas_metadata)
        return self.reader.read_row_groups(row_groups,
                                           column_indices=column_indices,
                                           use_threads=use_threads)

    def iter_batches(self, batch_size=65536, row_groups=None, columns=None,
                     use_threads=True, use_pandas_metadata=False):
        """
        Read streaming batches from a Parquet file.

        Parameters
        ----------
        batch_size : int, default 64K
            Maximum number of records to yield per batch. Batches may be
            smaller if there aren't enough rows in the file.
        row_groups : list
            Only these row groups will be read from the file.
        columns : list
            If not None, only these columns will be read from the file. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'.
        use_threads : boolean, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : boolean, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Yields
        ------
        pyarrow.RecordBatch
            Contents of each batch as a record batch

        Examples
        --------
        Generate an example Parquet file:

        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')
        >>> for i in parquet_file.iter_batches():
        ...     print("RecordBatch")
        ...     print(i.to_pandas())
        ...
        RecordBatch
           n_legs         animal
        0       2       Flamingo
        1       2         Parrot
        2       4            Dog
        3       4          Horse
        4       5  Brittle stars
        5     100      Centipede
        """
        if row_groups is None:
            row_groups = range(0, self.metadata.num_row_groups)
        column_indices = self._get_column_indices(
            columns, use_pandas_metadata=use_pandas_metadata)

        batches = self.reader.iter_batches(batch_size,
                                           row_groups=row_groups,
                                           column_indices=column_indices,
                                           use_threads=use_threads)
        return batches

    def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
        """
        Read a Table from Parquet format.

        Parameters
        ----------
        columns : list
            If not None, only these columns will be read from the file. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'.
        use_threads : bool, default True
            Perform multi-threaded column reads.
        use_pandas_metadata : bool, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded.

        Returns
        -------
        pyarrow.table.Table
            Content of the file as a table (of columns).

        Examples
        --------
        Generate an example Parquet file:

        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        Read a Table:

        >>> parquet_file.read(columns=["animal"])
        pyarrow.Table
        animal: string
        ----
        animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]]
        """
        column_indices = self._get_column_indices(
            columns, use_pandas_metadata=use_pandas_metadata)
        return self.reader.read_all(column_indices=column_indices,
                                    use_threads=use_threads)

    def scan_contents(self, columns=None, batch_size=65536):
        """
        Read contents of file for the given columns and batch size.

        Notes
        -----
        This function's primary purpose is benchmarking.
        The scan is executed on a single thread.

        Parameters
        ----------
        columns : list of integers, default None
            Select columns to read, if None scan all columns.
        batch_size : int, default 64K
            Number of rows to read at a time internally.

        Returns
        -------
        num_rows : int
            Number of rows in file

        Examples
        --------
        >>> import pyarrow as pa
        >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_table(table, 'example.parquet')
        >>> parquet_file = pq.ParquetFile('example.parquet')

        >>> parquet_file.scan_contents()
        6
        """
        column_indices = self._get_column_indices(columns)
        return self.reader.scan_contents(column_indices,
                                         batch_size=batch_size)

    def _get_column_indices(self, column_names, use_pandas_metadata=False):
        if column_names is None:
            return None

        indices = []

        for name in column_names:
            if name in self._nested_paths_by_prefix:
                indices.extend(self._nested_paths_by_prefix[name])

        if use_pandas_metadata:
            file_keyvalues = self.metadata.metadata
            common_keyvalues = (self.common_metadata.metadata
                                if self.common_metadata is not None
                                else None)

            if file_keyvalues and b'pandas' in file_keyvalues:
                index_columns = _get_pandas_index_columns(file_keyvalues)
            elif common_keyvalues and b'pandas' in common_keyvalues:
                index_columns = _get_pandas_index_columns(common_keyvalues)
            else:
                index_columns = []

            if indices is not None and index_columns:
                indices += [self.reader.column_name_idx(descr)
                            for descr in index_columns
                            if not isinstance(descr, dict)]

        return indices
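
# A minimal streaming-read sketch (illustrative only, not upstream code):
# iterating over a large file in bounded-memory batches instead of loading
# the whole table at once:
#
#   import pyarrow.parquet as pq
#   pf = pq.ParquetFile('example.parquet')
#   for batch in pf.iter_batches(batch_size=10_000, columns=['n_legs']):
#       ...  # each batch is a pyarrow.RecordBatch of at most 10_000 rows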


_SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]')


def _sanitized_spark_field_name(name):
    return _SPARK_DISALLOWED_CHARS.sub('_', name)


def _sanitize_schema(schema, flavor):
    if 'spark' in flavor:
        sanitized_fields = []

        schema_changed = False

        for field in schema:
            name = field.name
            sanitized_name = _sanitized_spark_field_name(name)

            if sanitized_name != name:
                schema_changed = True
                sanitized_field = pa.field(sanitized_name, field.type,
                                           field.nullable, field.metadata)
                sanitized_fields.append(sanitized_field)
            else:
                sanitized_fields.append(field)

        new_schema = pa.schema(sanitized_fields, metadata=schema.metadata)
        return new_schema, schema_changed
    else:
        return schema, False


def _sanitize_table(table, new_schema, flavor):
    # TODO: This will not handle prohibited characters in nested field names
    if 'spark' in flavor:
        column_data = [table[i] for i in range(table.num_columns)]
        return pa.Table.from_arrays(column_data, schema=new_schema)
    else:
        return table
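
# An illustrative note (an assumption added for clarity, not upstream code):
# the Spark flavor replaces each disallowed character with an underscore, e.g.
#
#   _sanitized_spark_field_name("n legs,total")  # -> "n_legs_total"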


_parquet_writer_arg_docs = """version : {"1.0", "2.4", "2.6"}, default "2.6"
    Determine which Parquet logical types are available for use, whether the
    reduced set from the Parquet 1.x.x format or the expanded logical types
    added in later format versions.
    Files written with version='2.4' or '2.6' may not be readable in all
    Parquet implementations, so version='1.0' is likely the choice that
    maximizes file compatibility.
    UINT32 and some logical types are only available with version '2.4'.
    Nanosecond timestamps are only available with version '2.6'.
    Other features such as compression algorithms or the new serialized
    data page format must be enabled separately (see 'compression' and
    'data_page_version').
use_dictionary : bool or list, default True
    Specify if we should use dictionary encoding in general or only for
    some columns.
    When encoding the column, if the dictionary size is too large, the
    column will fall back to ``PLAIN`` encoding. Note that the ``BOOLEAN``
    type doesn't support dictionary encoding.
compression : str or dict, default 'snappy'
    Specify the compression codec, either on a general basis or per-column.
    Valid values: {'NONE', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD'}.
write_statistics : bool or list, default True
    Specify if we should write statistics in general (default is True) or only
    for some columns.
use_deprecated_int96_timestamps : bool, default None
    Write timestamps to INT96 Parquet format. Defaults to False unless enabled
    by flavor argument. This takes priority over the coerce_timestamps option.
coerce_timestamps : str, default None
    Cast timestamps to a particular resolution. If omitted, defaults are chosen
    depending on `version`. For ``version='1.0'`` and ``version='2.4'``,
    nanoseconds are cast to microseconds ('us'), while for other `version`
    values, they are written natively without loss of resolution. Seconds are
    always cast to milliseconds ('ms') by default, as Parquet does not have
    any temporal type with seconds resolution.
    If the casting results in loss of data, it will raise an exception
    unless ``allow_truncated_timestamps=True`` is given.
    Valid values: {None, 'ms', 'us'}
allow_truncated_timestamps : bool, default False
    Allow loss of data when coercing timestamps to a particular
    resolution. E.g. if microsecond or nanosecond data is lost when coercing
    to 'ms', do not raise an exception. Passing
    ``allow_truncated_timestamps=True`` only suppresses the truncation
    exception when ``coerce_timestamps`` is not None.
data_page_size : int, default None
    Set a target threshold for the approximate encoded size of data
    pages within a column chunk (in bytes). If None, use the default data page
    size of 1MByte.
flavor : {'spark'}, default None
    Sanitize schema or set other compatibility options to work with
    various target systems.
filesystem : FileSystem, default None
    If nothing passed, will be inferred from `where` if path-like, else
    `where` is already a file-like object so no filesystem is needed.
compression_level : int or dict, default None
    Specify the compression level for a codec, either on a general basis or
    per-column. If None is passed, arrow selects the compression level for
    the compression codec in use. The compression level has a different
    meaning for each codec, so you have to read the documentation of the
    codec you are using.
    An exception is thrown if the compression codec does not allow specifying
    a compression level.
use_byte_stream_split : bool or list, default False
    Specify if the byte_stream_split encoding should be used in general or
    only for some columns. If both dictionary and byte_stream_split are
    enabled, then dictionary is preferred.
    The byte_stream_split encoding is valid only for floating-point data types
    and should be combined with a compression codec.
column_encoding : string or dict, default None
    Specify the encoding scheme on a per column basis.
    Can only be used when ``use_dictionary`` is set to False, and
    cannot be used in combination with ``use_byte_stream_split``.
    Currently supported values: {'PLAIN', 'BYTE_STREAM_SPLIT',
    'DELTA_BINARY_PACKED', 'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'}.
    Certain encodings are only compatible with certain data types.
    Please refer to the encodings section of `Reading and writing Parquet
    files <https://arrow.apache.org/docs/cpp/parquet.html#encodings>`_.
data_page_version : {"1.0", "2.0"}, default "1.0"
    The serialized Parquet data page format version to write, defaults to
    1.0. This does not impact the file schema logical types and Arrow to
    Parquet type casting behavior; for that use the "version" option.
use_compliant_nested_type : bool, default True
    Whether to write compliant Parquet nested type (lists) as defined
    `here <https://github.com/apache/parquet-format/blob/master/
    LogicalTypes.md#nested-types>`_, defaults to ``True``.
    For ``use_compliant_nested_type=True``, this will write into a list
    with 3-level structure where the middle level, named ``list``,
    is a repeated group with a single field named ``element``::

        <list-repetition> group <name> (LIST) {
            repeated group list {
                <element-repetition> <element-type> element;
            }
        }

    For ``use_compliant_nested_type=False``, this will also write into a list
    with 3-level structure, where the name of the single field of the middle
    level ``list`` is taken from the element name for nested columns in Arrow,
    which defaults to ``item``::

        <list-repetition> group <name> (LIST) {
            repeated group list {
                <element-repetition> <element-type> item;
            }
        }
encryption_properties : FileEncryptionProperties, default None
    File encryption properties for Parquet Modular Encryption.
    If None, no encryption will be done.
    The encryption properties can be created using:
    ``CryptoFactory.file_encryption_properties()``.
write_batch_size : int, default None
    Number of values to write to a page at a time. If None, use the default of
    1024. ``write_batch_size`` is complementary to ``data_page_size``. If pages
    are exceeding the ``data_page_size`` due to large column values, lowering
    the batch size can help keep page sizes closer to the intended size.
dictionary_pagesize_limit : int, default None
    Specify the dictionary page size limit per row group. If None, use the
    default 1MB.
store_schema : bool, default True
    By default, the Arrow schema is serialized and stored in the Parquet
    file metadata (in the "ARROW:schema" key). When reading the file,
    if this key is available, it will be used to more faithfully recreate
    the original Arrow data. For example, for tz-aware timestamp columns
    it will restore the timezone (Parquet only stores the UTC values without
    timezone), or columns with duration type will be restored from the int64
    Parquet column.
write_page_index : bool, default False
    Whether to write a page index in general for all columns.
    Writing statistics to the page index disables the old method of writing
    statistics to each data page header. The page index makes statistics-based
    filtering more efficient than the page header, as it gathers all the
    statistics for a Parquet file in a single place, avoiding scattered I/O.
    Note that the page index is not yet used on the read side by PyArrow.
write_page_checksum : bool, default False
    Whether to write page checksums in general for all columns.
    Page checksums enable detection of data corruption, which might occur
    during transmission or in storage.
sorting_columns : Sequence of SortingColumn, default None
    Specify the sort order of the data being written. The writer does not sort
    the data nor does it verify that the data is sorted. The sort order is
    written to the row group metadata, which can then be used by readers.
"""


_parquet_writer_example_doc = """\
Generate an example PyArrow Table and RecordBatch:

>>> import pyarrow as pa
>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})
>>> batch = pa.record_batch([[2, 2, 4, 4, 5, 100],
...                          ["Flamingo", "Parrot", "Dog", "Horse",
...                           "Brittle stars", "Centipede"]],
...                         names=['n_legs', 'animal'])

create a ParquetWriter object:

>>> import pyarrow.parquet as pq
>>> writer = pq.ParquetWriter('example.parquet', table.schema)

and write the Table into the Parquet file:

>>> writer.write_table(table)
>>> writer.close()

>>> pq.read_table('example.parquet').to_pandas()
   n_legs         animal
0       2       Flamingo
1       2         Parrot
2       4            Dog
3       4          Horse
4       5  Brittle stars
5     100      Centipede

create a ParquetWriter object for the RecordBatch:

>>> writer2 = pq.ParquetWriter('example2.parquet', batch.schema)

and write the RecordBatch into the Parquet file:

>>> writer2.write_batch(batch)
>>> writer2.close()

>>> pq.read_table('example2.parquet').to_pandas()
   n_legs         animal
0       2       Flamingo
1       2         Parrot
2       4            Dog
3       4          Horse
4       5  Brittle stars
5     100      Centipede
"""


class ParquetWriter:

    __doc__ = """
Class for incrementally building a Parquet file for Arrow tables.

Parameters
----------
where : path or file-like object
schema : pyarrow.Schema
{}
writer_engine_version : unused
**options : dict
    If options contains a key `metadata_collector` then the
    corresponding value is assumed to be a list (or any object with
    `.append` method) that will be filled with the file metadata instance
    of the written file.

Examples
--------
{}
""".format(_parquet_writer_arg_docs, _parquet_writer_example_doc)

    def __init__(self, where, schema, filesystem=None,
                 flavor=None,
                 version='2.6',
                 use_dictionary=True,
                 compression='snappy',
                 write_statistics=True,
                 use_deprecated_int96_timestamps=None,
                 compression_level=None,
                 use_byte_stream_split=False,
                 column_encoding=None,
                 writer_engine_version=None,
                 data_page_version='1.0',
                 use_compliant_nested_type=True,
                 encryption_properties=None,
                 write_batch_size=None,
                 dictionary_pagesize_limit=None,
                 store_schema=True,
                 write_page_index=False,
                 write_page_checksum=False,
                 sorting_columns=None,
                 **options):
        if use_deprecated_int96_timestamps is None:
            # Use int96 timestamps for Spark
            if flavor is not None and 'spark' in flavor:
                use_deprecated_int96_timestamps = True
            else:
                use_deprecated_int96_timestamps = False

        self.flavor = flavor
        if flavor is not None:
            schema, self.schema_changed = _sanitize_schema(schema, flavor)
        else:
            self.schema_changed = False

        self.schema = schema
        self.where = where

        # If we open a file using a filesystem, store file handle so we can be
        # sure to close it when `self.close` is called.
        self.file_handle = None

        filesystem, path = _resolve_filesystem_and_path(where, filesystem)
        if filesystem is not None:
            # ARROW-10480: do not auto-detect compression. While
            # a filename like foo.parquet.gz is nonconforming, it
            # shouldn't implicitly apply compression.
            sink = self.file_handle = filesystem.open_output_stream(
                path, compression=None)
        else:
            sink = where
        self._metadata_collector = options.pop('metadata_collector', None)
        engine_version = 'V2'
        self.writer = _parquet.ParquetWriter(
            sink, schema,
            version=version,
            compression=compression,
            use_dictionary=use_dictionary,
            write_statistics=write_statistics,
            use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
            compression_level=compression_level,
            use_byte_stream_split=use_byte_stream_split,
            column_encoding=column_encoding,
            writer_engine_version=engine_version,
            data_page_version=data_page_version,
            use_compliant_nested_type=use_compliant_nested_type,
            encryption_properties=encryption_properties,
            write_batch_size=write_batch_size,
            dictionary_pagesize_limit=dictionary_pagesize_limit,
            store_schema=store_schema,
            write_page_index=write_page_index,
            write_page_checksum=write_page_checksum,
            sorting_columns=sorting_columns,
            **options)
        self.is_open = True

    def __del__(self):
        if getattr(self, 'is_open', False):
            self.close()

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()
        # return false since we want to propagate exceptions
        return False

    def write(self, table_or_batch, row_group_size=None):
        """
        Write RecordBatch or Table to the Parquet file.

        Parameters
        ----------
        table_or_batch : {RecordBatch, Table}
        row_group_size : int, default None
            Maximum number of rows in each written row group. If None,
            the row group size will be the minimum of the input
            table or batch length and 1024 * 1024.
        """
        if isinstance(table_or_batch, pa.RecordBatch):
            self.write_batch(table_or_batch, row_group_size)
        elif isinstance(table_or_batch, pa.Table):
            self.write_table(table_or_batch, row_group_size)
        else:
            raise TypeError(type(table_or_batch))

    def write_batch(self, batch, row_group_size=None):
        """
        Write RecordBatch to the Parquet file.

        Parameters
        ----------
        batch : RecordBatch
        row_group_size : int, default None
            Maximum number of rows in written row group. If None, the
            row group size will be the minimum of the RecordBatch
            size and 1024 * 1024. If set larger than 64Mi then 64Mi
            will be used instead.
        """
        table = pa.Table.from_batches([batch], batch.schema)
        self.write_table(table, row_group_size)

    def write_table(self, table, row_group_size=None):
        """
        Write Table to the Parquet file.

        Parameters
        ----------
        table : Table
        row_group_size : int, default None
            Maximum number of rows in each written row group. If None,
            the row group size will be the minimum of the Table size
            and 1024 * 1024. If set larger than 64Mi then 64Mi will
            be used instead.

        """
        if self.schema_changed:
            table = _sanitize_table(table, self.schema, self.flavor)
        assert self.is_open

        if not table.schema.equals(self.schema, check_metadata=False):
            msg = ('Table schema does not match schema used to create file: '
                   '\ntable:\n{!s} vs. \nfile:\n{!s}'
                   .format(table.schema, self.schema))
            raise ValueError(msg)

        self.writer.write_table(table, row_group_size=row_group_size)

    def close(self):
        """
        Close the connection to the Parquet file.
        """
        if self.is_open:
            self.writer.close()
            self.is_open = False
            if self._metadata_collector is not None:
                self._metadata_collector.append(self.writer.metadata)
        if self.file_handle is not None:
            self.file_handle.close()
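
# A minimal context-manager sketch (illustrative only, not upstream code):
# writing a table into one file twice, producing one row group per
# write_table call, with close() handled by __exit__:
#
#   import pyarrow.parquet as pq
#   with pq.ParquetWriter('out.parquet', table.schema) as writer:
#       writer.write_table(table)
#       writer.write_table(table)  # appends a second row group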


def _get_pandas_index_columns(keyvalues):
    return (json.loads(keyvalues[b'pandas'].decode('utf8'))
            ['index_columns'])


EXCLUDED_PARQUET_PATHS = {'_SUCCESS'}


_read_docstring_common = """\
read_dictionary : list, default None
    List of names or column paths (for nested types) to read directly
    as DictionaryArray. Only supported for BYTE_ARRAY storage. To read
    a flat column as dictionary-encoded pass the column name. For
    nested types, you must pass the full column "path", which could be
    something like level1.level2.list.item. Refer to the Parquet
    file's schema to obtain the paths.
memory_map : bool, default False
    If the source is a file path, use a memory map to read file, which can
    improve performance in some environments.
buffer_size : int, default 0
    If positive, perform read buffering when deserializing individual
    column chunks. Otherwise IO calls are unbuffered.
partitioning : pyarrow.dataset.Partitioning or str or list of str, \
default "hive"
    The partitioning scheme for a partitioned dataset. The default of "hive"
    assumes directory names with key=value pairs like "/year=2009/month=11".
    In addition, a scheme like "/2009/11" is also supported, in which case
    you need to specify the field names or a full schema. See the
    ``pyarrow.dataset.partitioning()`` function for more details."""


_parquet_dataset_example = """\
Generate an example PyArrow Table and write it to a partitioned dataset:

>>> import pyarrow as pa
>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
...                   'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})
>>> import pyarrow.parquet as pq
>>> pq.write_to_dataset(table, root_path='dataset_v2',
...                     partition_cols=['year'])

create a ParquetDataset object from the dataset source:

>>> dataset = pq.ParquetDataset('dataset_v2/')

and read the data:

>>> dataset.read().to_pandas()
   n_legs         animal  year
0       5  Brittle stars  2019
1       2       Flamingo  2020
2       4            Dog  2021
3     100      Centipede  2021
4       2         Parrot  2022
5       4          Horse  2022

create a ParquetDataset object with filter:

>>> dataset = pq.ParquetDataset('dataset_v2/',
...                             filters=[('n_legs','=',4)])
>>> dataset.read().to_pandas()
   n_legs animal  year
0       4    Dog  2021
1       4  Horse  2022
"""


class ParquetDataset:
    __doc__ = """
Encapsulates details of reading a complete Parquet dataset possibly
consisting of multiple files and partitions in subdirectories.

Parameters
----------
path_or_paths : str or List[str]
    A directory name, single file name, or list of file names.
filesystem : FileSystem, default None
    If nothing passed, will be inferred based on path.
    Path will try to be found in the local on-disk filesystem otherwise
    it will be parsed as an URI to determine the filesystem.
schema : pyarrow.parquet.Schema
    Optionally provide the Schema for the Dataset, in which case it will
    not be inferred from the source.
filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None
    Rows which do not match the filter predicate will be removed from scanned
    data. Partition keys embedded in a nested directory structure will be
    exploited to avoid loading files at all if they contain no matching rows.
    Within-file level filtering and different partitioning schemes are supported.

{1}
{0}
ignore_prefixes : list, optional
    Files matching any of these prefixes will be ignored by the
    discovery process.
    This is matched to the basename of a path.
    By default this is ['.', '_'].
    Note that discovery happens only if a directory is passed as source.
pre_buffer : bool, default True
    Coalesce and issue file reads in parallel to improve performance on
    high-latency filesystems (e.g. S3, GCS). If True, Arrow will use a
    background I/O thread pool. If using a filesystem layer that itself
    performs readahead (e.g. fsspec's S3FS), disable readahead for best
    results. Set to False if you want to prioritize minimal memory usage
    over maximum speed.
coerce_int96_timestamp_unit : str, default None
    Cast timestamps that are stored in INT96 format to a particular resolution
    (e.g. 'ms'). Setting to None is equivalent to 'ns' and therefore INT96
    timestamps will be inferred as timestamps in nanoseconds.
decryption_properties : FileDecryptionProperties or None
    File-level decryption properties.
    The decryption properties can be created using
    ``CryptoFactory.file_decryption_properties()``.
thrift_string_size_limit : int, default None
    If not None, override the maximum total string size allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.
thrift_container_size_limit : int, default None
    If not None, override the maximum total size of containers allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.
page_checksum_verification : bool, default False
    If True, verify the page checksum for each page read from the file.
use_legacy_dataset : bool, optional
    Deprecated and has no effect from PyArrow version 15.0.0.

Examples
--------
{2}
""".format(_read_docstring_common, _DNF_filter_doc, _parquet_dataset_example)

    def __init__(self, path_or_paths, filesystem=None, schema=None, *, filters=None,
                 read_dictionary=None, memory_map=False, buffer_size=None,
                 partitioning="hive", ignore_prefixes=None, pre_buffer=True,
                 coerce_int96_timestamp_unit=None,
                 decryption_properties=None, thrift_string_size_limit=None,
                 thrift_container_size_limit=None,
                 page_checksum_verification=False,
                 use_legacy_dataset=None):

        if use_legacy_dataset is not None:
            warnings.warn(
                "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 "
                "and will be removed in a future version.",
                FutureWarning, stacklevel=2)

        import pyarrow.dataset as ds

        # map format arguments
        read_options = {
            "pre_buffer": pre_buffer,
            "coerce_int96_timestamp_unit": coerce_int96_timestamp_unit,
            "thrift_string_size_limit": thrift_string_size_limit,
            "thrift_container_size_limit": thrift_container_size_limit,
            "page_checksum_verification": page_checksum_verification,
        }
        if buffer_size:
            read_options.update(use_buffered_stream=True,
                                buffer_size=buffer_size)
        if read_dictionary is not None:
            read_options.update(dictionary_columns=read_dictionary)

        if decryption_properties is not None:
            read_options.update(decryption_properties=decryption_properties)

        self._filter_expression = None
        if filters is not None:
            self._filter_expression = filters_to_expression(filters)

        # map old filesystems to new one
        if filesystem is not None:
            filesystem = _ensure_filesystem(
                filesystem, use_mmap=memory_map)
        elif filesystem is None and memory_map:
            # if memory_map is specified, assume local file system (string
            # path can in principle be URI for any filesystem)
            filesystem = LocalFileSystem(use_mmap=memory_map)

        # This needs to be checked after _ensure_filesystem, because that
        # handles the case of an fsspec LocalFileSystem
        if (
            hasattr(path_or_paths, "__fspath__") and
            filesystem is not None and
            not isinstance(filesystem, LocalFileSystem)
        ):
            raise TypeError(
                "Path-like objects with __fspath__ must only be used with "
                f"local file systems, not {type(filesystem)}"
            )

        # check for single fragment dataset
        single_file = None
        self._base_dir = None
        if not isinstance(path_or_paths, list):
            if _is_path_like(path_or_paths):
                path_or_paths = _stringify_path(path_or_paths)
                if filesystem is None:
                    # path might be a URI describing the FileSystem as well
                    try:
                        filesystem, path_or_paths = FileSystem.from_uri(
                            path_or_paths)
                    except ValueError:
                        filesystem = LocalFileSystem(use_mmap=memory_map)
                finfo = filesystem.get_file_info(path_or_paths)
                if finfo.is_file:
                    single_file = path_or_paths
                if finfo.type == FileType.Directory:
                    self._base_dir = path_or_paths
            else:
                single_file = path_or_paths

        parquet_format = ds.ParquetFileFormat(**read_options)

        if single_file is not None:
            fragment = parquet_format.make_fragment(single_file, filesystem)

            self._dataset = ds.FileSystemDataset(
                [fragment], schema=schema or fragment.physical_schema,
                format=parquet_format,
                filesystem=fragment.filesystem
            )
            return

        # check partitioning to enable dictionary encoding
        if partitioning == "hive":
            partitioning = ds.HivePartitioning.discover(
                infer_dictionary=True)

        self._dataset = ds.dataset(path_or_paths, filesystem=filesystem,
                                   schema=schema, format=parquet_format,
                                   partitioning=partitioning,
                                   ignore_prefixes=ignore_prefixes)
|
1344 |
+
|
1345 |
+
def equals(self, other):
|
1346 |
+
if not isinstance(other, ParquetDataset):
|
1347 |
+
raise TypeError('`other` must be an instance of ParquetDataset')
|
1348 |
+
|
1349 |
+
return (self.schema == other.schema and
|
1350 |
+
self._dataset.format == other._dataset.format and
|
1351 |
+
self.filesystem == other.filesystem and
|
1352 |
+
# self.fragments == other.fragments and
|
1353 |
+
self.files == other.files)
|
1354 |
+
|
1355 |
+
def __eq__(self, other):
|
1356 |
+
try:
|
1357 |
+
return self.equals(other)
|
1358 |
+
except TypeError:
|
1359 |
+
return NotImplemented
|
1360 |
+
|
1361 |
+
@property
|
1362 |
+
def schema(self):
|
1363 |
+
"""
|
1364 |
+
Schema of the Dataset.
|
1365 |
+
|
1366 |
+
Examples
|
1367 |
+
--------
|
1368 |
+
Generate an example dataset:
|
1369 |
+
|
1370 |
+
>>> import pyarrow as pa
|
1371 |
+
>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
|
1372 |
+
... 'n_legs': [2, 2, 4, 4, 5, 100],
|
1373 |
+
... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
|
1374 |
+
... "Brittle stars", "Centipede"]})
|
1375 |
+
>>> import pyarrow.parquet as pq
|
1376 |
+
>>> pq.write_to_dataset(table, root_path='dataset_v2_schema',
|
1377 |
+
... partition_cols=['year'])
|
1378 |
+
>>> dataset = pq.ParquetDataset('dataset_v2_schema/')
|
1379 |
+
|
1380 |
+
Read the schema:
|
1381 |
+
|
1382 |
+
>>> dataset.schema
|
1383 |
+
n_legs: int64
|
1384 |
+
animal: string
|
1385 |
+
year: dictionary<values=int32, indices=int32, ordered=0>
|
1386 |
+
"""
|
1387 |
+
return self._dataset.schema
|
1388 |
+
|
1389 |
+
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
|
1390 |
+
"""
|
1391 |
+
Read (multiple) Parquet files as a single pyarrow.Table.
|
1392 |
+
|
1393 |
+
Parameters
|
1394 |
+
----------
|
1395 |
+
columns : List[str]
|
1396 |
+
Names of columns to read from the dataset. The partition fields
|
1397 |
+
are not automatically included.
|
1398 |
+
use_threads : bool, default True
|
1399 |
+
Perform multi-threaded column reads.
|
1400 |
+
use_pandas_metadata : bool, default False
|
1401 |
+
If True and file has custom pandas schema metadata, ensure that
|
1402 |
+
index columns are also loaded.
|
1403 |
+
|
1404 |
+
Returns
|
1405 |
+
-------
|
1406 |
+
pyarrow.Table
|
1407 |
+
Content of the file as a table (of columns).
|
1408 |
+
|
1409 |
+
Examples
|
1410 |
+
--------
|
1411 |
+
Generate an example dataset:
|
1412 |
+
|
1413 |
+
>>> import pyarrow as pa
|
1414 |
+
>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
|
1415 |
+
... 'n_legs': [2, 2, 4, 4, 5, 100],
|
1416 |
+
... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
|
1417 |
+
... "Brittle stars", "Centipede"]})
|
1418 |
+
>>> import pyarrow.parquet as pq
|
1419 |
+
>>> pq.write_to_dataset(table, root_path='dataset_v2_read',
|
1420 |
+
... partition_cols=['year'])
|
1421 |
+
>>> dataset = pq.ParquetDataset('dataset_v2_read/')
|
1422 |
+
|
1423 |
+
Read the dataset:
|
1424 |
+
|
1425 |
+
>>> dataset.read(columns=["n_legs"])
|
1426 |
+
pyarrow.Table
|
1427 |
+
n_legs: int64
|
1428 |
+
----
|
1429 |
+
n_legs: [[5],[2],[4,100],[2,4]]
|
1430 |
+
"""
|
1431 |
+
# if use_pandas_metadata, we need to include index columns in the
|
1432 |
+
# column selection, to be able to restore those in the pandas DataFrame
|
1433 |
+
metadata = self.schema.metadata or {}
|
1434 |
+
|
1435 |
+
if use_pandas_metadata:
|
1436 |
+
# if the dataset schema metadata itself doesn't have pandas
|
1437 |
+
# then try to get this from common file (for backwards compat)
|
1438 |
+
if b"pandas" not in metadata:
|
1439 |
+
common_metadata = self._get_common_pandas_metadata()
|
1440 |
+
if common_metadata:
|
1441 |
+
metadata = common_metadata
|
1442 |
+
|
1443 |
+
if columns is not None and use_pandas_metadata:
|
1444 |
+
if metadata and b'pandas' in metadata:
|
1445 |
+
# RangeIndex can be represented as dict instead of column name
|
1446 |
+
index_columns = [
|
1447 |
+
col for col in _get_pandas_index_columns(metadata)
|
1448 |
+
if not isinstance(col, dict)
|
1449 |
+
]
|
1450 |
+
columns = (
|
1451 |
+
list(columns) + list(set(index_columns) - set(columns))
|
1452 |
+
)
|
1453 |
+
|
1454 |
+
table = self._dataset.to_table(
|
1455 |
+
columns=columns, filter=self._filter_expression,
|
1456 |
+
use_threads=use_threads
|
1457 |
+
)
|
1458 |
+
|
1459 |
+
# if use_pandas_metadata, restore the pandas metadata (which gets
|
1460 |
+
# lost if doing a specific `columns` selection in to_table)
|
1461 |
+
if use_pandas_metadata:
|
1462 |
+
if metadata and b"pandas" in metadata:
|
1463 |
+
new_metadata = table.schema.metadata or {}
|
1464 |
+
new_metadata.update({b"pandas": metadata[b"pandas"]})
|
1465 |
+
table = table.replace_schema_metadata(new_metadata)
|
1466 |
+
|
1467 |
+
return table
|
1468 |
+
|
1469 |
+
def _get_common_pandas_metadata(self):
|
1470 |
+
|
1471 |
+
if not self._base_dir:
|
1472 |
+
return None
|
1473 |
+
|
1474 |
+
metadata = None
|
1475 |
+
for name in ["_common_metadata", "_metadata"]:
|
1476 |
+
metadata_path = os.path.join(str(self._base_dir), name)
|
1477 |
+
finfo = self.filesystem.get_file_info(metadata_path)
|
1478 |
+
if finfo.is_file:
|
1479 |
+
pq_meta = read_metadata(
|
1480 |
+
metadata_path, filesystem=self.filesystem)
|
1481 |
+
metadata = pq_meta.metadata
|
1482 |
+
if metadata and b'pandas' in metadata:
|
1483 |
+
break
|
1484 |
+
|
1485 |
+
return metadata
|
1486 |
+
|
1487 |
+
def read_pandas(self, **kwargs):
|
1488 |
+
"""
|
1489 |
+
Read dataset including pandas metadata, if any. Other arguments passed
|
1490 |
+
through to :func:`read`, see docstring for further details.
|
1491 |
+
|
1492 |
+
Parameters
|
1493 |
+
----------
|
1494 |
+
**kwargs : optional
|
1495 |
+
Additional options for :func:`read`
|
1496 |
+
|
1497 |
+
Examples
|
1498 |
+
--------
|
1499 |
+
Generate an example parquet file:
|
1500 |
+
|
1501 |
+
>>> import pyarrow as pa
|
1502 |
+
>>> import pandas as pd
|
1503 |
+
>>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022, 2019, 2021],
|
1504 |
+
... 'n_legs': [2, 2, 4, 4, 5, 100],
|
1505 |
+
... 'animal': ["Flamingo", "Parrot", "Dog", "Horse",
|
1506 |
+
... "Brittle stars", "Centipede"]})
|
1507 |
+
>>> table = pa.Table.from_pandas(df)
|
1508 |
+
>>> import pyarrow.parquet as pq
|
1509 |
+
>>> pq.write_table(table, 'table_V2.parquet')
|
1510 |
+
>>> dataset = pq.ParquetDataset('table_V2.parquet')
|
1511 |
+
|
1512 |
+
Read the dataset with pandas metadata:
|
1513 |
+
|
1514 |
+
>>> dataset.read_pandas(columns=["n_legs"])
|
1515 |
+
pyarrow.Table
|
1516 |
+
n_legs: int64
|
1517 |
+
----
|
1518 |
+
n_legs: [[2,2,4,4,5,100]]
|
1519 |
+
|
1520 |
+
>>> dataset.read_pandas(columns=["n_legs"]).schema.pandas_metadata
|
1521 |
+
{'index_columns': [{'kind': 'range', 'name': None, 'start': 0, ...}
|
1522 |
+
"""
|
1523 |
+
return self.read(use_pandas_metadata=True, **kwargs)
|
1524 |
+
|
1525 |
+
    @property
    def fragments(self):
        """
        A list of the Dataset source fragments or pieces with absolute
        file paths.

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_v2_fragments',
        ...                     partition_cols=['year'])
        >>> dataset = pq.ParquetDataset('dataset_v2_fragments/')

        List the fragments:

        >>> dataset.fragments
        [<pyarrow.dataset.ParquetFileFragment path=dataset_v2_fragments/...
        """
        return list(self._dataset.get_fragments())

    @property
    def files(self):
        """
        A list of absolute Parquet file paths in the Dataset source.

        Examples
        --------
        Generate an example dataset:

        >>> import pyarrow as pa
        >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
        ...                   'n_legs': [2, 2, 4, 4, 5, 100],
        ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
        ...                              "Brittle stars", "Centipede"]})
        >>> import pyarrow.parquet as pq
        >>> pq.write_to_dataset(table, root_path='dataset_v2_files',
        ...                     partition_cols=['year'])
        >>> dataset = pq.ParquetDataset('dataset_v2_files/')

        List the files:

        >>> dataset.files
        ['dataset_v2_files/year=2019/...-0.parquet', ...
        """
        return self._dataset.files

    @property
    def filesystem(self):
        """
        The filesystem type of the Dataset source.
        """
        return self._dataset.filesystem

    @property
    def partitioning(self):
        """
        The partitioning of the Dataset source, if discovered.
        """
        return self._dataset.partitioning


_read_table_docstring = """
{0}

Parameters
----------
source : str, pyarrow.NativeFile, or file-like object
    If a string is passed, it can be a single file name or directory name.
    For file-like objects, only a single file is read. Use
    pyarrow.BufferReader to read a file contained in a bytes or buffer-like
    object.
columns : list
    If not None, only these columns will be read from the file. A column
    name may be a prefix of a nested field, e.g. 'a' will select 'a.b',
    'a.c', and 'a.d.e'. If empty, no columns will be read. Note
    that the table will still have the correct num_rows set despite having
    no columns.
use_threads : bool, default True
    Perform multi-threaded column reads.
schema : Schema, optional
    Optionally provide the Schema for the parquet dataset, in which case it
    will not be inferred from the source.
{1}
filesystem : FileSystem, default None
    If nothing passed, will be inferred based on path.
    The path will be looked up in the local on-disk filesystem, otherwise
    it will be parsed as a URI to determine the filesystem.
filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None
    Rows which do not match the filter predicate will be removed from scanned
    data. Partition keys embedded in a nested directory structure will be
    exploited to avoid loading files at all if they contain no matching rows.
    Within-file level filtering and different partitioning schemes are
    supported.

    {3}
use_legacy_dataset : bool, optional
    Deprecated and has no effect from PyArrow version 15.0.0.
ignore_prefixes : list, optional
    Files matching any of these prefixes will be ignored by the
    discovery process.
    This is matched to the basename of a path.
    By default this is ['.', '_'].
    Note that discovery happens only if a directory is passed as source.
pre_buffer : bool, default True
    Coalesce and issue file reads in parallel to improve performance on
    high-latency filesystems (e.g. S3). If True, Arrow will use a
    background I/O thread pool. If using a filesystem layer that itself
    performs readahead (e.g. fsspec's S3FS), disable readahead for best
    results.
coerce_int96_timestamp_unit : str, default None
    Cast timestamps that are stored in INT96 format to a particular
    resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
    and therefore INT96 timestamps will be inferred as timestamps
    in nanoseconds.
decryption_properties : FileDecryptionProperties or None
    File-level decryption properties.
    The decryption properties can be created using
    ``CryptoFactory.file_decryption_properties()``.
thrift_string_size_limit : int, default None
    If not None, override the maximum total string size allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.
thrift_container_size_limit : int, default None
    If not None, override the maximum total size of containers allocated
    when decoding Thrift structures. The default limit should be
    sufficient for most Parquet files.
page_checksum_verification : bool, default False
    If True, verify the checksum for each page read from the file.

Returns
-------
{2}

{4}
"""

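# The `filters` argument documented above accepts either a DNF-style list of
# tuples or a pyarrow.compute Expression. A minimal sketch of the two
# equivalent spellings (illustrative; the dataset path assumes the
# partitioned example written further below):
#
# >>> import pyarrow.compute as pc
# >>> import pyarrow.parquet as pq
# >>> t1 = pq.read_table('dataset_name_2', filters=[('year', '>=', 2021)])
# >>> t2 = pq.read_table('dataset_name_2', filters=pc.field('year') >= 2021)
# >>> t1.num_rows == t2.num_rows
# True
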
_read_table_example = """\

Examples
--------

Generate an example PyArrow Table and write it to a partitioned dataset:

>>> import pyarrow as pa
>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
...                   'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})
>>> import pyarrow.parquet as pq
>>> pq.write_to_dataset(table, root_path='dataset_name_2',
...                     partition_cols=['year'])

Read the data:

>>> pq.read_table('dataset_name_2').to_pandas()
   n_legs         animal  year
0       5  Brittle stars  2019
1       2       Flamingo  2020
2       4            Dog  2021
3     100      Centipede  2021
4       2         Parrot  2022
5       4          Horse  2022


Read only a subset of columns:

>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"])
pyarrow.Table
n_legs: int64
animal: string
----
n_legs: [[5],[2],[4,100],[2,4]]
animal: [["Brittle stars"],["Flamingo"],["Dog","Centipede"],["Parrot","Horse"]]

Read a subset of columns and read one column as DictionaryArray:

>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"],
...               read_dictionary=["animal"])
pyarrow.Table
n_legs: int64
animal: dictionary<values=string, indices=int32, ordered=0>
----
n_legs: [[5],[2],[4,100],[2,4]]
animal: [  -- dictionary:
["Brittle stars"]  -- indices:
[0],  -- dictionary:
["Flamingo"]  -- indices:
[0],  -- dictionary:
["Dog","Centipede"]  -- indices:
[0,1],  -- dictionary:
["Parrot","Horse"]  -- indices:
[0,1]]

Read the table with filter:

>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"],
...               filters=[('n_legs','<',4)]).to_pandas()
   n_legs    animal
0       2  Flamingo
1       2    Parrot

Read data from a single Parquet file:

>>> pq.write_table(table, 'example.parquet')
>>> pq.read_table('example.parquet').to_pandas()
   year  n_legs         animal
0  2020       2       Flamingo
1  2022       2         Parrot
2  2021       4            Dog
3  2022       4          Horse
4  2019       5  Brittle stars
5  2021     100      Centipede
"""

def read_table(source, *, columns=None, use_threads=True,
               schema=None, use_pandas_metadata=False, read_dictionary=None,
               memory_map=False, buffer_size=0, partitioning="hive",
               filesystem=None, filters=None, use_legacy_dataset=None,
               ignore_prefixes=None, pre_buffer=True,
               coerce_int96_timestamp_unit=None,
               decryption_properties=None, thrift_string_size_limit=None,
               thrift_container_size_limit=None,
               page_checksum_verification=False):
    if use_legacy_dataset is not None:
        warnings.warn(
            "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 "
            "and will be removed in a future version.",
            FutureWarning, stacklevel=2)

    try:
        dataset = ParquetDataset(
            source,
            schema=schema,
            filesystem=filesystem,
            partitioning=partitioning,
            memory_map=memory_map,
            read_dictionary=read_dictionary,
            buffer_size=buffer_size,
            filters=filters,
            ignore_prefixes=ignore_prefixes,
            pre_buffer=pre_buffer,
            coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
            thrift_string_size_limit=thrift_string_size_limit,
            thrift_container_size_limit=thrift_container_size_limit,
            page_checksum_verification=page_checksum_verification,
        )
    except ImportError:
        # fall back on ParquetFile for simple cases when pyarrow.dataset
        # module is not available
        if filters is not None:
            raise ValueError(
                "the 'filters' keyword is not supported when the "
                "pyarrow.dataset module is not available"
            )
        if partitioning != "hive":
            raise ValueError(
                "the 'partitioning' keyword is not supported when the "
                "pyarrow.dataset module is not available"
            )
        if schema is not None:
            raise ValueError(
                "the 'schema' argument is not supported when the "
                "pyarrow.dataset module is not available"
            )
        filesystem, path = _resolve_filesystem_and_path(source, filesystem)
        if filesystem is not None:
            source = filesystem.open_input_file(path)
        # TODO test that source is not a directory or a list
        dataset = ParquetFile(
            source, read_dictionary=read_dictionary,
            memory_map=memory_map, buffer_size=buffer_size,
            pre_buffer=pre_buffer,
            coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
            decryption_properties=decryption_properties,
            thrift_string_size_limit=thrift_string_size_limit,
            thrift_container_size_limit=thrift_container_size_limit,
            page_checksum_verification=page_checksum_verification,
        )

    return dataset.read(columns=columns, use_threads=use_threads,
                        use_pandas_metadata=use_pandas_metadata)


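# Sketch of the file-like path mentioned in the docstring (illustrative):
# reading a Parquet payload held entirely in memory via pyarrow.BufferReader.
#
# >>> import pyarrow as pa
# >>> import pyarrow.parquet as pq
# >>> sink = pa.BufferOutputStream()
# >>> pq.write_table(pa.table({'x': [1, 2, 3]}), sink)
# >>> pq.read_table(pa.BufferReader(sink.getvalue())).num_rows
# 3
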
read_table.__doc__ = _read_table_docstring.format(
    """Read a Table from Parquet format""",
    "\n".join(("""use_pandas_metadata : bool, default False
    If True and file has custom pandas schema metadata, ensure that
    index columns are also loaded.""", _read_docstring_common)),
    """pyarrow.Table
    Content of the file as a table (of columns)""",
    _DNF_filter_doc, _read_table_example)


def read_pandas(source, columns=None, **kwargs):
    return read_table(
        source, columns=columns, use_pandas_metadata=True, **kwargs
    )


read_pandas.__doc__ = _read_table_docstring.format(
    'Read a Table from Parquet format, also reading DataFrame\n'
    'index values if known in the file metadata',
    "\n".join((_read_docstring_common,
               """**kwargs
    additional options for :func:`read_table`""")),
    """pyarrow.Table
    Content of the file as a Table of Columns, including DataFrame
    indexes as columns""",
    _DNF_filter_doc, "")


def write_table(table, where, row_group_size=None, version='2.6',
                use_dictionary=True, compression='snappy',
                write_statistics=True,
                use_deprecated_int96_timestamps=None,
                coerce_timestamps=None,
                allow_truncated_timestamps=False,
                data_page_size=None, flavor=None,
                filesystem=None,
                compression_level=None,
                use_byte_stream_split=False,
                column_encoding=None,
                data_page_version='1.0',
                use_compliant_nested_type=True,
                encryption_properties=None,
                write_batch_size=None,
                dictionary_pagesize_limit=None,
                store_schema=True,
                write_page_index=False,
                write_page_checksum=False,
                sorting_columns=None,
                **kwargs):
    # Implementor's note: when adding keywords here / updating defaults, also
    # update it in write_to_dataset and _dataset_parquet.pyx ParquetFileWriteOptions
    row_group_size = kwargs.pop('chunk_size', row_group_size)
    use_int96 = use_deprecated_int96_timestamps
    try:
        with ParquetWriter(
                where, table.schema,
                filesystem=filesystem,
                version=version,
                flavor=flavor,
                use_dictionary=use_dictionary,
                write_statistics=write_statistics,
                coerce_timestamps=coerce_timestamps,
                data_page_size=data_page_size,
                allow_truncated_timestamps=allow_truncated_timestamps,
                compression=compression,
                use_deprecated_int96_timestamps=use_int96,
                compression_level=compression_level,
                use_byte_stream_split=use_byte_stream_split,
                column_encoding=column_encoding,
                data_page_version=data_page_version,
                use_compliant_nested_type=use_compliant_nested_type,
                encryption_properties=encryption_properties,
                write_batch_size=write_batch_size,
                dictionary_pagesize_limit=dictionary_pagesize_limit,
                store_schema=store_schema,
                write_page_index=write_page_index,
                write_page_checksum=write_page_checksum,
                sorting_columns=sorting_columns,
                **kwargs) as writer:
            writer.write_table(table, row_group_size=row_group_size)
    except Exception:
        if _is_path_like(where):
            try:
                os.remove(_stringify_path(where))
            except os.error:
                pass
        raise


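# Note on the except-branch above (illustrative sketch; the target path is
# hypothetical): a partially written path-like target is removed, so a failed
# write does not leave a stray partial file behind.
#
# >>> import pyarrow as pa
# >>> import pyarrow.parquet as pq
# >>> try:
# ...     pq.write_table(pa.table({'x': [1]}), 'no-such-dir/t.parquet')
# ... except OSError:
# ...     pass  # no partial 'no-such-dir/t.parquet' is left behind
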
_write_table_example = """\
Generate an example PyArrow Table:

>>> import pyarrow as pa
>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
...                              "Brittle stars", "Centipede"]})

and write the Table into Parquet file:

>>> import pyarrow.parquet as pq
>>> pq.write_table(table, 'example.parquet')

Defining row group size for the Parquet file:

>>> pq.write_table(table, 'example.parquet', row_group_size=3)

Defining row group compression (default is Snappy):

>>> pq.write_table(table, 'example.parquet', compression='none')

Defining row group compression and encoding per-column:

>>> pq.write_table(table, 'example.parquet',
...                compression={'n_legs': 'snappy', 'animal': 'gzip'},
...                use_dictionary=['n_legs', 'animal'])

Defining column encoding per-column:

>>> pq.write_table(table, 'example.parquet',
...                column_encoding={'animal':'PLAIN'},
...                use_dictionary=False)
"""

write_table.__doc__ = """
Write a Table to Parquet format.

Parameters
----------
table : pyarrow.Table
where : string or pyarrow.NativeFile
row_group_size : int
    Maximum number of rows in each written row group. If None, the
    row group size will be the minimum of the Table size and
    1024 * 1024.
{}
**kwargs : optional
    Additional options for ParquetWriter

Examples
--------
{}
""".format(_parquet_writer_arg_docs, _write_table_example)


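# Sketch of the row_group_size behaviour documented above (illustrative; the
# file name is hypothetical): ten rows with row_group_size=4 produce three
# row groups (4 + 4 + 2).
#
# >>> import pyarrow as pa
# >>> import pyarrow.parquet as pq
# >>> pq.write_table(pa.table({'x': list(range(10))}), 'rg.parquet',
# ...                row_group_size=4)
# >>> pq.read_metadata('rg.parquet').num_row_groups
# 3
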
def write_to_dataset(table, root_path, partition_cols=None,
                     filesystem=None, use_legacy_dataset=None,
                     schema=None, partitioning=None,
                     basename_template=None, use_threads=None,
                     file_visitor=None, existing_data_behavior=None,
                     **kwargs):
    """Wrapper around dataset.write_dataset for writing a Table to
    Parquet format by partitions.
    For each combination of partition columns and values,
    subdirectories are created in the following
    manner:

    root_dir/
      group1=value1
        group2=value1
          <uuid>.parquet
        group2=value2
          <uuid>.parquet
      group1=valueN
        group2=value1
          <uuid>.parquet
        group2=valueN
          <uuid>.parquet

    Parameters
    ----------
    table : pyarrow.Table
    root_path : str, pathlib.Path
        The root directory of the dataset.
    partition_cols : list
        Column names by which to partition the dataset.
        Columns are partitioned in the order they are given.
    filesystem : FileSystem, default None
        If nothing passed, will be inferred based on path.
        The path will be looked up in the local on-disk filesystem, otherwise
        it will be parsed as a URI to determine the filesystem.
    use_legacy_dataset : bool, optional
        Deprecated and has no effect from PyArrow version 15.0.0.
    schema : Schema, optional
        The Schema of the dataset.
    partitioning : Partitioning or list[str], optional
        The partitioning scheme specified with the
        ``pyarrow.dataset.partitioning()`` function or a list of field names.
        When providing a list of field names, you can use
        ``partitioning_flavor`` to drive which partitioning type should be
        used.
    basename_template : str, optional
        A template string used to generate basenames of written data files.
        The token '{i}' will be replaced with an automatically incremented
        integer. If not specified, it defaults to "guid-{i}.parquet".
    use_threads : bool, default True
        Write files in parallel. If enabled, the maximum parallelism, as
        determined by the number of available CPU cores, will be used.
    file_visitor : function
        If set, this function will be called with a WrittenFile instance
        for each file created during the call. This object will have both
        a path attribute and a metadata attribute.

        The path attribute will be a string containing the path to
        the created file.

        The metadata attribute will be the parquet metadata of the file.
        This metadata will have the file path attribute set and can be used
        to build a _metadata file. The metadata attribute will be None if
        the format is not parquet.

        Example visitor which simply collects the filenames created::

            visited_paths = []

            def file_visitor(written_file):
                visited_paths.append(written_file.path)

    existing_data_behavior : 'overwrite_or_ignore' | 'error' | \
'delete_matching'
        Controls how the dataset will handle data that already exists in
        the destination. The default behaviour is 'overwrite_or_ignore'.

        'overwrite_or_ignore' will ignore any existing data and will
        overwrite files with the same name as an output file. Other
        existing files will be ignored. This behavior, in combination
        with a unique basename_template for each write, will allow for
        an append workflow.

        'error' will raise an error if any data exists in the destination.

        'delete_matching' is useful when you are writing a partitioned
        dataset. The first time each partition directory is encountered
        the entire directory will be deleted. This allows you to overwrite
        old partitions completely.
    **kwargs : dict
        Used as additional kwargs for :func:`pyarrow.dataset.write_dataset`
        function for matching kwargs, and remainder to
        :func:`pyarrow.dataset.ParquetFileFormat.make_write_options`.
        See the docstring of :func:`write_table` and
        :func:`pyarrow.dataset.write_dataset` for the available options.
        Using `metadata_collector` in kwargs allows one to collect the
        file metadata instances of dataset pieces. The file paths in the
        ColumnChunkMetaData will be set relative to `root_path`.

    Examples
    --------
    Generate an example PyArrow Table:

    >>> import pyarrow as pa
    >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021],
    ...                   'n_legs': [2, 2, 4, 4, 5, 100],
    ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
    ...                              "Brittle stars", "Centipede"]})

    and write it to a partitioned dataset:

    >>> import pyarrow.parquet as pq
    >>> pq.write_to_dataset(table, root_path='dataset_name_3',
    ...                     partition_cols=['year'])
    >>> pq.ParquetDataset('dataset_name_3').files
    ['dataset_name_3/year=2019/...-0.parquet', ...

    Write a single Parquet file into the root folder:

    >>> pq.write_to_dataset(table, root_path='dataset_name_4')
    >>> pq.ParquetDataset('dataset_name_4/').files
    ['dataset_name_4/...-0.parquet']
    """
    if use_legacy_dataset is not None:
        warnings.warn(
            "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 "
            "and will be removed in a future version.",
            FutureWarning, stacklevel=2)

    metadata_collector = kwargs.pop('metadata_collector', None)

    # Check for conflicting keywords
    msg_confl = (
        "The '{1}' argument is not supported. "
        "Use only '{0}' instead."
    )
    if partition_cols is not None and partitioning is not None:
        raise ValueError(msg_confl.format("partitioning",
                                          "partition_cols"))

    if metadata_collector is not None and file_visitor is not None:
        raise ValueError(msg_confl.format("file_visitor",
                                          "metadata_collector"))

    import pyarrow.dataset as ds

    # extract write_dataset specific options
    # reset assumed to go to make_write_options
    write_dataset_kwargs = dict()
    for key in inspect.signature(ds.write_dataset).parameters:
        if key in kwargs:
            write_dataset_kwargs[key] = kwargs.pop(key)
    write_dataset_kwargs['max_rows_per_group'] = kwargs.pop(
        'row_group_size', kwargs.pop("chunk_size", None)
    )

    if metadata_collector is not None:
        def file_visitor(written_file):
            metadata_collector.append(written_file.metadata)

    # map format arguments
    parquet_format = ds.ParquetFileFormat()
    write_options = parquet_format.make_write_options(**kwargs)

    # map old filesystems to new one
    if filesystem is not None:
        filesystem = _ensure_filesystem(filesystem)

    if partition_cols:
        part_schema = table.select(partition_cols).schema
        partitioning = ds.partitioning(part_schema, flavor="hive")

    if basename_template is None:
        basename_template = guid() + '-{i}.parquet'

    if existing_data_behavior is None:
        existing_data_behavior = 'overwrite_or_ignore'

    ds.write_dataset(
        table, root_path, filesystem=filesystem,
        format=parquet_format, file_options=write_options, schema=schema,
        partitioning=partitioning, use_threads=use_threads,
        file_visitor=file_visitor,
        basename_template=basename_template,
        existing_data_behavior=existing_data_behavior,
        **write_dataset_kwargs)
    return


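# Sketch of the append workflow enabled by 'overwrite_or_ignore'
# (illustrative; the path and templates are hypothetical): giving each write a
# distinct basename_template accumulates files instead of clobbering them.
#
# >>> import pyarrow as pa
# >>> import pyarrow.parquet as pq
# >>> t = pa.table({'year': [2020, 2021], 'v': [1, 2]})
# >>> pq.write_to_dataset(t, 'appending_ds', partition_cols=['year'],
# ...                     basename_template='batch-0-{i}.parquet')
# >>> pq.write_to_dataset(t, 'appending_ds', partition_cols=['year'],
# ...                     basename_template='batch-1-{i}.parquet')
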
def write_metadata(schema, where, metadata_collector=None, filesystem=None,
                   **kwargs):
    """
    Write metadata-only Parquet file from schema. This can be used with
    `write_to_dataset` to generate `_common_metadata` and `_metadata` sidecar
    files.

    Parameters
    ----------
    schema : pyarrow.Schema
    where : string or pyarrow.NativeFile
    metadata_collector : list
        where to collect metadata information.
    filesystem : FileSystem, default None
        If nothing passed, will be inferred from `where` if path-like, else
        `where` is already a file-like object so no filesystem is needed.
    **kwargs : dict
        Additional kwargs for ParquetWriter class. See docstring for
        `ParquetWriter` for more information.

    Examples
    --------
    Generate example data:

    >>> import pyarrow as pa
    >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
    ...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
    ...                              "Brittle stars", "Centipede"]})

    Write a dataset and collect metadata information.

    >>> metadata_collector = []
    >>> import pyarrow.parquet as pq
    >>> pq.write_to_dataset(
    ...     table, 'dataset_metadata',
    ...     metadata_collector=metadata_collector)

    Write the `_common_metadata` parquet file without row groups statistics.

    >>> pq.write_metadata(
    ...     table.schema, 'dataset_metadata/_common_metadata')

    Write the `_metadata` parquet file with row groups statistics.

    >>> pq.write_metadata(
    ...     table.schema, 'dataset_metadata/_metadata',
    ...     metadata_collector=metadata_collector)
    """
    filesystem, where = _resolve_filesystem_and_path(where, filesystem)

    if hasattr(where, "seek"):  # file-like
        cursor_position = where.tell()

    writer = ParquetWriter(where, schema, filesystem, **kwargs)
    writer.close()

    if metadata_collector is not None:
        # ParquetWriter doesn't expose the metadata until it's written. Write
        # it and read it again.
        metadata = read_metadata(where, filesystem=filesystem)
        if hasattr(where, "seek"):
            where.seek(cursor_position)  # file-like, set cursor back.

        for m in metadata_collector:
            metadata.append_row_groups(m)
        if filesystem is not None:
            with filesystem.open_output_stream(where) as f:
                metadata.write_metadata_file(f)
        else:
            metadata.write_metadata_file(where)


def read_metadata(where, memory_map=False, decryption_properties=None,
                  filesystem=None):
    """
    Read FileMetaData from footer of a single Parquet file.

    Parameters
    ----------
    where : str (file path) or file-like object
    memory_map : bool, default False
        Create memory map when the source is a file path.
    decryption_properties : FileDecryptionProperties, default None
        Decryption properties for reading encrypted Parquet files.
    filesystem : FileSystem, default None
        If nothing passed, will be inferred based on path.
        The path will be looked up in the local on-disk filesystem, otherwise
        it will be parsed as a URI to determine the filesystem.

    Returns
    -------
    metadata : FileMetaData
        The metadata of the Parquet file

    Examples
    --------
    >>> import pyarrow as pa
    >>> import pyarrow.parquet as pq
    >>> table = pa.table({'n_legs': [4, 5, 100],
    ...                   'animal': ["Dog", "Brittle stars", "Centipede"]})
    >>> pq.write_table(table, 'example.parquet')

    >>> pq.read_metadata('example.parquet')
    <pyarrow._parquet.FileMetaData object at ...>
      created_by: parquet-cpp-arrow version ...
      num_columns: 2
      num_rows: 3
      num_row_groups: 1
      format_version: 2.6
      serialized_size: ...
    """
    filesystem, where = _resolve_filesystem_and_path(where, filesystem)
    file_ctx = nullcontext()
    if filesystem is not None:
        file_ctx = where = filesystem.open_input_file(where)

    with file_ctx:
        file = ParquetFile(where, memory_map=memory_map,
                           decryption_properties=decryption_properties)
        return file.metadata


def read_schema(where, memory_map=False, decryption_properties=None,
                filesystem=None):
    """
    Read effective Arrow schema from Parquet file metadata.

    Parameters
    ----------
    where : str (file path) or file-like object
    memory_map : bool, default False
        Create memory map when the source is a file path.
    decryption_properties : FileDecryptionProperties, default None
        Decryption properties for reading encrypted Parquet files.
    filesystem : FileSystem, default None
        If nothing passed, will be inferred based on path.
        The path will be looked up in the local on-disk filesystem, otherwise
        it will be parsed as a URI to determine the filesystem.

    Returns
    -------
    schema : pyarrow.Schema
        The schema of the Parquet file

    Examples
    --------
    >>> import pyarrow as pa
    >>> import pyarrow.parquet as pq
    >>> table = pa.table({'n_legs': [4, 5, 100],
    ...                   'animal': ["Dog", "Brittle stars", "Centipede"]})
    >>> pq.write_table(table, 'example.parquet')

    >>> pq.read_schema('example.parquet')
    n_legs: int64
    animal: string
    """
    filesystem, where = _resolve_filesystem_and_path(where, filesystem)
    file_ctx = nullcontext()
    if filesystem is not None:
        file_ctx = where = filesystem.open_input_file(where)

    with file_ctx:
        file = ParquetFile(
            where, memory_map=memory_map,
            decryption_properties=decryption_properties)
        return file.schema.to_arrow_schema()


__all__ = (
    "ColumnChunkMetaData",
    "ColumnSchema",
    "FileDecryptionProperties",
    "FileEncryptionProperties",
    "FileMetaData",
    "ParquetDataset",
    "ParquetFile",
    "ParquetLogicalType",
    "ParquetReader",
    "ParquetSchema",
    "ParquetWriter",
    "RowGroupMetaData",
    "SortingColumn",
    "Statistics",
    "read_metadata",
    "read_pandas",
    "read_schema",
    "read_table",
    "write_metadata",
    "write_table",
    "write_to_dataset",
    "_filters_to_expression",
    "filters_to_expression",
)
venv/lib/python3.10/site-packages/pyarrow/parquet/encryption.py
ADDED
@@ -0,0 +1,23 @@
# pylint: disable=unused-wildcard-import, unused-import

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pyarrow._parquet_encryption import (CryptoFactory,  # noqa
                                         EncryptionConfiguration,
                                         DecryptionConfiguration,
                                         KmsConnectionConfig,
                                         KmsClient)
venv/lib/python3.10/site-packages/pyarrow/scalar.pxi
ADDED
@@ -0,0 +1,1220 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import collections
from cython cimport binding


cdef class Scalar(_Weakrefable):
    """
    The base class for scalars.
    """

    def __init__(self):
        raise TypeError("Do not call {}'s constructor directly, use "
                        "pa.scalar() instead.".format(self.__class__.__name__))

    cdef void init(self, const shared_ptr[CScalar]& wrapped):
        self.wrapped = wrapped

    @staticmethod
    cdef wrap(const shared_ptr[CScalar]& wrapped):
        cdef:
            Scalar self
            Type type_id = wrapped.get().type.get().id()
            shared_ptr[CDataType] sp_data_type = wrapped.get().type

        if type_id == _Type_NA:
            return _NULL

        if type_id not in _scalar_classes:
            raise NotImplementedError(
                "Wrapping scalar of type " + frombytes(sp_data_type.get().ToString()))

        typ = get_scalar_class_from_type(sp_data_type)
        self = typ.__new__(typ)
        self.init(wrapped)

        return self

    cdef inline shared_ptr[CScalar] unwrap(self) nogil:
        return self.wrapped

    @property
    def type(self):
        """
        Data type of the Scalar object.
        """
        return pyarrow_wrap_data_type(self.wrapped.get().type)

    @property
    def is_valid(self):
        """
        Holds a valid (non-null) value.
        """
        return self.wrapped.get().is_valid

    def cast(self, object target_type=None, safe=None, options=None, memory_pool=None):
        """
        Cast scalar value to another data type.

        See :func:`pyarrow.compute.cast` for usage.

        Parameters
        ----------
        target_type : DataType, default None
            Type to cast scalar to.
        safe : bool, default True
            Whether to check for conversion errors such as overflow.
        options : CastOptions, default None
            Additional checks passed by CastOptions.
        memory_pool : MemoryPool, optional
            Memory pool to use for allocations during function execution.

        Returns
        -------
        scalar : A Scalar of the given target data type.
        """
        return _pc().cast(self, target_type, safe=safe,
                          options=options, memory_pool=memory_pool)

    def validate(self, *, full=False):
        """
        Perform validation checks. An exception is raised if validation fails.

        By default only cheap validation checks are run. Pass `full=True`
        for thorough validation checks (potentially O(n)).

        Parameters
        ----------
        full : bool, default False
            If True, run expensive checks, otherwise cheap checks only.

        Raises
        ------
        ArrowInvalid
        """
        if full:
            with nogil:
                check_status(self.wrapped.get().ValidateFull())
        else:
            with nogil:
                check_status(self.wrapped.get().Validate())

    def __repr__(self):
        return '<pyarrow.{}: {!r}>'.format(
            self.__class__.__name__, self.as_py()
        )

    def __str__(self):
        return str(self.as_py())

    def equals(self, Scalar other not None):
        """
        Parameters
        ----------
        other : pyarrow.Scalar

        Returns
        -------
        bool
        """
        return self.wrapped.get().Equals(other.unwrap().get()[0])

    def __eq__(self, other):
        try:
            return self.equals(other)
        except TypeError:
            return NotImplemented

    def __hash__(self):
        cdef CScalarHash hasher
        return hasher(self.wrapped)

    def __reduce__(self):
        return scalar, (self.as_py(), self.type)

    def as_py(self):
        raise NotImplementedError()


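# Python-side sketch of the wrapping above (illustrative): pa.scalar() yields
# the concrete Scalar subclass for the inferred type, and as_py() converts
# back to a plain Python value.
#
# >>> import pyarrow as pa
# >>> s = pa.scalar(42)
# >>> type(s).__name__, s.as_py()
# ('Int64Scalar', 42)
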
_NULL = NA = None


cdef class NullScalar(Scalar):
    """
    Concrete class for null scalars.
    """

    def __cinit__(self):
        global NA
        if NA is not None:
            raise RuntimeError('Cannot create multiple NullScalar instances')
        self.init(shared_ptr[CScalar](new CNullScalar()))

    def __init__(self):
        pass

    def as_py(self):
        """
        Return this value as a Python None.
        """
        return None


_NULL = NA = NullScalar()


cdef class BooleanScalar(Scalar):
    """
    Concrete class for boolean scalars.
    """

    def as_py(self):
        """
        Return this value as a Python bool.
        """
        cdef CBooleanScalar* sp = <CBooleanScalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None


cdef class UInt8Scalar(Scalar):
    """
    Concrete class for uint8 scalars.
    """

    def as_py(self):
        """
        Return this value as a Python int.
        """
        cdef CUInt8Scalar* sp = <CUInt8Scalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None


cdef class Int8Scalar(Scalar):
    """
    Concrete class for int8 scalars.
    """

    def as_py(self):
        """
        Return this value as a Python int.
        """
        cdef CInt8Scalar* sp = <CInt8Scalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None


cdef class UInt16Scalar(Scalar):
    """
    Concrete class for uint16 scalars.
    """

    def as_py(self):
        """
        Return this value as a Python int.
        """
        cdef CUInt16Scalar* sp = <CUInt16Scalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None


cdef class Int16Scalar(Scalar):
    """
    Concrete class for int16 scalars.
    """

    def as_py(self):
        """
        Return this value as a Python int.
        """
        cdef CInt16Scalar* sp = <CInt16Scalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None


cdef class UInt32Scalar(Scalar):
    """
    Concrete class for uint32 scalars.
    """

    def as_py(self):
        """
        Return this value as a Python int.
        """
        cdef CUInt32Scalar* sp = <CUInt32Scalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None


cdef class Int32Scalar(Scalar):
    """
    Concrete class for int32 scalars.
    """

    def as_py(self):
        """
        Return this value as a Python int.
        """
        cdef CInt32Scalar* sp = <CInt32Scalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None


cdef class UInt64Scalar(Scalar):
    """
    Concrete class for uint64 scalars.
    """

    def as_py(self):
        """
        Return this value as a Python int.
        """
        cdef CUInt64Scalar* sp = <CUInt64Scalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None


cdef class Int64Scalar(Scalar):
    """
    Concrete class for int64 scalars.
    """

    def as_py(self):
        """
        Return this value as a Python int.
        """
        cdef CInt64Scalar* sp = <CInt64Scalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None


cdef class HalfFloatScalar(Scalar):
    """
    Concrete class for half-float (float16) scalars.
    """

    def as_py(self):
        """
        Return this value as a Python float.
        """
        cdef CHalfFloatScalar* sp = <CHalfFloatScalar*> self.wrapped.get()
        return PyHalf_FromHalf(sp.value) if sp.is_valid else None


cdef class FloatScalar(Scalar):
    """
    Concrete class for float scalars.
    """

    def as_py(self):
        """
        Return this value as a Python float.
        """
        cdef CFloatScalar* sp = <CFloatScalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None


cdef class DoubleScalar(Scalar):
    """
    Concrete class for double scalars.
    """

    def as_py(self):
        """
        Return this value as a Python float.
        """
        cdef CDoubleScalar* sp = <CDoubleScalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None


cdef class Decimal128Scalar(Scalar):
    """
    Concrete class for decimal128 scalars.
    """

    def as_py(self):
        """
        Return this value as a Python Decimal.
        """
        cdef:
            CDecimal128Scalar* sp = <CDecimal128Scalar*> self.wrapped.get()
            CDecimal128Type* dtype = <CDecimal128Type*> sp.type.get()
        if sp.is_valid:
            return _pydecimal.Decimal(
                frombytes(sp.value.ToString(dtype.scale()))
            )
        else:
            return None


cdef class Decimal256Scalar(Scalar):
    """
    Concrete class for decimal256 scalars.
    """

    def as_py(self):
        """
        Return this value as a Python Decimal.
        """
        cdef:
            CDecimal256Scalar* sp = <CDecimal256Scalar*> self.wrapped.get()
            CDecimal256Type* dtype = <CDecimal256Type*> sp.type.get()
        if sp.is_valid:
            return _pydecimal.Decimal(
                frombytes(sp.value.ToString(dtype.scale()))
            )
        else:
            return None


cdef class Date32Scalar(Scalar):
    """
    Concrete class for date32 scalars.
    """

    @property
    def value(self):
        cdef CDate32Scalar* sp = <CDate32Scalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None

    def as_py(self):
        """
        Return this value as a Python datetime.date instance.
        """
        cdef CDate32Scalar* sp = <CDate32Scalar*> self.wrapped.get()

        if sp.is_valid:
            # shift by days since the UNIX epoch (1970-01-01)
            return (
                datetime.date(1970, 1, 1) + datetime.timedelta(days=sp.value)
            )
        else:
            return None


cdef class Date64Scalar(Scalar):
    """
    Concrete class for date64 scalars.
    """

    @property
    def value(self):
        cdef CDate64Scalar* sp = <CDate64Scalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None

    def as_py(self):
        """
        Return this value as a Python datetime.date instance.
        """
        cdef CDate64Scalar* sp = <CDate64Scalar*> self.wrapped.get()

        if sp.is_valid:
            return (
                datetime.date(1970, 1, 1) +
                datetime.timedelta(days=sp.value / 86400000)
            )
        else:
            return None


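# Sketch of the epoch arithmetic used by Date32Scalar.as_py above
# (illustrative): date32 stores days since 1970-01-01, so .value and the
# timedelta shift agree.
#
# >>> import datetime
# >>> import pyarrow as pa
# >>> pa.scalar(datetime.date(1970, 2, 1), type=pa.date32()).value
# 31
# >>> datetime.date(1970, 1, 1) + datetime.timedelta(days=31)
# datetime.date(1970, 2, 1)
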
def _datetime_from_int(int64_t value, TimeUnit unit, tzinfo=None):
    if unit == TimeUnit_SECOND:
        delta = datetime.timedelta(seconds=value)
    elif unit == TimeUnit_MILLI:
        delta = datetime.timedelta(milliseconds=value)
    elif unit == TimeUnit_MICRO:
        delta = datetime.timedelta(microseconds=value)
    else:
        # TimeUnit_NANO: prefer pandas timestamps if available
        if _pandas_api.have_pandas:
            return _pandas_api.pd.Timestamp(value, tz=tzinfo, unit='ns')
        # otherwise safely truncate to microsecond resolution datetime
        if value % 1000 != 0:
            raise ValueError(
                "Nanosecond resolution temporal type {} is not safely "
                "convertible to microseconds to convert to datetime.datetime. "
                "Install pandas to return as Timestamp with nanosecond "
                "support or access the .value attribute.".format(value)
            )
        delta = datetime.timedelta(microseconds=value // 1000)

    dt = datetime.datetime(1970, 1, 1) + delta
    # adjust timezone if set to the datatype
    if tzinfo is not None:
        dt = dt.replace(tzinfo=datetime.timezone.utc).astimezone(tzinfo)

    return dt


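# Sketch of the nanosecond branch above (illustrative): with pandas installed,
# a nanosecond timestamp converts to pd.Timestamp; without pandas, values that
# do not truncate cleanly to microseconds raise ValueError.
#
# >>> import pyarrow as pa
# >>> pa.scalar(1, type=pa.timestamp('ns')).as_py()
# Timestamp('1970-01-01 00:00:00.000000001')
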
cdef class Time32Scalar(Scalar):
|
458 |
+
"""
|
459 |
+
Concrete class for time32 scalars.
|
460 |
+
"""
|
461 |
+
|
462 |
+
@property
|
463 |
+
def value(self):
|
464 |
+
cdef CTime32Scalar* sp = <CTime32Scalar*> self.wrapped.get()
|
465 |
+
return sp.value if sp.is_valid else None
|
466 |
+
|
467 |
+
def as_py(self):
|
468 |
+
"""
|
469 |
+
Return this value as a Python datetime.timedelta instance.
|
470 |
+
"""
|
471 |
+
cdef:
|
472 |
+
CTime32Scalar* sp = <CTime32Scalar*> self.wrapped.get()
|
473 |
+
CTime32Type* dtype = <CTime32Type*> sp.type.get()
|
474 |
+
|
475 |
+
if sp.is_valid:
|
476 |
+
return _datetime_from_int(sp.value, unit=dtype.unit()).time()
|
477 |
+
else:
|
478 |
+
return None
|
479 |
+
|
480 |
+
|
481 |
+
cdef class Time64Scalar(Scalar):
|
482 |
+
"""
|
483 |
+
Concrete class for time64 scalars.
|
484 |
+
"""
|
485 |
+
|
486 |
+
@property
|
487 |
+
def value(self):
|
488 |
+
cdef CTime64Scalar* sp = <CTime64Scalar*> self.wrapped.get()
|
489 |
+
return sp.value if sp.is_valid else None
|
490 |
+
|
491 |
+
def as_py(self):
|
492 |
+
"""
|
493 |
+
Return this value as a Python datetime.timedelta instance.
|
494 |
+
"""
|
495 |
+
cdef:
|
496 |
+
CTime64Scalar* sp = <CTime64Scalar*> self.wrapped.get()
|
497 |
+
CTime64Type* dtype = <CTime64Type*> sp.type.get()
|
498 |
+
|
499 |
+
if sp.is_valid:
|
500 |
+
return _datetime_from_int(sp.value, unit=dtype.unit()).time()
|
501 |
+
else:
|
502 |
+
return None
|
503 |
+
|
504 |
+
|
505 |
+
cdef class TimestampScalar(Scalar):
    """
    Concrete class for timestamp scalars.
    """

    @property
    def value(self):
        cdef CTimestampScalar* sp = <CTimestampScalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None

    def as_py(self):
        """
        Return this value as a Pandas Timestamp instance (if units are
        nanoseconds and pandas is available), otherwise as a Python
        datetime.datetime instance.
        """
        cdef:
            CTimestampScalar* sp = <CTimestampScalar*> self.wrapped.get()
            CTimestampType* dtype = <CTimestampType*> sp.type.get()

        if not sp.is_valid:
            return None

        if not dtype.timezone().empty():
            tzinfo = string_to_tzinfo(frombytes(dtype.timezone()))
        else:
            tzinfo = None

        return _datetime_from_int(sp.value, unit=dtype.unit(), tzinfo=tzinfo)

    def __repr__(self):
        """
        Return the representation of TimestampScalar using `strftime`, so
        that values outside the range supported by datetime's default repr
        can still be rendered.
        """
        cdef:
            CTimestampScalar* sp = <CTimestampScalar*> self.wrapped.get()
            CTimestampType* dtype = <CTimestampType*> sp.type.get()

        if not dtype.timezone().empty():
            type_format = str(_pc().strftime(self, format="%Y-%m-%dT%H:%M:%S%z"))
        else:
            type_format = str(_pc().strftime(self))
        return '<pyarrow.{}: {!r}>'.format(
            self.__class__.__name__, type_format
        )

cdef class DurationScalar(Scalar):
    """
    Concrete class for duration scalars.
    """

    @property
    def value(self):
        cdef CDurationScalar* sp = <CDurationScalar*> self.wrapped.get()
        return sp.value if sp.is_valid else None

    def as_py(self):
        """
        Return this value as a Pandas Timedelta instance (if units are
        nanoseconds and pandas is available), otherwise as a Python
        datetime.timedelta instance.
        """
        cdef:
            CDurationScalar* sp = <CDurationScalar*> self.wrapped.get()
            CDurationType* dtype = <CDurationType*> sp.type.get()
            TimeUnit unit = dtype.unit()

        if not sp.is_valid:
            return None

        if unit == TimeUnit_SECOND:
            return datetime.timedelta(seconds=sp.value)
        elif unit == TimeUnit_MILLI:
            return datetime.timedelta(milliseconds=sp.value)
        elif unit == TimeUnit_MICRO:
            return datetime.timedelta(microseconds=sp.value)
        else:
            # TimeUnit_NANO: prefer pandas timedeltas if available
            if _pandas_api.have_pandas:
                return _pandas_api.pd.Timedelta(sp.value, unit='ns')
            # otherwise safely truncate to microsecond resolution timedelta
            if sp.value % 1000 != 0:
                raise ValueError(
                    "Nanosecond duration {} is not safely convertible to "
                    "microseconds to convert to datetime.timedelta. Install "
                    "pandas to return as Timedelta with nanosecond support or "
                    "access the .value attribute.".format(sp.value)
                )
            return datetime.timedelta(microseconds=sp.value // 1000)

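# A small sketch of the duration conversion above, assuming pyarrow only:
# microsecond-unit values round-trip exactly, while without pandas a
# nanosecond value not divisible by 1000 raises the ValueError above.
#
#   >>> import datetime
#   >>> import pyarrow as pa
#   >>> d = pa.scalar(datetime.timedelta(microseconds=1_500_000),
#   ...               type=pa.duration("us"))
#   >>> d.as_py()
#   datetime.timedelta(seconds=1, microseconds=500000)
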
cdef class MonthDayNanoIntervalScalar(Scalar):
    """
    Concrete class for month, day, nanosecond interval scalars.
    """

    @property
    def value(self):
        """
        Same as self.as_py()
        """
        return self.as_py()

    def as_py(self):
        """
        Return this value as a pyarrow.MonthDayNano.
        """
        cdef:
            PyObject* val
            CMonthDayNanoIntervalScalar* scalar
        scalar = <CMonthDayNanoIntervalScalar*>self.wrapped.get()
        val = GetResultValue(MonthDayNanoIntervalScalarToPyObject(
            deref(scalar)))
        return PyObject_to_object(val)

cdef class BinaryScalar(Scalar):
    """
    Concrete class for binary-like scalars.
    """

    def as_buffer(self):
        """
        Return a view over this value as a Buffer object.
        """
        cdef CBaseBinaryScalar* sp = <CBaseBinaryScalar*> self.wrapped.get()
        return pyarrow_wrap_buffer(sp.value) if sp.is_valid else None

    def as_py(self):
        """
        Return this value as a Python bytes object.
        """
        buffer = self.as_buffer()
        return None if buffer is None else buffer.to_pybytes()


cdef class LargeBinaryScalar(BinaryScalar):
    pass


cdef class FixedSizeBinaryScalar(BinaryScalar):
    pass


cdef class StringScalar(BinaryScalar):
    """
    Concrete class for string-like (utf8) scalars.
    """

    def as_py(self):
        """
        Return this value as a Python string.
        """
        buffer = self.as_buffer()
        return None if buffer is None else str(buffer, 'utf8')


cdef class LargeStringScalar(StringScalar):
    pass


cdef class BinaryViewScalar(BinaryScalar):
    pass


cdef class StringViewScalar(StringScalar):
    pass

cdef class ListScalar(Scalar):
    """
    Concrete class for list-like scalars.
    """

    @property
    def values(self):
        cdef CBaseListScalar* sp = <CBaseListScalar*> self.wrapped.get()
        if sp.is_valid:
            return pyarrow_wrap_array(sp.value)
        else:
            return None

    def __len__(self):
        """
        Return the number of values.
        """
        return len(self.values)

    def __getitem__(self, i):
        """
        Return the value at the given index.
        """
        return self.values[_normalize_index(i, len(self))]

    def __iter__(self):
        """
        Iterate over this element's values.
        """
        return iter(self.values)

    def as_py(self):
        """
        Return this value as a Python list.
        """
        arr = self.values
        return None if arr is None else arr.to_pylist()


cdef class FixedSizeListScalar(ListScalar):
    pass


cdef class LargeListScalar(ListScalar):
    pass


cdef class ListViewScalar(ListScalar):
    pass


cdef class LargeListViewScalar(ListScalar):
    pass

cdef class StructScalar(Scalar, collections.abc.Mapping):
    """
    Concrete class for struct scalars.
    """

    def __len__(self):
        cdef CStructScalar* sp = <CStructScalar*> self.wrapped.get()
        return sp.value.size()

    def __iter__(self):
        cdef:
            CStructScalar* sp = <CStructScalar*> self.wrapped.get()
            CStructType* dtype = <CStructType*> sp.type.get()
            vector[shared_ptr[CField]] fields = dtype.fields()

        for i in range(dtype.num_fields()):
            yield frombytes(fields[i].get().name())

    def items(self):
        return ((key, self[i]) for i, key in enumerate(self))

    def __contains__(self, key):
        return key in list(self)

    def __getitem__(self, key):
        """
        Return the child value for the given field.

        Parameters
        ----------
        key : Union[int, str]
            Index / position or name of the field.

        Returns
        -------
        result : Scalar
        """
        cdef:
            CFieldRef ref
            CStructScalar* sp = <CStructScalar*> self.wrapped.get()

        if isinstance(key, (bytes, str)):
            ref = CFieldRef(<c_string> tobytes(key))
        elif isinstance(key, int):
            ref = CFieldRef(<int> key)
        else:
            raise TypeError('Expected integer or string index')

        try:
            return Scalar.wrap(GetResultValue(sp.field(ref)))
        except ArrowInvalid as exc:
            if isinstance(key, int):
                raise IndexError(key) from exc
            else:
                raise KeyError(key) from exc

    def as_py(self):
        """
        Return this value as a Python dict.
        """
        if self.is_valid:
            try:
                return {k: self[k].as_py() for k in self.keys()}
            except KeyError:
                raise ValueError(
                    "Converting to Python dictionary is not supported when "
                    "duplicate field names are present")
        else:
            return None

    def _as_py_tuple(self):
        # a version that returns a list of tuples instead of a dict, to
        # support repr/str in the presence of duplicate field names
        if self.is_valid:
            return [(key, self[i].as_py()) for i, key in enumerate(self)]
        else:
            return None

    def __repr__(self):
        return '<pyarrow.{}: {!r}>'.format(
            self.__class__.__name__, self._as_py_tuple()
        )

    def __str__(self):
        return str(self._as_py_tuple())

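# A usage sketch for the Mapping interface above (assuming pyarrow only):
# fields are reachable by name or position, and iteration yields names.
#
#   >>> import pyarrow as pa
#   >>> s = pa.scalar({"x": 1, "y": "a"})
#   >>> s["x"].as_py(), s[1].as_py(), list(s)
#   (1, 'a', ['x', 'y'])
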
cdef class MapScalar(ListScalar):
    """
    Concrete class for map scalars.
    """

    def __getitem__(self, i):
        """
        Return the value at the given index.
        """
        arr = self.values
        if arr is None:
            raise IndexError(i)
        dct = arr[_normalize_index(i, len(arr))]
        return (dct[self.type.key_field.name], dct[self.type.item_field.name])

    def __iter__(self):
        """
        Iterate over this element's values.
        """
        arr = self.values
        if arr is None:
            return
        for k, v in zip(arr.field(self.type.key_field.name), arr.field(self.type.item_field.name)):
            yield (k.as_py(), v.as_py())

    def as_py(self):
        """
        Return this value as a Python list of key/value tuples.
        """
        cdef CStructScalar* sp = <CStructScalar*> self.wrapped.get()
        return list(self) if sp.is_valid else None

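# A usage sketch for MapScalar (assuming pyarrow only): unlike StructScalar,
# as_py() yields a list of key/value tuples, since map keys may repeat.
#
#   >>> import pyarrow as pa
#   >>> m = pa.scalar([("k1", 1), ("k2", 2)],
#   ...               type=pa.map_(pa.string(), pa.int64()))
#   >>> m.as_py()
#   [('k1', 1), ('k2', 2)]
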
cdef class DictionaryScalar(Scalar):
    """
    Concrete class for dictionary-encoded scalars.
    """

    @staticmethod
    @binding(True)  # Required for cython < 3
    def _reconstruct(type, is_valid, index, dictionary):
        cdef:
            CDictionaryScalarIndexAndDictionary value
            shared_ptr[CDictionaryScalar] wrapped
            DataType type_
            Scalar index_
            Array dictionary_

        type_ = ensure_type(type, allow_none=False)
        if not isinstance(type_, DictionaryType):
            raise TypeError('Must pass a DictionaryType instance')

        if isinstance(index, Scalar):
            if not index.type.equals(type.index_type):
                raise TypeError("The Scalar value passed as index must have "
                                "identical type to the dictionary type's "
                                "index_type")
            index_ = index
        else:
            index_ = scalar(index, type=type_.index_type)

        if isinstance(dictionary, Array):
            if not dictionary.type.equals(type.value_type):
                raise TypeError("The Array passed as dictionary must have "
                                "identical type to the dictionary type's "
                                "value_type")
            dictionary_ = dictionary
        else:
            dictionary_ = array(dictionary, type=type_.value_type)

        value.index = pyarrow_unwrap_scalar(index_)
        value.dictionary = pyarrow_unwrap_array(dictionary_)

        wrapped = make_shared[CDictionaryScalar](
            value, pyarrow_unwrap_data_type(type_), <c_bool>(is_valid)
        )
        return Scalar.wrap(<shared_ptr[CScalar]> wrapped)

    def __reduce__(self):
        return DictionaryScalar._reconstruct, (
            self.type, self.is_valid, self.index, self.dictionary
        )

    @property
    def index(self):
        """
        Return this value's underlying index as a scalar.
        """
        cdef CDictionaryScalar* sp = <CDictionaryScalar*> self.wrapped.get()
        return Scalar.wrap(sp.value.index)

    @property
    def value(self):
        """
        Return the encoded value as a scalar.
        """
        cdef CDictionaryScalar* sp = <CDictionaryScalar*> self.wrapped.get()
        return Scalar.wrap(GetResultValue(sp.GetEncodedValue()))

    @property
    def dictionary(self):
        cdef CDictionaryScalar* sp = <CDictionaryScalar*> self.wrapped.get()
        return pyarrow_wrap_array(sp.value.dictionary)

    def as_py(self):
        """
        Return this encoded value as a Python object.
        """
        return self.value.as_py() if self.is_valid else None

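# A usage sketch for DictionaryScalar (assuming pyarrow only): .index is the
# position into .dictionary, while .value and as_py() decode it.
#
#   >>> import pyarrow as pa
#   >>> arr = pa.array(["a", "b", "a"]).dictionary_encode()
#   >>> ds = arr[0]
#   >>> ds.index.as_py(), ds.value.as_py(), ds.as_py()
#   (0, 'a', 'a')
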
cdef class RunEndEncodedScalar(Scalar):
    """
    Concrete class for RunEndEncoded scalars.
    """
    @property
    def value(self):
        """
        Return underlying value as a scalar.
        """
        cdef CRunEndEncodedScalar* sp = <CRunEndEncodedScalar*> self.wrapped.get()
        return Scalar.wrap(sp.value)

    def as_py(self):
        """
        Return underlying value as a Python object.
        """
        return self.value.as_py()


cdef class UnionScalar(Scalar):
    """
    Concrete class for Union scalars.
    """

    @property
    def value(self):
        """
        Return underlying value as a scalar.
        """
        cdef CSparseUnionScalar* sp
        cdef CDenseUnionScalar* dp
        if self.type.id == _Type_SPARSE_UNION:
            sp = <CSparseUnionScalar*> self.wrapped.get()
            return Scalar.wrap(sp.value[sp.child_id]) if sp.is_valid else None
        else:
            dp = <CDenseUnionScalar*> self.wrapped.get()
            return Scalar.wrap(dp.value) if dp.is_valid else None

    def as_py(self):
        """
        Return underlying value as a Python object.
        """
        value = self.value
        return None if value is None else value.as_py()

    @property
    def type_code(self):
        """
        Return the union type code for this scalar.
        """
        cdef CUnionScalar* sp = <CUnionScalar*> self.wrapped.get()
        return sp.type_code

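# A sketch for the run-end-encoded case (assuming a pyarrow build with
# RunEndEncodedArray support): the scalar simply delegates to .value.
#
#   >>> import pyarrow as pa
#   >>> ree = pa.RunEndEncodedArray.from_arrays([2, 5], ["x", "y"])
#   >>> ree[3].as_py()   # positions 2-4 encode "y"
#   'y'
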
cdef class ExtensionScalar(Scalar):
    """
    Concrete class for Extension scalars.
    """

    @property
    def value(self):
        """
        Return storage value as a scalar.
        """
        cdef CExtensionScalar* sp = <CExtensionScalar*> self.wrapped.get()
        return Scalar.wrap(sp.value) if sp.is_valid else None

    def as_py(self):
        """
        Return this scalar as a Python object.
        """
        return None if self.value is None else self.value.as_py()

    @staticmethod
    def from_storage(BaseExtensionType typ, value):
        """
        Construct ExtensionScalar from type and storage value.

        Parameters
        ----------
        typ : DataType
            The extension type for the result scalar.
        value : object
            The storage value for the result scalar.

        Returns
        -------
        ext_scalar : ExtensionScalar
        """
        cdef:
            shared_ptr[CExtensionScalar] sp_scalar
            shared_ptr[CScalar] sp_storage
            CExtensionScalar* ext_scalar

        if value is None:
            storage = None
        elif isinstance(value, Scalar):
            if value.type != typ.storage_type:
                raise TypeError("Incompatible storage type {0} "
                                "for extension type {1}"
                                .format(value.type, typ))
            storage = value
        else:
            storage = scalar(value, typ.storage_type)

        cdef c_bool is_valid = storage is not None and storage.is_valid
        if is_valid:
            sp_storage = pyarrow_unwrap_scalar(storage)
        else:
            sp_storage = MakeNullScalar((<DataType> typ.storage_type).sp_type)
        sp_scalar = make_shared[CExtensionScalar](sp_storage, typ.sp_type,
                                                  is_valid)
        with nogil:
            check_status(sp_scalar.get().Validate())
        return pyarrow_wrap_scalar(<shared_ptr[CScalar]> sp_scalar)

cdef class FixedShapeTensorScalar(ExtensionScalar):
    """
    Concrete class for fixed shape tensor extension scalar.
    """

    def to_numpy(self):
        """
        Convert fixed shape tensor scalar to a numpy.ndarray.

        The resulting ndarray's shape matches the permuted shape of the
        fixed shape tensor scalar.
        The conversion is zero-copy.

        Returns
        -------
        numpy.ndarray
        """
        return self.to_tensor().to_numpy()

    def to_tensor(self):
        """
        Convert fixed shape tensor extension scalar to a pyarrow.Tensor, using
        shape and strides derived from the corresponding FixedShapeTensorType.

        The conversion is zero-copy.

        Returns
        -------
        pyarrow.Tensor
            Tensor representing the data stored in the FixedShapeTensorScalar.
        """
        cdef:
            CFixedShapeTensorType* c_type = static_pointer_cast[CFixedShapeTensorType, CDataType](
                self.wrapped.get().type).get()
            shared_ptr[CExtensionScalar] scalar = static_pointer_cast[CExtensionScalar, CScalar](self.wrapped)
            shared_ptr[CTensor] ctensor

        with nogil:
            ctensor = GetResultValue(c_type.MakeTensor(scalar))
        return pyarrow_wrap_tensor(ctensor)

cdef dict _scalar_classes = {
    _Type_BOOL: BooleanScalar,
    _Type_UINT8: UInt8Scalar,
    _Type_UINT16: UInt16Scalar,
    _Type_UINT32: UInt32Scalar,
    _Type_UINT64: UInt64Scalar,
    _Type_INT8: Int8Scalar,
    _Type_INT16: Int16Scalar,
    _Type_INT32: Int32Scalar,
    _Type_INT64: Int64Scalar,
    _Type_HALF_FLOAT: HalfFloatScalar,
    _Type_FLOAT: FloatScalar,
    _Type_DOUBLE: DoubleScalar,
    _Type_DECIMAL128: Decimal128Scalar,
    _Type_DECIMAL256: Decimal256Scalar,
    _Type_DATE32: Date32Scalar,
    _Type_DATE64: Date64Scalar,
    _Type_TIME32: Time32Scalar,
    _Type_TIME64: Time64Scalar,
    _Type_TIMESTAMP: TimestampScalar,
    _Type_DURATION: DurationScalar,
    _Type_BINARY: BinaryScalar,
    _Type_LARGE_BINARY: LargeBinaryScalar,
    _Type_FIXED_SIZE_BINARY: FixedSizeBinaryScalar,
    _Type_BINARY_VIEW: BinaryViewScalar,
    _Type_STRING: StringScalar,
    _Type_LARGE_STRING: LargeStringScalar,
    _Type_STRING_VIEW: StringViewScalar,
    _Type_LIST: ListScalar,
    _Type_LARGE_LIST: LargeListScalar,
    _Type_FIXED_SIZE_LIST: FixedSizeListScalar,
    _Type_LIST_VIEW: ListViewScalar,
    _Type_LARGE_LIST_VIEW: LargeListViewScalar,
    _Type_STRUCT: StructScalar,
    _Type_MAP: MapScalar,
    _Type_DICTIONARY: DictionaryScalar,
    _Type_RUN_END_ENCODED: RunEndEncodedScalar,
    _Type_SPARSE_UNION: UnionScalar,
    _Type_DENSE_UNION: UnionScalar,
    _Type_INTERVAL_MONTH_DAY_NANO: MonthDayNanoIntervalScalar,
    _Type_EXTENSION: ExtensionScalar,
}


cdef object get_scalar_class_from_type(
        const shared_ptr[CDataType]& sp_data_type):
    cdef CDataType* data_type = sp_data_type.get()
    if data_type == NULL:
        raise ValueError('Scalar data type was NULL')

    if data_type.id() == _Type_EXTENSION:
        py_ext_data_type = pyarrow_wrap_data_type(sp_data_type)
        return py_ext_data_type.__arrow_ext_scalar_class__()
    else:
        return _scalar_classes[data_type.id()]

def scalar(value, type=None, *, from_pandas=None, MemoryPool memory_pool=None):
    """
    Create a pyarrow.Scalar instance from a Python object.

    Parameters
    ----------
    value : Any
        Python object coercible to arrow's type system.
    type : pyarrow.DataType
        Explicit type to attempt to coerce to, otherwise will be inferred from
        the value.
    from_pandas : bool, default None
        Use pandas's semantics for inferring nulls from values in
        ndarray-like data. Defaults to False if not passed explicitly by user,
        or True if a pandas object is passed in.
    memory_pool : pyarrow.MemoryPool, optional
        If not passed, will allocate memory from the currently-set default
        memory pool.

    Returns
    -------
    scalar : pyarrow.Scalar

    Examples
    --------
    >>> import pyarrow as pa

    >>> pa.scalar(42)
    <pyarrow.Int64Scalar: 42>

    >>> pa.scalar("string")
    <pyarrow.StringScalar: 'string'>

    >>> pa.scalar([1, 2])
    <pyarrow.ListScalar: [1, 2]>

    >>> pa.scalar([1, 2], type=pa.list_(pa.int16()))
    <pyarrow.ListScalar: [1, 2]>
    """
    cdef:
        DataType ty
        PyConversionOptions options
        shared_ptr[CScalar] scalar
        shared_ptr[CArray] array
        shared_ptr[CChunkedArray] chunked
        bint is_pandas_object = False
        CMemoryPool* pool

    type = ensure_type(type, allow_none=True)
    pool = maybe_unbox_memory_pool(memory_pool)

    if _is_array_like(value):
        value = get_values(value, &is_pandas_object)

    options.size = 1

    if type is not None:
        ty = ensure_type(type)
        options.type = ty.sp_type

    if from_pandas is None:
        options.from_pandas = is_pandas_object
    else:
        options.from_pandas = from_pandas

    value = [value]
    with nogil:
        chunked = GetResultValue(ConvertPySequence(value, None, options, pool))

    # get the first chunk
    assert chunked.get().num_chunks() == 1
    array = chunked.get().chunk(0)

    # retrieve the scalar from the first position
    scalar = GetResultValue(array.get().GetScalar(0))
    return Scalar.wrap(scalar)
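# A few round-trips through the classes defined in this file, assuming a
# standard pyarrow install; each scalar decodes back via as_py():
#
#   >>> import datetime
#   >>> import pyarrow as pa
#   >>> pa.scalar(b"abc").as_py()                      # BinaryScalar
#   b'abc'
#   >>> pa.scalar({"a": 1}).as_py()                    # StructScalar
#   {'a': 1}
#   >>> pa.scalar(datetime.datetime(2020, 1, 1),
#   ...           type=pa.timestamp("us")).as_py()     # TimestampScalar
#   datetime.datetime(2020, 1, 1, 0, 0)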
venv/lib/python3.10/site-packages/pyarrow/tests/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/pyarrow/tests/arrow_39313.py
ADDED
@@ -0,0 +1,47 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# This file is called from a test in test_pandas.py.

from threading import Thread

import pandas as pd
from pyarrow.pandas_compat import _pandas_api

if __name__ == "__main__":
    wait = True
    num_threads = 10
    df = pd.DataFrame()
    results = []

    def rc():
        while wait:
            pass
        results.append(_pandas_api.is_data_frame(df))

    threads = [Thread(target=rc) for _ in range(num_threads)]

    for t in threads:
        t.start()

    wait = False

    for t in threads:
        t.join()

    assert len(results) == num_threads
    assert all(results), "`is_data_frame` returned False when given a DataFrame"
venv/lib/python3.10/site-packages/pyarrow/tests/arrow_7980.py
ADDED
@@ -0,0 +1,30 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# This file is called from a test in test_schema.py.

import pyarrow as pa


# the types where to_pandas_dtype returns a non-numpy dtype
cases = [
    (pa.timestamp('ns', tz='UTC'), "datetime64[ns, UTC]"),
]


for arrow_type, pandas_type in cases:
    assert str(arrow_type.to_pandas_dtype()) == pandas_type
venv/lib/python3.10/site-packages/pyarrow/tests/bound_function_visit_strings.pyx
ADDED
@@ -0,0 +1,67 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# distutils: language=c++
# cython: language_level = 3

from pyarrow.lib cimport *
from pyarrow.lib import frombytes, tobytes

# basic test to roundtrip through a BoundFunction

ctypedef CStatus visit_string_cb(const c_string&)

cdef extern from * namespace "arrow::py" nogil:
    """
    #include <functional>
    #include <string>
    #include <vector>

    #include "arrow/status.h"

    namespace arrow {
    namespace py {

    Status VisitStrings(const std::vector<std::string>& strs,
                        std::function<Status(const std::string&)> cb) {
      for (const std::string& str : strs) {
        RETURN_NOT_OK(cb(str));
      }
      return Status::OK();
    }

    }  // namespace py
    }  // namespace arrow
    """
    cdef CStatus CVisitStrings" arrow::py::VisitStrings"(
        vector[c_string], function[visit_string_cb])


cdef void _visit_strings_impl(py_cb, const c_string& s) except *:
    py_cb(frombytes(s))


def _visit_strings(strings, cb):
    cdef:
        function[visit_string_cb] c_cb
        vector[c_string] c_strings

    c_cb = BindFunction[visit_string_cb](&_visit_strings_impl, cb)
    for s in strings:
        c_strings.push_back(tobytes(s))

    check_status(CVisitStrings(c_strings, c_cb))
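# How the roundtrip above is exercised once this module is cythonized
# (a sketch; the compiled module name is an assumption based on the filename):
#
#   >>> from bound_function_visit_strings import _visit_strings
#   >>> visited = []
#   >>> _visit_strings(["a", "b"], visited.append)
#   >>> visited
#   ['a', 'b']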
venv/lib/python3.10/site-packages/pyarrow/tests/conftest.py
ADDED
@@ -0,0 +1,312 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import functools
import os
import pathlib
import subprocess
import sys
import time
import urllib.request

import pytest
import hypothesis as h
from ..conftest import groups, defaults

from pyarrow import set_timezone_db_path
from pyarrow.util import find_free_port


# setup hypothesis profiles
h.settings.register_profile('ci', max_examples=1000)
h.settings.register_profile('dev', max_examples=50)
h.settings.register_profile('debug', max_examples=10,
                            verbosity=h.Verbosity.verbose)

# load default hypothesis profile, either set HYPOTHESIS_PROFILE environment
# variable or pass --hypothesis-profile option to pytest, to see the generated
# examples try:
# pytest pyarrow -sv --enable-hypothesis --hypothesis-profile=debug
h.settings.load_profile(os.environ.get('HYPOTHESIS_PROFILE', 'dev'))

# Set this at the beginning, before the AWS SDK is loaded, to avoid reading
# in user configuration values.
os.environ['AWS_CONFIG_FILE'] = "/dev/null"


if sys.platform == 'win32':
    tzdata_set_path = os.environ.get('PYARROW_TZDATA_PATH', None)
    if tzdata_set_path:
        set_timezone_db_path(tzdata_set_path)


def pytest_addoption(parser):
    # Create options to selectively enable test groups
    def bool_env(name, default=None):
        value = os.environ.get(name.upper())
        if not value:  # missing or empty
            return default
        value = value.lower()
        if value in {'1', 'true', 'on', 'yes', 'y'}:
            return True
        elif value in {'0', 'false', 'off', 'no', 'n'}:
            return False
        else:
            raise ValueError('{}={} is not parsable as boolean'
                             .format(name.upper(), value))

    for group in groups:
        default = bool_env('PYARROW_TEST_{}'.format(group), defaults[group])
        parser.addoption('--enable-{}'.format(group),
                         action='store_true', default=default,
                         help=('Enable the {} test group'.format(group)))
        parser.addoption('--disable-{}'.format(group),
                         action='store_true', default=False,
                         help=('Disable the {} test group'.format(group)))


class PyArrowConfig:
    def __init__(self):
        self.is_enabled = {}

    def apply_mark(self, mark):
        group = mark.name
        if group in groups:
            self.requires(group)

    def requires(self, group):
        if not self.is_enabled[group]:
            pytest.skip('{} NOT enabled'.format(group))


def pytest_configure(config):
    # Apply command-line options to initialize PyArrow-specific config object
    config.pyarrow = PyArrowConfig()

    for mark in groups:
        config.addinivalue_line(
            "markers", mark,
        )

        enable_flag = '--enable-{}'.format(mark)
        disable_flag = '--disable-{}'.format(mark)

        is_enabled = (config.getoption(enable_flag) and not
                      config.getoption(disable_flag))
        config.pyarrow.is_enabled[mark] = is_enabled


def pytest_runtest_setup(item):
    # Apply test markers to skip tests selectively
    for mark in item.iter_markers():
        item.config.pyarrow.apply_mark(mark)


@pytest.fixture
def tempdir(tmpdir):
    # convert pytest's LocalPath to pathlib.Path
    return pathlib.Path(tmpdir.strpath)


@pytest.fixture(scope='session')
def base_datadir():
    return pathlib.Path(__file__).parent / 'data'


@pytest.fixture(autouse=True)
def disable_aws_metadata(monkeypatch):
    """Stop the AWS SDK from trying to contact the EC2 metadata server.

    Otherwise, this causes a 5 second delay in tests that exercise the
    S3 filesystem.
    """
    monkeypatch.setenv("AWS_EC2_METADATA_DISABLED", "true")


# TODO(kszucs): move the following fixtures to test_fs.py once the previous
# parquet dataset implementation and hdfs implementation are removed.

@pytest.fixture(scope='session')
def hdfs_connection():
    host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
    port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
    user = os.environ.get('ARROW_HDFS_TEST_USER', 'hdfs')
    return host, port, user


@pytest.fixture(scope='session')
def s3_connection():
    host, port = 'localhost', find_free_port()
    access_key, secret_key = 'arrow', 'apachearrow'
    return host, port, access_key, secret_key


def retry(attempts=3, delay=1.0, max_delay=None, backoff=1):
    """
    Retry decorator

    Parameters
    ----------
    attempts : int, default 3
        The number of attempts.
    delay : float, default 1
        Initial delay in seconds.
    max_delay : float, optional
        The max delay between attempts.
    backoff : float, default 1
        The multiplier applied to the delay after each attempt.
    """
    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            remaining_attempts = attempts
            curr_delay = delay
            while remaining_attempts > 0:
                try:
                    return func(*args, **kwargs)
                except Exception as err:
                    remaining_attempts -= 1
                    last_exception = err
                    curr_delay *= backoff
                    if max_delay:
                        curr_delay = min(curr_delay, max_delay)
                    time.sleep(curr_delay)
            raise last_exception
        return wrapper
    return decorate


@pytest.fixture(scope='session')
def s3_server(s3_connection, tmpdir_factory):
    @retry(attempts=5, delay=0.1, backoff=2)
    def minio_server_health_check(address):
        resp = urllib.request.urlopen(f"http://{address}/minio/health/cluster")
        assert resp.getcode() == 200

    tmpdir = tmpdir_factory.getbasetemp()
    host, port, access_key, secret_key = s3_connection

    address = '{}:{}'.format(host, port)
    env = os.environ.copy()
    env.update({
        'MINIO_ACCESS_KEY': access_key,
        'MINIO_SECRET_KEY': secret_key
    })

    args = ['minio', '--compat', 'server', '--quiet', '--address',
            address, tmpdir]
    proc = None
    try:
        proc = subprocess.Popen(args, env=env)
    except OSError:
        pytest.skip('`minio` command cannot be located')
    else:
        # Wait for the server to startup before yielding
        minio_server_health_check(address)

        yield {
            'connection': s3_connection,
            'process': proc,
            'tempdir': tmpdir
        }
    finally:
        if proc is not None:
            proc.kill()
            proc.wait()


@pytest.fixture(scope='session')
def gcs_server():
    port = find_free_port()
    env = os.environ.copy()
    args = [sys.executable, '-m', 'testbench', '--port', str(port)]
    proc = None
    try:
        # check first if testbench module is available
        import testbench  # noqa:F401
        # start server
        proc = subprocess.Popen(args, env=env)
        # Make sure the server is alive.
        if proc.poll() is not None:
            pytest.skip(f"Command {args} did not start server successfully!")
    except (ModuleNotFoundError, OSError) as e:
        pytest.skip(f"Command {args} failed to execute: {e}")
    else:
        yield {
            'connection': ('localhost', port),
            'process': proc,
        }
    finally:
        if proc is not None:
            proc.kill()
            proc.wait()


@pytest.fixture(scope='session')
def azure_server(tmpdir_factory):
    port = find_free_port()
    env = os.environ.copy()
    tmpdir = tmpdir_factory.getbasetemp()
    # We only need the blob service emulator, not queue or table.
    args = ['azurite-blob', "--location", tmpdir, "--blobPort", str(port)]
    proc = None
    try:
        proc = subprocess.Popen(args, env=env)
        # Make sure the server is alive.
        if proc.poll() is not None:
            pytest.skip(f"Command {args} did not start server successfully!")
    except (ModuleNotFoundError, OSError) as e:
        pytest.skip(f"Command {args} failed to execute: {e}")
    else:
        yield {
            # Use the standard azurite account_name and account_key.
            # https://learn.microsoft.com/en-us/azure/storage/common/storage-use-emulator#authorize-with-shared-key-credentials
            'connection': ('127.0.0.1', port, 'devstoreaccount1',
                           'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2'
                           'UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='),
            'process': proc,
            'tempdir': tmpdir,
        }
    finally:
        if proc is not None:
            proc.kill()
            proc.wait()


@pytest.fixture(
    params=[
        'builtin_pickle',
        'cloudpickle'
    ],
    scope='session'
)
def pickle_module(request):
    return request.getfixturevalue(request.param)


@pytest.fixture(scope='session')
def builtin_pickle():
    import pickle
    return pickle


@pytest.fixture(scope='session')
def cloudpickle():
    cp = pytest.importorskip('cloudpickle')
    if 'HIGHEST_PROTOCOL' not in cp.__dict__:
        cp.HIGHEST_PROTOCOL = cp.DEFAULT_PROTOCOL
    return cp
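# A sketch of applying the retry decorator above to a hypothetical flaky
# check: up to 4 attempts, 0.5 s initial delay, doubling but capped at 2 s.
#
#   @retry(attempts=4, delay=0.5, backoff=2, max_delay=2.0)
#   def health_check(url):
#       assert urllib.request.urlopen(url).getcode() == 200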
venv/lib/python3.10/site-packages/pyarrow/tests/pandas_threaded_import.py
ADDED
@@ -0,0 +1,44 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# This file is called from a test in test_pandas.py.

from concurrent.futures import ThreadPoolExecutor
import faulthandler
import sys

import pyarrow as pa

num_threads = 60
timeout = 10  # seconds


def thread_func(i):
    pa.array([i]).to_pandas()


def main():
    # In case of import deadlock, crash after a finite timeout
    faulthandler.dump_traceback_later(timeout, exit=True)
    with ThreadPoolExecutor(num_threads) as pool:
        assert "pandas" not in sys.modules  # pandas is imported lazily
        list(pool.map(thread_func, range(num_threads)))
    assert "pandas" in sys.modules


if __name__ == "__main__":
    main()
venv/lib/python3.10/site-packages/pyarrow/tests/pyarrow_cython_example.pyx
ADDED
@@ -0,0 +1,61 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# distutils: language=c++
# cython: language_level = 3

from pyarrow.lib cimport *


def get_array_length(obj):
    # An example function accessing both the pyarrow Cython API
    # and the Arrow C++ API
    cdef shared_ptr[CArray] arr = pyarrow_unwrap_array(obj)
    if arr.get() == NULL:
        raise TypeError("not an array")
    return arr.get().length()


def make_null_array(length):
    # An example function that returns a PyArrow object without PyArrow
    # being imported explicitly at the Python level.
    cdef shared_ptr[CArray] null_array
    null_array.reset(new CNullArray(length))
    return pyarrow_wrap_array(null_array)


def cast_scalar(scalar, to_type):
    cdef:
        shared_ptr[CScalar] c_scalar
        shared_ptr[CDataType] c_type
        CCastOptions cast_options
        CDatum c_datum
        CResult[CDatum] c_cast_result

    c_scalar = pyarrow_unwrap_scalar(scalar)
    if c_scalar.get() == NULL:
        raise TypeError("not a scalar")
    c_type = pyarrow_unwrap_data_type(to_type)
    if c_type.get() == NULL:
        raise TypeError("not a type")

    c_datum = CDatum(c_scalar)
    cast_options = CCastOptions()
    cast_options.to_type = c_type
    c_cast_result = Cast(c_datum, cast_options)
    c_datum = GetResultValue(c_cast_result)
    return pyarrow_wrap_scalar(c_datum.scalar())
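# A sketch of using the example functions once this module is cythonized
# (the compiled module name is an assumption based on the filename):
#
#   >>> import pyarrow as pa
#   >>> from pyarrow_cython_example import get_array_length, cast_scalar
#   >>> get_array_length(pa.array([1, 2, 3]))
#   3
#   >>> cast_scalar(pa.scalar(1), pa.float64()).as_py()
#   1.0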
venv/lib/python3.10/site-packages/pyarrow/tests/read_record_batch.py
ADDED
@@ -0,0 +1,25 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# This file is called from a test in test_ipc.py.

import sys

import pyarrow as pa

with open(sys.argv[1], 'rb') as f:
    pa.ipc.open_file(f).read_all().to_pandas()
venv/lib/python3.10/site-packages/pyarrow/tests/strategies.py
ADDED
@@ -0,0 +1,457 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import datetime
import sys

import pytest
import hypothesis as h
import hypothesis.strategies as st
import hypothesis.extra.numpy as npst
try:
    import hypothesis.extra.pytz as tzst
except ImportError:
    tzst = None
try:
    import zoneinfo
except ImportError:
    zoneinfo = None
if sys.platform == 'win32':
    try:
        import tzdata  # noqa:F401
    except ImportError:
        zoneinfo = None
import numpy as np

import pyarrow as pa


# TODO(kszucs): alphanum_text, surrogate_text
custom_text = st.text(
    alphabet=st.characters(
        min_codepoint=0x41,
        max_codepoint=0x7E
    )
)

null_type = st.just(pa.null())
bool_type = st.just(pa.bool_())

binary_type = st.just(pa.binary())
string_type = st.just(pa.string())
large_binary_type = st.just(pa.large_binary())
large_string_type = st.just(pa.large_string())
fixed_size_binary_type = st.builds(
    pa.binary,
    st.integers(min_value=0, max_value=16)
)
binary_like_types = st.one_of(
    binary_type,
    string_type,
    large_binary_type,
    large_string_type,
    fixed_size_binary_type
)

signed_integer_types = st.sampled_from([
    pa.int8(),
    pa.int16(),
    pa.int32(),
    pa.int64()
])
unsigned_integer_types = st.sampled_from([
    pa.uint8(),
    pa.uint16(),
    pa.uint32(),
    pa.uint64()
])
integer_types = st.one_of(signed_integer_types, unsigned_integer_types)

floating_types = st.sampled_from([
    pa.float16(),
    pa.float32(),
    pa.float64()
])
decimal128_type = st.builds(
    pa.decimal128,
    precision=st.integers(min_value=1, max_value=38),
    scale=st.integers(min_value=1, max_value=38)
)
decimal256_type = st.builds(
    pa.decimal256,
    precision=st.integers(min_value=1, max_value=76),
    scale=st.integers(min_value=1, max_value=76)
)
numeric_types = st.one_of(integer_types, floating_types,
                          decimal128_type, decimal256_type)

date_types = st.sampled_from([
    pa.date32(),
    pa.date64()
])
time_types = st.sampled_from([
    pa.time32('s'),
    pa.time32('ms'),
    pa.time64('us'),
    pa.time64('ns')
])

if tzst and zoneinfo:
    timezones = st.one_of(st.none(), tzst.timezones(), st.timezones())
elif tzst:
    timezones = st.one_of(st.none(), tzst.timezones())
elif zoneinfo:
    timezones = st.one_of(st.none(), st.timezones())
else:
    timezones = st.none()
timestamp_types = st.builds(
    pa.timestamp,
    unit=st.sampled_from(['s', 'ms', 'us', 'ns']),
    tz=timezones
)
duration_types = st.builds(
    pa.duration,
    st.sampled_from(['s', 'ms', 'us', 'ns'])
)
interval_types = st.just(pa.month_day_nano_interval())
temporal_types = st.one_of(
    date_types,
    time_types,
    timestamp_types,
    duration_types,
    interval_types
)

primitive_types = st.one_of(
    null_type,
    bool_type,
    numeric_types,
    temporal_types,
    binary_like_types
)

metadata = st.dictionaries(st.text(), st.text())


@st.composite
def fields(draw, type_strategy=primitive_types):
    name = draw(custom_text)
    typ = draw(type_strategy)
    if pa.types.is_null(typ):
        nullable = True
    else:
        nullable = draw(st.booleans())
    meta = draw(metadata)
    return pa.field(name, type=typ, nullable=nullable, metadata=meta)


def list_types(item_strategy=primitive_types):
    return (
        st.builds(pa.list_, item_strategy) |
        st.builds(pa.large_list, item_strategy) |
        st.builds(
            pa.list_,
            item_strategy,
            st.integers(min_value=0, max_value=16)
        ) |
        st.builds(pa.list_view, item_strategy) |
        st.builds(pa.large_list_view, item_strategy)
    )


@st.composite
def struct_types(draw, item_strategy=primitive_types):
    fields_strategy = st.lists(fields(item_strategy))
    fields_rendered = draw(fields_strategy)
    field_names = [field.name for field in fields_rendered]
    # check that field names are unique, see ARROW-9997
    h.assume(len(set(field_names)) == len(field_names))
    return pa.struct(fields_rendered)


def dictionary_types(key_strategy=None, value_strategy=None):
    if key_strategy is None:
        key_strategy = signed_integer_types
    if value_strategy is None:
        value_strategy = st.one_of(
            bool_type,
            integer_types,
            st.sampled_from([pa.float32(), pa.float64()]),
            binary_type,
            string_type,
            fixed_size_binary_type,
        )
    return st.builds(pa.dictionary, key_strategy, value_strategy)


@st.composite
def map_types(draw, key_strategy=primitive_types,
              item_strategy=primitive_types):
    key_type = draw(key_strategy)
    h.assume(not pa.types.is_null(key_type))
    value_type = draw(item_strategy)
    return pa.map_(key_type, value_type)


# union type
# extension type


def schemas(type_strategy=primitive_types, max_fields=None):
    children = st.lists(fields(type_strategy), max_size=max_fields)
    return st.builds(pa.schema, children)


all_types = st.deferred(
    lambda: (
        primitive_types |
        list_types() |
        struct_types() |
        dictionary_types() |
        map_types() |
        list_types(all_types) |
        struct_types(all_types)
    )
)
all_fields = fields(all_types)
all_schemas = schemas(all_types)


_default_array_sizes = st.integers(min_value=0, max_value=20)


@st.composite
def _pylist(draw, value_type, size, nullable=True):
    arr = draw(arrays(value_type, size=size, nullable=False))
    return arr.to_pylist()


@st.composite
def _pymap(draw, key_type, value_type, size, nullable=True):
    length = draw(size)
    keys = draw(_pylist(key_type, size=length, nullable=False))
    values = draw(_pylist(value_type, size=length, nullable=nullable))
    return list(zip(keys, values))


@st.composite
def arrays(draw, type, size=None, nullable=True):
    if isinstance(type, st.SearchStrategy):
        ty = draw(type)
    elif isinstance(type, pa.DataType):
        ty = type
    else:
        raise TypeError('Type must be a pyarrow DataType')

    if isinstance(size, st.SearchStrategy):
        size = draw(size)
    elif size is None:
        size = draw(_default_array_sizes)
    elif not isinstance(size, int):
        raise TypeError('Size must be an integer')

    if pa.types.is_null(ty):
        h.assume(nullable)
        value = st.none()
    elif pa.types.is_boolean(ty):
        value = st.booleans()
    elif pa.types.is_integer(ty):
        values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,)))
        return pa.array(values, type=ty)
    elif pa.types.is_floating(ty):
        values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,)))
        # Workaround ARROW-4952: no easy way to assert array equality
        # in a NaN-tolerant way.
        values[np.isnan(values)] = -42.0
        return pa.array(values, type=ty)
    elif pa.types.is_decimal(ty):
        # TODO(kszucs): properly limit the precision
        # value = st.decimals(places=type.scale, allow_infinity=False)
        h.reject()
    elif pa.types.is_time(ty):
        value = st.times()
    elif pa.types.is_date(ty):
        value = st.dates()
    elif pa.types.is_timestamp(ty):
        if zoneinfo is None:
            pytest.skip('no module named zoneinfo (or tzdata on Windows)')
        if ty.tz is None:
            pytest.skip('requires timezone not None')
        min_int64 = -(2**63)
        max_int64 = 2**63 - 1
        min_datetime = datetime.datetime.fromtimestamp(
            min_int64 // 10**9) + datetime.timedelta(hours=12)
        max_datetime = datetime.datetime.fromtimestamp(
            max_int64 // 10**9) - datetime.timedelta(hours=12)
        try:
            offset = ty.tz.split(":")
            offset_hours = int(offset[0])
            offset_min = int(offset[1])
            tz = datetime.timedelta(hours=offset_hours, minutes=offset_min)
        except ValueError:
            tz = zoneinfo.ZoneInfo(ty.tz)
        value = st.datetimes(timezones=st.just(tz), min_value=min_datetime,
                             max_value=max_datetime)
    elif pa.types.is_duration(ty):
        value = st.timedeltas()
    elif pa.types.is_interval(ty):
        value = st.timedeltas()
    elif pa.types.is_binary(ty) or pa.types.is_large_binary(ty):
elif pa.types.is_binary(ty) or pa.types.is_large_binary(ty):
|
314 |
+
value = st.binary()
|
315 |
+
elif pa.types.is_string(ty) or pa.types.is_large_string(ty):
|
316 |
+
value = st.text()
|
317 |
+
elif pa.types.is_fixed_size_binary(ty):
|
318 |
+
value = st.binary(min_size=ty.byte_width, max_size=ty.byte_width)
|
319 |
+
elif pa.types.is_list(ty):
|
320 |
+
value = _pylist(ty.value_type, size=size, nullable=nullable)
|
321 |
+
elif pa.types.is_large_list(ty):
|
322 |
+
value = _pylist(ty.value_type, size=size, nullable=nullable)
|
323 |
+
elif pa.types.is_fixed_size_list(ty):
|
324 |
+
value = _pylist(ty.value_type, size=ty.list_size, nullable=nullable)
|
325 |
+
elif pa.types.is_list_view(ty):
|
326 |
+
value = _pylist(ty.value_type, size=size, nullable=nullable)
|
327 |
+
elif pa.types.is_large_list_view(ty):
|
328 |
+
value = _pylist(ty.value_type, size=size, nullable=nullable)
|
329 |
+
elif pa.types.is_dictionary(ty):
|
330 |
+
values = _pylist(ty.value_type, size=size, nullable=nullable)
|
331 |
+
return pa.array(draw(values), type=ty)
|
332 |
+
elif pa.types.is_map(ty):
|
333 |
+
value = _pymap(ty.key_type, ty.item_type, size=_default_array_sizes,
|
334 |
+
nullable=nullable)
|
335 |
+
elif pa.types.is_struct(ty):
|
336 |
+
h.assume(len(ty) > 0)
|
337 |
+
fields, child_arrays = [], []
|
338 |
+
for field in ty:
|
339 |
+
fields.append(field)
|
340 |
+
child_arrays.append(draw(arrays(field.type, size=size)))
|
341 |
+
return pa.StructArray.from_arrays(child_arrays, fields=fields)
|
342 |
+
else:
|
343 |
+
raise NotImplementedError(ty)
|
344 |
+
|
345 |
+
if nullable:
|
346 |
+
value = st.one_of(st.none(), value)
|
347 |
+
values = st.lists(value, min_size=size, max_size=size)
|
348 |
+
|
349 |
+
return pa.array(draw(values), type=ty)
|
350 |
+
|
351 |
+
|
352 |
+
@st.composite
|
353 |
+
def chunked_arrays(draw, type, min_chunks=0, max_chunks=None, chunk_size=None):
|
354 |
+
if isinstance(type, st.SearchStrategy):
|
355 |
+
type = draw(type)
|
356 |
+
|
357 |
+
# TODO(kszucs): remove it, field metadata is not kept
|
358 |
+
h.assume(not pa.types.is_struct(type))
|
359 |
+
|
360 |
+
chunk = arrays(type, size=chunk_size)
|
361 |
+
chunks = st.lists(chunk, min_size=min_chunks, max_size=max_chunks)
|
362 |
+
|
363 |
+
return pa.chunked_array(draw(chunks), type=type)
|
364 |
+
|
365 |
+
|
366 |
+
@st.composite
|
367 |
+
def record_batches(draw, type, rows=None, max_fields=None):
|
368 |
+
if isinstance(rows, st.SearchStrategy):
|
369 |
+
rows = draw(rows)
|
370 |
+
elif rows is None:
|
371 |
+
rows = draw(_default_array_sizes)
|
372 |
+
elif not isinstance(rows, int):
|
373 |
+
raise TypeError('Rows must be an integer')
|
374 |
+
|
375 |
+
schema = draw(schemas(type, max_fields=max_fields))
|
376 |
+
children = [draw(arrays(field.type, size=rows)) for field in schema]
|
377 |
+
# TODO(kszucs): the names and schema arguments are not consistent with
|
378 |
+
# Table.from_array's arguments
|
379 |
+
return pa.RecordBatch.from_arrays(children, schema=schema)
|
380 |
+
|
381 |
+
|
382 |
+
@st.composite
|
383 |
+
def tables(draw, type, rows=None, max_fields=None):
|
384 |
+
if isinstance(rows, st.SearchStrategy):
|
385 |
+
rows = draw(rows)
|
386 |
+
elif rows is None:
|
387 |
+
rows = draw(_default_array_sizes)
|
388 |
+
elif not isinstance(rows, int):
|
389 |
+
raise TypeError('Rows must be an integer')
|
390 |
+
|
391 |
+
schema = draw(schemas(type, max_fields=max_fields))
|
392 |
+
children = [draw(arrays(field.type, size=rows)) for field in schema]
|
393 |
+
return pa.Table.from_arrays(children, schema=schema)
|
394 |
+
|
395 |
+
|
396 |
+
all_arrays = arrays(all_types)
|
397 |
+
all_chunked_arrays = chunked_arrays(all_types)
|
398 |
+
all_record_batches = record_batches(all_types)
|
399 |
+
all_tables = tables(all_types)
|
400 |
+
|
401 |
+
|
402 |
+
# Define the same rules as above for pandas tests by excluding certain types
|
403 |
+
# from the generation because of known issues.
|
404 |
+
|
405 |
+
pandas_compatible_primitive_types = st.one_of(
|
406 |
+
null_type,
|
407 |
+
bool_type,
|
408 |
+
integer_types,
|
409 |
+
st.sampled_from([pa.float32(), pa.float64()]),
|
410 |
+
decimal128_type,
|
411 |
+
date_types,
|
412 |
+
time_types,
|
413 |
+
# Need to exclude timestamp and duration types otherwise hypothesis
|
414 |
+
# discovers ARROW-10210
|
415 |
+
# timestamp_types,
|
416 |
+
# duration_types
|
417 |
+
interval_types,
|
418 |
+
binary_type,
|
419 |
+
string_type,
|
420 |
+
large_binary_type,
|
421 |
+
large_string_type,
|
422 |
+
)
|
423 |
+
|
424 |
+
# Need to exclude floating point types otherwise hypothesis discovers
|
425 |
+
# ARROW-10211
|
426 |
+
pandas_compatible_dictionary_value_types = st.one_of(
|
427 |
+
bool_type,
|
428 |
+
integer_types,
|
429 |
+
binary_type,
|
430 |
+
string_type,
|
431 |
+
fixed_size_binary_type,
|
432 |
+
)
|
433 |
+
|
434 |
+
|
435 |
+
def pandas_compatible_list_types(
|
436 |
+
item_strategy=pandas_compatible_primitive_types
|
437 |
+
):
|
438 |
+
# Need to exclude fixed size list type otherwise hypothesis discovers
|
439 |
+
# ARROW-10194
|
440 |
+
return (
|
441 |
+
st.builds(pa.list_, item_strategy) |
|
442 |
+
st.builds(pa.large_list, item_strategy)
|
443 |
+
)
|
444 |
+
|
445 |
+
|
446 |
+
pandas_compatible_types = st.deferred(
|
447 |
+
lambda: st.one_of(
|
448 |
+
pandas_compatible_primitive_types,
|
449 |
+
pandas_compatible_list_types(pandas_compatible_primitive_types),
|
450 |
+
struct_types(pandas_compatible_primitive_types),
|
451 |
+
dictionary_types(
|
452 |
+
value_strategy=pandas_compatible_dictionary_value_types
|
453 |
+
),
|
454 |
+
pandas_compatible_list_types(pandas_compatible_types),
|
455 |
+
struct_types(pandas_compatible_types)
|
456 |
+
)
|
457 |
+
)
|
venv/lib/python3.10/site-packages/pyarrow/tests/test_acero.py
ADDED
@@ -0,0 +1,413 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

import pyarrow as pa
import pyarrow.compute as pc
from pyarrow.compute import field

try:
    from pyarrow.acero import (
        Declaration,
        TableSourceNodeOptions,
        FilterNodeOptions,
        ProjectNodeOptions,
        AggregateNodeOptions,
        OrderByNodeOptions,
        HashJoinNodeOptions,
        AsofJoinNodeOptions,
    )
except ImportError:
    pass

try:
    import pyarrow.dataset as ds
    from pyarrow.acero import ScanNodeOptions
except ImportError:
    ds = None

pytestmark = pytest.mark.acero


@pytest.fixture
def table_source():
    table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    table_opts = TableSourceNodeOptions(table)
    table_source = Declaration("table_source", options=table_opts)
    return table_source


def test_declaration():

    table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    table_opts = TableSourceNodeOptions(table)
    filter_opts = FilterNodeOptions(field('a') > 1)

    # using sequence
    decl = Declaration.from_sequence([
        Declaration("table_source", options=table_opts),
        Declaration("filter", options=filter_opts)
    ])
    result = decl.to_table()
    assert result.equals(table.slice(1, 2))

    # using explicit inputs
    table_source = Declaration("table_source", options=table_opts)
    filtered = Declaration("filter", options=filter_opts, inputs=[table_source])
    result = filtered.to_table()
    assert result.equals(table.slice(1, 2))


def test_declaration_repr(table_source):

    assert "TableSourceNode" in str(table_source)
    assert "TableSourceNode" in repr(table_source)


def test_declaration_to_reader(table_source):
    with table_source.to_reader() as reader:
        assert reader.schema == pa.schema([("a", pa.int64()), ("b", pa.int64())])
        result = reader.read_all()
    expected = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    assert result.equals(expected)


def test_table_source():
    with pytest.raises(TypeError):
        TableSourceNodeOptions(pa.record_batch([pa.array([1, 2, 3])], ["a"]))

    table_source = TableSourceNodeOptions(None)
    decl = Declaration("table_source", table_source)
    with pytest.raises(
        ValueError, match="TableSourceNode requires table which is not null"
    ):
        _ = decl.to_table()


def test_filter(table_source):
    # referencing unknown field
    decl = Declaration.from_sequence([
        table_source,
        Declaration("filter", options=FilterNodeOptions(field("c") > 1))
    ])
    with pytest.raises(ValueError, match=r"No match for FieldRef.Name\(c\)"):
        _ = decl.to_table()

    # requires a pyarrow Expression
    with pytest.raises(TypeError):
        FilterNodeOptions(pa.array([True, False, True]))
    with pytest.raises(TypeError):
        FilterNodeOptions(None)


def test_project(table_source):
    # default name from expression
    decl = Declaration.from_sequence([
        table_source,
        Declaration("project", ProjectNodeOptions([pc.multiply(field("a"), 2)]))
    ])
    result = decl.to_table()
    assert result.schema.names == ["multiply(a, 2)"]
    assert result[0].to_pylist() == [2, 4, 6]

    # provide name
    decl = Declaration.from_sequence([
        table_source,
        Declaration("project", ProjectNodeOptions([pc.multiply(field("a"), 2)], ["a2"]))
    ])
    result = decl.to_table()
    assert result.schema.names == ["a2"]
    assert result["a2"].to_pylist() == [2, 4, 6]

    # input validation
    with pytest.raises(ValueError):
        ProjectNodeOptions([pc.multiply(field("a"), 2)], ["a2", "b2"])

    # no scalar expression
    decl = Declaration.from_sequence([
        table_source,
        Declaration("project", ProjectNodeOptions([pc.sum(field("a"))]))
    ])
    with pytest.raises(ValueError, match="cannot Execute non-scalar expression"):
        _ = decl.to_table()


def test_aggregate_scalar(table_source):
    decl = Declaration.from_sequence([
        table_source,
        Declaration("aggregate", AggregateNodeOptions([("a", "sum", None, "a_sum")]))
    ])
    result = decl.to_table()
    assert result.schema.names == ["a_sum"]
    assert result["a_sum"].to_pylist() == [6]

    # with options class
    table = pa.table({'a': [1, 2, None]})
    aggr_opts = AggregateNodeOptions(
        [("a", "sum", pc.ScalarAggregateOptions(skip_nulls=False), "a_sum")]
    )
    decl = Declaration.from_sequence([
        Declaration("table_source", TableSourceNodeOptions(table)),
        Declaration("aggregate", aggr_opts),
    ])
    result = decl.to_table()
    assert result.schema.names == ["a_sum"]
    assert result["a_sum"].to_pylist() == [None]

    # test various ways of specifying the target column
    for target in ["a", field("a"), 0, field(0), ["a"], [field("a")], [0]]:
        aggr_opts = AggregateNodeOptions([(target, "sum", None, "a_sum")])
        decl = Declaration.from_sequence(
            [table_source, Declaration("aggregate", aggr_opts)]
        )
        result = decl.to_table()
        assert result.schema.names == ["a_sum"]
        assert result["a_sum"].to_pylist() == [6]

    # proper error when specifying the wrong number of target columns
    aggr_opts = AggregateNodeOptions([(["a", "b"], "sum", None, "a_sum")])
    decl = Declaration.from_sequence(
        [table_source, Declaration("aggregate", aggr_opts)]
    )
    with pytest.raises(
        ValueError, match="Function 'sum' accepts 1 arguments but 2 passed"
    ):
        _ = decl.to_table()

    # proper error when using hash aggregation without keys
    aggr_opts = AggregateNodeOptions([("a", "hash_sum", None, "a_sum")])
    decl = Declaration.from_sequence(
        [table_source, Declaration("aggregate", aggr_opts)]
    )
    with pytest.raises(ValueError, match="is a hash aggregate function"):
        _ = decl.to_table()


def test_aggregate_hash():
    table = pa.table({'a': [1, 2, None], 'b': ["foo", "bar", "foo"]})
    table_opts = TableSourceNodeOptions(table)
    table_source = Declaration("table_source", options=table_opts)

    # default options
    aggr_opts = AggregateNodeOptions(
        [("a", "hash_count", None, "count(a)")], keys=["b"])
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    result = decl.to_table()
    expected = pa.table({"b": ["foo", "bar"], "count(a)": [1, 1]})
    assert result.equals(expected)

    # specify function options
    aggr_opts = AggregateNodeOptions(
        [("a", "hash_count", pc.CountOptions("all"), "count(a)")], keys=["b"]
    )
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    result = decl.to_table()
    expected_all = pa.table({"b": ["foo", "bar"], "count(a)": [2, 1]})
    assert result.equals(expected_all)

    # specify keys as field references
    aggr_opts = AggregateNodeOptions(
        [("a", "hash_count", None, "count(a)")], keys=[field("b")]
    )
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    result = decl.to_table()
    assert result.equals(expected)

    # wrong type of (aggregation) function
    # TODO test with kernel that matches number of arguments (arity) -> avoid segfault
    aggr_opts = AggregateNodeOptions([("a", "sum", None, "a_sum")], keys=["b"])
    decl = Declaration.from_sequence([
        table_source, Declaration("aggregate", aggr_opts)
    ])
    with pytest.raises(ValueError):
        _ = decl.to_table()


def test_order_by():
    table = pa.table({'a': [1, 2, 3, 4], 'b': [1, 3, None, 2]})
    table_source = Declaration("table_source", TableSourceNodeOptions(table))

    ord_opts = OrderByNodeOptions([("b", "ascending")])
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    result = decl.to_table()
    expected = pa.table({"a": [1, 4, 2, 3], "b": [1, 2, 3, None]})
    assert result.equals(expected)

    ord_opts = OrderByNodeOptions([(field("b"), "descending")])
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    result = decl.to_table()
    expected = pa.table({"a": [2, 4, 1, 3], "b": [3, 2, 1, None]})
    assert result.equals(expected)

    ord_opts = OrderByNodeOptions([(1, "descending")], null_placement="at_start")
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    result = decl.to_table()
    expected = pa.table({"a": [3, 2, 4, 1], "b": [None, 3, 2, 1]})
    assert result.equals(expected)

    # empty ordering
    ord_opts = OrderByNodeOptions([])
    decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)])
    with pytest.raises(
        ValueError, match="`ordering` must be an explicit non-empty ordering"
    ):
        _ = decl.to_table()

    with pytest.raises(ValueError, match="\"decreasing\" is not a valid sort order"):
        _ = OrderByNodeOptions([("b", "decreasing")])

    with pytest.raises(ValueError, match="\"start\" is not a valid null placement"):
        _ = OrderByNodeOptions([("b", "ascending")], null_placement="start")


def test_hash_join():
    left = pa.table({'key': [1, 2, 3], 'a': [4, 5, 6]})
    left_source = Declaration("table_source", options=TableSourceNodeOptions(left))
    right = pa.table({'key': [2, 3, 4], 'b': [4, 5, 6]})
    right_source = Declaration("table_source", options=TableSourceNodeOptions(right))

    # inner join
    join_opts = HashJoinNodeOptions("inner", left_keys="key", right_keys="key")
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[2, 3], [5, 6], [2, 3], [4, 5]],
        names=["key", "a", "key", "b"])
    assert result.equals(expected)

    for keys in [field("key"), ["key"], [field("key")]]:
        join_opts = HashJoinNodeOptions("inner", left_keys=keys, right_keys=keys)
        joined = Declaration(
            "hashjoin", options=join_opts, inputs=[left_source, right_source])
        result = joined.to_table()
        assert result.equals(expected)

    # left join
    join_opts = HashJoinNodeOptions(
        "left outer", left_keys="key", right_keys="key")
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[1, 2, 3], [4, 5, 6], [None, 2, 3], [None, 4, 5]],
        names=["key", "a", "key", "b"]
    )
    assert result.sort_by("a").equals(expected)

    # suffixes
    join_opts = HashJoinNodeOptions(
        "left outer", left_keys="key", right_keys="key",
        output_suffix_for_left="_left", output_suffix_for_right="_right")
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[1, 2, 3], [4, 5, 6], [None, 2, 3], [None, 4, 5]],
        names=["key_left", "a", "key_right", "b"]
    )
    assert result.sort_by("a").equals(expected)

    # manually specifying output columns
    join_opts = HashJoinNodeOptions(
        "left outer", left_keys="key", right_keys="key",
        left_output=["key", "a"], right_output=[field("b")])
    joined = Declaration(
        "hashjoin", options=join_opts, inputs=[left_source, right_source])
    result = joined.to_table()
    expected = pa.table(
        [[1, 2, 3], [4, 5, 6], [None, 4, 5]],
        names=["key", "a", "b"]
    )
    assert result.sort_by("a").equals(expected)


def test_asof_join():
    left = pa.table({'key': [1, 2, 3], 'ts': [1, 1, 1], 'a': [4, 5, 6]})
    left_source = Declaration("table_source", options=TableSourceNodeOptions(left))
    right = pa.table({'key': [2, 3, 4], 'ts': [2, 5, 2], 'b': [4, 5, 6]})
    right_source = Declaration("table_source", options=TableSourceNodeOptions(right))

    # asof join
    join_opts = AsofJoinNodeOptions(
        left_on="ts", left_by=["key"],
        right_on="ts", right_by=["key"],
        tolerance=1,
    )
    joined = Declaration(
        "asofjoin", options=join_opts, inputs=[left_source, right_source]
    )
    result = joined.to_table()
    expected = pa.table(
        [[1, 2, 3], [1, 1, 1], [4, 5, 6], [None, 4, None]],
        names=["key", "ts", "a", "b"])
    assert result == expected

    for by in [field("key"), ["key"], [field("key")]]:
        for on in [field("ts"), "ts"]:
            join_opts = AsofJoinNodeOptions(
                left_on=on, left_by=by,
                right_on=on, right_by=by,
                tolerance=1,
            )
            joined = Declaration(
                "asofjoin", options=join_opts, inputs=[left_source, right_source])
            result = joined.to_table()
            assert result == expected


@pytest.mark.dataset
def test_scan(tempdir):
    table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
    ds.write_dataset(table, tempdir / "dataset", format="parquet")
    dataset = ds.dataset(tempdir / "dataset", format="parquet")
    decl = Declaration("scan", ScanNodeOptions(dataset))
    result = decl.to_table()
    assert result.schema.names == [
        "a", "b", "__fragment_index", "__batch_index",
        "__last_in_fragment", "__filename"
    ]
    assert result.select(["a", "b"]).equals(table)

    # using a filter only does pushdown (depending on file format), not actual filter

    scan_opts = ScanNodeOptions(dataset, filter=field('a') > 1)
    decl = Declaration("scan", scan_opts)
    # fragment not filtered based on min/max statistics
    assert decl.to_table().num_rows == 3

    scan_opts = ScanNodeOptions(dataset, filter=field('a') > 4)
    decl = Declaration("scan", scan_opts)
    # full fragment filtered based on min/max statistics
    assert decl.to_table().num_rows == 0

    # projection scan option

    scan_opts = ScanNodeOptions(dataset, columns={"a2": pc.multiply(field("a"), 2)})
    decl = Declaration("scan", scan_opts)
    result = decl.to_table()
    # "a" is included in the result (needed later on for the actual projection)
    assert result["a"].to_pylist() == [1, 2, 3]
    # "b" is still included, but without data as it will be removed by the projection
    assert pc.all(result["b"].is_null()).as_py()
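
As the tests above show, an Acero plan is a graph of named `Declaration` nodes materialized with `to_table()`. A minimal standalone sketch of the pattern (illustrative, not taken from the file):

import pyarrow as pa
import pyarrow.compute as pc
from pyarrow.acero import (
    Declaration, TableSourceNodeOptions, FilterNodeOptions, ProjectNodeOptions)
from pyarrow.compute import field

table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
plan = Declaration.from_sequence([
    Declaration("table_source", TableSourceNodeOptions(table)),
    Declaration("filter", FilterNodeOptions(field("a") > 1)),
    Declaration("project", ProjectNodeOptions([pc.multiply(field("b"), 10)], ["b10"])),
])
# Rows with a > 1 survive the filter, so column "b10" holds [50, 60].
print(plan.to_table())
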
venv/lib/python3.10/site-packages/pyarrow/tests/test_adhoc_memory_leak.py
ADDED
@@ -0,0 +1,43 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

import numpy as np
import pyarrow as pa

import pyarrow.tests.util as test_util

try:
    import pandas as pd
except ImportError:
    pass


@pytest.mark.memory_leak
@pytest.mark.pandas
def test_deserialize_pandas_arrow_7956():
    df = pd.DataFrame({'a': np.arange(10000),
                       'b': [test_util.rands(5) for _ in range(10000)]})

    def action():
        df_bytes = pa.ipc.serialize_pandas(df).to_pybytes()
        buf = pa.py_buffer(df_bytes)
        pa.ipc.deserialize_pandas(buf)

    # Abort at 128MB threshold
    test_util.memory_leak_check(action, threshold=1 << 27, iterations=100)
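
The `memory_leak_check` helper referenced above lives in `pyarrow.tests.util`. A naive stand-in conveying the idea (illustrative only; it assumes a Unix `resource` module and Linux's kilobyte `ru_maxrss` units, and is not the real helper's implementation):

import resource


def naive_memory_leak_check(action, threshold, iterations):
    action()  # warm-up, so one-time allocations are not counted as leaks
    before = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    for _ in range(iterations):
        action()
    after = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    # ru_maxrss is reported in kilobytes on Linux; threshold is in bytes
    assert (after - before) * 1024 < threshold
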
venv/lib/python3.10/site-packages/pyarrow/tests/test_array.py
ADDED
The diff for this file is too large to render.
See raw diff
venv/lib/python3.10/site-packages/pyarrow/tests/test_builder.py
ADDED
@@ -0,0 +1,86 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import weakref

import numpy as np

import pyarrow as pa
from pyarrow.lib import StringBuilder, StringViewBuilder


def test_weakref():
    sbuilder = StringBuilder()
    wr = weakref.ref(sbuilder)
    assert wr() is not None
    del sbuilder
    assert wr() is None


def test_string_builder_append():
    sbuilder = StringBuilder()
    sbuilder.append(b"a byte string")
    sbuilder.append("a string")
    sbuilder.append(np.nan)
    sbuilder.append(None)
    assert len(sbuilder) == 4
    assert sbuilder.null_count == 2
    arr = sbuilder.finish()
    assert len(sbuilder) == 0
    assert isinstance(arr, pa.Array)
    assert arr.null_count == 2
    assert arr.type == 'str'
    expected = ["a byte string", "a string", None, None]
    assert arr.to_pylist() == expected


def test_string_builder_append_values():
    sbuilder = StringBuilder()
    sbuilder.append_values([np.nan, None, "text", None, "other text"])
    assert sbuilder.null_count == 3
    arr = sbuilder.finish()
    assert arr.null_count == 3
    expected = [None, None, "text", None, "other text"]
    assert arr.to_pylist() == expected


def test_string_builder_append_after_finish():
    sbuilder = StringBuilder()
    sbuilder.append_values([np.nan, None, "text", None, "other text"])
    arr = sbuilder.finish()
    sbuilder.append("No effect")
    expected = [None, None, "text", None, "other text"]
    assert arr.to_pylist() == expected


def test_string_view_builder():
    builder = StringViewBuilder()
    builder.append(b"a byte string")
    builder.append("a string")
    builder.append("a longer not-inlined string")
    builder.append(np.nan)
    builder.append_values([None, "text"])
    assert len(builder) == 6
    assert builder.null_count == 2
    arr = builder.finish()
    assert isinstance(arr, pa.Array)
    assert arr.null_count == 2
    assert arr.type == 'string_view'
    expected = [
        "a byte string", "a string", "a longer not-inlined string", None, None, "text"
    ]
    assert arr.to_pylist() == expected
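
Builders like `StringBuilder` are aimed at incremental construction, where values arrive in chunks rather than as one Python sequence. A small sketch of that usage (the `build_from_stream` helper is hypothetical):

import pyarrow as pa
from pyarrow.lib import StringBuilder


def build_from_stream(chunks):
    builder = StringBuilder()
    for chunk in chunks:
        builder.append_values(chunk)  # accepts str, bytes, and None
    return builder.finish()  # returns a pa.Array and resets the builder


arr = build_from_stream([["a", None], ["b"]])
assert arr.to_pylist() == ["a", None, "b"]
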
venv/lib/python3.10/site-packages/pyarrow/tests/test_cffi.py
ADDED
@@ -0,0 +1,707 @@
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import contextlib
import ctypes
import gc

import pyarrow as pa
try:
    from pyarrow.cffi import ffi
except ImportError:
    ffi = None

import pytest

try:
    import pandas as pd
    import pandas.testing as tm
except ImportError:
    pd = tm = None


needs_cffi = pytest.mark.skipif(ffi is None,
                                reason="test needs cffi package installed")

assert_schema_released = pytest.raises(
    ValueError, match="Cannot import released ArrowSchema")

assert_array_released = pytest.raises(
    ValueError, match="Cannot import released ArrowArray")

assert_stream_released = pytest.raises(
    ValueError, match="Cannot import released ArrowArrayStream")


def PyCapsule_IsValid(capsule, name):
    return ctypes.pythonapi.PyCapsule_IsValid(ctypes.py_object(capsule), name) == 1


@contextlib.contextmanager
def registered_extension_type(ext_type):
    pa.register_extension_type(ext_type)
    try:
        yield
    finally:
        pa.unregister_extension_type(ext_type.extension_name)


class ParamExtType(pa.ExtensionType):

    def __init__(self, width):
        self._width = width
        super().__init__(pa.binary(width),
                         "pyarrow.tests.test_cffi.ParamExtType")

    @property
    def width(self):
        return self._width

    def __arrow_ext_serialize__(self):
        return str(self.width).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        width = int(serialized.decode())
        return cls(width)


def make_schema():
    return pa.schema([('ints', pa.list_(pa.int32()))],
                     metadata={b'key1': b'value1'})


def make_extension_schema():
    return pa.schema([('ext', ParamExtType(3))],
                     metadata={b'key1': b'value1'})


def make_extension_storage_schema():
    # Should be kept in sync with make_extension_schema
    return pa.schema([('ext', ParamExtType(3).storage_type)],
                     metadata={b'key1': b'value1'})


def make_batch():
    return pa.record_batch([[[1], [2, 42]]], make_schema())


def make_extension_batch():
    schema = make_extension_schema()
    ext_col = schema[0].type.wrap_array(pa.array([b"foo", b"bar"],
                                                 type=pa.binary(3)))
    return pa.record_batch([ext_col], schema)


def make_batches():
    schema = make_schema()
    return [
        pa.record_batch([[[1], [2, 42]]], schema),
        pa.record_batch([[None, [], [5, 6]]], schema),
    ]


def make_serialized(schema, batches):
    with pa.BufferOutputStream() as sink:
        with pa.ipc.new_stream(sink, schema) as out:
            for batch in batches:
                out.write(batch)
        return sink.getvalue()


@needs_cffi
def test_export_import_type():
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    typ = pa.list_(pa.int32())
    typ._export_to_c(ptr_schema)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    del typ
    assert pa.total_allocated_bytes() > old_allocated
    typ_new = pa.DataType._import_from_c(ptr_schema)
    assert typ_new == pa.list_(pa.int32())
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        pa.DataType._import_from_c(ptr_schema)

    # Invalid format string
    pa.int32()._export_to_c(ptr_schema)
    bad_format = ffi.new("char[]", b"zzz")
    c_schema.format = bad_format
    with pytest.raises(ValueError,
                       match="Invalid or unsupported format string"):
        pa.DataType._import_from_c(ptr_schema)
    # Now released
    with assert_schema_released:
        pa.DataType._import_from_c(ptr_schema)


@needs_cffi
def test_export_import_field():
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    field = pa.field("test", pa.list_(pa.int32()), nullable=True)
    field._export_to_c(ptr_schema)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    del field
    assert pa.total_allocated_bytes() > old_allocated

    field_new = pa.Field._import_from_c(ptr_schema)
    assert field_new == pa.field("test", pa.list_(pa.int32()), nullable=True)
    assert pa.total_allocated_bytes() == old_allocated

    # Now released
    with assert_schema_released:
        pa.Field._import_from_c(ptr_schema)


def check_export_import_array(array_type, exporter, importer):
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))
    c_array = ffi.new(f"struct {array_type}*")
    ptr_array = int(ffi.cast("uintptr_t", c_array))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    # Type is known up front
    typ = pa.list_(pa.int32())
    arr = pa.array([[1], [2, 42]], type=typ)
    py_value = arr.to_pylist()
    exporter(arr, ptr_array)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete recreate C++ object from exported pointer
    del arr
    arr_new = importer(ptr_array, typ)
    assert arr_new.to_pylist() == py_value
    assert arr_new.type == pa.list_(pa.int32())
    assert pa.total_allocated_bytes() > old_allocated
    del arr_new, typ
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_array_released:
        importer(ptr_array, pa.list_(pa.int32()))

    # Type is exported and imported at the same time
    arr = pa.array([[1], [2, 42]], type=pa.list_(pa.int32()))
    py_value = arr.to_pylist()
    exporter(arr, ptr_array, ptr_schema)
    # Delete and recreate C++ objects from exported pointers
    del arr
    arr_new = importer(ptr_array, ptr_schema)
    assert arr_new.to_pylist() == py_value
    assert arr_new.type == pa.list_(pa.int32())
    assert pa.total_allocated_bytes() > old_allocated
    del arr_new
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        importer(ptr_array, ptr_schema)


@needs_cffi
def test_export_import_array():
    check_export_import_array(
        "ArrowArray",
        pa.Array._export_to_c,
        pa.Array._import_from_c,
    )


@needs_cffi
def test_export_import_device_array():
    check_export_import_array(
        "ArrowDeviceArray",
        pa.Array._export_to_c_device,
        pa.Array._import_from_c_device,
    )

    # verify exported struct
    c_array = ffi.new("struct ArrowDeviceArray*")
    ptr_array = int(ffi.cast("uintptr_t", c_array))
    arr = pa.array([[1], [2, 42]], type=pa.list_(pa.int32()))
    arr._export_to_c_device(ptr_array)

    assert c_array.device_type == 1  # ARROW_DEVICE_CPU 1
    assert c_array.device_id == -1
    assert c_array.array.length == 2


def check_export_import_schema(schema_factory, expected_schema_factory=None):
    if expected_schema_factory is None:
        expected_schema_factory = schema_factory

    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    schema_factory()._export_to_c(ptr_schema)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    schema_new = pa.Schema._import_from_c(ptr_schema)
    assert schema_new == expected_schema_factory()
    assert pa.total_allocated_bytes() == old_allocated
    del schema_new
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        pa.Schema._import_from_c(ptr_schema)

    # Not a struct type
    pa.int32()._export_to_c(ptr_schema)
    with pytest.raises(ValueError,
                       match="ArrowSchema describes non-struct type"):
        pa.Schema._import_from_c(ptr_schema)
    # Now released
    with assert_schema_released:
        pa.Schema._import_from_c(ptr_schema)


@needs_cffi
def test_export_import_schema():
    check_export_import_schema(make_schema)


@needs_cffi
def test_export_import_schema_with_extension():
    # Extension type is unregistered => the storage type is imported
    check_export_import_schema(make_extension_schema,
                               make_extension_storage_schema)

    # Extension type is registered => the extension type is imported
    with registered_extension_type(ParamExtType(1)):
        check_export_import_schema(make_extension_schema)


@needs_cffi
def test_export_import_schema_float_pointer():
    # Previous versions of the R Arrow library used to pass pointer
    # values as a double.
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))

    match = "Passing a pointer value as a float is unsafe"
    with pytest.warns(UserWarning, match=match):
        make_schema()._export_to_c(float(ptr_schema))
    with pytest.warns(UserWarning, match=match):
        schema_new = pa.Schema._import_from_c(float(ptr_schema))
    assert schema_new == make_schema()


def check_export_import_batch(array_type, exporter, importer, batch_factory):
    c_schema = ffi.new("struct ArrowSchema*")
    ptr_schema = int(ffi.cast("uintptr_t", c_schema))
    c_array = ffi.new(f"struct {array_type}*")
    ptr_array = int(ffi.cast("uintptr_t", c_array))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    # Schema is known up front
    batch = batch_factory()
    schema = batch.schema
    py_value = batch.to_pydict()
    exporter(batch, ptr_array)
    assert pa.total_allocated_bytes() > old_allocated
    # Delete and recreate C++ object from exported pointer
    del batch
    batch_new = importer(ptr_array, schema)
    assert batch_new.to_pydict() == py_value
    assert batch_new.schema == schema
    assert pa.total_allocated_bytes() > old_allocated
    del batch_new, schema
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_array_released:
        importer(ptr_array, make_schema())

    # Type is exported and imported at the same time
    batch = batch_factory()
    py_value = batch.to_pydict()
    batch._export_to_c(ptr_array, ptr_schema)
    # Delete and recreate C++ objects from exported pointers
    del batch
    batch_new = importer(ptr_array, ptr_schema)
    assert batch_new.to_pydict() == py_value
    assert batch_new.schema == batch_factory().schema
    assert pa.total_allocated_bytes() > old_allocated
    del batch_new
    assert pa.total_allocated_bytes() == old_allocated
    # Now released
    with assert_schema_released:
        importer(ptr_array, ptr_schema)

    # Not a struct type
    pa.int32()._export_to_c(ptr_schema)
    batch_factory()._export_to_c(ptr_array)
    with pytest.raises(ValueError,
                       match="ArrowSchema describes non-struct type"):
        importer(ptr_array, ptr_schema)
    # Now released
    with assert_schema_released:
        importer(ptr_array, ptr_schema)


@needs_cffi
def test_export_import_batch():
    check_export_import_batch(
        "ArrowArray",
        pa.RecordBatch._export_to_c,
        pa.RecordBatch._import_from_c,
        make_batch,
    )


@needs_cffi
def test_export_import_batch_with_extension():
    with registered_extension_type(ParamExtType(1)):
        check_export_import_batch(
            "ArrowArray",
            pa.RecordBatch._export_to_c,
            pa.RecordBatch._import_from_c,
            make_extension_batch,
        )


@needs_cffi
def test_export_import_device_batch():
    check_export_import_batch(
        "ArrowDeviceArray",
        pa.RecordBatch._export_to_c_device,
        pa.RecordBatch._import_from_c_device,
        make_batch,
    )

    # verify exported struct
    c_array = ffi.new("struct ArrowDeviceArray*")
    ptr_array = int(ffi.cast("uintptr_t", c_array))
    batch = make_batch()
    batch._export_to_c_device(ptr_array)
    assert c_array.device_type == 1  # ARROW_DEVICE_CPU 1
    assert c_array.device_id == -1
    assert c_array.array.length == 2


def _export_import_batch_reader(ptr_stream, reader_factory):
    # Prepare input
    batches = make_batches()
    schema = batches[0].schema

    reader = reader_factory(schema, batches)
    reader._export_to_c(ptr_stream)
    # Delete and recreate C++ object from exported pointer
    del reader, batches

    reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
    assert reader_new.schema == schema
    got_batches = list(reader_new)
    del reader_new
    assert got_batches == make_batches()

    # Test read_pandas()
    if pd is not None:
        batches = make_batches()
        schema = batches[0].schema
        expected_df = pa.Table.from_batches(batches).to_pandas()

        reader = reader_factory(schema, batches)
        reader._export_to_c(ptr_stream)
        del reader, batches

        reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
        got_df = reader_new.read_pandas()
        del reader_new
        tm.assert_frame_equal(expected_df, got_df)


def make_ipc_stream_reader(schema, batches):
    return pa.ipc.open_stream(make_serialized(schema, batches))


def make_py_record_batch_reader(schema, batches):
    return pa.RecordBatchReader.from_batches(schema, batches)


@needs_cffi
@pytest.mark.parametrize('reader_factory',
                         [make_ipc_stream_reader,
                          make_py_record_batch_reader])
def test_export_import_batch_reader(reader_factory):
    c_stream = ffi.new("struct ArrowArrayStream*")
    ptr_stream = int(ffi.cast("uintptr_t", c_stream))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    _export_import_batch_reader(ptr_stream, reader_factory)

    assert pa.total_allocated_bytes() == old_allocated

    # Now released
    with assert_stream_released:
        pa.RecordBatchReader._import_from_c(ptr_stream)


@needs_cffi
def test_export_import_exception_reader():
    # See: https://github.com/apache/arrow/issues/37164
    c_stream = ffi.new("struct ArrowArrayStream*")
    ptr_stream = int(ffi.cast("uintptr_t", c_stream))

    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    def gen():
        if True:
            try:
                raise ValueError('foo')
            except ValueError as e:
                raise NotImplementedError('bar') from e
        else:
            yield from make_batches()

    original = pa.RecordBatchReader.from_batches(make_schema(), gen())
    original._export_to_c(ptr_stream)

    reader = pa.RecordBatchReader._import_from_c(ptr_stream)
    with pytest.raises(OSError) as exc_info:
        reader.read_next_batch()

    # inner *and* outer exception should be present
    assert 'ValueError: foo' in str(exc_info.value)
    assert 'NotImplementedError: bar' in str(exc_info.value)
    # Stacktrace containing line of the raise statement
    assert 'raise ValueError(\'foo\')' in str(exc_info.value)

    assert pa.total_allocated_bytes() == old_allocated


@needs_cffi
def test_imported_batch_reader_error():
    c_stream = ffi.new("struct ArrowArrayStream*")
    ptr_stream = int(ffi.cast("uintptr_t", c_stream))

    schema = pa.schema([('foo', pa.int32())])
    batches = [pa.record_batch([[1, 2, 3]], schema=schema),
               pa.record_batch([[4, 5, 6]], schema=schema)]
    buf = make_serialized(schema, batches)

    # Open a corrupt/incomplete stream and export it
    reader = pa.ipc.open_stream(buf[:-16])
    reader._export_to_c(ptr_stream)
    del reader

    reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
    batch = reader_new.read_next_batch()
    assert batch == batches[0]
    with pytest.raises(OSError,
                       match="Expected to be able to read 16 bytes "
                             "for message body, got 8"):
        reader_new.read_next_batch()

    # Again, but call read_all()
    reader = pa.ipc.open_stream(buf[:-16])
    reader._export_to_c(ptr_stream)
    del reader

    reader_new = pa.RecordBatchReader._import_from_c(ptr_stream)
    with pytest.raises(OSError,
                       match="Expected to be able to read 16 bytes "
                             "for message body, got 8"):
        reader_new.read_all()


@pytest.mark.parametrize('obj', [pa.int32(), pa.field('foo', pa.int32()),
                                 pa.schema({'foo': pa.int32()})],
                         ids=['type', 'field', 'schema'])
def test_roundtrip_schema_capsule(obj):
    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    capsule = obj.__arrow_c_schema__()
    assert PyCapsule_IsValid(capsule, b"arrow_schema") == 1
    assert pa.total_allocated_bytes() > old_allocated
    obj_out = type(obj)._import_from_c_capsule(capsule)
    assert obj_out == obj

    assert pa.total_allocated_bytes() == old_allocated

    capsule = obj.__arrow_c_schema__()

    assert pa.total_allocated_bytes() > old_allocated
    del capsule
    assert pa.total_allocated_bytes() == old_allocated


@pytest.mark.parametrize('arr,schema_accessor,bad_type,good_type', [
    (pa.array(['a', 'b', 'c']), lambda x: x.type, pa.int32(), pa.string()),
    (
        pa.record_batch([pa.array(['a', 'b', 'c'])], names=['x']),
        lambda x: x.schema,
        pa.schema({'x': pa.int32()}),
        pa.schema({'x': pa.string()})
    ),
], ids=['array', 'record_batch'])
def test_roundtrip_array_capsule(arr, schema_accessor, bad_type, good_type):
    gc.collect()  # Make sure no Arrow data dangles in a ref cycle
    old_allocated = pa.total_allocated_bytes()

    import_array = type(arr)._import_from_c_capsule

    schema_capsule, capsule = arr.__arrow_c_array__()
    assert PyCapsule_IsValid(schema_capsule, b"arrow_schema") == 1
    assert PyCapsule_IsValid(capsule, b"arrow_array") == 1
    arr_out = import_array(schema_capsule, capsule)
    assert arr_out.equals(arr)

    assert pa.total_allocated_bytes() > old_allocated
    del arr_out

    assert pa.total_allocated_bytes() == old_allocated

    capsule = arr.__arrow_c_array__()

    assert pa.total_allocated_bytes() > old_allocated
    del capsule
    assert pa.total_allocated_bytes() == old_allocated

    with pytest.raises(ValueError,
                       match=r"Could not cast.* string to requested .* int32"):
        arr.__arrow_c_array__(bad_type.__arrow_c_schema__())

    schema_capsule, array_capsule = arr.__arrow_c_array__(
        good_type.__arrow_c_schema__())
    arr_out = import_array(schema_capsule, array_capsule)
    assert schema_accessor(arr_out) == good_type


# TODO: implement requested_schema for stream
@pytest.mark.parametrize('constructor', [
|
608 |
+
pa.RecordBatchReader.from_batches,
|
609 |
+
# Use a lambda because we need to re-order the parameters
|
610 |
+
lambda schema, batches: pa.Table.from_batches(batches, schema),
|
611 |
+
], ids=['recordbatchreader', 'table'])
|
612 |
+
def test_roundtrip_reader_capsule(constructor):
|
613 |
+
batches = make_batches()
|
614 |
+
schema = batches[0].schema
|
615 |
+
|
616 |
+
gc.collect() # Make sure no Arrow data dangles in a ref cycle
|
617 |
+
old_allocated = pa.total_allocated_bytes()
|
618 |
+
|
619 |
+
obj = constructor(schema, batches)
|
620 |
+
|
621 |
+
capsule = obj.__arrow_c_stream__()
|
622 |
+
assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1
|
623 |
+
imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
|
624 |
+
assert imported_reader.schema == schema
|
625 |
+
imported_batches = list(imported_reader)
|
626 |
+
assert len(imported_batches) == len(batches)
|
627 |
+
for batch, expected in zip(imported_batches, batches):
|
628 |
+
assert batch.equals(expected)
|
629 |
+
|
630 |
+
del obj, imported_reader, batch, expected, imported_batches
|
631 |
+
|
632 |
+
assert pa.total_allocated_bytes() == old_allocated
|
633 |
+
|
634 |
+
obj = constructor(schema, batches)
|
635 |
+
|
636 |
+
bad_schema = pa.schema({'ints': pa.int32()})
|
637 |
+
with pytest.raises(pa.lib.ArrowTypeError, match="Field 0 cannot be cast"):
|
638 |
+
obj.__arrow_c_stream__(bad_schema.__arrow_c_schema__())
|
639 |
+
|
640 |
+
# Can work with matching schema
|
641 |
+
matching_schema = pa.schema({'ints': pa.list_(pa.int32())})
|
642 |
+
capsule = obj.__arrow_c_stream__(matching_schema.__arrow_c_schema__())
|
643 |
+
imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
|
644 |
+
assert imported_reader.schema == matching_schema
|
645 |
+
for batch, expected in zip(imported_reader, batches):
|
646 |
+
assert batch.equals(expected)
|
647 |
+
|
648 |
+
|
649 |
+
def test_roundtrip_batch_reader_capsule_requested_schema():
|
650 |
+
batch = make_batch()
|
651 |
+
requested_schema = pa.schema([('ints', pa.list_(pa.int64()))])
|
652 |
+
requested_capsule = requested_schema.__arrow_c_schema__()
|
653 |
+
batch_as_requested = batch.cast(requested_schema)
|
654 |
+
|
655 |
+
capsule = batch.__arrow_c_stream__(requested_capsule)
|
656 |
+
assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1
|
657 |
+
imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
|
658 |
+
assert imported_reader.schema == requested_schema
|
659 |
+
assert imported_reader.read_next_batch().equals(batch_as_requested)
|
660 |
+
with pytest.raises(StopIteration):
|
661 |
+
imported_reader.read_next_batch()
|
662 |
+
|
663 |
+
|
664 |
+
def test_roundtrip_batch_reader_capsule():
|
665 |
+
batch = make_batch()
|
666 |
+
|
667 |
+
capsule = batch.__arrow_c_stream__()
|
668 |
+
assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1
|
669 |
+
imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
|
670 |
+
assert imported_reader.schema == batch.schema
|
671 |
+
assert imported_reader.read_next_batch().equals(batch)
|
672 |
+
with pytest.raises(StopIteration):
|
673 |
+
imported_reader.read_next_batch()
|
674 |
+
|
675 |
+
|
676 |
+
def test_roundtrip_chunked_array_capsule():
|
677 |
+
chunked = pa.chunked_array([pa.array(["a", "b", "c"])])
|
678 |
+
|
679 |
+
capsule = chunked.__arrow_c_stream__()
|
680 |
+
assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1
|
681 |
+
imported_chunked = pa.ChunkedArray._import_from_c_capsule(capsule)
|
682 |
+
assert imported_chunked.type == chunked.type
|
683 |
+
assert imported_chunked == chunked
|
684 |
+
|
685 |
+
|
686 |
+
def test_roundtrip_chunked_array_capsule_requested_schema():
|
687 |
+
chunked = pa.chunked_array([pa.array(["a", "b", "c"])])
|
688 |
+
|
689 |
+
# Requesting the same type should work
|
690 |
+
requested_capsule = chunked.type.__arrow_c_schema__()
|
691 |
+
capsule = chunked.__arrow_c_stream__(requested_capsule)
|
692 |
+
imported_chunked = pa.ChunkedArray._import_from_c_capsule(capsule)
|
693 |
+
assert imported_chunked == chunked
|
694 |
+
|
695 |
+
# Casting to something else should error if not possible
|
696 |
+
requested_type = pa.binary()
|
697 |
+
requested_capsule = requested_type.__arrow_c_schema__()
|
698 |
+
capsule = chunked.__arrow_c_stream__(requested_capsule)
|
699 |
+
imported_chunked = pa.ChunkedArray._import_from_c_capsule(capsule)
|
700 |
+
assert imported_chunked == chunked.cast(pa.binary())
|
701 |
+
|
702 |
+
requested_type = pa.int64()
|
703 |
+
requested_capsule = requested_type.__arrow_c_schema__()
|
704 |
+
with pytest.raises(
|
705 |
+
ValueError, match="Could not cast string to requested type int64"
|
706 |
+
):
|
707 |
+
chunked.__arrow_c_stream__(requested_capsule)
|
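The capsule tests above all drive the same Arrow PyCapsule protocol that lets two libraries hand columnar data across an FFI boundary without copying. A minimal sketch of the happy path outside the test harness, using only the dunder methods exercised above (the table contents are illustrative):

import pyarrow as pa

# Illustrative sketch, not part of test_cffi.py: the producer exports a
# stream capsule, the consumer imports it.
table = pa.table({"ints": pa.array([[1, 2], [3]], type=pa.list_(pa.int32()))})
capsule = table.__arrow_c_stream__()

# Any PyCapsule-aware consumer could take over here; pyarrow's own reader
# doubles as the consumer for the sketch.
reader = pa.RecordBatchReader._import_from_c_capsule(capsule)
assert reader.read_all().equals(table)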
venv/lib/python3.10/site-packages/pyarrow/tests/test_compute.py
ADDED
The diff for this file is too large to render.
See raw diff
venv/lib/python3.10/site-packages/pyarrow/tests/test_convert_builtin.py
ADDED
@@ -0,0 +1,2536 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import collections
import datetime
import decimal
import itertools
import math
import re
import sys

import hypothesis as h
import numpy as np
import pytest

from pyarrow.pandas_compat import _pandas_api  # noqa
import pyarrow as pa
from pyarrow.tests import util
import pyarrow.tests.strategies as past


int_type_pairs = [
    (np.int8, pa.int8()),
    (np.int16, pa.int16()),
    (np.int32, pa.int32()),
    (np.int64, pa.int64()),
    (np.uint8, pa.uint8()),
    (np.uint16, pa.uint16()),
    (np.uint32, pa.uint32()),
    (np.uint64, pa.uint64())]


np_int_types, pa_int_types = zip(*int_type_pairs)


class StrangeIterable:
    def __init__(self, lst):
        self.lst = lst

    def __iter__(self):
        return self.lst.__iter__()


class MyInt:
    def __init__(self, value):
        self.value = value

    def __int__(self):
        return self.value


class MyBrokenInt:
    def __int__(self):
        1/0  # MARKER


def check_struct_type(ty, expected):
    """
    Check a struct type is as expected, but not taking order into account.
    """
    assert pa.types.is_struct(ty)
    assert set(ty) == set(expected)


def test_iterable_types():
    arr1 = pa.array(StrangeIterable([0, 1, 2, 3]))
    arr2 = pa.array((0, 1, 2, 3))

    assert arr1.equals(arr2)


def test_empty_iterable():
    arr = pa.array(StrangeIterable([]))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []


def test_limited_iterator_types():
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=3)
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)


def test_limited_iterator_size_overflow():
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=2)
    arr2 = pa.array((0, 1))
    assert arr1.equals(arr2)


def test_limited_iterator_size_underflow():
    arr1 = pa.array(iter(range(3)), type=pa.int64(), size=10)
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)


def test_iterator_without_size():
    expected = pa.array((0, 1, 2))
    arr1 = pa.array(iter(range(3)))
    assert arr1.equals(expected)
    # Same with explicit type
    arr1 = pa.array(iter(range(3)), type=pa.int64())
    assert arr1.equals(expected)


def test_infinite_iterator():
    expected = pa.array((0, 1, 2))
    arr1 = pa.array(itertools.count(0), size=3)
    assert arr1.equals(expected)
    # Same with explicit type
    arr1 = pa.array(itertools.count(0), type=pa.int64(), size=3)
    assert arr1.equals(expected)
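A quick usage note on the size argument the iterator tests above pin down: size caps how many items pa.array pulls from an iterator, and a too-large size simply yields a shorter array rather than erroring. A small sketch restating both behaviors (values are illustrative):

import itertools

import pyarrow as pa

# An unbounded iterator needs an explicit size to terminate.
arr = pa.array(itertools.count(10), type=pa.int64(), size=4)
assert arr.to_pylist() == [10, 11, 12, 13]

# A size larger than the iterator: only the available items are consumed.
arr = pa.array(iter(range(2)), type=pa.int64(), size=100)
assert arr.to_pylist() == [0, 1]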
def test_failing_iterator():
    with pytest.raises(ZeroDivisionError):
        pa.array((1 // 0 for x in range(10)))
    # ARROW-17253
    with pytest.raises(ZeroDivisionError):
        pa.array((1 // 0 for x in range(10)), size=10)


class ObjectWithOnlyGetitem:
    def __getitem__(self, key):
        return 3


def test_object_with_getitem():
    # https://github.com/apache/arrow/issues/34944
    # considered as sequence because of __getitem__, but has no length
    with pytest.raises(TypeError, match="has no len()"):
        pa.array(ObjectWithOnlyGetitem())


def _as_list(xs):
    return xs


def _as_tuple(xs):
    return tuple(xs)


def _as_deque(xs):
    # deque is a sequence while neither tuple nor list
    return collections.deque(xs)


def _as_dict_values(xs):
    # a dict values object is not a sequence, just a regular iterable
    dct = {k: v for k, v in enumerate(xs)}
    return dct.values()


def _as_numpy_array(xs):
    arr = np.empty(len(xs), dtype=object)
    arr[:] = xs
    return arr


def _as_set(xs):
    return set(xs)


SEQUENCE_TYPES = [_as_list, _as_tuple, _as_numpy_array]
ITERABLE_TYPES = [_as_set, _as_dict_values] + SEQUENCE_TYPES
COLLECTIONS_TYPES = [_as_deque] + ITERABLE_TYPES

parametrize_with_iterable_types = pytest.mark.parametrize(
    "seq", ITERABLE_TYPES
)

parametrize_with_sequence_types = pytest.mark.parametrize(
    "seq", SEQUENCE_TYPES
)

parametrize_with_collections_types = pytest.mark.parametrize(
    "seq", COLLECTIONS_TYPES
)
@parametrize_with_collections_types
def test_sequence_types(seq):
    arr1 = pa.array(seq([1, 2, 3]))
    arr2 = pa.array([1, 2, 3])

    assert arr1.equals(arr2)


@parametrize_with_iterable_types
def test_nested_sequence_types(seq):
    arr1 = pa.array([seq([1, 2, 3])])
    arr2 = pa.array([[1, 2, 3]])

    assert arr1.equals(arr2)


@parametrize_with_sequence_types
def test_sequence_boolean(seq):
    expected = [True, None, False, None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
def test_sequence_numpy_boolean(seq):
    expected = [np.bool_(True), None, np.bool_(False), None]
    arr = pa.array(seq(expected))
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == [True, None, False, None]


@parametrize_with_sequence_types
def test_sequence_mixed_numpy_python_bools(seq):
    values = np.array([True, False])
    arr = pa.array(seq([values[0], None, values[1], True, False]))
    assert arr.type == pa.bool_()
    assert arr.to_pylist() == [True, None, False, True, False]


@parametrize_with_collections_types
def test_empty_list(seq):
    arr = pa.array(seq([]))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []


@parametrize_with_sequence_types
def test_nested_lists(seq):
    data = [[], [1, 2], None]
    arr = pa.array(seq(data))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data


@parametrize_with_sequence_types
@pytest.mark.parametrize("factory", [
    pa.list_, pa.large_list, pa.list_view, pa.large_list_view])
def test_nested_lists_with_explicit_type(seq, factory):
    data = [[], [1, 2], None]
    arr = pa.array(seq(data), type=factory(pa.int16()))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == factory(pa.int16())
    assert arr.to_pylist() == data


@parametrize_with_collections_types
def test_list_with_non_list(seq):
    # List types don't accept non-sequences
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.list_(pa.int64()))
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.large_list(pa.int64()))
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.list_view(pa.int64()))
    with pytest.raises(TypeError):
        pa.array(seq([[], [1, 2], 3]), type=pa.large_list_view(pa.int64()))


@parametrize_with_sequence_types
@pytest.mark.parametrize("factory", [
    pa.list_, pa.large_list, pa.list_view, pa.large_list_view])
def test_nested_arrays(seq, factory):
    arr = pa.array(seq([np.array([], dtype=np.int64),
                        np.array([1, 2], dtype=np.int64), None]),
                   type=factory(pa.int64()))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == factory(pa.int64())
    assert arr.to_pylist() == [[], [1, 2], None]


@parametrize_with_sequence_types
def test_nested_fixed_size_list(seq):
    # sequence of lists
    data = [[1, 2], [3, None], None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 2)
    assert arr.to_pylist() == data

    # sequence of numpy arrays
    data = [np.array([1, 2], dtype='int64'), np.array([3, 4], dtype='int64'),
            None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 2)
    assert arr.to_pylist() == [[1, 2], [3, 4], None]

    # incorrect length of the lists or arrays
    data = [[1, 2, 4], [3, None], None]
    for data in [[[1, 2, 3]], [np.array([1, 2, 4], dtype='int64')]]:
        with pytest.raises(
                ValueError, match="Length of item not correct: expected 2"):
            pa.array(seq(data), type=pa.list_(pa.int64(), 2))

    # with list size of 0
    data = [[], [], None]
    arr = pa.array(seq(data), type=pa.list_(pa.int64(), 0))
    assert len(arr) == 3
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.int64(), 0)
    assert arr.to_pylist() == [[], [], None]


@parametrize_with_sequence_types
def test_sequence_all_none(seq):
    arr = pa.array(seq([None, None]))
    assert len(arr) == 2
    assert arr.null_count == 2
    assert arr.type == pa.null()
    assert arr.to_pylist() == [None, None]


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer(seq, np_scalar_pa_type):
    np_scalar, pa_type = np_scalar_pa_type
    expected = [1, None, 3, None,
                np.iinfo(np_scalar).min, np.iinfo(np_scalar).max]
    arr = pa.array(seq(expected), type=pa_type)
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_collections_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_np_nan(seq, np_scalar_pa_type):
    # ARROW-2806: numpy.nan is a double value and thus should produce
    # a double array.
    _, pa_type = np_scalar_pa_type
    with pytest.raises(ValueError):
        pa.array(seq([np.nan]), type=pa_type, from_pandas=False)

    arr = pa.array(seq([np.nan]), type=pa_type, from_pandas=True)
    expected = [None]
    assert len(arr) == 1
    assert arr.null_count == 1
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_nested_np_nan(seq, np_scalar_pa_type):
    # ARROW-2806: numpy.nan is a double value and thus should produce
    # a double array.
    _, pa_type = np_scalar_pa_type
    with pytest.raises(ValueError):
        pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=False)

    arr = pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=True)
    expected = [[None]]
    assert len(arr) == 1
    assert arr.null_count == 0
    assert arr.type == pa.list_(pa_type)
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
def test_sequence_integer_inferred(seq):
    expected = [1, None, 3, None]
    arr = pa.array(seq(expected))
    assert len(arr) == 4
    assert arr.null_count == 2
    assert arr.type == pa.int64()
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer(seq, np_scalar_pa_type):
    np_scalar, pa_type = np_scalar_pa_type
    expected = [np_scalar(1), None, np_scalar(3), None,
                np_scalar(np.iinfo(np_scalar).min),
                np_scalar(np.iinfo(np_scalar).max)]
    arr = pa.array(seq(expected), type=pa_type)
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer_inferred(seq, np_scalar_pa_type):
    np_scalar, pa_type = np_scalar_pa_type
    expected = [np_scalar(1), None, np_scalar(3), None]
    expected += [np_scalar(np.iinfo(np_scalar).min),
                 np_scalar(np.iinfo(np_scalar).max)]
    arr = pa.array(seq(expected))
    assert len(arr) == 6
    assert arr.null_count == 2
    assert arr.type == pa_type
    assert arr.to_pylist() == expected


@parametrize_with_sequence_types
def test_sequence_custom_integers(seq):
    expected = [0, 42, 2**33 + 1, -2**63]
    data = list(map(MyInt, expected))
    arr = pa.array(seq(data), type=pa.int64())
    assert arr.to_pylist() == expected


@parametrize_with_collections_types
def test_broken_integers(seq):
    data = [MyBrokenInt()]
    with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"):
        pa.array(seq(data), type=pa.int64())
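The from_pandas switch that test_sequence_integer_np_nan exercises is worth restating on its own: by default np.nan is treated as a double value (per ARROW-2806), so it cannot be stored in an integer column, while from_pandas=True reads it as a null sentinel instead. A compact sketch of both paths (values are illustrative):

import numpy as np
import pyarrow as pa

# Default: NaN is a double, so an integer target type is rejected.
try:
    pa.array([1, np.nan], type=pa.int32())
except ValueError:
    pass  # expected, as in test_sequence_integer_np_nan

# from_pandas=True: NaN becomes a null instead.
arr = pa.array([1, np.nan], type=pa.int32(), from_pandas=True)
assert arr.to_pylist() == [1, None]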
def test_numpy_scalars_mixed_type():
    # ARROW-4324
    data = [np.int32(10), np.float32(0.5)]
    arr = pa.array(data)
    expected = pa.array([10, 0.5], type="float64")
    assert arr.equals(expected)

    # ARROW-9490
    data = [np.int8(10), np.float32(0.5)]
    arr = pa.array(data)
    expected = pa.array([10, 0.5], type="float32")
    assert arr.equals(expected)


@pytest.mark.xfail(reason="Type inference for uint64 not implemented",
                   raises=OverflowError)
def test_uint64_max_convert():
    data = [0, np.iinfo(np.uint64).max]

    arr = pa.array(data, type=pa.uint64())
    expected = pa.array(np.array(data, dtype='uint64'))
    assert arr.equals(expected)

    arr_inferred = pa.array(data)
    assert arr_inferred.equals(expected)


@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_signed_integer_overflow(bits):
    ty = getattr(pa, "int%d" % bits)()
    # XXX ideally would always raise OverflowError
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([2 ** (bits - 1)], ty)
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([-2 ** (bits - 1) - 1], ty)


@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_unsigned_integer_overflow(bits):
    ty = getattr(pa, "uint%d" % bits)()
    # XXX ideally would always raise OverflowError
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([2 ** bits], ty)
    with pytest.raises((OverflowError, pa.ArrowInvalid)):
        pa.array([-1], ty)
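The bounds these overflow tests probe follow directly from two's-complement arithmetic: intN holds -2**(N-1) through 2**(N-1) - 1, and uintN holds 0 through 2**N - 1. A spelled-out instance for 16 bits, mirroring the parametrized tests:

import pyarrow as pa

lo, hi = -2 ** 15, 2 ** 15 - 1          # int16 range: -32768 .. 32767
assert pa.array([lo, hi], type=pa.int16()).to_pylist() == [lo, hi]

# One step past either bound fails, as in test_signed_integer_overflow.
for bad in (hi + 1, lo - 1):
    try:
        pa.array([bad], type=pa.int16())
    except (OverflowError, pa.ArrowInvalid):
        pass  # expected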
@parametrize_with_collections_types
@pytest.mark.parametrize("typ", pa_int_types)
def test_integer_from_string_error(seq, typ):
    # ARROW-9451: pa.array(['1'], type=pa.uint32()) should not succeed
    with pytest.raises(pa.ArrowInvalid):
        pa.array(seq(['1']), type=typ)


def test_convert_with_mask():
    data = [1, 2, 3, 4, 5]
    mask = np.array([False, True, False, False, True])

    result = pa.array(data, mask=mask)
    expected = pa.array([1, None, 3, 4, None])

    assert result.equals(expected)

    # Mask wrong length
    with pytest.raises(ValueError):
        pa.array(data, mask=mask[1:])


def test_garbage_collection():
    import gc

    # Force the cyclic garbage collector to run
    gc.collect()

    bytes_before = pa.total_allocated_bytes()
    pa.array([1, None, 3, None])
    gc.collect()
    assert pa.total_allocated_bytes() == bytes_before
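The mask argument shown in test_convert_with_mask is not specific to integers: True marks a slot as null, and the mask length must match the data exactly. A variant of the same idea with strings (illustrative values):

import numpy as np
import pyarrow as pa

data = ["a", "b", "c"]
mask = np.array([False, True, False])

# True entries in the mask become nulls in the resulting array.
arr = pa.array(data, mask=mask)
assert arr.to_pylist() == ["a", None, "c"]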
def test_sequence_double():
    data = [1.5, 1., None, 2.5, None, None]
    arr = pa.array(data)
    assert len(arr) == 6
    assert arr.null_count == 3
    assert arr.type == pa.float64()
    assert arr.to_pylist() == data


def test_double_auto_coerce_from_integer():
    # Done as part of ARROW-2814
    data = [1.5, 1., None, 2.5, None, None]
    arr = pa.array(data)

    data2 = [1.5, 1, None, 2.5, None, None]
    arr2 = pa.array(data2)

    assert arr.equals(arr2)

    data3 = [1, 1.5, None, 2.5, None, None]
    arr3 = pa.array(data3)

    data4 = [1., 1.5, None, 2.5, None, None]
    arr4 = pa.array(data4)

    assert arr3.equals(arr4)


def test_double_integer_coerce_representable_range():
    valid_values = [1.5, 1, 2, None, 1 << 53, -(1 << 53)]
    invalid_values = [1.5, 1, 2, None, (1 << 53) + 1]
    invalid_values2 = [1.5, 1, 2, None, -((1 << 53) + 1)]

    # it works
    pa.array(valid_values)

    # it fails
    with pytest.raises(ValueError):
        pa.array(invalid_values)

    with pytest.raises(ValueError):
        pa.array(invalid_values2)


def test_float32_integer_coerce_representable_range():
    f32 = np.float32
    valid_values = [f32(1.5), 1 << 24, -(1 << 24)]
    invalid_values = [f32(1.5), (1 << 24) + 1]
    invalid_values2 = [f32(1.5), -((1 << 24) + 1)]

    # it works
    pa.array(valid_values, type=pa.float32())

    # it fails
    with pytest.raises(ValueError):
        pa.array(invalid_values, type=pa.float32())

    with pytest.raises(ValueError):
        pa.array(invalid_values2, type=pa.float32())


def test_mixed_sequence_errors():
    with pytest.raises(ValueError, match="tried to convert to boolean"):
        pa.array([True, 'foo'], type=pa.bool_())

    with pytest.raises(ValueError, match="tried to convert to float32"):
        pa.array([1.5, 'foo'], type=pa.float32())

    with pytest.raises(ValueError, match="tried to convert to double"):
        pa.array([1.5, 'foo'])
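The 1 << 53 and 1 << 24 cutoffs in the representable-range tests above are not arbitrary: they are the last integers a float64 (53-bit significand) and a float32 (24-bit significand) can hold exactly, so coercing anything past them would silently lose precision. The boundary can be checked in plain Python:

import numpy as np

# 2**53 is exactly representable in float64; 2**53 + 1 rounds back down.
assert float(2 ** 53) == 2 ** 53
assert float(2 ** 53 + 1) == float(2 ** 53)

# Same story for float32 at 24 bits, matching the float32 test above.
assert np.float32(2 ** 24 + 1) == np.float32(2 ** 24)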
@parametrize_with_sequence_types
@pytest.mark.parametrize("np_scalar,pa_type", [
    (np.float16, pa.float16()),
    (np.float32, pa.float32()),
    (np.float64, pa.float64())
])
@pytest.mark.parametrize("from_pandas", [True, False])
def test_sequence_numpy_double(seq, np_scalar, pa_type, from_pandas):
    data = [np_scalar(1.5), np_scalar(1), None, np_scalar(2.5), None, np.nan]
    arr = pa.array(seq(data), from_pandas=from_pandas)
    assert len(arr) == 6
    if from_pandas:
        assert arr.null_count == 3
    else:
        assert arr.null_count == 2
    if from_pandas:
        # The NaN is skipped in type inference, otherwise it forces a
        # float64 promotion
        assert arr.type == pa_type
    else:
        assert arr.type == pa.float64()

    assert arr.to_pylist()[:4] == data[:4]
    if from_pandas:
        assert arr.to_pylist()[5] is None
    else:
        assert np.isnan(arr.to_pylist()[5])


@pytest.mark.parametrize("from_pandas", [True, False])
@pytest.mark.parametrize("inner_seq", [np.array, list])
def test_ndarray_nested_numpy_double(from_pandas, inner_seq):
    # ARROW-2806
    data = np.array([
        inner_seq([1., 2.]),
        inner_seq([1., 2., 3.]),
        inner_seq([np.nan]),
        None
    ], dtype=object)
    arr = pa.array(data, from_pandas=from_pandas)
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == pa.list_(pa.float64())
    if from_pandas:
        assert arr.to_pylist() == [[1.0, 2.0], [1.0, 2.0, 3.0], [None], None]
    else:
        np.testing.assert_equal(arr.to_pylist(),
                                [[1., 2.], [1., 2., 3.], [np.nan], None])


def test_nested_ndarray_in_object_array():
    # ARROW-4350
    arr = np.empty(2, dtype=object)
    arr[:] = [np.array([1, 2], dtype=np.int64),
              np.array([2, 3], dtype=np.int64)]

    arr2 = np.empty(2, dtype=object)
    arr2[0] = [3, 4]
    arr2[1] = [5, 6]

    expected_type = pa.list_(pa.list_(pa.int64()))
    assert pa.infer_type([arr]) == expected_type

    result = pa.array([arr, arr2])
    expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
                        type=expected_type)

    assert result.equals(expected)

    # test case for len-1 arrays to ensure they are interpreted as
    # sublists and not scalars
    arr = np.empty(2, dtype=object)
    arr[:] = [np.array([1]), np.array([2])]
    result = pa.array([arr, arr])
    assert result.to_pylist() == [[[1], [2]], [[1], [2]]]


@pytest.mark.xfail(reason=("Type inference for multidimensional ndarray "
                           "not yet implemented"),
                   raises=AssertionError)
def test_multidimensional_ndarray_as_nested_list():
    # TODO(wesm): see ARROW-5645
    arr = np.array([[1, 2], [2, 3]], dtype=np.int64)
    arr2 = np.array([[3, 4], [5, 6]], dtype=np.int64)

    expected_type = pa.list_(pa.list_(pa.int64()))
    assert pa.infer_type([arr]) == expected_type

    result = pa.array([arr, arr2])
    expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
                        type=expected_type)

    assert result.equals(expected)


@pytest.mark.parametrize(('data', 'value_type'), [
    ([True, False], pa.bool_()),
    ([None, None], pa.null()),
    ([1, 2, None], pa.int8()),
    ([1, 2., 3., None], pa.float32()),
    ([datetime.date.today(), None], pa.date32()),
    ([None, datetime.date.today()], pa.date64()),
    ([datetime.time(1, 1, 1), None], pa.time32('s')),
    ([None, datetime.time(2, 2, 2)], pa.time64('us')),
    ([datetime.datetime.now(), None], pa.timestamp('us')),
    ([datetime.timedelta(seconds=10)], pa.duration('s')),
    ([b"a", b"b"], pa.binary()),
    ([b"aaa", b"bbb", b"ccc"], pa.binary(3)),
    ([b"a", b"b", b"c"], pa.large_binary()),
    (["a", "b", "c"], pa.string()),
    (["a", "b", "c"], pa.large_string()),
    (
        [{"a": 1, "b": 2}, None, {"a": 5, "b": None}],
        pa.struct([('a', pa.int8()), ('b', pa.int16())])
    )
])
def test_list_array_from_object_ndarray(data, value_type):
    ty = pa.list_(value_type)
    ndarray = np.array(data, dtype=object)
    arr = pa.array([ndarray], type=ty)
    assert arr.type.equals(ty)
    assert arr.to_pylist() == [data]


@pytest.mark.parametrize(('data', 'value_type'), [
    ([[1, 2], [3]], pa.list_(pa.int64())),
    ([[1, 2], [3, 4]], pa.list_(pa.int64(), 2)),
    ([[1], [2, 3]], pa.large_list(pa.int64()))
])
def test_nested_list_array_from_object_ndarray(data, value_type):
    ndarray = np.empty(len(data), dtype=object)
    ndarray[:] = [np.array(item, dtype=object) for item in data]

    ty = pa.list_(value_type)
    arr = pa.array([ndarray], type=ty)
    assert arr.type.equals(ty)
    assert arr.to_pylist() == [data]


def test_array_ignore_nan_from_pandas():
    # See ARROW-4324, this reverts logic that was introduced in
    # ARROW-2240
    with pytest.raises(ValueError):
        pa.array([np.nan, 'str'])

    arr = pa.array([np.nan, 'str'], from_pandas=True)
    expected = pa.array([None, 'str'])
    assert arr.equals(expected)
def test_nested_ndarray_different_dtypes():
    data = [
        np.array([1, 2, 3], dtype='int64'),
        None,
        np.array([4, 5, 6], dtype='uint32')
    ]

    arr = pa.array(data)
    expected = pa.array([[1, 2, 3], None, [4, 5, 6]],
                        type=pa.list_(pa.int64()))
    assert arr.equals(expected)

    t2 = pa.list_(pa.uint32())
    arr2 = pa.array(data, type=t2)
    expected2 = expected.cast(t2)
    assert arr2.equals(expected2)


def test_sequence_unicode():
    data = ['foo', 'bar', None, 'mañana']
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == pa.string()
    assert arr.to_pylist() == data


@pytest.mark.parametrize("ty", [pa.string(), pa.large_string(), pa.string_view()])
def test_sequence_unicode_explicit_type(ty):
    data = ['foo', 'bar', None, 'mañana']
    arr = pa.array(data, type=ty)
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == ty
    assert arr.to_pylist() == data


def check_array_mixed_unicode_bytes(binary_type, string_type):
    values = ['qux', b'foo', bytearray(b'barz')]
    b_values = [b'qux', b'foo', b'barz']
    u_values = ['qux', 'foo', 'barz']

    arr = pa.array(values)
    expected = pa.array(b_values, type=pa.binary())
    assert arr.type == pa.binary()
    assert arr.equals(expected)

    arr = pa.array(values, type=binary_type)
    expected = pa.array(b_values, type=binary_type)
    assert arr.type == binary_type
    assert arr.equals(expected)

    arr = pa.array(values, type=string_type)
    expected = pa.array(u_values, type=string_type)
    assert arr.type == string_type
    assert arr.equals(expected)


def test_array_mixed_unicode_bytes():
    check_array_mixed_unicode_bytes(pa.binary(), pa.string())
    check_array_mixed_unicode_bytes(pa.large_binary(), pa.large_string())
    check_array_mixed_unicode_bytes(pa.binary_view(), pa.string_view())


@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
def test_large_binary_array(ty):
    # Construct a large binary array with more than 4GB of data
    s = b"0123456789abcdefghijklmnopqrstuvwxyz" * 10
    nrepeats = math.ceil((2**32 + 5) / len(s))
    data = [s] * nrepeats
    arr = pa.array(data, type=ty)
    assert isinstance(arr, pa.Array)
    assert arr.type == ty
    assert len(arr) == nrepeats


@pytest.mark.slow
@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
def test_large_binary_value(ty):
    # Construct a large binary array with a single value larger than 4GB
    s = b"0123456789abcdefghijklmnopqrstuvwxyz"
    nrepeats = math.ceil((2**32 + 5) / len(s))
    arr = pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty)
    assert isinstance(arr, pa.Array)
    assert arr.type == ty
    assert len(arr) == 4
    buf = arr[1].as_buffer()
    assert len(buf) == len(s) * nrepeats


@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.binary(), pa.string(), pa.string_view()])
def test_string_too_large(ty):
    # Construct a binary array with a single value larger than 4GB
    s = b"0123456789abcdefghijklmnopqrstuvwxyz"
    nrepeats = math.ceil((2**32 + 5) / len(s))
    with pytest.raises(pa.ArrowCapacityError):
        pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty)


def test_sequence_bytes():
    u1 = b'ma\xc3\xb1ana'

    data = [b'foo',
            memoryview(b'dada'),
            memoryview(b'd-a-t-a')[::2],  # non-contiguous is made contiguous
            u1.decode('utf-8'),  # unicode gets encoded,
            bytearray(b'bar'),
            None]
    for ty in [None, pa.binary(), pa.large_binary(), pa.binary_view()]:
        arr = pa.array(data, type=ty)
        assert len(arr) == 6
        assert arr.null_count == 1
        assert arr.type == (ty or pa.binary())
        assert arr.to_pylist() == [b'foo', b'dada', b'data', u1, b'bar', None]


@pytest.mark.parametrize("ty", [pa.string(), pa.large_string(), pa.string_view()])
def test_sequence_utf8_to_unicode(ty):
    # ARROW-1225
    data = [b'foo', None, b'bar']
    arr = pa.array(data, type=ty)
    assert arr.type == ty
    assert arr[0].as_py() == 'foo'

    # test a non-utf8 unicode string
    val = ('mañana').encode('utf-16-le')
    with pytest.raises(pa.ArrowInvalid):
        pa.array([val], type=ty)


def test_sequence_fixed_size_bytes():
    data = [b'foof', None, bytearray(b'barb'), b'2346']
    arr = pa.array(data, type=pa.binary(4))
    assert len(arr) == 4
    assert arr.null_count == 1
    assert arr.type == pa.binary(4)
    assert arr.to_pylist() == [b'foof', None, b'barb', b'2346']


def test_fixed_size_bytes_does_not_accept_varying_lengths():
    data = [b'foo', None, b'barb', b'2346']
    with pytest.raises(pa.ArrowInvalid):
        pa.array(data, type=pa.binary(4))
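To summarize the byte-handling behavior the tests above establish: heterogeneous bytes-like inputs (bytes, bytearray, memoryview) infer as variable-width binary and are normalized to bytes on the way out, while fixed-width binary must be requested explicitly and rejects values of any other length. A condensed sketch (values are illustrative):

import pyarrow as pa

# Inference: bytes-like values become a variable-width binary column.
arr = pa.array([b"ab", bytearray(b"cdef"), None])
assert arr.type == pa.binary()
assert arr.to_pylist() == [b"ab", b"cdef", None]

# Fixed width is opt-in and strict about length.
arr4 = pa.array([b"abcd", None], type=pa.binary(4))
assert arr4.type == pa.binary(4)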
def test_fixed_size_binary_length_check():
    # ARROW-10193
    data = [b'\x19h\r\x9e\x00\x00\x00\x00\x01\x9b\x9fA']
    assert len(data[0]) == 12
    ty = pa.binary(12)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data


def test_sequence_date():
    data = [datetime.date(2000, 1, 1), None, datetime.date(1970, 1, 1),
            datetime.date(2040, 2, 26)]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.date32()
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.date(2000, 1, 1)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.date(1970, 1, 1)
    assert arr[3].as_py() == datetime.date(2040, 2, 26)


@pytest.mark.parametrize('input',
                         [(pa.date32(), [10957, None]),
                          (pa.date64(), [10957 * 86400000, None])])
def test_sequence_explicit_types(input):
    t, ex_values = input
    data = [datetime.date(2000, 1, 1), None]
    arr = pa.array(data, type=t)
    arr2 = pa.array(ex_values, type=t)

    for x in [arr, arr2]:
        assert len(x) == 2
        assert x.type == t
        assert x.null_count == 1
        assert x[0].as_py() == datetime.date(2000, 1, 1)
        assert x[1].as_py() is None


def test_date32_overflow():
    # Overflow
    data3 = [2**32, None]
    with pytest.raises((OverflowError, pa.ArrowException)):
        pa.array(data3, type=pa.date32())


@pytest.mark.parametrize(('time_type', 'unit', 'int_type'), [
    (pa.time32, 's', 'int32'),
    (pa.time32, 'ms', 'int32'),
    (pa.time64, 'us', 'int64'),
    (pa.time64, 'ns', 'int64'),
])
def test_sequence_time_with_timezone(time_type, unit, int_type):
    def expected_integer_value(t):
        # only use with utc time object because it doesn't adjust with the
        # offset
        units = ['s', 'ms', 'us', 'ns']
        multiplier = 10**(units.index(unit) * 3)
        if t is None:
            return None
        seconds = (
            t.hour * 3600 +
            t.minute * 60 +
            t.second +
            t.microsecond * 10**-6
        )
        return int(seconds * multiplier)

    def expected_time_value(t):
        # only use with utc time object because it doesn't adjust with the
        # time objects tzdata
        if unit == 's':
            return t.replace(microsecond=0)
        elif unit == 'ms':
            return t.replace(microsecond=(t.microsecond // 1000) * 1000)
        else:
            return t

    # only timezone naive times are supported in arrow
    data = [
        datetime.time(8, 23, 34, 123456),
        datetime.time(5, 0, 0, 1000),
        None,
        datetime.time(1, 11, 56, 432539),
        datetime.time(23, 10, 0, 437699)
    ]

    ty = time_type(unit)
    arr = pa.array(data, type=ty)
    assert len(arr) == 5
    assert arr.type == ty
    assert arr.null_count == 1

    # test that the underlying integers are UTC values
    values = arr.cast(int_type)
    expected = list(map(expected_integer_value, data))
    assert values.to_pylist() == expected

    # test that the scalars are datetime.time objects with UTC timezone
    assert arr[0].as_py() == expected_time_value(data[0])
    assert arr[1].as_py() == expected_time_value(data[1])
    assert arr[2].as_py() is None
    assert arr[3].as_py() == expected_time_value(data[3])
    assert arr[4].as_py() == expected_time_value(data[4])
|
995 |
+
def tz(hours, minutes=0):
|
996 |
+
offset = datetime.timedelta(hours=hours, minutes=minutes)
|
997 |
+
return datetime.timezone(offset)
|
998 |
+
|
999 |
+
|


def test_sequence_timestamp():
    data = [
        datetime.datetime(2007, 7, 13, 1, 23, 34, 123456),
        None,
        datetime.datetime(2006, 1, 13, 12, 34, 56, 432539),
        datetime.datetime(2010, 8, 13, 5, 46, 57, 437699)
    ]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.timestamp('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                               23, 34, 123456)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
                                               34, 56, 432539)
    assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
                                               46, 57, 437699)


@pytest.mark.parametrize('timezone', [
    None,
    'UTC',
    'Etc/GMT-1',
    'Europe/Budapest',
])
@pytest.mark.parametrize('unit', [
    's',
    'ms',
    'us',
    'ns'
])
def test_sequence_timestamp_with_timezone(timezone, unit):
    pytz = pytest.importorskip("pytz")

    def expected_integer_value(dt):
        units = ['s', 'ms', 'us', 'ns']
        multiplier = 10**(units.index(unit) * 3)
        if dt is None:
            return None
        else:
            # avoid float precision issues
            ts = decimal.Decimal(str(dt.timestamp()))
            return int(ts * multiplier)

    def expected_datetime_value(dt):
        if dt is None:
            return None

        if unit == 's':
            dt = dt.replace(microsecond=0)
        elif unit == 'ms':
            dt = dt.replace(microsecond=(dt.microsecond // 1000) * 1000)

        # adjust the timezone
        if timezone is None:
            # make datetime timezone unaware
            return dt.replace(tzinfo=None)
        else:
            # convert to the expected timezone
            return dt.astimezone(pytz.timezone(timezone))

    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        None,
        pytz.timezone('US/Eastern').localize(
            datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
        ),
        pytz.timezone('Europe/Moscow').localize(
            datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
        ),
    ]
    utcdata = [
        pytz.utc.localize(data[0]),
        data[1],
        None,
        data[3].astimezone(pytz.utc),
        data[4].astimezone(pytz.utc),
    ]

    ty = pa.timestamp(unit, tz=timezone)
    arr = pa.array(data, type=ty)
    assert len(arr) == 5
    assert arr.type == ty
    assert arr.null_count == 1

    # test that the underlying integers are UTC values
    values = arr.cast('int64')
    expected = list(map(expected_integer_value, utcdata))
    assert values.to_pylist() == expected

    # test that the scalars are datetimes with the correct timezone
    for i in range(len(arr)):
        assert arr[i].as_py() == expected_datetime_value(utcdata[i])
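

# An illustrative sketch of the storage convention checked above: tz-aware
# timestamps are stored as integers relative to UTC, and the type's timezone
# only affects how scalars render back into Python. Hypothetical helper, not
# part of the suite.
def _utc_storage_sketch():
    dt = datetime.datetime(2008, 1, 5, 5, 0, tzinfo=datetime.timezone.utc)
    arr = pa.array([dt], type=pa.timestamp('s', tz='UTC'))
    # 1199509200 seconds since the epoch; the zone changes rendering only
    assert arr.cast('int64').to_pylist() == [1199509200]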


@pytest.mark.parametrize('timezone', [
    None,
    'UTC',
    'Etc/GMT-1',
    'Europe/Budapest',
])
def test_pyarrow_ignore_timezone_environment_variable(monkeypatch, timezone):
    # note that any non-empty value will evaluate to true
    pytest.importorskip("pytz")
    import pytz

    monkeypatch.setenv("PYARROW_IGNORE_TIMEZONE", "1")
    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        pytz.timezone('US/Eastern').localize(
            datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
        ),
        pytz.timezone('Europe/Moscow').localize(
            datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
        ),
    ]

    expected = [dt.replace(tzinfo=None) for dt in data]
    if timezone is not None:
        tzinfo = pytz.timezone(timezone)
        expected = [tzinfo.fromutc(dt) for dt in expected]

    ty = pa.timestamp('us', tz=timezone)
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected
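

# An illustrative sketch of the toggle exercised above: any non-empty
# PYARROW_IGNORE_TIMEZONE value makes the conversion drop the input tzinfo
# and treat wall-clock values as UTC. Hypothetical helper, not part of the
# suite.
def _ignore_timezone_sketch():
    import os
    os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
    try:
        dt = datetime.datetime(2007, 7, 13, 8, tzinfo=datetime.timezone.utc)
        arr = pa.array([dt], type=pa.timestamp('us'))
        # tzinfo is ignored, so the naive wall-clock value round-trips
        assert arr.to_pylist() == [datetime.datetime(2007, 7, 13, 8)]
    finally:
        os.environ.pop("PYARROW_IGNORE_TIMEZONE", None)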


def test_sequence_timestamp_with_timezone_inference():
    pytest.importorskip("pytz")
    import pytz

    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        None,
        pytz.timezone('US/Eastern').localize(
            datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
        ),
        pytz.timezone('Europe/Moscow').localize(
            datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
        ),
    ]
    expected = [
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='UTC'),
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='US/Eastern'),
        pa.timestamp('us', tz='Europe/Moscow')
    ]
    for dt, expected_type in zip(data, expected):
        prepended = [dt] + data
        arr = pa.array(prepended)
        assert arr.type == expected_type


def test_sequence_timestamp_with_zoneinfo_timezone_inference():
    pytest.importorskip("zoneinfo")
    import zoneinfo

    data = [
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        datetime.datetime(2008, 1, 5, 5, 0, 0, 1000,
                          tzinfo=datetime.timezone.utc),
        None,
        datetime.datetime(2006, 1, 13, 12, 34, 56, 432539,
                          tzinfo=zoneinfo.ZoneInfo(key='US/Eastern')),
        datetime.datetime(2010, 8, 13, 5, 0, 0, 437699,
                          tzinfo=zoneinfo.ZoneInfo(key='Europe/Moscow')),
    ]
    expected = [
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='UTC'),
        pa.timestamp('us', tz=None),
        pa.timestamp('us', tz='US/Eastern'),
        pa.timestamp('us', tz='Europe/Moscow')
    ]
    for dt, expected_type in zip(data, expected):
        prepended = [dt] + data
        arr = pa.array(prepended)
        assert arr.type == expected_type


@pytest.mark.pandas
def test_sequence_timestamp_from_mixed_builtin_and_pandas_datetimes():
    pytest.importorskip("pytz")
    import pytz
    import pandas as pd

    data = [
        pd.Timestamp(1184307814123456123, tz=pytz.timezone('US/Eastern'),
                     unit='ns'),
        datetime.datetime(2007, 7, 13, 8, 23, 34, 123456),  # naive
        pytz.utc.localize(
            datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
        ),
        None,
    ]
    utcdata = [
        data[0].astimezone(pytz.utc),
        pytz.utc.localize(data[1]),
        data[2].astimezone(pytz.utc),
        None,
    ]

    arr = pa.array(data)
    assert arr.type == pa.timestamp('us', tz='US/Eastern')

    values = arr.cast('int64')
    expected = [int(dt.timestamp() * 10**6) if dt else None for dt in utcdata]
    assert values.to_pylist() == expected


def test_sequence_timestamp_out_of_bounds_nanosecond():
    # https://issues.apache.org/jira/browse/ARROW-9768
    # datetime outside of range supported for nanosecond resolution
    data = [datetime.datetime(2262, 4, 12)]
    with pytest.raises(ValueError, match="out of bounds"):
        pa.array(data, type=pa.timestamp('ns'))

    # with microsecond resolution it works fine
    arr = pa.array(data, type=pa.timestamp('us'))
    assert arr.to_pylist() == data

    # case where the naive value is within bounds, but its conversion
    # to UTC is not
    tz = datetime.timezone(datetime.timedelta(hours=-1))
    data = [datetime.datetime(2262, 4, 11, 23, tzinfo=tz)]
    with pytest.raises(ValueError, match="out of bounds"):
        pa.array(data, type=pa.timestamp('ns'))

    arr = pa.array(data, type=pa.timestamp('us'))
    assert arr.to_pylist()[0] == datetime.datetime(2262, 4, 12)
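

# A worked bound for the checks above: int64 nanoseconds overflow 2**63 - 1 ns
# past the epoch, which is why 2262-04-12 is out of range for 'ns' while
# microseconds still have centuries of headroom. Hypothetical helper, not part
# of the suite.
def _ns_upper_bound_sketch():
    limit_us = (2**63 - 1) // 1000  # whole microseconds representable in ns
    upper = datetime.datetime(1970, 1, 1) + datetime.timedelta(
        microseconds=limit_us)
    assert upper == datetime.datetime(2262, 4, 11, 23, 47, 16, 854775)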


def test_sequence_numpy_timestamp():
    data = [
        np.datetime64(datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)),
        None,
        np.datetime64(datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)),
        np.datetime64(datetime.datetime(2010, 8, 13, 5, 46, 57, 437699))
    ]
    arr = pa.array(data)
    assert len(arr) == 4
    assert arr.type == pa.timestamp('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                               23, 34, 123456)
    assert arr[1].as_py() is None
    assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
                                               34, 56, 432539)
    assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
                                               46, 57, 437699)


class MyDate(datetime.date):
    pass


class MyDatetime(datetime.datetime):
    pass


class MyTimedelta(datetime.timedelta):
    pass


def test_datetime_subclassing():
    data = [
        MyDate(2007, 7, 13),
    ]
    date_type = pa.date32()
    arr_date = pa.array(data, type=date_type)
    assert len(arr_date) == 1
    assert arr_date.type == date_type
    assert arr_date[0].as_py() == datetime.date(2007, 7, 13)

    data = [
        MyDatetime(2007, 7, 13, 1, 23, 34, 123456),
    ]

    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')

    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                 23, 34, 0)

    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123000)

    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                  23, 34, 123456)

    data = [
        MyTimedelta(123, 456, 1002),
    ]

    s = pa.duration('s')
    ms = pa.duration('ms')
    us = pa.duration('us')

    arr_s = pa.array(data)
    assert len(arr_s) == 1
    assert arr_s.type == us
    assert arr_s[0].as_py() == datetime.timedelta(123, 456, 1002)

    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert arr_s[0].as_py() == datetime.timedelta(123, 456)

    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert arr_ms[0].as_py() == datetime.timedelta(123, 456, 1000)

    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert arr_us[0].as_py() == datetime.timedelta(123, 456, 1002)


@pytest.mark.xfail(not _pandas_api.have_pandas,
                   reason="pandas required for nanosecond conversion")
def test_sequence_timestamp_nanoseconds():
    inputs = [
        [datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)],
        [MyDatetime(2007, 7, 13, 1, 23, 34, 123456)]
    ]

    for data in inputs:
        ns = pa.timestamp('ns')
        arr_ns = pa.array(data, type=ns)
        assert len(arr_ns) == 1
        assert arr_ns.type == ns
        assert arr_ns[0].as_py() == datetime.datetime(2007, 7, 13, 1,
                                                      23, 34, 123456)


@pytest.mark.pandas
@pytest.mark.skipif(sys.platform == "win32" and not util.windows_has_tzdata(),
                    reason="Timezone database is not installed on Windows")
def test_sequence_timestamp_from_int_with_unit():
    # TODO(wesm): This test might be rewritten to assert the actual behavior
    # when pandas is not installed

    data = [1]

    s = pa.timestamp('s')
    ms = pa.timestamp('ms')
    us = pa.timestamp('us')
    ns = pa.timestamp('ns')

    arr_s = pa.array(data, type=s)
    assert len(arr_s) == 1
    assert arr_s.type == s
    assert repr(arr_s[0]) == (
        "<pyarrow.TimestampScalar: '1970-01-01T00:00:01'>"
    )
    assert str(arr_s[0]) == "1970-01-01 00:00:01"

    arr_ms = pa.array(data, type=ms)
    assert len(arr_ms) == 1
    assert arr_ms.type == ms
    assert repr(arr_ms[0].as_py()) == (
        "datetime.datetime(1970, 1, 1, 0, 0, 0, 1000)"
    )
    assert str(arr_ms[0]) == "1970-01-01 00:00:00.001000"

    arr_us = pa.array(data, type=us)
    assert len(arr_us) == 1
    assert arr_us.type == us
    assert repr(arr_us[0].as_py()) == (
        "datetime.datetime(1970, 1, 1, 0, 0, 0, 1)"
    )
    assert str(arr_us[0]) == "1970-01-01 00:00:00.000001"

    arr_ns = pa.array(data, type=ns)
    assert len(arr_ns) == 1
    assert arr_ns.type == ns
    assert repr(arr_ns[0].as_py()) == (
        "Timestamp('1970-01-01 00:00:00.000000001')"
    )
    assert str(arr_ns[0]) == "1970-01-01 00:00:00.000000001"

    expected_exc = TypeError

    class CustomClass():
        pass

    for ty in [ns, pa.date32(), pa.date64()]:
        with pytest.raises(expected_exc):
            pa.array([1, CustomClass()], type=ty)


@pytest.mark.parametrize('np_scalar', [True, False])
def test_sequence_duration(np_scalar):
    td1 = datetime.timedelta(2, 3601, 1)
    td2 = datetime.timedelta(1, 100, 1000)
    if np_scalar:
        data = [np.timedelta64(td1), None, np.timedelta64(td2)]
    else:
        data = [td1, None, td2]

    arr = pa.array(data)
    assert len(arr) == 3
    assert arr.type == pa.duration('us')
    assert arr.null_count == 1
    assert arr[0].as_py() == td1
    assert arr[1].as_py() is None
    assert arr[2].as_py() == td2


@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_with_unit(unit):
    data = [
        datetime.timedelta(3, 22, 1001),
    ]
    expected = {'s': datetime.timedelta(3, 22),
                'ms': datetime.timedelta(3, 22, 1000),
                'us': datetime.timedelta(3, 22, 1001),
                'ns': datetime.timedelta(3, 22, 1001)}

    ty = pa.duration(unit)

    arr_s = pa.array(data, type=ty)
    assert len(arr_s) == 1
    assert arr_s.type == ty
    assert arr_s[0].as_py() == expected[unit]


@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_from_int_with_unit(unit):
    data = [5]

    ty = pa.duration(unit)
    arr = pa.array(data, type=ty)
    assert len(arr) == 1
    assert arr.type == ty
    assert arr[0].value == 5


def test_sequence_duration_nested_lists():
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)

    data = [[td1, None], [td1, td2]]

    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == data


@pytest.mark.parametrize("factory", [
    pa.list_, pa.large_list, pa.list_view, pa.large_list_view])
def test_sequence_duration_nested_lists_with_explicit_type(factory):
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)

    data = [[td1, None], [td1, td2]]

    arr = pa.array(data, type=factory(pa.duration('ms')))
    assert len(arr) == 2
    assert arr.type == factory(pa.duration('ms'))
    assert arr.to_pylist() == data


def test_sequence_duration_nested_lists_numpy():
    td1 = datetime.timedelta(1, 1, 1000)
    td2 = datetime.timedelta(1, 100)

    data = [[np.timedelta64(td1), None],
            [np.timedelta64(td1), np.timedelta64(td2)]]

    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == [[td1, None], [td1, td2]]

    data = [np.array([np.timedelta64(td1), None], dtype='timedelta64[us]'),
            np.array([np.timedelta64(td1), np.timedelta64(td2)])]

    arr = pa.array(data)
    assert len(arr) == 2
    assert arr.type == pa.list_(pa.duration('us'))
    assert arr.to_pylist() == [[td1, None], [td1, td2]]


def test_sequence_nesting_levels():
    data = [1, 2, None]
    arr = pa.array(data)
    assert arr.type == pa.int64()
    assert arr.to_pylist() == data

    data = [[1], [2], None]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data

    data = [[1], [2, 3, 4], [None]]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.int64())
    assert arr.to_pylist() == data

    data = [None, [[None, 1]], [[2, 3, 4], None], [None]]
    arr = pa.array(data)
    assert arr.type == pa.list_(pa.list_(pa.int64()))
    assert arr.to_pylist() == data

    exceptions = (pa.ArrowInvalid, pa.ArrowTypeError)

    # Mixed nesting levels are rejected
    with pytest.raises(exceptions):
        pa.array([1, 2, [1]])

    with pytest.raises(exceptions):
        pa.array([1, 2, []])

    with pytest.raises(exceptions):
        pa.array([[1], [2], [None, [1]]])


def test_sequence_mixed_types_fails():
    data = ['a', 1, 2.0]
    with pytest.raises(pa.ArrowTypeError):
        pa.array(data)


def test_sequence_mixed_types_with_specified_type_fails():
    data = ['-10', '-5', {'a': 1}, '0', '5', '10']

    type = pa.string()
    with pytest.raises(TypeError):
        pa.array(data, type=type)


def test_sequence_decimal():
    data = [decimal.Decimal('1234.183'), decimal.Decimal('8094.234')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=7, scale=3))
        assert arr.to_pylist() == data


def test_sequence_decimal_different_precisions():
    data = [
        decimal.Decimal('1234234983.183'), decimal.Decimal('80943244.234')
    ]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=13, scale=3))
        assert arr.to_pylist() == data


def test_sequence_decimal_no_scale():
    data = [decimal.Decimal('1234234983'), decimal.Decimal('8094324')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=10))
        assert arr.to_pylist() == data


def test_sequence_decimal_negative():
    data = [decimal.Decimal('-1234.234983'), decimal.Decimal('-8.094324')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=10, scale=6))
        assert arr.to_pylist() == data


def test_sequence_decimal_no_whole_part():
    data = [decimal.Decimal('-.4234983'), decimal.Decimal('.0103943')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=7, scale=7))
        assert arr.to_pylist() == data


def test_sequence_decimal_large_integer():
    data = [decimal.Decimal('-394029506937548693.42983'),
            decimal.Decimal('32358695912932.01033')]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=23, scale=5))
        assert arr.to_pylist() == data


def test_sequence_decimal_from_integers():
    data = [0, 1, -39402950693754869342983]
    expected = [decimal.Decimal(x) for x in data]
    for type in [pa.decimal128, pa.decimal256]:
        arr = pa.array(data, type=type(precision=28, scale=5))
        assert arr.to_pylist() == expected


def test_sequence_decimal_too_high_precision():
    # ARROW-6989 python decimal has too high precision
    with pytest.raises(ValueError, match="precision out of range"):
        pa.array([decimal.Decimal('1' * 80)])


def test_sequence_decimal_infer():
    for data, typ in [
        # simple case
        (decimal.Decimal('1.234'), pa.decimal128(4, 3)),
        # trailing zeros
        (decimal.Decimal('12300'), pa.decimal128(5, 0)),
        (decimal.Decimal('12300.0'), pa.decimal128(6, 1)),
        # scientific power notation
        (decimal.Decimal('1.23E+4'), pa.decimal128(5, 0)),
        (decimal.Decimal('123E+2'), pa.decimal128(5, 0)),
        (decimal.Decimal('123E+4'), pa.decimal128(7, 0)),
        # leading zeros
        (decimal.Decimal('0.0123'), pa.decimal128(4, 4)),
        (decimal.Decimal('0.01230'), pa.decimal128(5, 5)),
        (decimal.Decimal('1.230E-2'), pa.decimal128(5, 5)),
    ]:
        assert pa.infer_type([data]) == typ
        arr = pa.array([data])
        assert arr.type == typ
        assert arr.to_pylist()[0] == data


def test_sequence_decimal_infer_mixed():
    # ARROW-12150 - ensure mixed precision gets correctly inferred to
    # common type that can hold all input values
    cases = [
        ([decimal.Decimal('1.234'), decimal.Decimal('3.456')],
         pa.decimal128(4, 3)),
        ([decimal.Decimal('1.234'), decimal.Decimal('456.7')],
         pa.decimal128(6, 3)),
        ([decimal.Decimal('123.4'), decimal.Decimal('4.567')],
         pa.decimal128(6, 3)),
        ([decimal.Decimal('123e2'), decimal.Decimal('4567e3')],
         pa.decimal128(7, 0)),
        ([decimal.Decimal('123e4'), decimal.Decimal('4567e2')],
         pa.decimal128(7, 0)),
        ([decimal.Decimal('0.123'), decimal.Decimal('0.04567')],
         pa.decimal128(5, 5)),
        ([decimal.Decimal('0.001'), decimal.Decimal('1.01E5')],
         pa.decimal128(9, 3)),
    ]
    for data, typ in cases:
        assert pa.infer_type(data) == typ
        arr = pa.array(data)
        assert arr.type == typ
        assert arr.to_pylist() == data
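

# An illustrative sketch of the inference rule the two tests above exercise:
# scale ends up as the negated decimal exponent (never below zero), and
# precision is widened enough to cover every digit shown, including leading
# and trailing zeros. Hypothetical helper; the asserted types mirror cases
# already listed above.
def _decimal_infer_sketch():
    assert pa.infer_type([decimal.Decimal('0.0123')]) == pa.decimal128(4, 4)
    assert pa.infer_type([decimal.Decimal('1.23E+4')]) == pa.decimal128(5, 0)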


def test_sequence_decimal_given_type():
    for data, typs, wrong_typs in [
        # simple case
        (
            decimal.Decimal('1.234'),
            [pa.decimal128(4, 3), pa.decimal128(5, 3), pa.decimal128(5, 4)],
            [pa.decimal128(4, 2), pa.decimal128(4, 4)]
        ),
        # trailing zeros
        (
            decimal.Decimal('12300'),
            [pa.decimal128(5, 0), pa.decimal128(6, 0), pa.decimal128(3, -2)],
            [pa.decimal128(4, 0), pa.decimal128(3, -3)]
        ),
        # scientific power notation
        (
            decimal.Decimal('1.23E+4'),
            [pa.decimal128(5, 0), pa.decimal128(6, 0), pa.decimal128(3, -2)],
            [pa.decimal128(4, 0), pa.decimal128(3, -3)]
        ),
    ]:
        for typ in typs:
            arr = pa.array([data], type=typ)
            assert arr.type == typ
            assert arr.to_pylist()[0] == data
        for typ in wrong_typs:
            with pytest.raises(ValueError):
                pa.array([data], type=typ)


def test_range_types():
    arr1 = pa.array(range(3))
    arr2 = pa.array((0, 1, 2))
    assert arr1.equals(arr2)


def test_empty_range():
    arr = pa.array(range(0))
    assert len(arr) == 0
    assert arr.null_count == 0
    assert arr.type == pa.null()
    assert arr.to_pylist() == []


def test_structarray():
    arr = pa.StructArray.from_arrays([], names=[])
    assert arr.type == pa.struct([])
    assert len(arr) == 0
    assert arr.to_pylist() == []

    ints = pa.array([None, 2, 3], type=pa.int64())
    strs = pa.array(['a', None, 'c'], type=pa.string())
    bools = pa.array([True, False, None], type=pa.bool_())
    arr = pa.StructArray.from_arrays(
        [ints, strs, bools],
        ['ints', 'strs', 'bools'])

    expected = [
        {'ints': None, 'strs': 'a', 'bools': True},
        {'ints': 2, 'strs': None, 'bools': False},
        {'ints': 3, 'strs': 'c', 'bools': None},
    ]

    pylist = arr.to_pylist()
    assert pylist == expected, (pylist, expected)

    # len(names) != len(arrays)
    with pytest.raises(ValueError):
        pa.StructArray.from_arrays([ints], ['ints', 'strs'])


def test_struct_from_dicts():
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    arr = pa.array([], type=ty)
    assert arr.to_pylist() == []

    data = [{'a': 5, 'b': 'foo', 'c': True},
            {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == data

    # With omitted values
    data = [{'a': 5, 'c': True},
            None,
            {},
            {'a': None, 'b': 'bar'}]
    arr = pa.array(data, type=ty)
    expected = [{'a': 5, 'b': None, 'c': True},
                None,
                {'a': None, 'b': None, 'c': None},
                {'a': None, 'b': 'bar', 'c': None}]
    assert arr.to_pylist() == expected


def test_struct_from_dicts_bytes_keys():
    # ARROW-6878
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    arr = pa.array([], type=ty)
    assert arr.to_pylist() == []

    data = [{b'a': 5, b'b': 'foo'},
            {b'a': 6, b'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == [
        {'a': 5, 'b': 'foo', 'c': None},
        {'a': 6, 'b': None, 'c': False},
    ]


def test_struct_from_tuples():
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])

    data = [(5, 'foo', True),
            (6, 'bar', False)]
    expected = [{'a': 5, 'b': 'foo', 'c': True},
                {'a': 6, 'b': 'bar', 'c': False}]
    arr = pa.array(data, type=ty)

    data_as_ndarray = np.empty(len(data), dtype=object)
    data_as_ndarray[:] = data
    arr2 = pa.array(data_as_ndarray, type=ty)
    assert arr.to_pylist() == expected

    assert arr.equals(arr2)

    # With omitted values
    data = [(5, 'foo', None),
            None,
            (6, None, False)]
    expected = [{'a': 5, 'b': 'foo', 'c': None},
                None,
                {'a': 6, 'b': None, 'c': False}]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected

    # Invalid tuple size
    for tup in [(5, 'foo'), (), ('5', 'foo', True, None)]:
        with pytest.raises(ValueError, match="(?i)tuple size"):
            pa.array([tup], type=ty)


def test_struct_from_list_of_pairs():
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('b', pa.string()),
        pa.field('c', pa.bool_())
    ])
    data = [
        [('a', 5), ('b', 'foo'), ('c', True)],
        [('a', 6), ('b', 'bar'), ('c', False)],
        None
    ]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == [
        {'a': 5, 'b': 'foo', 'c': True},
        {'a': 6, 'b': 'bar', 'c': False},
        None
    ]

    # test with duplicated field names
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('a', pa.string()),
        pa.field('b', pa.bool_())
    ])
    data = [
        [('a', 5), ('a', 'foo'), ('b', True)],
        [('a', 6), ('a', 'bar'), ('b', False)],
    ]
    arr = pa.array(data, type=ty)
    with pytest.raises(ValueError):
        # TODO(kszucs): ARROW-9997
        arr.to_pylist()

    # test with empty elements
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('b', pa.string()),
        pa.field('c', pa.bool_())
    ])
    data = [
        [],
        [('a', 5), ('b', 'foo'), ('c', True)],
        [('a', 2), ('b', 'baz')],
        [('a', 1), ('b', 'bar'), ('c', False), ('d', 'julia')],
    ]
    expected = [
        {'a': None, 'b': None, 'c': None},
        {'a': 5, 'b': 'foo', 'c': True},
        {'a': 2, 'b': 'baz', 'c': None},
        {'a': 1, 'b': 'bar', 'c': False},
    ]
    arr = pa.array(data, type=ty)
    assert arr.to_pylist() == expected


def test_struct_from_list_of_pairs_errors():
    ty = pa.struct([
        pa.field('a', pa.int32()),
        pa.field('b', pa.string()),
        pa.field('c', pa.bool_())
    ])

    # test that it raises if the key doesn't match the expected field name
    data = [
        [],
        [('a', 5), ('c', True), ('b', None)],
    ]
    msg = "The expected field name is `b` but `c` was given"
    with pytest.raises(ValueError, match=msg):
        pa.array(data, type=ty)

    # test various errors both at the first position and after because of key
    # type inference
    template = (
        r"Could not convert {} with type {}: was expecting tuple of "
        r"(key, value) pair"
    )
    cases = [
        tuple(),  # empty key-value pair
        tuple('a',),  # missing value
        tuple('unknown-key',),  # not known field name
        'string',  # not a tuple
    ]
    for key_value_pair in cases:
        msg = re.escape(template.format(
            repr(key_value_pair), type(key_value_pair).__name__
        ))

        with pytest.raises(TypeError, match=msg):
            pa.array([
                [key_value_pair],
                [('a', 5), ('b', 'foo'), ('c', None)],
            ], type=ty)

        with pytest.raises(TypeError, match=msg):
            pa.array([
                [('a', 5), ('b', 'foo'), ('c', None)],
                [key_value_pair],
            ], type=ty)


def test_struct_from_mixed_sequence():
    # It is forbidden to mix dicts and tuples when initializing a struct array
    ty = pa.struct([pa.field('a', pa.int32()),
                    pa.field('b', pa.string()),
                    pa.field('c', pa.bool_())])
    data = [(5, 'foo', True),
            {'a': 6, 'b': 'bar', 'c': False}]
    with pytest.raises(TypeError):
        pa.array(data, type=ty)


def test_struct_from_dicts_inference():
    expected_type = pa.struct([pa.field('a', pa.int64()),
                               pa.field('b', pa.string()),
                               pa.field('c', pa.bool_())])
    data = [{'a': 5, 'b': 'foo', 'c': True},
            {'a': 6, 'b': 'bar', 'c': False}]

    arr = pa.array(data)
    check_struct_type(arr.type, expected_type)
    assert arr.to_pylist() == data

    # With omitted values
    data = [{'a': 5, 'c': True},
            None,
            {},
            {'a': None, 'b': 'bar'}]
    expected = [{'a': 5, 'b': None, 'c': True},
                None,
                {'a': None, 'b': None, 'c': None},
                {'a': None, 'b': 'bar', 'c': None}]

    arr = pa.array(data)
    data_as_ndarray = np.empty(len(data), dtype=object)
    data_as_ndarray[:] = data
    arr2 = pa.array(data_as_ndarray)

    check_struct_type(arr.type, expected_type)
    assert arr.to_pylist() == expected
    assert arr.equals(arr2)

    # Nested
    expected_type = pa.struct([
        pa.field('a', pa.struct([pa.field('aa', pa.list_(pa.int64())),
                                 pa.field('ab', pa.bool_())])),
        pa.field('b', pa.string())])
    data = [{'a': {'aa': [5, 6], 'ab': True}, 'b': 'foo'},
            {'a': {'aa': None, 'ab': False}, 'b': None},
            {'a': None, 'b': 'bar'}]
    arr = pa.array(data)

    assert arr.to_pylist() == data

    # Edge cases
    arr = pa.array([{}])
    assert arr.type == pa.struct([])
    assert arr.to_pylist() == [{}]

    # Mixing structs and scalars is rejected
    with pytest.raises((pa.ArrowInvalid, pa.ArrowTypeError)):
        pa.array([1, {'a': 2}])


def test_structarray_from_arrays_coerce():
    # ARROW-1706
    ints = [None, 2, 3]
    strs = ['a', None, 'c']
    bools = [True, False, None]
    ints_nonnull = [1, 2, 3]

    arrays = [ints, strs, bools, ints_nonnull]
    result = pa.StructArray.from_arrays(arrays,
                                        ['ints', 'strs', 'bools',
                                         'int_nonnull'])
    expected = pa.StructArray.from_arrays(
        [pa.array(ints, type='int64'),
         pa.array(strs, type='utf8'),
         pa.array(bools),
         pa.array(ints_nonnull, type='int64')],
        ['ints', 'strs', 'bools', 'int_nonnull'])

    with pytest.raises(ValueError):
        pa.StructArray.from_arrays(arrays)

    assert result.equals(expected)


def test_decimal_array_with_none_and_nan():
    values = [decimal.Decimal('1.234'), None, np.nan, decimal.Decimal('nan')]

    with pytest.raises(TypeError):
        # ARROW-6227: Without from_pandas=True, NaN is considered a float
        array = pa.array(values)

    array = pa.array(values, from_pandas=True)
    assert array.type == pa.decimal128(4, 3)
    assert array.to_pylist() == values[:2] + [None, None]

    array = pa.array(values, type=pa.decimal128(10, 4), from_pandas=True)
    assert array.to_pylist() == [decimal.Decimal('1.2340'), None, None, None]


def test_map_from_dicts():
    data = [[{'key': b'a', 'value': 1}, {'key': b'b', 'value': 2}],
            [{'key': b'c', 'value': 3}],
            [{'key': b'd', 'value': 4}, {'key': b'e', 'value': 5},
             {'key': b'f', 'value': None}],
            [{'key': b'g', 'value': 7}]]
    expected = [[(d['key'], d['value']) for d in entry] for entry in data]

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # With omitted values
    data[1] = None
    expected[1] = None

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # Invalid dictionary
    for entry in [[{'value': 5}], [{}], [{'k': 1, 'v': 2}]]:
        with pytest.raises(ValueError, match="Invalid Map"):
            pa.array([entry], type=pa.map_('i4', 'i4'))

    # Invalid dictionary types
    for entry in [[{'key': '1', 'value': 5}], [{'key': {'value': 2}}]]:
        with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"):
            pa.array([entry], type=pa.map_('i4', 'i4'))


def test_map_from_tuples():
    expected = [[(b'a', 1), (b'b', 2)],
                [(b'c', 3)],
                [(b'd', 4), (b'e', 5), (b'f', None)],
                [(b'g', 7)]]

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # With omitted values
    expected[1] = None

    arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))

    assert arr.to_pylist() == expected

    # Invalid tuple size
    for entry in [[(5,)], [()], [('5', 'foo', True)]]:
        with pytest.raises(ValueError, match="(?i)tuple size"):
            pa.array([entry], type=pa.map_('i4', 'i4'))


def test_dictionary_from_boolean():
    typ = pa.dictionary(pa.int8(), value_type=pa.bool_())
    a = pa.array([False, False, True, False, True], type=typ)
    assert isinstance(a.type, pa.DictionaryType)
    assert a.type.equals(typ)

    expected_indices = pa.array([0, 0, 1, 0, 1], type=pa.int8())
    expected_dictionary = pa.array([False, True], type=pa.bool_())
    assert a.indices.equals(expected_indices)
    assert a.dictionary.equals(expected_dictionary)


@pytest.mark.parametrize('value_type', [
    pa.int8(),
    pa.int16(),
    pa.int32(),
    pa.int64(),
    pa.uint8(),
    pa.uint16(),
    pa.uint32(),
    pa.uint64(),
    pa.float32(),
    pa.float64(),
])
def test_dictionary_from_integers(value_type):
    typ = pa.dictionary(pa.int8(), value_type=value_type)
    a = pa.array([1, 2, 1, 1, 2, 3], type=typ)
    assert isinstance(a.type, pa.DictionaryType)
    assert a.type.equals(typ)

    expected_indices = pa.array([0, 1, 0, 0, 1, 2], type=pa.int8())
    expected_dictionary = pa.array([1, 2, 3], type=value_type)
    assert a.indices.equals(expected_indices)
    assert a.dictionary.equals(expected_dictionary)


@pytest.mark.parametrize('input_index_type', [
    pa.int8(),
    pa.int16(),
    pa.int32(),
    pa.int64()
])
def test_dictionary_index_type(input_index_type):
    # dictionary array is constructed using adaptive index type builder,
    # but the input index type is considered as the minimal width type to use

    typ = pa.dictionary(input_index_type, value_type=pa.int64())
    arr = pa.array(range(10), type=typ)
    assert arr.type.equals(typ)


def test_dictionary_is_always_adaptive():
    # dictionary array is constructed using adaptive index type builder,
    # meaning that the output index type may be wider than the given index
    # type since it depends on the input data
    typ = pa.dictionary(pa.int8(), value_type=pa.int64())

    a = pa.array(range(2**7), type=typ)
    expected = pa.dictionary(pa.int8(), pa.int64())
    assert a.type.equals(expected)

    a = pa.array(range(2**7 + 1), type=typ)
    expected = pa.dictionary(pa.int16(), pa.int64())
    assert a.type.equals(expected)
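

# The arithmetic behind the adaptive behavior above: int8 indices address at
# most 2**7 = 128 dictionary entries, so the 129th distinct value forces the
# builder to widen the indices to int16. Hypothetical helper, not part of the
# suite.
def _adaptive_index_sketch():
    typ = pa.dictionary(pa.int8(), pa.int64())
    assert pa.array(range(128), type=typ).type.index_type == pa.int8()
    assert pa.array(range(129), type=typ).type.index_type == pa.int16()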


def test_dictionary_from_strings():
    for value_type in [pa.binary(), pa.string()]:
        typ = pa.dictionary(pa.int8(), value_type)
        a = pa.array(["", "a", "bb", "a", "bb", "ccc"], type=typ)

        assert isinstance(a.type, pa.DictionaryType)

        expected_indices = pa.array([0, 1, 2, 1, 2, 3], type=pa.int8())
        expected_dictionary = pa.array(["", "a", "bb", "ccc"], type=value_type)
        assert a.indices.equals(expected_indices)
        assert a.dictionary.equals(expected_dictionary)

    # fixed size binary type
    typ = pa.dictionary(pa.int8(), pa.binary(3))
    a = pa.array(["aaa", "aaa", "bbb", "ccc", "bbb"], type=typ)
    assert isinstance(a.type, pa.DictionaryType)

    expected_indices = pa.array([0, 0, 1, 2, 1], type=pa.int8())
    expected_dictionary = pa.array(["aaa", "bbb", "ccc"], type=pa.binary(3))
    assert a.indices.equals(expected_indices)
    assert a.dictionary.equals(expected_dictionary)


@pytest.mark.parametrize(('unit', 'expected'), [
    ('s', datetime.timedelta(seconds=-2147483000)),
    ('ms', datetime.timedelta(milliseconds=-2147483000)),
    ('us', datetime.timedelta(microseconds=-2147483000)),
    ('ns', datetime.timedelta(microseconds=-2147483))
])
def test_duration_array_roundtrip_corner_cases(unit, expected):
    # Corner case discovered by hypothesis: there were implicit conversions
    # to unsigned values resulting in wrong values with wrong signs.
    ty = pa.duration(unit)
    arr = pa.array([-2147483000], type=ty)
    restored = pa.array(arr.to_pylist(), type=ty)
    assert arr.equals(restored)

    expected_list = [expected]
    if unit == 'ns':
        # if pandas is available then a pandas Timedelta is returned
        try:
            import pandas as pd
        except ImportError:
            pass
        else:
            expected_list = [pd.Timedelta(-2147483000, unit='ns')]

    assert restored.to_pylist() == expected_list


@pytest.mark.pandas
def test_roundtrip_nanosecond_resolution_pandas_temporal_objects():
    # corner case discovered by hypothesis: preserving the nanoseconds on
    # conversion from a list of Timedelta and Timestamp objects
    import pandas as pd

    ty = pa.duration('ns')
    arr = pa.array([9223371273709551616], type=ty)
    data = arr.to_pylist()
    assert isinstance(data[0], pd.Timedelta)
    restored = pa.array(data, type=ty)
    assert arr.equals(restored)
    assert restored.to_pylist() == [
        pd.Timedelta(9223371273709551616, unit='ns')
    ]

    ty = pa.timestamp('ns')
    arr = pa.array([9223371273709551616], type=ty)
    data = arr.to_pylist()
    assert isinstance(data[0], pd.Timestamp)
    restored = pa.array(data, type=ty)
    assert arr.equals(restored)
    assert restored.to_pylist() == [
        pd.Timestamp(9223371273709551616, unit='ns')
    ]

    ty = pa.timestamp('ns', tz='US/Eastern')
    value = 1604119893000000000
    arr = pa.array([value], type=ty)
    data = arr.to_pylist()
    assert isinstance(data[0], pd.Timestamp)
    restored = pa.array(data, type=ty)
    assert arr.equals(restored)
    assert restored.to_pylist() == [
        pd.Timestamp(value, unit='ns').tz_localize(
            "UTC").tz_convert('US/Eastern')
    ]


@h.given(past.all_arrays)
def test_array_to_pylist_roundtrip(arr):
    seq = arr.to_pylist()
    restored = pa.array(seq, type=arr.type)
    assert restored.equals(arr)


@pytest.mark.large_memory
def test_auto_chunking_binary_like():
    v1 = b'x' * 100000000
    v2 = b'x' * 147483646

    # single chunk
    one_chunk_data = [v1] * 20 + [b'', None, v2]
    arr = pa.array(one_chunk_data, type=pa.binary())
    assert isinstance(arr, pa.Array)
    assert len(arr) == 23
    assert arr[20].as_py() == b''
    assert arr[21].as_py() is None
    assert arr[22].as_py() == v2

    # two chunks
    two_chunk_data = one_chunk_data + [b'two']
    arr = pa.array(two_chunk_data, type=pa.binary())
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 23
    assert len(arr.chunk(1)) == 1
    assert arr.chunk(0)[20].as_py() == b''
    assert arr.chunk(0)[21].as_py() is None
    assert arr.chunk(0)[22].as_py() == v2
    assert arr.chunk(1).to_pylist() == [b'two']

    # three chunks
    three_chunk_data = one_chunk_data * 2 + [b'three', b'three']
    arr = pa.array(three_chunk_data, type=pa.binary())
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 3
    assert len(arr.chunk(0)) == 23
    assert len(arr.chunk(1)) == 23
    assert len(arr.chunk(2)) == 2
    for i in range(2):
        assert arr.chunk(i)[20].as_py() == b''
        assert arr.chunk(i)[21].as_py() is None
        assert arr.chunk(i)[22].as_py() == v2
    assert arr.chunk(2).to_pylist() == [b'three', b'three']
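

# A worked capacity check for the chunk boundary above: binary arrays carry
# int32 value offsets, so one chunk holds at most 2**31 - 1 bytes of data.
# The 23 values sum to exactly 2**31 - 2 bytes, which is why appending one
# more non-empty value starts a second chunk. Hypothetical helper, not part
# of the suite.
def _binary_capacity_sketch():
    total = 20 * 100000000 + 147483646  # twenty v1 values plus v2
    assert total == 2**31 - 2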


@pytest.mark.large_memory
def test_auto_chunking_list_of_binary():
    # ARROW-6281
    vals = [['x' * 1024]] * ((2 << 20) + 1)
    arr = pa.array(vals)
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 2**21 - 1
    assert len(arr.chunk(1)) == 2
    assert arr.chunk(1).to_pylist() == [['x' * 1024]] * 2


@pytest.mark.large_memory
def test_auto_chunking_list_like():
    item = np.ones((2**28,), dtype='uint8')
    data = [item] * (2**3 - 1)
    arr = pa.array(data, type=pa.list_(pa.uint8()))
    assert isinstance(arr, pa.Array)
    assert len(arr) == 7

    item = np.ones((2**28,), dtype='uint8')
    data = [item] * 2**3
    arr = pa.array(data, type=pa.list_(pa.uint8()))
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 7
    assert len(arr.chunk(1)) == 1
    chunk = arr.chunk(1)
    scalar = chunk[0]
    assert isinstance(scalar, pa.ListScalar)
    expected = pa.array(item, type=pa.uint8())
    assert scalar.values == expected


@pytest.mark.slow
@pytest.mark.large_memory
def test_auto_chunking_map_type():
    # takes ~20 minutes locally
    ty = pa.map_(pa.int8(), pa.int8())
    item = [(1, 1)] * 2**28
    data = [item] * 2**3
    arr = pa.array(data, type=ty)
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr.chunk(0)) == 7
    assert len(arr.chunk(1)) == 1


@pytest.mark.large_memory
@pytest.mark.parametrize(('ty', 'char'), [
    (pa.string(), 'x'),
    (pa.binary(), b'x'),
])
def test_nested_auto_chunking(ty, char):
    v1 = char * 100000000
    v2 = char * 147483646

    struct_type = pa.struct([
        pa.field('bool', pa.bool_()),
        pa.field('integer', pa.int64()),
        pa.field('string-like', ty),
    ])

    data = [{'bool': True, 'integer': 1, 'string-like': v1}] * 20
    data.append({'bool': True, 'integer': 1, 'string-like': v2})
    arr = pa.array(data, type=struct_type)
    assert isinstance(arr, pa.Array)

    data.append({'bool': True, 'integer': 1, 'string-like': char})
    arr = pa.array(data, type=struct_type)
    assert isinstance(arr, pa.ChunkedArray)
    assert arr.num_chunks == 2
    assert len(arr.chunk(0)) == 21
    assert len(arr.chunk(1)) == 1
    assert arr.chunk(1)[0].as_py() == {
        'bool': True,
        'integer': 1,
        'string-like': char
    }


@pytest.mark.large_memory
def test_array_from_pylist_data_overflow():
    # Regression test for ARROW-12983
    # Data buffer overflow - should result in chunked array
    items = [b'a' * 4096] * (2 ** 19)
    arr = pa.array(items, type=pa.string())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**19
    assert len(arr.chunks) > 1

    mask = np.zeros(2**19, bool)
    arr = pa.array(items, mask=mask, type=pa.string())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**19
    assert len(arr.chunks) > 1

    arr = pa.array(items, type=pa.binary())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**19
    assert len(arr.chunks) > 1


@pytest.mark.slow
@pytest.mark.large_memory
def test_array_from_pylist_offset_overflow():
    # Regression test for ARROW-12983
    # Offset buffer overflow - should result in chunked array
    # Note this doesn't apply to primitive arrays
    items = [b'a'] * (2 ** 31)
    arr = pa.array(items, type=pa.string())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**31
    assert len(arr.chunks) > 1

    mask = np.zeros(2**31, bool)
    arr = pa.array(items, mask=mask, type=pa.string())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**31
    assert len(arr.chunks) > 1

    arr = pa.array(items, type=pa.binary())
    assert isinstance(arr, pa.ChunkedArray)
    assert len(arr) == 2**31
    assert len(arr.chunks) > 1
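

# Companion arithmetic for the offset-overflow case above: offsets record the
# cumulative byte count, so 2**31 one-byte values need a final offset of
# 2**31, one past what int32 can hold, even though the data itself is only
# 2 GiB. Hypothetical helper, not part of the suite.
def _offset_capacity_sketch():
    last_offset = (2**31) * 1  # one byte per value
    assert last_offset > np.iinfo('int32').max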
2395 |
+
@parametrize_with_collections_types
|
2396 |
+
@pytest.mark.parametrize(('data', 'scalar_data', 'value_type'), [
|
2397 |
+
([True, False, None], [pa.scalar(True), pa.scalar(False), None], pa.bool_()),
|
2398 |
+
(
|
2399 |
+
[1, 2, None],
|
2400 |
+
[pa.scalar(1), pa.scalar(2), pa.scalar(None, pa.int64())],
|
2401 |
+
pa.int64()
|
2402 |
+
),
|
2403 |
+
([1, None, None], [pa.scalar(1), None, pa.scalar(None, pa.int64())], pa.int64()),
|
2404 |
+
([None, None], [pa.scalar(None), pa.scalar(None)], pa.null()),
|
2405 |
+
([1., 2., None], [pa.scalar(1.), pa.scalar(2.), None], pa.float64()),
|
2406 |
+
(
|
2407 |
+
[None, datetime.date.today()],
|
2408 |
+
[None, pa.scalar(datetime.date.today())],
|
2409 |
+
pa.date32()
|
2410 |
+
),
|
2411 |
+
(
|
2412 |
+
[None, datetime.date.today()],
|
2413 |
+
[None, pa.scalar(datetime.date.today(), pa.date64())],
|
2414 |
+
pa.date64()
|
2415 |
+
),
|
2416 |
+
(
|
2417 |
+
[datetime.time(1, 1, 1), None],
|
2418 |
+
[pa.scalar(datetime.time(1, 1, 1)), None],
|
2419 |
+
pa.time64('us')
|
2420 |
+
),
|
2421 |
+
(
|
2422 |
+
[datetime.timedelta(seconds=10)],
|
2423 |
+
[pa.scalar(datetime.timedelta(seconds=10))],
|
2424 |
+
pa.duration('us')
|
2425 |
+
),
|
2426 |
+
(
|
2427 |
+
[None, datetime.datetime(2014, 1, 1)],
|
2428 |
+
[None, pa.scalar(datetime.datetime(2014, 1, 1))],
|
2429 |
+
pa.timestamp('us')
|
2430 |
+
),
|
2431 |
+
(
|
2432 |
+
[pa.MonthDayNano([1, -1, -10100])],
|
2433 |
+
[pa.scalar(pa.MonthDayNano([1, -1, -10100]))],
|
2434 |
+
pa.month_day_nano_interval()
|
2435 |
+
),
|
2436 |
+
(["a", "b"], [pa.scalar("a"), pa.scalar("b")], pa.string()),
|
2437 |
+
([b"a", b"b"], [pa.scalar(b"a"), pa.scalar(b"b")], pa.binary()),
|
2438 |
+
(
|
2439 |
+
[b"a", b"b"],
|
2440 |
+
[pa.scalar(b"a", pa.binary(1)), pa.scalar(b"b", pa.binary(1))],
|
2441 |
+
pa.binary(1)
|
2442 |
+
),
|
2443 |
+
([[1, 2, 3]], [pa.scalar([1, 2, 3])], pa.list_(pa.int64())),
|
2444 |
+
([["a", "b"]], [pa.scalar(["a", "b"])], pa.list_(pa.string())),
|
2445 |
+
([[1, 2, 3]], [pa.scalar([1, 2, 3], type=pa.list_view(pa.int64()))],
|
2446 |
+
pa.list_view(pa.int64())),
+    ([["a", "b"]], [pa.scalar(["a", "b"], type=pa.list_view(pa.string()))],
+     pa.list_view(pa.string())),
+    (
+        [1, 2, None],
+        [pa.scalar(1, type=pa.int8()), pa.scalar(2, type=pa.int8()), None],
+        pa.int8()
+    ),
+    ([1, None], [pa.scalar(1.0, type=pa.int32()), None], pa.int32()),
+    (
+        ["aaa", "bbb"],
+        [pa.scalar("aaa", type=pa.binary(3)), pa.scalar("bbb", type=pa.binary(3))],
+        pa.binary(3)),
+    ([b"a"], [pa.scalar("a", type=pa.large_binary())], pa.large_binary()),
+    (["a"], [pa.scalar("a", type=pa.large_string())], pa.large_string()),
+    ([b"a"], [pa.scalar("a", type=pa.binary_view())], pa.binary_view()),
+    (["a"], [pa.scalar("a", type=pa.string_view())], pa.string_view()),
+    (
+        ["a"],
+        [pa.scalar("a", type=pa.dictionary(pa.int64(), pa.string()))],
+        pa.dictionary(pa.int64(), pa.string())
+    ),
+    (
+        ["a", "b"],
+        [pa.scalar("a", pa.dictionary(pa.int64(), pa.string())),
+         pa.scalar("b", pa.dictionary(pa.int64(), pa.string()))],
+        pa.dictionary(pa.int64(), pa.string())
+    ),
+    (
+        [1],
+        [pa.scalar(1, type=pa.dictionary(pa.int64(), pa.int32()))],
+        pa.dictionary(pa.int64(), pa.int32())
+    ),
+    (
+        [(1, 2)],
+        [pa.scalar([('a', 1), ('b', 2)], type=pa.struct(
+            [('a', pa.int8()), ('b', pa.int8())]))],
+        pa.struct([('a', pa.int8()), ('b', pa.int8())])
+    ),
+    (
+        [(1, 'bar')],
+        [pa.scalar([('a', 1), ('b', 'bar')], type=pa.struct(
+            [('a', pa.int8()), ('b', pa.string())]))],
+        pa.struct([('a', pa.int8()), ('b', pa.string())])
+    )
+])
+def test_array_accepts_pyarrow_scalar(seq, data, scalar_data, value_type):
+    if type(seq(scalar_data)) == set:
+        pytest.skip("The elements in the set get reordered.")
+    expect = pa.array(data, type=value_type)
+    result = pa.array(seq(scalar_data))
+    assert expect.equals(result)
+
+    result = pa.array(seq(scalar_data), type=value_type)
+    assert expect.equals(result)
+
+
+@parametrize_with_collections_types
+def test_array_accepts_pyarrow_scalar_errors(seq):
+    sequence = seq([pa.scalar(1), pa.scalar("a"), pa.scalar(3.0)])
+    with pytest.raises(pa.ArrowInvalid,
+                       match="cannot mix scalars with different types"):
+        pa.array(sequence)
+
+    sequence = seq([1, pa.scalar("a"), None])
+    with pytest.raises(pa.ArrowInvalid,
+                       match="pyarrow scalars cannot be mixed with other "
+                             "Python scalar values currently"):
+        pa.array(sequence)
+
+    sequence = seq([np.float16("0.1"), pa.scalar("a"), None])
+    with pytest.raises(pa.ArrowInvalid,
+                       match="pyarrow scalars cannot be mixed with other "
+                             "Python scalar values currently"):
+        pa.array(sequence)
+
+    sequence = seq([pa.scalar("a"), np.float16("0.1"), None])
+    with pytest.raises(pa.ArrowInvalid,
+                       match="pyarrow scalars cannot be mixed with other "
+                             "Python scalar values currently"):
+        pa.array(sequence)
+
+    with pytest.raises(pa.ArrowInvalid,
+                       match="Cannot append scalar of type string "
+                             "to builder for type int32"):
+        pa.array([pa.scalar("a")], type=pa.int32())
+
+    with pytest.raises(pa.ArrowInvalid,
+                       match="Cannot append scalar of type int64 "
+                             "to builder for type null"):
+        pa.array([pa.scalar(1)], type=pa.null())
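For reference, a minimal sketch of the behavior these parametrized cases exercise (not part of the diff; it assumes a pyarrow build that supports constructing arrays from pyarrow scalars, which is exactly what the tests above verify):

    import pyarrow as pa

    # Homogeneous pyarrow scalars: the result type is taken from the scalars.
    arr = pa.array([pa.scalar(1, type=pa.int8()),
                    pa.scalar(2, type=pa.int8()), None])
    assert arr.type == pa.int8()
    assert arr.to_pylist() == [1, 2, None]

    # With an explicit type=, the scalars must be appendable to that type;
    # otherwise pa.ArrowInvalid is raised (see the error cases above).
    arr = pa.array([pa.scalar("a")], type=pa.string())
    assert arr.to_pylist() == ["a"]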
venv/lib/python3.10/site-packages/pyarrow/tests/test_cpp_internals.py
ADDED
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import os.path
+from os.path import join as pjoin
+
+from pyarrow._pyarrow_cpp_tests import get_cpp_tests
+
+
+def inject_cpp_tests(ns):
+    """
+    Inject C++ tests as Python functions into namespace `ns` (a dict).
+    """
+    for case in get_cpp_tests():
+        def wrapper(case=case):
+            case()
+        wrapper.__name__ = wrapper.__qualname__ = case.name
+        wrapper.__module__ = ns['__name__']
+        ns[case.name] = wrapper
+
+
+inject_cpp_tests(globals())
+
+
+def test_pyarrow_include():
+    # We need to make sure that pyarrow/include is always
+    # created. Either with PyArrow C++ header files or with
+    # Arrow C++ and PyArrow C++ header files together
+
+    source = os.path.dirname(os.path.abspath(__file__))
+    pyarrow_dir = pjoin(source, '..')
+    pyarrow_include = pjoin(pyarrow_dir, 'include')
+    pyarrow_cpp_include = pjoin(pyarrow_include, 'arrow', 'python')
+
+    assert os.path.exists(pyarrow_include)
+    assert os.path.exists(pyarrow_cpp_include)
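The injection loop above is the standard trick for exposing dynamically generated test cases to pytest: each case becomes a named module-level function that pytest can collect. A self-contained sketch of the same pattern follows (the real cases come from the compiled _pyarrow_cpp_tests extension; `fake_case` below is a hypothetical stand-in):

    def inject_tests(ns, cases):
        for case in cases:
            def wrapper(case=case):  # default arg binds per iteration
                case()
            wrapper.__name__ = wrapper.__qualname__ = case.name
            wrapper.__module__ = ns['__name__']
            ns[case.name] = wrapper

    def fake_case():
        assert 1 + 1 == 2
    fake_case.name = "test_fake_case"  # mimic the .name attribute of a case

    inject_tests(globals(), [fake_case])
    assert "test_fake_case" in globals()

The `case=case` default argument matters: a plain closure would late-bind `case`, and every injected test would end up running the last case in the list.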
venv/lib/python3.10/site-packages/pyarrow/tests/test_csv.py
ADDED
@@ -0,0 +1,2018 @@
1 |
+
# Licensed to the Apache Software Foundation (ASF) under one
|
2 |
+
# or more contributor license agreements. See the NOTICE file
|
3 |
+
# distributed with this work for additional information
|
4 |
+
# regarding copyright ownership. The ASF licenses this file
|
5 |
+
# to you under the Apache License, Version 2.0 (the
|
6 |
+
# "License"); you may not use this file except in compliance
|
7 |
+
# with the License. You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing,
|
12 |
+
# software distributed under the License is distributed on an
|
13 |
+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
14 |
+
# KIND, either express or implied. See the License for the
|
15 |
+
# specific language governing permissions and limitations
|
16 |
+
# under the License.
|
17 |
+
|
18 |
+
import abc
|
19 |
+
import bz2
|
20 |
+
from datetime import date, datetime
|
21 |
+
from decimal import Decimal
|
22 |
+
import gc
|
23 |
+
import gzip
|
24 |
+
import io
|
25 |
+
import itertools
|
26 |
+
import os
|
27 |
+
import select
|
28 |
+
import shutil
|
29 |
+
import signal
|
30 |
+
import string
|
31 |
+
import tempfile
|
32 |
+
import threading
|
33 |
+
import time
|
34 |
+
import unittest
|
35 |
+
import weakref
|
36 |
+
|
37 |
+
import pytest
|
38 |
+
|
39 |
+
import numpy as np
|
40 |
+
|
41 |
+
import pyarrow as pa
|
42 |
+
from pyarrow.csv import (
|
43 |
+
open_csv, read_csv, ReadOptions, ParseOptions, ConvertOptions, ISO8601,
|
44 |
+
write_csv, WriteOptions, CSVWriter, InvalidRow)
|
45 |
+
from pyarrow.tests import util
|
46 |
+
|
47 |
+
|
48 |
+
def generate_col_names():
|
49 |
+
# 'a', 'b'... 'z', then 'aa', 'ab'...
|
50 |
+
letters = string.ascii_lowercase
|
51 |
+
yield from letters
|
52 |
+
for first in letters:
|
53 |
+
for second in letters:
|
54 |
+
yield first + second
|
55 |
+
|
56 |
+
|
57 |
+
def make_random_csv(num_cols=2, num_rows=10, linesep='\r\n', write_names=True):
|
58 |
+
arr = np.random.RandomState(42).randint(0, 1000, size=(num_cols, num_rows))
|
59 |
+
csv = io.StringIO()
|
60 |
+
col_names = list(itertools.islice(generate_col_names(), num_cols))
|
61 |
+
if write_names:
|
62 |
+
csv.write(",".join(col_names))
|
63 |
+
csv.write(linesep)
|
64 |
+
for row in arr.T:
|
65 |
+
csv.write(",".join(map(str, row)))
|
66 |
+
csv.write(linesep)
|
67 |
+
csv = csv.getvalue().encode()
|
68 |
+
columns = [pa.array(a, type=pa.int64()) for a in arr]
|
69 |
+
expected = pa.Table.from_arrays(columns, col_names)
|
70 |
+
return csv, expected
|
71 |
+
|
72 |
+
|
73 |
+
def make_empty_csv(column_names):
|
74 |
+
csv = io.StringIO()
|
75 |
+
csv.write(",".join(column_names))
|
76 |
+
csv.write("\n")
|
77 |
+
return csv.getvalue().encode()
|
78 |
+
|
79 |
+
|
80 |
+
def check_options_class(cls, **attr_values):
|
81 |
+
"""
|
82 |
+
Check setting and getting attributes of an *Options class.
|
83 |
+
"""
|
84 |
+
opts = cls()
|
85 |
+
|
86 |
+
for name, values in attr_values.items():
|
87 |
+
assert getattr(opts, name) == values[0], \
|
88 |
+
"incorrect default value for " + name
|
89 |
+
for v in values:
|
90 |
+
setattr(opts, name, v)
|
91 |
+
assert getattr(opts, name) == v, "failed setting value"
|
92 |
+
|
93 |
+
with pytest.raises(AttributeError):
|
94 |
+
opts.zzz_non_existent = True
|
95 |
+
|
96 |
+
# Check constructor named arguments
|
97 |
+
non_defaults = {name: values[1] for name, values in attr_values.items()}
|
98 |
+
opts = cls(**non_defaults)
|
99 |
+
for name, value in non_defaults.items():
|
100 |
+
assert getattr(opts, name) == value
|
101 |
+
|
102 |
+
|
103 |
+
# The various options classes need to be picklable for dataset
|
104 |
+
def check_options_class_pickling(cls, pickler, **attr_values):
|
105 |
+
opts = cls(**attr_values)
|
106 |
+
new_opts = pickler.loads(pickler.dumps(opts,
|
107 |
+
protocol=pickler.HIGHEST_PROTOCOL))
|
108 |
+
for name, value in attr_values.items():
|
109 |
+
assert getattr(new_opts, name) == value
|
110 |
+
|
111 |
+
|
112 |
+
class InvalidRowHandler:
|
113 |
+
def __init__(self, result):
|
114 |
+
self.result = result
|
115 |
+
self.rows = []
|
116 |
+
|
117 |
+
def __call__(self, row):
|
118 |
+
self.rows.append(row)
|
119 |
+
return self.result
|
120 |
+
|
121 |
+
def __eq__(self, other):
|
122 |
+
return (isinstance(other, InvalidRowHandler) and
|
123 |
+
other.result == self.result)
|
124 |
+
|
125 |
+
def __ne__(self, other):
|
126 |
+
return (not isinstance(other, InvalidRowHandler) or
|
127 |
+
other.result != self.result)
|
128 |
+
|
129 |
+
|
130 |
+
def test_read_options(pickle_module):
|
131 |
+
cls = ReadOptions
|
132 |
+
opts = cls()
|
133 |
+
|
134 |
+
check_options_class(cls, use_threads=[True, False],
|
135 |
+
skip_rows=[0, 3],
|
136 |
+
column_names=[[], ["ab", "cd"]],
|
137 |
+
autogenerate_column_names=[False, True],
|
138 |
+
encoding=['utf8', 'utf16'],
|
139 |
+
skip_rows_after_names=[0, 27])
|
140 |
+
|
141 |
+
check_options_class_pickling(cls, pickler=pickle_module,
|
142 |
+
use_threads=True,
|
143 |
+
skip_rows=3,
|
144 |
+
column_names=["ab", "cd"],
|
145 |
+
autogenerate_column_names=False,
|
146 |
+
encoding='utf16',
|
147 |
+
skip_rows_after_names=27)
|
148 |
+
|
149 |
+
assert opts.block_size > 0
|
150 |
+
opts.block_size = 12345
|
151 |
+
assert opts.block_size == 12345
|
152 |
+
|
153 |
+
opts = cls(block_size=1234)
|
154 |
+
assert opts.block_size == 1234
|
155 |
+
|
156 |
+
opts.validate()
|
157 |
+
|
158 |
+
match = "ReadOptions: block_size must be at least 1: 0"
|
159 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
160 |
+
opts = cls()
|
161 |
+
opts.block_size = 0
|
162 |
+
opts.validate()
|
163 |
+
|
164 |
+
match = "ReadOptions: skip_rows cannot be negative: -1"
|
165 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
166 |
+
opts = cls()
|
167 |
+
opts.skip_rows = -1
|
168 |
+
opts.validate()
|
169 |
+
|
170 |
+
match = "ReadOptions: skip_rows_after_names cannot be negative: -1"
|
171 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
172 |
+
opts = cls()
|
173 |
+
opts.skip_rows_after_names = -1
|
174 |
+
opts.validate()
|
175 |
+
|
176 |
+
match = "ReadOptions: autogenerate_column_names cannot be true when" \
|
177 |
+
" column_names are provided"
|
178 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
179 |
+
opts = cls()
|
180 |
+
opts.autogenerate_column_names = True
|
181 |
+
opts.column_names = ('a', 'b')
|
182 |
+
opts.validate()
|
183 |
+
|
184 |
+
|
185 |
+
def test_parse_options(pickle_module):
|
186 |
+
cls = ParseOptions
|
187 |
+
skip_handler = InvalidRowHandler('skip')
|
188 |
+
|
189 |
+
check_options_class(cls, delimiter=[',', 'x'],
|
190 |
+
escape_char=[False, 'y'],
|
191 |
+
quote_char=['"', 'z', False],
|
192 |
+
double_quote=[True, False],
|
193 |
+
newlines_in_values=[False, True],
|
194 |
+
ignore_empty_lines=[True, False],
|
195 |
+
invalid_row_handler=[None, skip_handler])
|
196 |
+
|
197 |
+
check_options_class_pickling(cls, pickler=pickle_module,
|
198 |
+
delimiter='x',
|
199 |
+
escape_char='y',
|
200 |
+
quote_char=False,
|
201 |
+
double_quote=False,
|
202 |
+
newlines_in_values=True,
|
203 |
+
ignore_empty_lines=False,
|
204 |
+
invalid_row_handler=skip_handler)
|
205 |
+
|
206 |
+
cls().validate()
|
207 |
+
opts = cls()
|
208 |
+
opts.delimiter = "\t"
|
209 |
+
opts.validate()
|
210 |
+
|
211 |
+
match = "ParseOptions: delimiter cannot be \\\\r or \\\\n"
|
212 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
213 |
+
opts = cls()
|
214 |
+
opts.delimiter = "\n"
|
215 |
+
opts.validate()
|
216 |
+
|
217 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
218 |
+
opts = cls()
|
219 |
+
opts.delimiter = "\r"
|
220 |
+
opts.validate()
|
221 |
+
|
222 |
+
match = "ParseOptions: quote_char cannot be \\\\r or \\\\n"
|
223 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
224 |
+
opts = cls()
|
225 |
+
opts.quote_char = "\n"
|
226 |
+
opts.validate()
|
227 |
+
|
228 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
229 |
+
opts = cls()
|
230 |
+
opts.quote_char = "\r"
|
231 |
+
opts.validate()
|
232 |
+
|
233 |
+
match = "ParseOptions: escape_char cannot be \\\\r or \\\\n"
|
234 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
235 |
+
opts = cls()
|
236 |
+
opts.escape_char = "\n"
|
237 |
+
opts.validate()
|
238 |
+
|
239 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
240 |
+
opts = cls()
|
241 |
+
opts.escape_char = "\r"
|
242 |
+
opts.validate()
|
243 |
+
|
244 |
+
|
245 |
+
def test_convert_options(pickle_module):
|
246 |
+
cls = ConvertOptions
|
247 |
+
opts = cls()
|
248 |
+
|
249 |
+
check_options_class(
|
250 |
+
cls, check_utf8=[True, False],
|
251 |
+
strings_can_be_null=[False, True],
|
252 |
+
quoted_strings_can_be_null=[True, False],
|
253 |
+
decimal_point=['.', ','],
|
254 |
+
include_columns=[[], ['def', 'abc']],
|
255 |
+
include_missing_columns=[False, True],
|
256 |
+
auto_dict_encode=[False, True],
|
257 |
+
timestamp_parsers=[[], [ISO8601, '%y-%m']])
|
258 |
+
|
259 |
+
check_options_class_pickling(
|
260 |
+
cls, pickler=pickle_module,
|
261 |
+
check_utf8=False,
|
262 |
+
strings_can_be_null=True,
|
263 |
+
quoted_strings_can_be_null=False,
|
264 |
+
decimal_point=',',
|
265 |
+
include_columns=['def', 'abc'],
|
266 |
+
include_missing_columns=False,
|
267 |
+
auto_dict_encode=True,
|
268 |
+
timestamp_parsers=[ISO8601, '%y-%m'])
|
269 |
+
|
270 |
+
with pytest.raises(ValueError):
|
271 |
+
opts.decimal_point = '..'
|
272 |
+
|
273 |
+
assert opts.auto_dict_max_cardinality > 0
|
274 |
+
opts.auto_dict_max_cardinality = 99999
|
275 |
+
assert opts.auto_dict_max_cardinality == 99999
|
276 |
+
|
277 |
+
assert opts.column_types == {}
|
278 |
+
# Pass column_types as mapping
|
279 |
+
opts.column_types = {'b': pa.int16(), 'c': pa.float32()}
|
280 |
+
assert opts.column_types == {'b': pa.int16(), 'c': pa.float32()}
|
281 |
+
opts.column_types = {'v': 'int16', 'w': 'null'}
|
282 |
+
assert opts.column_types == {'v': pa.int16(), 'w': pa.null()}
|
283 |
+
# Pass column_types as schema
|
284 |
+
schema = pa.schema([('a', pa.int32()), ('b', pa.string())])
|
285 |
+
opts.column_types = schema
|
286 |
+
assert opts.column_types == {'a': pa.int32(), 'b': pa.string()}
|
287 |
+
# Pass column_types as sequence
|
288 |
+
opts.column_types = [('x', pa.binary())]
|
289 |
+
assert opts.column_types == {'x': pa.binary()}
|
290 |
+
|
291 |
+
with pytest.raises(TypeError, match='DataType expected'):
|
292 |
+
opts.column_types = {'a': None}
|
293 |
+
with pytest.raises(TypeError):
|
294 |
+
opts.column_types = 0
|
295 |
+
|
296 |
+
assert isinstance(opts.null_values, list)
|
297 |
+
assert '' in opts.null_values
|
298 |
+
assert 'N/A' in opts.null_values
|
299 |
+
opts.null_values = ['xxx', 'yyy']
|
300 |
+
assert opts.null_values == ['xxx', 'yyy']
|
301 |
+
|
302 |
+
assert isinstance(opts.true_values, list)
|
303 |
+
opts.true_values = ['xxx', 'yyy']
|
304 |
+
assert opts.true_values == ['xxx', 'yyy']
|
305 |
+
|
306 |
+
assert isinstance(opts.false_values, list)
|
307 |
+
opts.false_values = ['xxx', 'yyy']
|
308 |
+
assert opts.false_values == ['xxx', 'yyy']
|
309 |
+
|
310 |
+
assert opts.timestamp_parsers == []
|
311 |
+
opts.timestamp_parsers = [ISO8601]
|
312 |
+
assert opts.timestamp_parsers == [ISO8601]
|
313 |
+
|
314 |
+
opts = cls(column_types={'a': pa.null()},
|
315 |
+
null_values=['N', 'nn'], true_values=['T', 'tt'],
|
316 |
+
false_values=['F', 'ff'], auto_dict_max_cardinality=999,
|
317 |
+
timestamp_parsers=[ISO8601, '%Y-%m-%d'])
|
318 |
+
assert opts.column_types == {'a': pa.null()}
|
319 |
+
assert opts.null_values == ['N', 'nn']
|
320 |
+
assert opts.false_values == ['F', 'ff']
|
321 |
+
assert opts.true_values == ['T', 'tt']
|
322 |
+
assert opts.auto_dict_max_cardinality == 999
|
323 |
+
assert opts.timestamp_parsers == [ISO8601, '%Y-%m-%d']
|
324 |
+
|
325 |
+
|
326 |
+
def test_write_options():
|
327 |
+
cls = WriteOptions
|
328 |
+
opts = cls()
|
329 |
+
|
330 |
+
check_options_class(
|
331 |
+
cls, include_header=[True, False], delimiter=[',', '\t', '|'],
|
332 |
+
quoting_style=['needed', 'none', 'all_valid'])
|
333 |
+
|
334 |
+
assert opts.batch_size > 0
|
335 |
+
opts.batch_size = 12345
|
336 |
+
assert opts.batch_size == 12345
|
337 |
+
|
338 |
+
opts = cls(batch_size=9876)
|
339 |
+
assert opts.batch_size == 9876
|
340 |
+
|
341 |
+
opts.validate()
|
342 |
+
|
343 |
+
match = "WriteOptions: batch_size must be at least 1: 0"
|
344 |
+
with pytest.raises(pa.ArrowInvalid, match=match):
|
345 |
+
opts = cls()
|
346 |
+
opts.batch_size = 0
|
347 |
+
opts.validate()
|
348 |
+
|
349 |
+
|
350 |
+
class BaseTestCSV(abc.ABC):
|
351 |
+
"""Common tests which are shared by streaming and non streaming readers"""
|
352 |
+
|
353 |
+
@abc.abstractmethod
|
354 |
+
def read_bytes(self, b, **kwargs):
|
355 |
+
"""
|
356 |
+
:param b: bytes to be parsed
|
357 |
+
:param kwargs: arguments passed on to open the csv file
|
358 |
+
:return: b parsed as a single RecordBatch
|
359 |
+
"""
|
360 |
+
raise NotImplementedError
|
361 |
+
|
362 |
+
@property
|
363 |
+
@abc.abstractmethod
|
364 |
+
def use_threads(self):
|
365 |
+
"""Whether this test is multi-threaded"""
|
366 |
+
raise NotImplementedError
|
367 |
+
|
368 |
+
@staticmethod
|
369 |
+
def check_names(table, names):
|
370 |
+
assert table.num_columns == len(names)
|
371 |
+
assert table.column_names == names
|
372 |
+
|
373 |
+
def test_header_skip_rows(self):
|
374 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
375 |
+
|
376 |
+
opts = ReadOptions()
|
377 |
+
opts.skip_rows = 1
|
378 |
+
table = self.read_bytes(rows, read_options=opts)
|
379 |
+
self.check_names(table, ["ef", "gh"])
|
380 |
+
assert table.to_pydict() == {
|
381 |
+
"ef": ["ij", "mn"],
|
382 |
+
"gh": ["kl", "op"],
|
383 |
+
}
|
384 |
+
|
385 |
+
opts.skip_rows = 3
|
386 |
+
table = self.read_bytes(rows, read_options=opts)
|
387 |
+
self.check_names(table, ["mn", "op"])
|
388 |
+
assert table.to_pydict() == {
|
389 |
+
"mn": [],
|
390 |
+
"op": [],
|
391 |
+
}
|
392 |
+
|
393 |
+
opts.skip_rows = 4
|
394 |
+
with pytest.raises(pa.ArrowInvalid):
|
395 |
+
# Not enough rows
|
396 |
+
table = self.read_bytes(rows, read_options=opts)
|
397 |
+
|
398 |
+
# Can skip rows with a different number of columns
|
399 |
+
rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
|
400 |
+
opts.skip_rows = 2
|
401 |
+
table = self.read_bytes(rows, read_options=opts)
|
402 |
+
self.check_names(table, ["ij", "kl"])
|
403 |
+
assert table.to_pydict() == {
|
404 |
+
"ij": ["mn"],
|
405 |
+
"kl": ["op"],
|
406 |
+
}
|
407 |
+
|
408 |
+
# Can skip all rows exactly when columns are given
|
409 |
+
opts.skip_rows = 4
|
410 |
+
opts.column_names = ['ij', 'kl']
|
411 |
+
table = self.read_bytes(rows, read_options=opts)
|
412 |
+
self.check_names(table, ["ij", "kl"])
|
413 |
+
assert table.to_pydict() == {
|
414 |
+
"ij": [],
|
415 |
+
"kl": [],
|
416 |
+
}
|
417 |
+
|
418 |
+
def test_skip_rows_after_names(self):
|
419 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
420 |
+
|
421 |
+
opts = ReadOptions()
|
422 |
+
opts.skip_rows_after_names = 1
|
423 |
+
table = self.read_bytes(rows, read_options=opts)
|
424 |
+
self.check_names(table, ["ab", "cd"])
|
425 |
+
assert table.to_pydict() == {
|
426 |
+
"ab": ["ij", "mn"],
|
427 |
+
"cd": ["kl", "op"],
|
428 |
+
}
|
429 |
+
|
430 |
+
# Can skip exact number of rows
|
431 |
+
opts.skip_rows_after_names = 3
|
432 |
+
table = self.read_bytes(rows, read_options=opts)
|
433 |
+
self.check_names(table, ["ab", "cd"])
|
434 |
+
assert table.to_pydict() == {
|
435 |
+
"ab": [],
|
436 |
+
"cd": [],
|
437 |
+
}
|
438 |
+
|
439 |
+
# Can skip beyond all rows
|
440 |
+
opts.skip_rows_after_names = 4
|
441 |
+
table = self.read_bytes(rows, read_options=opts)
|
442 |
+
self.check_names(table, ["ab", "cd"])
|
443 |
+
assert table.to_pydict() == {
|
444 |
+
"ab": [],
|
445 |
+
"cd": [],
|
446 |
+
}
|
447 |
+
|
448 |
+
# Can skip rows with a different number of columns
|
449 |
+
rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
|
450 |
+
opts.skip_rows_after_names = 2
|
451 |
+
opts.column_names = ["f0", "f1"]
|
452 |
+
table = self.read_bytes(rows, read_options=opts)
|
453 |
+
self.check_names(table, ["f0", "f1"])
|
454 |
+
assert table.to_pydict() == {
|
455 |
+
"f0": ["ij", "mn"],
|
456 |
+
"f1": ["kl", "op"],
|
457 |
+
}
|
458 |
+
opts = ReadOptions()
|
459 |
+
|
460 |
+
# Can skip rows with new lines in the value
|
461 |
+
rows = b'ab,cd\n"e\nf","g\n\nh"\n"ij","k\nl"\nmn,op'
|
462 |
+
opts.skip_rows_after_names = 2
|
463 |
+
parse_opts = ParseOptions()
|
464 |
+
parse_opts.newlines_in_values = True
|
465 |
+
table = self.read_bytes(rows, read_options=opts,
|
466 |
+
parse_options=parse_opts)
|
467 |
+
self.check_names(table, ["ab", "cd"])
|
468 |
+
assert table.to_pydict() == {
|
469 |
+
"ab": ["mn"],
|
470 |
+
"cd": ["op"],
|
471 |
+
}
|
472 |
+
|
473 |
+
# Can skip rows when block ends in middle of quoted value
|
474 |
+
opts.skip_rows_after_names = 2
|
475 |
+
opts.block_size = 26
|
476 |
+
table = self.read_bytes(rows, read_options=opts,
|
477 |
+
parse_options=parse_opts)
|
478 |
+
self.check_names(table, ["ab", "cd"])
|
479 |
+
assert table.to_pydict() == {
|
480 |
+
"ab": ["mn"],
|
481 |
+
"cd": ["op"],
|
482 |
+
}
|
483 |
+
opts = ReadOptions()
|
484 |
+
|
485 |
+
# Can skip rows that are beyond the first block without lexer
|
486 |
+
rows, expected = make_random_csv(num_cols=5, num_rows=1000)
|
487 |
+
opts.skip_rows_after_names = 900
|
488 |
+
opts.block_size = len(rows) / 11
|
489 |
+
table = self.read_bytes(rows, read_options=opts)
|
490 |
+
assert table.schema == expected.schema
|
491 |
+
assert table.num_rows == 100
|
492 |
+
table_dict = table.to_pydict()
|
493 |
+
for name, values in expected.to_pydict().items():
|
494 |
+
assert values[900:] == table_dict[name]
|
495 |
+
|
496 |
+
# Can skip rows that are beyond the first block with lexer
|
497 |
+
table = self.read_bytes(rows, read_options=opts,
|
498 |
+
parse_options=parse_opts)
|
499 |
+
assert table.schema == expected.schema
|
500 |
+
assert table.num_rows == 100
|
501 |
+
table_dict = table.to_pydict()
|
502 |
+
for name, values in expected.to_pydict().items():
|
503 |
+
assert values[900:] == table_dict[name]
|
504 |
+
|
505 |
+
# Skip rows and skip rows after names
|
506 |
+
rows, expected = make_random_csv(num_cols=5, num_rows=200,
|
507 |
+
write_names=False)
|
508 |
+
opts = ReadOptions()
|
509 |
+
opts.skip_rows = 37
|
510 |
+
opts.skip_rows_after_names = 41
|
511 |
+
opts.column_names = expected.schema.names
|
512 |
+
table = self.read_bytes(rows, read_options=opts,
|
513 |
+
parse_options=parse_opts)
|
514 |
+
assert table.schema == expected.schema
|
515 |
+
assert (table.num_rows ==
|
516 |
+
expected.num_rows - opts.skip_rows -
|
517 |
+
opts.skip_rows_after_names)
|
518 |
+
table_dict = table.to_pydict()
|
519 |
+
for name, values in expected.to_pydict().items():
|
520 |
+
assert (values[opts.skip_rows + opts.skip_rows_after_names:] ==
|
521 |
+
table_dict[name])
|
522 |
+
|
523 |
+
def test_row_number_offset_in_errors(self):
|
524 |
+
# Row numbers are only correctly counted in serial reads
|
525 |
+
def format_msg(msg_format, row, *args):
|
526 |
+
if self.use_threads:
|
527 |
+
row_info = ""
|
528 |
+
else:
|
529 |
+
row_info = "Row #{}: ".format(row)
|
530 |
+
return msg_format.format(row_info, *args)
|
531 |
+
|
532 |
+
csv, _ = make_random_csv(4, 100, write_names=True)
|
533 |
+
|
534 |
+
read_options = ReadOptions()
|
535 |
+
read_options.block_size = len(csv) / 3
|
536 |
+
convert_options = ConvertOptions()
|
537 |
+
convert_options.column_types = {"a": pa.int32()}
|
538 |
+
|
539 |
+
# Test without skip_rows and column names in the csv
|
540 |
+
csv_bad_columns = csv + b"1,2\r\n"
|
541 |
+
message_columns = format_msg("{}Expected 4 columns, got 2", 102)
|
542 |
+
with pytest.raises(pa.ArrowInvalid, match=message_columns):
|
543 |
+
self.read_bytes(csv_bad_columns,
|
544 |
+
read_options=read_options,
|
545 |
+
convert_options=convert_options)
|
546 |
+
|
547 |
+
csv_bad_type = csv + b"a,b,c,d\r\n"
|
548 |
+
message_value = format_msg(
|
549 |
+
"In CSV column #0: {}"
|
550 |
+
"CSV conversion error to int32: invalid value 'a'",
|
551 |
+
102, csv)
|
552 |
+
with pytest.raises(pa.ArrowInvalid, match=message_value):
|
553 |
+
self.read_bytes(csv_bad_type,
|
554 |
+
read_options=read_options,
|
555 |
+
convert_options=convert_options)
|
556 |
+
|
557 |
+
long_row = (b"this is a long row" * 15) + b",3\r\n"
|
558 |
+
csv_bad_columns_long = csv + long_row
|
559 |
+
message_long = format_msg("{}Expected 4 columns, got 2: {} ...", 102,
|
560 |
+
long_row[0:96].decode("utf-8"))
|
561 |
+
with pytest.raises(pa.ArrowInvalid, match=message_long):
|
562 |
+
self.read_bytes(csv_bad_columns_long,
|
563 |
+
read_options=read_options,
|
564 |
+
convert_options=convert_options)
|
565 |
+
|
566 |
+
# Test skipping rows after the names
|
567 |
+
read_options.skip_rows_after_names = 47
|
568 |
+
|
569 |
+
with pytest.raises(pa.ArrowInvalid, match=message_columns):
|
570 |
+
self.read_bytes(csv_bad_columns,
|
571 |
+
read_options=read_options,
|
572 |
+
convert_options=convert_options)
|
573 |
+
|
574 |
+
with pytest.raises(pa.ArrowInvalid, match=message_value):
|
575 |
+
self.read_bytes(csv_bad_type,
|
576 |
+
read_options=read_options,
|
577 |
+
convert_options=convert_options)
|
578 |
+
|
579 |
+
with pytest.raises(pa.ArrowInvalid, match=message_long):
|
580 |
+
self.read_bytes(csv_bad_columns_long,
|
581 |
+
read_options=read_options,
|
582 |
+
convert_options=convert_options)
|
583 |
+
|
584 |
+
read_options.skip_rows_after_names = 0
|
585 |
+
|
586 |
+
# Test without skip_rows and column names not in the csv
|
587 |
+
csv, _ = make_random_csv(4, 100, write_names=False)
|
588 |
+
read_options.column_names = ["a", "b", "c", "d"]
|
589 |
+
csv_bad_columns = csv + b"1,2\r\n"
|
590 |
+
message_columns = format_msg("{}Expected 4 columns, got 2", 101)
|
591 |
+
with pytest.raises(pa.ArrowInvalid, match=message_columns):
|
592 |
+
self.read_bytes(csv_bad_columns,
|
593 |
+
read_options=read_options,
|
594 |
+
convert_options=convert_options)
|
595 |
+
|
596 |
+
csv_bad_columns_long = csv + long_row
|
597 |
+
message_long = format_msg("{}Expected 4 columns, got 2: {} ...", 101,
|
598 |
+
long_row[0:96].decode("utf-8"))
|
599 |
+
with pytest.raises(pa.ArrowInvalid, match=message_long):
|
600 |
+
self.read_bytes(csv_bad_columns_long,
|
601 |
+
read_options=read_options,
|
602 |
+
convert_options=convert_options)
|
603 |
+
|
604 |
+
csv_bad_type = csv + b"a,b,c,d\r\n"
|
605 |
+
message_value = format_msg(
|
606 |
+
"In CSV column #0: {}"
|
607 |
+
"CSV conversion error to int32: invalid value 'a'",
|
608 |
+
101)
|
609 |
+
message_value = message_value.format(len(csv))
|
610 |
+
with pytest.raises(pa.ArrowInvalid, match=message_value):
|
611 |
+
self.read_bytes(csv_bad_type,
|
612 |
+
read_options=read_options,
|
613 |
+
convert_options=convert_options)
|
614 |
+
|
615 |
+
# Test with skip_rows and column names not in the csv
|
616 |
+
read_options.skip_rows = 23
|
617 |
+
with pytest.raises(pa.ArrowInvalid, match=message_columns):
|
618 |
+
self.read_bytes(csv_bad_columns,
|
619 |
+
read_options=read_options,
|
620 |
+
convert_options=convert_options)
|
621 |
+
|
622 |
+
with pytest.raises(pa.ArrowInvalid, match=message_value):
|
623 |
+
self.read_bytes(csv_bad_type,
|
624 |
+
read_options=read_options,
|
625 |
+
convert_options=convert_options)
|
626 |
+
|
627 |
+
def test_invalid_row_handler(self, pickle_module):
|
628 |
+
rows = b"a,b\nc\nd,e\nf,g,h\ni,j\n"
|
629 |
+
parse_opts = ParseOptions()
|
630 |
+
with pytest.raises(
|
631 |
+
ValueError,
|
632 |
+
match="Expected 2 columns, got 1: c"):
|
633 |
+
self.read_bytes(rows, parse_options=parse_opts)
|
634 |
+
|
635 |
+
# Skip requested
|
636 |
+
parse_opts.invalid_row_handler = InvalidRowHandler('skip')
|
637 |
+
table = self.read_bytes(rows, parse_options=parse_opts)
|
638 |
+
assert table.to_pydict() == {
|
639 |
+
'a': ["d", "i"],
|
640 |
+
'b': ["e", "j"],
|
641 |
+
}
|
642 |
+
|
643 |
+
def row_num(x):
|
644 |
+
return None if self.use_threads else x
|
645 |
+
expected_rows = [
|
646 |
+
InvalidRow(2, 1, row_num(2), "c"),
|
647 |
+
InvalidRow(2, 3, row_num(4), "f,g,h"),
|
648 |
+
]
|
649 |
+
assert parse_opts.invalid_row_handler.rows == expected_rows
|
650 |
+
|
651 |
+
# Error requested
|
652 |
+
parse_opts.invalid_row_handler = InvalidRowHandler('error')
|
653 |
+
with pytest.raises(
|
654 |
+
ValueError,
|
655 |
+
match="Expected 2 columns, got 1: c"):
|
656 |
+
self.read_bytes(rows, parse_options=parse_opts)
|
657 |
+
expected_rows = [InvalidRow(2, 1, row_num(2), "c")]
|
658 |
+
assert parse_opts.invalid_row_handler.rows == expected_rows
|
659 |
+
|
660 |
+
# Test ser/de
|
661 |
+
parse_opts.invalid_row_handler = InvalidRowHandler('skip')
|
662 |
+
parse_opts = pickle_module.loads(pickle_module.dumps(parse_opts))
|
663 |
+
|
664 |
+
table = self.read_bytes(rows, parse_options=parse_opts)
|
665 |
+
assert table.to_pydict() == {
|
666 |
+
'a': ["d", "i"],
|
667 |
+
'b': ["e", "j"],
|
668 |
+
}
|
669 |
+
|
670 |
+
def test_chunker_out_of_sync(self):
|
671 |
+
# GH-39892: if there are newlines in values, the parser may become
|
672 |
+
# out of sync with the chunker. In this case, we try to produce an
|
673 |
+
# informative error message.
|
674 |
+
rows = b"""a,b,c\nd,e,"f\n"\ng,h,i\n"""
|
675 |
+
expected = {
|
676 |
+
'a': ["d", "g"],
|
677 |
+
'b': ["e", "h"],
|
678 |
+
'c': ["f\n", "i"],
|
679 |
+
}
|
680 |
+
for block_size in range(8, 15):
|
681 |
+
# Sanity check: parsing works with newlines_in_values=True
|
682 |
+
d = self.read_bytes(
|
683 |
+
rows, parse_options=ParseOptions(newlines_in_values=True),
|
684 |
+
read_options=ReadOptions(block_size=block_size)).to_pydict()
|
685 |
+
assert d == expected
|
686 |
+
# With these block sizes, a block would end on the physical newline
|
687 |
+
# inside the quoted cell value, leading to a mismatch between
|
688 |
+
# CSV chunker and parser.
|
689 |
+
for block_size in range(8, 11):
|
690 |
+
with pytest.raises(ValueError,
|
691 |
+
match="cell values spanning multiple lines"):
|
692 |
+
self.read_bytes(
|
693 |
+
rows, read_options=ReadOptions(block_size=block_size))
|
694 |
+
|
695 |
+
|
696 |
+
class BaseCSVTableRead(BaseTestCSV):
|
697 |
+
|
698 |
+
def read_csv(self, csv, *args, validate_full=True, **kwargs):
|
699 |
+
"""
|
700 |
+
Reads the CSV file into memory using pyarrow's read_csv
|
701 |
+
csv The CSV bytes
|
702 |
+
args Positional arguments to be forwarded to pyarrow's read_csv
|
703 |
+
validate_full Whether or not to fully validate the resulting table
|
704 |
+
kwargs Keyword arguments to be forwarded to pyarrow's read_csv
|
705 |
+
"""
|
706 |
+
assert isinstance(self.use_threads, bool) # sanity check
|
707 |
+
read_options = kwargs.setdefault('read_options', ReadOptions())
|
708 |
+
read_options.use_threads = self.use_threads
|
709 |
+
table = read_csv(csv, *args, **kwargs)
|
710 |
+
table.validate(full=validate_full)
|
711 |
+
return table
|
712 |
+
|
713 |
+
def read_bytes(self, b, **kwargs):
|
714 |
+
return self.read_csv(pa.py_buffer(b), **kwargs)
|
715 |
+
|
716 |
+
def test_file_object(self):
|
717 |
+
data = b"a,b\n1,2\n"
|
718 |
+
expected_data = {'a': [1], 'b': [2]}
|
719 |
+
bio = io.BytesIO(data)
|
720 |
+
table = self.read_csv(bio)
|
721 |
+
assert table.to_pydict() == expected_data
|
722 |
+
# Text files not allowed
|
723 |
+
sio = io.StringIO(data.decode())
|
724 |
+
with pytest.raises(TypeError):
|
725 |
+
self.read_csv(sio)
|
726 |
+
|
727 |
+
def test_header(self):
|
728 |
+
rows = b"abc,def,gh\n"
|
729 |
+
table = self.read_bytes(rows)
|
730 |
+
assert isinstance(table, pa.Table)
|
731 |
+
self.check_names(table, ["abc", "def", "gh"])
|
732 |
+
assert table.num_rows == 0
|
733 |
+
|
734 |
+
def test_bom(self):
|
735 |
+
rows = b"\xef\xbb\xbfa,b\n1,2\n"
|
736 |
+
expected_data = {'a': [1], 'b': [2]}
|
737 |
+
table = self.read_bytes(rows)
|
738 |
+
assert table.to_pydict() == expected_data
|
739 |
+
|
740 |
+
def test_one_chunk(self):
|
741 |
+
# ARROW-7661: lack of newline at end of file should not produce
|
742 |
+
# an additional chunk.
|
743 |
+
rows = [b"a,b", b"1,2", b"3,4", b"56,78"]
|
744 |
+
for line_ending in [b'\n', b'\r', b'\r\n']:
|
745 |
+
for file_ending in [b'', line_ending]:
|
746 |
+
data = line_ending.join(rows) + file_ending
|
747 |
+
table = self.read_bytes(data)
|
748 |
+
assert len(table.to_batches()) == 1
|
749 |
+
assert table.to_pydict() == {
|
750 |
+
"a": [1, 3, 56],
|
751 |
+
"b": [2, 4, 78],
|
752 |
+
}
|
753 |
+
|
754 |
+
def test_header_column_names(self):
|
755 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
756 |
+
|
757 |
+
opts = ReadOptions()
|
758 |
+
opts.column_names = ["x", "y"]
|
759 |
+
table = self.read_bytes(rows, read_options=opts)
|
760 |
+
self.check_names(table, ["x", "y"])
|
761 |
+
assert table.to_pydict() == {
|
762 |
+
"x": ["ab", "ef", "ij", "mn"],
|
763 |
+
"y": ["cd", "gh", "kl", "op"],
|
764 |
+
}
|
765 |
+
|
766 |
+
opts.skip_rows = 3
|
767 |
+
table = self.read_bytes(rows, read_options=opts)
|
768 |
+
self.check_names(table, ["x", "y"])
|
769 |
+
assert table.to_pydict() == {
|
770 |
+
"x": ["mn"],
|
771 |
+
"y": ["op"],
|
772 |
+
}
|
773 |
+
|
774 |
+
opts.skip_rows = 4
|
775 |
+
table = self.read_bytes(rows, read_options=opts)
|
776 |
+
self.check_names(table, ["x", "y"])
|
777 |
+
assert table.to_pydict() == {
|
778 |
+
"x": [],
|
779 |
+
"y": [],
|
780 |
+
}
|
781 |
+
|
782 |
+
opts.skip_rows = 5
|
783 |
+
with pytest.raises(pa.ArrowInvalid):
|
784 |
+
# Not enough rows
|
785 |
+
table = self.read_bytes(rows, read_options=opts)
|
786 |
+
|
787 |
+
# Unexpected number of columns
|
788 |
+
opts.skip_rows = 0
|
789 |
+
opts.column_names = ["x", "y", "z"]
|
790 |
+
with pytest.raises(pa.ArrowInvalid,
|
791 |
+
match="Expected 3 columns, got 2"):
|
792 |
+
table = self.read_bytes(rows, read_options=opts)
|
793 |
+
|
794 |
+
# Can skip rows with a different number of columns
|
795 |
+
rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
|
796 |
+
opts.skip_rows = 2
|
797 |
+
opts.column_names = ["x", "y"]
|
798 |
+
table = self.read_bytes(rows, read_options=opts)
|
799 |
+
self.check_names(table, ["x", "y"])
|
800 |
+
assert table.to_pydict() == {
|
801 |
+
"x": ["ij", "mn"],
|
802 |
+
"y": ["kl", "op"],
|
803 |
+
}
|
804 |
+
|
805 |
+
def test_header_autogenerate_column_names(self):
|
806 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
807 |
+
|
808 |
+
opts = ReadOptions()
|
809 |
+
opts.autogenerate_column_names = True
|
810 |
+
table = self.read_bytes(rows, read_options=opts)
|
811 |
+
self.check_names(table, ["f0", "f1"])
|
812 |
+
assert table.to_pydict() == {
|
813 |
+
"f0": ["ab", "ef", "ij", "mn"],
|
814 |
+
"f1": ["cd", "gh", "kl", "op"],
|
815 |
+
}
|
816 |
+
|
817 |
+
opts.skip_rows = 3
|
818 |
+
table = self.read_bytes(rows, read_options=opts)
|
819 |
+
self.check_names(table, ["f0", "f1"])
|
820 |
+
assert table.to_pydict() == {
|
821 |
+
"f0": ["mn"],
|
822 |
+
"f1": ["op"],
|
823 |
+
}
|
824 |
+
|
825 |
+
# Not enough rows, impossible to infer number of columns
|
826 |
+
opts.skip_rows = 4
|
827 |
+
with pytest.raises(pa.ArrowInvalid):
|
828 |
+
table = self.read_bytes(rows, read_options=opts)
|
829 |
+
|
830 |
+
def test_include_columns(self):
|
831 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
832 |
+
|
833 |
+
convert_options = ConvertOptions()
|
834 |
+
convert_options.include_columns = ['ab']
|
835 |
+
table = self.read_bytes(rows, convert_options=convert_options)
|
836 |
+
self.check_names(table, ["ab"])
|
837 |
+
assert table.to_pydict() == {
|
838 |
+
"ab": ["ef", "ij", "mn"],
|
839 |
+
}
|
840 |
+
|
841 |
+
# Order of include_columns is respected, regardless of CSV order
|
842 |
+
convert_options.include_columns = ['cd', 'ab']
|
843 |
+
table = self.read_bytes(rows, convert_options=convert_options)
|
844 |
+
schema = pa.schema([('cd', pa.string()),
|
845 |
+
('ab', pa.string())])
|
846 |
+
assert table.schema == schema
|
847 |
+
assert table.to_pydict() == {
|
848 |
+
"cd": ["gh", "kl", "op"],
|
849 |
+
"ab": ["ef", "ij", "mn"],
|
850 |
+
}
|
851 |
+
|
852 |
+
# Include a column not in the CSV file => raises by default
|
853 |
+
convert_options.include_columns = ['xx', 'ab', 'yy']
|
854 |
+
with pytest.raises(KeyError,
|
855 |
+
match="Column 'xx' in include_columns "
|
856 |
+
"does not exist in CSV file"):
|
857 |
+
self.read_bytes(rows, convert_options=convert_options)
|
858 |
+
|
859 |
+
def test_include_missing_columns(self):
|
860 |
+
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
|
861 |
+
|
862 |
+
read_options = ReadOptions()
|
863 |
+
convert_options = ConvertOptions()
|
864 |
+
convert_options.include_columns = ['xx', 'ab', 'yy']
|
865 |
+
convert_options.include_missing_columns = True
|
866 |
+
table = self.read_bytes(rows, read_options=read_options,
|
867 |
+
convert_options=convert_options)
|
868 |
+
schema = pa.schema([('xx', pa.null()),
|
869 |
+
('ab', pa.string()),
|
870 |
+
('yy', pa.null())])
|
871 |
+
assert table.schema == schema
|
872 |
+
assert table.to_pydict() == {
|
873 |
+
"xx": [None, None, None],
|
874 |
+
"ab": ["ef", "ij", "mn"],
|
875 |
+
"yy": [None, None, None],
|
876 |
+
}
|
877 |
+
|
878 |
+
# Combining with `column_names`
|
879 |
+
read_options.column_names = ["xx", "yy"]
|
880 |
+
convert_options.include_columns = ["yy", "cd"]
|
881 |
+
table = self.read_bytes(rows, read_options=read_options,
|
882 |
+
convert_options=convert_options)
|
883 |
+
schema = pa.schema([('yy', pa.string()),
|
884 |
+
('cd', pa.null())])
|
885 |
+
assert table.schema == schema
|
886 |
+
assert table.to_pydict() == {
|
887 |
+
"yy": ["cd", "gh", "kl", "op"],
|
888 |
+
"cd": [None, None, None, None],
|
889 |
+
}
|
890 |
+
|
891 |
+
# And with `column_types` as well
|
892 |
+
convert_options.column_types = {"yy": pa.binary(),
|
893 |
+
"cd": pa.int32()}
|
894 |
+
table = self.read_bytes(rows, read_options=read_options,
|
895 |
+
convert_options=convert_options)
|
896 |
+
schema = pa.schema([('yy', pa.binary()),
|
897 |
+
('cd', pa.int32())])
|
898 |
+
assert table.schema == schema
|
899 |
+
assert table.to_pydict() == {
|
900 |
+
"yy": [b"cd", b"gh", b"kl", b"op"],
|
901 |
+
"cd": [None, None, None, None],
|
902 |
+
}
|
903 |
+
|
904 |
+
def test_simple_ints(self):
|
905 |
+
# Infer integer columns
|
906 |
+
rows = b"a,b,c\n1,2,3\n4,5,6\n"
|
907 |
+
table = self.read_bytes(rows)
|
908 |
+
schema = pa.schema([('a', pa.int64()),
|
909 |
+
('b', pa.int64()),
|
910 |
+
('c', pa.int64())])
|
911 |
+
assert table.schema == schema
|
912 |
+
assert table.to_pydict() == {
|
913 |
+
'a': [1, 4],
|
914 |
+
'b': [2, 5],
|
915 |
+
'c': [3, 6],
|
916 |
+
}
|
917 |
+
|
918 |
+
def test_simple_varied(self):
|
919 |
+
# Infer various kinds of data
|
920 |
+
rows = b"a,b,c,d\n1,2,3,0\n4.0,-5,foo,True\n"
|
921 |
+
table = self.read_bytes(rows)
|
922 |
+
schema = pa.schema([('a', pa.float64()),
|
923 |
+
('b', pa.int64()),
|
924 |
+
('c', pa.string()),
|
925 |
+
('d', pa.bool_())])
|
926 |
+
assert table.schema == schema
|
927 |
+
assert table.to_pydict() == {
|
928 |
+
'a': [1.0, 4.0],
|
929 |
+
'b': [2, -5],
|
930 |
+
'c': ["3", "foo"],
|
931 |
+
'd': [False, True],
|
932 |
+
}
|
933 |
+
|
934 |
+
def test_simple_nulls(self):
|
935 |
+
# Infer various kinds of data, with nulls
|
936 |
+
rows = (b"a,b,c,d,e,f\n"
|
937 |
+
b"1,2,,,3,N/A\n"
|
938 |
+
b"nan,-5,foo,,nan,TRUE\n"
|
939 |
+
b"4.5,#N/A,nan,,\xff,false\n")
|
940 |
+
table = self.read_bytes(rows)
|
941 |
+
schema = pa.schema([('a', pa.float64()),
|
942 |
+
('b', pa.int64()),
|
943 |
+
('c', pa.string()),
|
944 |
+
('d', pa.null()),
|
945 |
+
('e', pa.binary()),
|
946 |
+
('f', pa.bool_())])
|
947 |
+
assert table.schema == schema
|
948 |
+
assert table.to_pydict() == {
|
949 |
+
'a': [1.0, None, 4.5],
|
950 |
+
'b': [2, -5, None],
|
951 |
+
'c': ["", "foo", "nan"],
|
952 |
+
'd': [None, None, None],
|
953 |
+
'e': [b"3", b"nan", b"\xff"],
|
954 |
+
'f': [None, True, False],
|
955 |
+
}
|
956 |
+
|
957 |
+
def test_decimal_point(self):
|
958 |
+
# Infer floats with a custom decimal point
|
959 |
+
parse_options = ParseOptions(delimiter=';')
|
960 |
+
rows = b"a;b\n1.25;2,5\nNA;-3\n-4;NA"
|
961 |
+
|
962 |
+
table = self.read_bytes(rows, parse_options=parse_options)
|
963 |
+
schema = pa.schema([('a', pa.float64()),
|
964 |
+
('b', pa.string())])
|
965 |
+
assert table.schema == schema
|
966 |
+
assert table.to_pydict() == {
|
967 |
+
'a': [1.25, None, -4.0],
|
968 |
+
'b': ["2,5", "-3", "NA"],
|
969 |
+
}
|
970 |
+
|
971 |
+
convert_options = ConvertOptions(decimal_point=',')
|
972 |
+
table = self.read_bytes(rows, parse_options=parse_options,
|
973 |
+
convert_options=convert_options)
|
974 |
+
schema = pa.schema([('a', pa.string()),
|
975 |
+
('b', pa.float64())])
|
976 |
+
assert table.schema == schema
|
977 |
+
assert table.to_pydict() == {
|
978 |
+
'a': ["1.25", "NA", "-4"],
|
979 |
+
'b': [2.5, -3.0, None],
|
980 |
+
}
|
981 |
+
|
982 |
+
def test_simple_timestamps(self):
|
983 |
+
# Infer a timestamp column
|
984 |
+
rows = (b"a,b,c\n"
|
985 |
+
b"1970,1970-01-01 00:00:00,1970-01-01 00:00:00.123\n"
|
986 |
+
b"1989,1989-07-14 01:00:00,1989-07-14 01:00:00.123456\n")
|
987 |
+
table = self.read_bytes(rows)
|
988 |
+
schema = pa.schema([('a', pa.int64()),
|
989 |
+
('b', pa.timestamp('s')),
|
990 |
+
('c', pa.timestamp('ns'))])
|
991 |
+
assert table.schema == schema
|
992 |
+
assert table.to_pydict() == {
|
993 |
+
'a': [1970, 1989],
|
994 |
+
'b': [datetime(1970, 1, 1), datetime(1989, 7, 14, 1)],
|
995 |
+
'c': [datetime(1970, 1, 1, 0, 0, 0, 123000),
|
996 |
+
datetime(1989, 7, 14, 1, 0, 0, 123456)],
|
997 |
+
}
|
998 |
+
|
999 |
+
def test_timestamp_parsers(self):
|
1000 |
+
# Infer timestamps with custom parsers
|
1001 |
+
rows = b"a,b\n1970/01/01,1980-01-01 00\n1970/01/02,1980-01-02 00\n"
|
1002 |
+
opts = ConvertOptions()
|
1003 |
+
|
1004 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1005 |
+
schema = pa.schema([('a', pa.string()),
|
1006 |
+
('b', pa.timestamp('s'))])
|
1007 |
+
assert table.schema == schema
|
1008 |
+
assert table.to_pydict() == {
|
1009 |
+
'a': ['1970/01/01', '1970/01/02'],
|
1010 |
+
'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
|
1011 |
+
}
|
1012 |
+
|
1013 |
+
opts.timestamp_parsers = ['%Y/%m/%d']
|
1014 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1015 |
+
schema = pa.schema([('a', pa.timestamp('s')),
|
1016 |
+
('b', pa.string())])
|
1017 |
+
assert table.schema == schema
|
1018 |
+
assert table.to_pydict() == {
|
1019 |
+
'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
|
1020 |
+
'b': ['1980-01-01 00', '1980-01-02 00'],
|
1021 |
+
}
|
1022 |
+
|
1023 |
+
opts.timestamp_parsers = ['%Y/%m/%d', ISO8601]
|
1024 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1025 |
+
schema = pa.schema([('a', pa.timestamp('s')),
|
1026 |
+
('b', pa.timestamp('s'))])
|
1027 |
+
assert table.schema == schema
|
1028 |
+
assert table.to_pydict() == {
|
1029 |
+
'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
|
1030 |
+
'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
|
1031 |
+
}
|
1032 |
+
|
1033 |
+
def test_dates(self):
|
1034 |
+
# Dates are inferred as date32 by default
|
1035 |
+
rows = b"a,b\n1970-01-01,1970-01-02\n1971-01-01,1971-01-02\n"
|
1036 |
+
table = self.read_bytes(rows)
|
1037 |
+
schema = pa.schema([('a', pa.date32()),
|
1038 |
+
('b', pa.date32())])
|
1039 |
+
assert table.schema == schema
|
1040 |
+
assert table.to_pydict() == {
|
1041 |
+
'a': [date(1970, 1, 1), date(1971, 1, 1)],
|
1042 |
+
'b': [date(1970, 1, 2), date(1971, 1, 2)],
|
1043 |
+
}
|
1044 |
+
|
1045 |
+
# Can ask for date types explicitly
|
1046 |
+
opts = ConvertOptions()
|
1047 |
+
opts.column_types = {'a': pa.date32(), 'b': pa.date64()}
|
1048 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1049 |
+
schema = pa.schema([('a', pa.date32()),
|
1050 |
+
('b', pa.date64())])
|
1051 |
+
assert table.schema == schema
|
1052 |
+
assert table.to_pydict() == {
|
1053 |
+
'a': [date(1970, 1, 1), date(1971, 1, 1)],
|
1054 |
+
'b': [date(1970, 1, 2), date(1971, 1, 2)],
|
1055 |
+
}
|
1056 |
+
|
1057 |
+
# Can ask for timestamp types explicitly
|
1058 |
+
opts = ConvertOptions()
|
1059 |
+
opts.column_types = {'a': pa.timestamp('s'), 'b': pa.timestamp('ms')}
|
1060 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1061 |
+
schema = pa.schema([('a', pa.timestamp('s')),
|
1062 |
+
('b', pa.timestamp('ms'))])
|
1063 |
+
assert table.schema == schema
|
1064 |
+
assert table.to_pydict() == {
|
1065 |
+
'a': [datetime(1970, 1, 1), datetime(1971, 1, 1)],
|
1066 |
+
'b': [datetime(1970, 1, 2), datetime(1971, 1, 2)],
|
1067 |
+
}
|
1068 |
+
|
1069 |
+
def test_times(self):
|
1070 |
+
# Times are inferred as time32[s] by default
|
1071 |
+
from datetime import time
|
1072 |
+
|
1073 |
+
rows = b"a,b\n12:34:56,12:34:56.789\n23:59:59,23:59:59.999\n"
|
1074 |
+
table = self.read_bytes(rows)
|
1075 |
+
# Column 'b' has subseconds, so cannot be inferred as time32[s]
|
1076 |
+
schema = pa.schema([('a', pa.time32('s')),
|
1077 |
+
('b', pa.string())])
|
1078 |
+
assert table.schema == schema
|
1079 |
+
assert table.to_pydict() == {
|
1080 |
+
'a': [time(12, 34, 56), time(23, 59, 59)],
|
1081 |
+
'b': ["12:34:56.789", "23:59:59.999"],
|
1082 |
+
}
|
1083 |
+
|
1084 |
+
# Can ask for time types explicitly
|
1085 |
+
opts = ConvertOptions()
|
1086 |
+
opts.column_types = {'a': pa.time64('us'), 'b': pa.time32('ms')}
|
1087 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1088 |
+
schema = pa.schema([('a', pa.time64('us')),
|
1089 |
+
('b', pa.time32('ms'))])
|
1090 |
+
assert table.schema == schema
|
1091 |
+
assert table.to_pydict() == {
|
1092 |
+
'a': [time(12, 34, 56), time(23, 59, 59)],
|
1093 |
+
'b': [time(12, 34, 56, 789000), time(23, 59, 59, 999000)],
|
1094 |
+
}
|
1095 |
+
|
1096 |
+
def test_auto_dict_encode(self):
|
1097 |
+
opts = ConvertOptions(auto_dict_encode=True)
|
1098 |
+
rows = "a,b\nab,1\ncdé,2\ncdé,3\nab,4".encode()
|
1099 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1100 |
+
schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.string())),
|
1101 |
+
('b', pa.int64())])
|
1102 |
+
expected = {
|
1103 |
+
'a': ["ab", "cdé", "cdé", "ab"],
|
1104 |
+
'b': [1, 2, 3, 4],
|
1105 |
+
}
|
1106 |
+
assert table.schema == schema
|
1107 |
+
assert table.to_pydict() == expected
|
1108 |
+
|
1109 |
+
opts.auto_dict_max_cardinality = 2
|
1110 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1111 |
+
assert table.schema == schema
|
1112 |
+
assert table.to_pydict() == expected
|
1113 |
+
|
1114 |
+
# Cardinality above max => plain-encoded
|
1115 |
+
opts.auto_dict_max_cardinality = 1
|
1116 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1117 |
+
assert table.schema == pa.schema([('a', pa.string()),
|
1118 |
+
('b', pa.int64())])
|
1119 |
+
assert table.to_pydict() == expected
|
1120 |
+
|
1121 |
+
# With invalid UTF8, not checked
|
1122 |
+
opts.auto_dict_max_cardinality = 50
|
1123 |
+
opts.check_utf8 = False
|
1124 |
+
rows = b"a,b\nab,1\ncd\xff,2\nab,3"
|
1125 |
+
table = self.read_bytes(rows, convert_options=opts,
|
1126 |
+
validate_full=False)
|
1127 |
+
assert table.schema == schema
|
1128 |
+
dict_values = table['a'].chunk(0).dictionary
|
1129 |
+
assert len(dict_values) == 2
|
1130 |
+
assert dict_values[0].as_py() == "ab"
|
1131 |
+
assert dict_values[1].as_buffer() == b"cd\xff"
|
1132 |
+
|
1133 |
+
# With invalid UTF8, checked
|
1134 |
+
opts.check_utf8 = True
|
1135 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1136 |
+
schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.binary())),
|
1137 |
+
('b', pa.int64())])
|
1138 |
+
expected = {
|
1139 |
+
'a': [b"ab", b"cd\xff", b"ab"],
|
1140 |
+
'b': [1, 2, 3],
|
1141 |
+
}
|
1142 |
+
assert table.schema == schema
|
1143 |
+
assert table.to_pydict() == expected
|
1144 |
+
|
1145 |
+
def test_custom_nulls(self):
|
1146 |
+
# Infer nulls with custom values
|
1147 |
+
opts = ConvertOptions(null_values=['Xxx', 'Zzz'])
|
1148 |
+
rows = b"""a,b,c,d\nZzz,"Xxx",1,2\nXxx,#N/A,,Zzz\n"""
|
1149 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1150 |
+
schema = pa.schema([('a', pa.null()),
|
1151 |
+
('b', pa.string()),
|
1152 |
+
('c', pa.string()),
|
1153 |
+
('d', pa.int64())])
|
1154 |
+
assert table.schema == schema
|
1155 |
+
assert table.to_pydict() == {
|
1156 |
+
'a': [None, None],
|
1157 |
+
'b': ["Xxx", "#N/A"],
|
1158 |
+
'c': ["1", ""],
|
1159 |
+
'd': [2, None],
|
1160 |
+
}
|
1161 |
+
|
1162 |
+
opts = ConvertOptions(null_values=['Xxx', 'Zzz'],
|
1163 |
+
strings_can_be_null=True)
|
1164 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1165 |
+
assert table.to_pydict() == {
|
1166 |
+
'a': [None, None],
|
1167 |
+
'b': [None, "#N/A"],
|
1168 |
+
'c': ["1", ""],
|
1169 |
+
'd': [2, None],
|
1170 |
+
}
|
1171 |
+
opts.quoted_strings_can_be_null = False
|
1172 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1173 |
+
assert table.to_pydict() == {
|
1174 |
+
'a': [None, None],
|
1175 |
+
'b': ["Xxx", "#N/A"],
|
1176 |
+
'c': ["1", ""],
|
1177 |
+
'd': [2, None],
|
1178 |
+
}
|
1179 |
+
|
1180 |
+
opts = ConvertOptions(null_values=[])
|
1181 |
+
rows = b"a,b\n#N/A,\n"
|
1182 |
+
table = self.read_bytes(rows, convert_options=opts)
|
1183 |
+
schema = pa.schema([('a', pa.string()),
|
1184 |
+
('b', pa.string())])
|
1185 |
+
assert table.schema == schema
|
1186 |
+
assert table.to_pydict() == {
|
1187 |
+
'a': ["#N/A"],
|
1188 |
+
'b': [""],
|
1189 |
+
}

    def test_custom_bools(self):
        # Infer booleans with custom values
        opts = ConvertOptions(true_values=['T', 'yes'],
                              false_values=['F', 'no'])
        rows = (b"a,b,c\n"
                b"True,T,t\n"
                b"False,F,f\n"
                b"True,yes,yes\n"
                b"False,no,no\n"
                b"N/A,N/A,N/A\n")
        table = self.read_bytes(rows, convert_options=opts)
        schema = pa.schema([('a', pa.string()),
                            ('b', pa.bool_()),
                            ('c', pa.string())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': ["True", "False", "True", "False", "N/A"],
            'b': [True, False, True, False, None],
            'c': ["t", "f", "yes", "no", "N/A"],
        }

    def test_column_types(self):
        # Ask for specific column types in ConvertOptions
        opts = ConvertOptions(column_types={'b': 'float32',
                                            'c': 'string',
                                            'd': 'boolean',
                                            'e': pa.decimal128(11, 2),
                                            'zz': 'null'})
        rows = b"a,b,c,d,e\n1,2,3,true,1.0\n4,-5,6,false,0\n"
        table = self.read_bytes(rows, convert_options=opts)
        schema = pa.schema([('a', pa.int64()),
                            ('b', pa.float32()),
                            ('c', pa.string()),
                            ('d', pa.bool_()),
                            ('e', pa.decimal128(11, 2))])
        expected = {
            'a': [1, 4],
            'b': [2.0, -5.0],
            'c': ["3", "6"],
            'd': [True, False],
            'e': [Decimal("1.00"), Decimal("0.00")]
        }
        assert table.schema == schema
        assert table.to_pydict() == expected
        # Pass column_types as schema
        opts = ConvertOptions(
            column_types=pa.schema([('b', pa.float32()),
                                    ('c', pa.string()),
                                    ('d', pa.bool_()),
                                    ('e', pa.decimal128(11, 2)),
                                    ('zz', pa.bool_())]))
        table = self.read_bytes(rows, convert_options=opts)
        assert table.schema == schema
        assert table.to_pydict() == expected
        # One of the columns in column_types fails converting
        rows = b"a,b,c,d,e\n1,XXX,3,true,5\n4,-5,6,false,7\n"
        with pytest.raises(pa.ArrowInvalid) as exc:
            self.read_bytes(rows, convert_options=opts)
        err = str(exc.value)
        assert "In CSV column #1: " in err
        assert "CSV conversion error to float: invalid value 'XXX'" in err
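
    # A minimal sketch of per-column typing, condensed from the test above;
    # the helper name is illustrative. `column_types` accepts a dict of
    # name -> type (or a pa.schema), and unmentioned columns are inferred.
    def sketch_column_types_usage(self):
        opts = ConvertOptions(column_types={'a': pa.string()})
        table = self.read_bytes(b"a,b\n1,2\n3,4\n", convert_options=opts)
        assert table.schema == pa.schema([('a', pa.string()),
                                          ('b', pa.int64())])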

    def test_column_types_dict(self):
        # Ask for dict-encoded column types in ConvertOptions
        column_types = [
            ('a', pa.dictionary(pa.int32(), pa.utf8())),
            ('b', pa.dictionary(pa.int32(), pa.int64())),
            ('c', pa.dictionary(pa.int32(), pa.decimal128(11, 2))),
            ('d', pa.dictionary(pa.int32(), pa.large_utf8()))]

        opts = ConvertOptions(column_types=dict(column_types))
        rows = (b"a,b,c,d\n"
                b"abc,123456,1.0,zz\n"
                b"defg,123456,0.5,xx\n"
                b"abc,N/A,1.0,xx\n")
        table = self.read_bytes(rows, convert_options=opts)

        schema = pa.schema(column_types)
        expected = {
            'a': ["abc", "defg", "abc"],
            'b': [123456, 123456, None],
            'c': [Decimal("1.00"), Decimal("0.50"), Decimal("1.00")],
            'd': ["zz", "xx", "xx"],
        }
        assert table.schema == schema
        assert table.to_pydict() == expected

        # Unsupported index type
        column_types[0] = ('a', pa.dictionary(pa.int8(), pa.utf8()))

        opts = ConvertOptions(column_types=dict(column_types))
        with pytest.raises(NotImplementedError):
            table = self.read_bytes(rows, convert_options=opts)

    def test_column_types_with_column_names(self):
        # When both `column_names` and `column_types` are given, names
        # in `column_types` should refer to names in `column_names`
        rows = b"a,b\nc,d\ne,f\n"
        read_options = ReadOptions(column_names=['x', 'y'])
        convert_options = ConvertOptions(column_types={'x': pa.binary()})
        table = self.read_bytes(rows, read_options=read_options,
                                convert_options=convert_options)
        schema = pa.schema([('x', pa.binary()),
                            ('y', pa.string())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'x': [b'a', b'c', b'e'],
            'y': ['b', 'd', 'f'],
        }

    def test_no_ending_newline(self):
        # No \n after last line
        rows = b"a,b,c\n1,2,3\n4,5,6"
        table = self.read_bytes(rows)
        assert table.to_pydict() == {
            'a': [1, 4],
            'b': [2, 5],
            'c': [3, 6],
        }

    def test_trivial(self):
        # A bit pointless, but at least it shouldn't crash
        rows = b",\n\n"
        table = self.read_bytes(rows)
        assert table.to_pydict() == {'': []}

    def test_empty_lines(self):
        rows = b"a,b\n\r1,2\r\n\r\n3,4\r\n"
        table = self.read_bytes(rows)
        assert table.to_pydict() == {
            'a': [1, 3],
            'b': [2, 4],
        }
        parse_options = ParseOptions(ignore_empty_lines=False)
        table = self.read_bytes(rows, parse_options=parse_options)
        assert table.to_pydict() == {
            'a': [None, 1, None, 3],
            'b': [None, 2, None, 4],
        }
        read_options = ReadOptions(skip_rows=2)
        table = self.read_bytes(rows, parse_options=parse_options,
                                read_options=read_options)
        assert table.to_pydict() == {
            '1': [None, 3],
            '2': [None, 4],
        }

    def test_invalid_csv(self):
        # Various CSV errors
        rows = b"a,b,c\n1,2\n4,5,6\n"
        with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 2"):
            self.read_bytes(rows)
        rows = b"a,b,c\n1,2,3\n4"
        with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 1"):
            self.read_bytes(rows)
        for rows in [b"", b"\n", b"\r\n", b"\r", b"\n\n"]:
            with pytest.raises(pa.ArrowInvalid, match="Empty CSV file"):
                self.read_bytes(rows)

    def test_options_delimiter(self):
        rows = b"a;b,c\nde,fg;eh\n"
        table = self.read_bytes(rows)
        assert table.to_pydict() == {
            'a;b': ['de'],
            'c': ['fg;eh'],
        }
        opts = ParseOptions(delimiter=';')
        table = self.read_bytes(rows, parse_options=opts)
        assert table.to_pydict() == {
            'a': ['de,fg'],
            'b,c': ['eh'],
        }

    def test_small_random_csv(self):
        csv, expected = make_random_csv(num_cols=2, num_rows=10)
        table = self.read_bytes(csv)
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()

    def test_stress_block_sizes(self):
        # Test a number of small block sizes to stress block stitching
        csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
        block_sizes = [11, 12, 13, 17, 37, 111]
        csvs = [csv_base, csv_base.rstrip(b'\r\n')]
        for csv in csvs:
            for block_size in block_sizes:
                read_options = ReadOptions(block_size=block_size)
                table = self.read_bytes(csv, read_options=read_options)
                assert table.schema == expected.schema
                if not table.equals(expected):
                    # Better error output
                    assert table.to_pydict() == expected.to_pydict()

    def test_stress_convert_options_blowup(self):
        # ARROW-6481: A convert_options with a very large number of columns
        # should not blow memory and CPU time.
        try:
            clock = time.thread_time
        except AttributeError:
            clock = time.time
        num_columns = 10000
        col_names = ["K{}".format(i) for i in range(num_columns)]
        csv = make_empty_csv(col_names)
        t1 = clock()
        convert_options = ConvertOptions(
            column_types={k: pa.string() for k in col_names[::2]})
        table = self.read_bytes(csv, convert_options=convert_options)
        dt = clock() - t1
        # Check that processing time didn't blow up.
        # This is a conservative check (it takes less than 300 ms
        # in debug mode on my local machine).
        assert dt <= 10.0
        # Check result
        assert table.num_columns == num_columns
        assert table.num_rows == 0
        assert table.column_names == col_names

    def test_cancellation(self):
        if (threading.current_thread().ident !=
                threading.main_thread().ident):
            pytest.skip("test only works from main Python thread")
        # Skips test if not available
        raise_signal = util.get_raise_signal()
        signum = signal.SIGINT

        def signal_from_thread():
            # Give our workload a chance to start up
            time.sleep(0.2)
            raise_signal(signum)

        # We start with a small CSV reading workload and increase its size
        # until it's large enough to get an interruption during it, even in
        # release mode on fast machines.
        last_duration = 0.0
        workload_size = 100_000
        attempts = 0

        while last_duration < 5.0 and attempts < 10:
            print("workload size:", workload_size)
            large_csv = b"a,b,c\n" + b"1,2,3\n" * workload_size
            exc_info = None

            try:
                # We use a signal fd to reliably ensure that the signal
                # has been delivered to Python, regardless of how exactly
                # it was caught.
                with util.signal_wakeup_fd() as sigfd:
                    try:
                        t = threading.Thread(target=signal_from_thread)
                        t.start()
                        t1 = time.time()
                        try:
                            self.read_bytes(large_csv)
                        except KeyboardInterrupt as e:
                            exc_info = e
                            last_duration = time.time() - t1
                    finally:
                        # Wait for signal to arrive if it didn't already,
                        # to avoid getting a KeyboardInterrupt after the
                        # `except` block below.
                        select.select([sigfd], [], [sigfd], 10.0)

            except KeyboardInterrupt:
                # KeyboardInterrupt didn't interrupt `read_bytes` above.
                pass

            if exc_info is not None:
                # We managed to get `self.read_bytes` interrupted, see if it
                # was actually interrupted inside Arrow C++ or in the Python
                # scaffolding.
                if exc_info.__context__ is not None:
                    # Interrupted inside Arrow C++, we're satisfied now
                    break

            # Increase workload size to get a better chance
            workload_size = workload_size * 3

        if exc_info is None:
            pytest.fail("Failed to get an interruption during CSV reading")

        # Interruption should have arrived timely
        assert last_duration <= 1.0
        e = exc_info.__context__
        assert isinstance(e, pa.ArrowCancelled)
        assert e.signum == signum

    def test_cancellation_disabled(self):
        # ARROW-12622: reader would segfault when the cancelling signal
        # handler was not enabled (e.g. if disabled, or if not on the
        # main thread)
        t = threading.Thread(
            target=lambda: self.read_bytes(b"f64\n0.1"))
        t.start()
        t.join()


class TestSerialCSVTableRead(BaseCSVTableRead):
    @property
    def use_threads(self):
        return False


class TestThreadedCSVTableRead(BaseCSVTableRead):
    @property
    def use_threads(self):
        return True
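

# A minimal sketch of the serial/threaded split exercised by the two test
# classes above: the same data goes through either code path depending on
# ReadOptions.use_threads. The function name is illustrative only.
def sketch_use_threads_option():
    data = b"a,b\n1,2\n3,4\n"
    serial = read_csv(io.BytesIO(data),
                      read_options=ReadOptions(use_threads=False))
    threaded = read_csv(io.BytesIO(data),
                        read_options=ReadOptions(use_threads=True))
    assert serial.equals(threaded)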


class BaseStreamingCSVRead(BaseTestCSV):

    def open_csv(self, csv, *args, **kwargs):
        """
        Open the CSV bytes for streaming reading with pyarrow's open_csv.

        csv     The CSV bytes
        args    Positional arguments to be forwarded to pyarrow's open_csv
        kwargs  Keyword arguments to be forwarded to pyarrow's open_csv
        """
        read_options = kwargs.setdefault('read_options', ReadOptions())
        read_options.use_threads = self.use_threads
        return open_csv(csv, *args, **kwargs)

    def open_bytes(self, b, **kwargs):
        return self.open_csv(pa.py_buffer(b), **kwargs)

    def check_reader(self, reader, expected_schema, expected_data):
        assert reader.schema == expected_schema
        batches = list(reader)
        assert len(batches) == len(expected_data)
        for batch, expected_batch in zip(batches, expected_data):
            batch.validate(full=True)
            assert batch.schema == expected_schema
            assert batch.to_pydict() == expected_batch

    def read_bytes(self, b, **kwargs):
        return self.open_bytes(b, **kwargs).read_all()

    def test_file_object(self):
        data = b"a,b\n1,2\n3,4\n"
        expected_data = {'a': [1, 3], 'b': [2, 4]}
        bio = io.BytesIO(data)
        reader = self.open_csv(bio)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64())])
        self.check_reader(reader, expected_schema, [expected_data])

    def test_header(self):
        rows = b"abc,def,gh\n"
        reader = self.open_bytes(rows)
        expected_schema = pa.schema([('abc', pa.null()),
                                     ('def', pa.null()),
                                     ('gh', pa.null())])
        self.check_reader(reader, expected_schema, [])

    def test_inference(self):
        # Inference is done on first block
        rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.binary())])

        read_options = ReadOptions()
        read_options.block_size = len(rows)
        reader = self.open_bytes(rows, read_options=read_options)
        self.check_reader(reader, expected_schema,
                          [{'a': ['123', 'abc', 'gh'],
                            'b': [b'456', b'de\xff', b'ij']}])

        read_options.block_size = len(rows) - 1
        reader = self.open_bytes(rows, read_options=read_options)
        self.check_reader(reader, expected_schema,
                          [{'a': ['123', 'abc'],
                            'b': [b'456', b'de\xff']},
                           {'a': ['gh'],
                            'b': [b'ij']}])

    def test_inference_failure(self):
        # Inference on first block, then conversion failure on second block
        rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
        read_options = ReadOptions()
        read_options.block_size = len(rows) - 7
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64())])
        assert reader.schema == expected_schema
        assert reader.read_next_batch().to_pydict() == {
            'a': [123], 'b': [456]
        }
        # Second block
        with pytest.raises(ValueError,
                           match="CSV conversion error to int64"):
            reader.read_next_batch()
        # EOF
        with pytest.raises(StopIteration):
            reader.read_next_batch()

    def test_invalid_csv(self):
        # CSV errors on first block
        rows = b"a,b\n1,2,3\n4,5\n6,7\n"
        read_options = ReadOptions()
        read_options.block_size = 10
        with pytest.raises(pa.ArrowInvalid,
                           match="Expected 2 columns, got 3"):
            reader = self.open_bytes(
                rows, read_options=read_options)

        # CSV errors on second block
        rows = b"a,b\n1,2\n3,4,5\n6,7\n"
        read_options.block_size = 8
        reader = self.open_bytes(rows, read_options=read_options)
        assert reader.read_next_batch().to_pydict() == {'a': [1], 'b': [2]}
        with pytest.raises(pa.ArrowInvalid,
                           match="Expected 2 columns, got 3"):
            reader.read_next_batch()
        # Cannot continue after a parse error
        with pytest.raises(StopIteration):
            reader.read_next_batch()

    def test_options_delimiter(self):
        rows = b"a;b,c\nde,fg;eh\n"
        reader = self.open_bytes(rows)
        expected_schema = pa.schema([('a;b', pa.string()),
                                     ('c', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a;b': ['de'],
                            'c': ['fg;eh']}])

        opts = ParseOptions(delimiter=';')
        reader = self.open_bytes(rows, parse_options=opts)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b,c', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ['de,fg'],
                            'b,c': ['eh']}])

    def test_no_ending_newline(self):
        # No \n after last line
        rows = b"a,b,c\n1,2,3\n4,5,6"
        reader = self.open_bytes(rows)
        expected_schema = pa.schema([('a', pa.int64()),
                                     ('b', pa.int64()),
                                     ('c', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'a': [1, 4],
                            'b': [2, 5],
                            'c': [3, 6]}])

    def test_empty_file(self):
        with pytest.raises(ValueError, match="Empty CSV file"):
            self.open_bytes(b"")

    def test_column_options(self):
        # With column_names
        rows = b"1,2,3\n4,5,6"
        read_options = ReadOptions()
        read_options.column_names = ['d', 'e', 'f']
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('d', pa.int64()),
                                     ('e', pa.int64()),
                                     ('f', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'d': [1, 4],
                            'e': [2, 5],
                            'f': [3, 6]}])

        # With include_columns
        convert_options = ConvertOptions()
        convert_options.include_columns = ['f', 'e']
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('f', pa.int64()),
                                     ('e', pa.int64())])
        self.check_reader(reader, expected_schema,
                          [{'e': [2, 5],
                            'f': [3, 6]}])

        # With column_types
        convert_options.column_types = {'e': pa.string()}
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'e': ["2", "5"],
                            'f': [3, 6]}])

        # Missing columns in include_columns
        convert_options.include_columns = ['g', 'f', 'e']
        with pytest.raises(
                KeyError,
                match="Column 'g' in include_columns does not exist"):
            reader = self.open_bytes(rows, read_options=read_options,
                                     convert_options=convert_options)

        convert_options.include_missing_columns = True
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('g', pa.null()),
                                     ('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'g': [None, None],
                            'e': ["2", "5"],
                            'f': [3, 6]}])

        convert_options.column_types = {'e': pa.string(), 'g': pa.float64()}
        reader = self.open_bytes(rows, read_options=read_options,
                                 convert_options=convert_options)
        expected_schema = pa.schema([('g', pa.float64()),
                                     ('f', pa.int64()),
                                     ('e', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'g': [None, None],
                            'e': ["2", "5"],
                            'f': [3, 6]}])

    def test_encoding(self):
        # latin-1 (invalid utf-8)
        rows = b"a,b\nun,\xe9l\xe9phant"
        read_options = ReadOptions()
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.binary())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': [b"\xe9l\xe9phant"]}])

        read_options.encoding = 'latin1'
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': ["éléphant"]}])

        # utf-16
        rows = (b'\xff\xfea\x00,\x00b\x00\n\x00u\x00n\x00,'
                b'\x00\xe9\x00l\x00\xe9\x00p\x00h\x00a\x00n\x00t\x00')
        read_options.encoding = 'utf16'
        reader = self.open_bytes(rows, read_options=read_options)
        expected_schema = pa.schema([('a', pa.string()),
                                     ('b', pa.string())])
        self.check_reader(reader, expected_schema,
                          [{'a': ["un"],
                            'b': ["éléphant"]}])
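
    # A minimal sketch of the transcoding path tested above; the helper name
    # is illustrative. ReadOptions.encoding transcodes the input to UTF-8
    # before parsing, so a latin-1 file decodes to proper strings.
    def sketch_encoding_option(self):
        opts = ReadOptions(encoding='latin1')
        reader = self.open_bytes(b"a\ncaf\xe9\n", read_options=opts)
        assert reader.read_all().to_pydict() == {'a': ["café"]}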

    def test_small_random_csv(self):
        csv, expected = make_random_csv(num_cols=2, num_rows=10)
        reader = self.open_bytes(csv)
        table = reader.read_all()
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()

    def test_stress_block_sizes(self):
        # Test a number of small block sizes to stress block stitching
        csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
        block_sizes = [19, 21, 23, 26, 37, 111]
        csvs = [csv_base, csv_base.rstrip(b'\r\n')]
        for csv in csvs:
            for block_size in block_sizes:
                # Need at least two lines for type inference
                assert csv[:block_size].count(b'\n') >= 2
                read_options = ReadOptions(block_size=block_size)
                reader = self.open_bytes(
                    csv, read_options=read_options)
                table = reader.read_all()
                assert table.schema == expected.schema
                if not table.equals(expected):
                    # Better error output
                    assert table.to_pydict() == expected.to_pydict()

    def test_batch_lifetime(self):
        gc.collect()
        old_allocated = pa.total_allocated_bytes()

        # Memory occupation should not grow with CSV file size
        def check_one_batch(reader, expected):
            batch = reader.read_next_batch()
            assert batch.to_pydict() == expected

        rows = b"10,11\n12,13\n14,15\n16,17\n"
        read_options = ReadOptions()
        read_options.column_names = ['a', 'b']
        read_options.block_size = 6
        reader = self.open_bytes(rows, read_options=read_options)
        check_one_batch(reader, {'a': [10], 'b': [11]})
        allocated_after_first_batch = pa.total_allocated_bytes()
        check_one_batch(reader, {'a': [12], 'b': [13]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        check_one_batch(reader, {'a': [14], 'b': [15]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        check_one_batch(reader, {'a': [16], 'b': [17]})
        assert pa.total_allocated_bytes() <= allocated_after_first_batch
        with pytest.raises(StopIteration):
            reader.read_next_batch()
        assert pa.total_allocated_bytes() == old_allocated
        reader = None
        assert pa.total_allocated_bytes() == old_allocated

    def test_header_skip_rows(self):
        super().test_header_skip_rows()

        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

        # Skipping all rows immediately results in end of iteration
        opts = ReadOptions()
        opts.skip_rows = 4
        opts.column_names = ['ab', 'cd']
        reader = self.open_bytes(rows, read_options=opts)
        with pytest.raises(StopIteration):
            assert reader.read_next_batch()

    def test_skip_rows_after_names(self):
        super().test_skip_rows_after_names()

        rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"

        # Skipping all rows immediately results in end of iteration
        opts = ReadOptions()
        opts.skip_rows_after_names = 3
        reader = self.open_bytes(rows, read_options=opts)
        with pytest.raises(StopIteration):
            assert reader.read_next_batch()

        # Skipping beyond all rows immediately results in end of iteration
        opts.skip_rows_after_names = 99999
        reader = self.open_bytes(rows, read_options=opts)
        with pytest.raises(StopIteration):
            assert reader.read_next_batch()


class TestSerialStreamingCSVRead(BaseStreamingCSVRead):
    @property
    def use_threads(self):
        return False


class TestThreadedStreamingCSVRead(BaseStreamingCSVRead):
    @property
    def use_threads(self):
        return True
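

# A minimal sketch of incremental reading with open_csv, the API driven by
# the streaming classes above; the function name is illustrative only. A
# small block_size forces several batches out of a single input.
def sketch_open_csv_streaming():
    data = b"a,b\n1,2\n3,4\n5,6\n"
    reader = open_csv(io.BytesIO(data),
                      read_options=ReadOptions(block_size=8))
    total = 0
    for batch in reader:
        total += batch.num_rows
    assert total == 3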


class BaseTestCompressedCSVRead:

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='arrow-csv-test-')

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def read_csv(self, csv_path):
        try:
            return read_csv(csv_path)
        except pa.ArrowNotImplementedError as e:
            pytest.skip(str(e))

    def test_random_csv(self):
        csv, expected = make_random_csv(num_cols=2, num_rows=100)
        csv_path = os.path.join(self.tmpdir, self.csv_filename)
        self.write_file(csv_path, csv)
        table = self.read_csv(csv_path)
        table.validate(full=True)
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()


class TestGZipCSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
    csv_filename = "compressed.csv.gz"

    def write_file(self, path, contents):
        with gzip.open(path, 'wb', 3) as f:
            f.write(contents)

    def test_concatenated(self):
        # ARROW-5974
        csv_path = os.path.join(self.tmpdir, self.csv_filename)
        with gzip.open(csv_path, 'wb', 3) as f:
            f.write(b"ab,cd\nef,gh\n")
        with gzip.open(csv_path, 'ab', 3) as f:
            f.write(b"ij,kl\nmn,op\n")
        table = self.read_csv(csv_path)
        assert table.to_pydict() == {
            'ab': ['ef', 'ij', 'mn'],
            'cd': ['gh', 'kl', 'op'],
        }


class TestBZ2CSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
    csv_filename = "compressed.csv.bz2"

    def write_file(self, path, contents):
        with bz2.BZ2File(path, 'w') as f:
            f.write(contents)


def test_read_csv_does_not_close_passed_file_handles():
    # ARROW-4823
    buf = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
    read_csv(buf)
    assert not buf.closed


def test_write_read_round_trip():
    t = pa.Table.from_arrays([[1, 2, 3], ["a", "b", "c"]], ["c1", "c2"])
    record_batch = t.to_batches(max_chunksize=4)[0]
    for data in [t, record_batch]:
        # Test with header
        buf = io.BytesIO()
        write_csv(data, buf, WriteOptions(include_header=True))
        buf.seek(0)
        assert t == read_csv(buf)

        # Test without header
        buf = io.BytesIO()
        write_csv(data, buf, WriteOptions(include_header=False))
        buf.seek(0)

        read_options = ReadOptions(column_names=t.column_names)
        assert t == read_csv(buf, read_options=read_options)

    # Test with writer
    for read_options, parse_options, write_options in [
        (None, None, WriteOptions(include_header=True)),
        (ReadOptions(column_names=t.column_names), None,
         WriteOptions(include_header=False)),
        (None, ParseOptions(delimiter='|'),
         WriteOptions(include_header=True, delimiter='|')),
        (ReadOptions(column_names=t.column_names),
         ParseOptions(delimiter='\t'),
         WriteOptions(include_header=False, delimiter='\t')),
    ]:
        buf = io.BytesIO()
        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
            writer.write_table(t)
        buf.seek(0)
        assert t == read_csv(buf, read_options=read_options,
                             parse_options=parse_options)
        buf = io.BytesIO()
        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
            for batch in t.to_batches(max_chunksize=1):
                writer.write_batch(batch)
        buf.seek(0)
        assert t == read_csv(buf, read_options=read_options,
                             parse_options=parse_options)
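

# A minimal sketch of the writer round trip condensed from the test above;
# the function name is illustrative only. write_csv is the one-shot path
# and CSVWriter the incremental one; both read back identically.
def sketch_csv_writer_round_trip():
    t = pa.Table.from_arrays([[1, 2]], ["c1"])
    buf = io.BytesIO()
    write_csv(t, buf, WriteOptions(include_header=True))
    buf.seek(0)
    assert t == read_csv(buf)

    buf = io.BytesIO()
    with CSVWriter(buf, t.schema) as writer:
        writer.write_table(t)
    buf.seek(0)
    assert t == read_csv(buf)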


def test_write_quoting_style():
    t = pa.Table.from_arrays([[1, 2, None], ["a", None, "c"]], ["c1", "c2"])
    buf = io.BytesIO()
    for write_options, res in [
        (WriteOptions(quoting_style='none'), b'"c1","c2"\n1,a\n2,\n,c\n'),
        (WriteOptions(), b'"c1","c2"\n1,"a"\n2,\n,"c"\n'),
        (WriteOptions(quoting_style='all_valid'),
         b'"c1","c2"\n"1","a"\n"2",\n,"c"\n'),
    ]:
        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
            writer.write_table(t)
        assert buf.getvalue() == res
        buf.seek(0)

    # Test writing special characters with different quoting styles
    t = pa.Table.from_arrays([[",", "\""]], ["c1"])
    buf = io.BytesIO()
    for write_options, res in [
        (WriteOptions(quoting_style='needed'), b'"c1"\n","\n""""\n'),
        (WriteOptions(quoting_style='none'), pa.lib.ArrowInvalid),
    ]:
        with CSVWriter(buf, t.schema, write_options=write_options) as writer:
            try:
                writer.write_table(t)
            except Exception as e:
                # This will trigger when we try to write a comma (,)
                # without quotes, which is invalid
                assert isinstance(e, res)
                break
        assert buf.getvalue() == res
        buf.seek(0)


def test_read_csv_reference_cycle():
    # ARROW-13187
    def inner():
        buf = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
        table = read_csv(buf)
        return weakref.ref(table)

    with util.disabled_gc():
        wr = inner()
        assert wr() is None


@pytest.mark.parametrize("type_factory", (
    lambda: pa.decimal128(20, 1),
    lambda: pa.decimal128(38, 15),
    lambda: pa.decimal256(20, 1),
    lambda: pa.decimal256(76, 10),
))
def test_write_csv_decimal(tmpdir, type_factory):
    type = type_factory()
    table = pa.table({"col": pa.array([1, 2]).cast(type)})

    write_csv(table, tmpdir / "out.csv")
    out = read_csv(tmpdir / "out.csv")

    assert out.column('col').cast(type) == table.column('col')


def test_read_csv_gil_deadlock():
    # GH-38676
    # This test depends on several preconditions:
    # - the CSV input is a Python file object
    # - reading the CSV file produces an error
    data = b"a,b,c"

    class MyBytesIO(io.BytesIO):
        def read(self, *args):
            time.sleep(0.001)
            return super().read(*args)

        def readinto(self, *args):
            time.sleep(0.001)
            return super().readinto(*args)

    for i in range(20):
        with pytest.raises(pa.ArrowInvalid):
            read_csv(MyBytesIO(data))
venv/lib/python3.10/site-packages/pyarrow/tests/test_cuda.py
ADDED
@@ -0,0 +1,794 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""
UNTESTED:
read_message
"""

import sys
import sysconfig

import pytest

import pyarrow as pa
import numpy as np


cuda = pytest.importorskip("pyarrow.cuda")

platform = sysconfig.get_platform()
# TODO: enable ppc64 when Arrow C++ supports IPC in ppc64 systems:
has_ipc_support = platform == 'linux-x86_64'  # or 'ppc64' in platform

cuda_ipc = pytest.mark.skipif(
    not has_ipc_support,
    reason='CUDA IPC not supported in platform `%s`' % (platform))

global_context = None  # for flake8
global_context1 = None  # for flake8


def setup_module(module):
    module.global_context = cuda.Context(0)
    module.global_context1 = cuda.Context(cuda.Context.get_num_devices() - 1)


def teardown_module(module):
    del module.global_context


def test_Context():
    assert cuda.Context.get_num_devices() > 0
    assert global_context.device_number == 0
    assert global_context1.device_number == cuda.Context.get_num_devices() - 1

    with pytest.raises(ValueError,
                       match=("device_number argument must "
                              "be non-negative less than")):
        cuda.Context(cuda.Context.get_num_devices())


@pytest.mark.parametrize("size", [0, 1, 1000])
def test_manage_allocate_free_host(size):
    buf = cuda.new_host_buffer(size)
    arr = np.frombuffer(buf, dtype=np.uint8)
    arr[size//4:3*size//4] = 1
    arr_cp = arr.copy()
    arr2 = np.frombuffer(buf, dtype=np.uint8)
    np.testing.assert_equal(arr2, arr_cp)
    assert buf.size == size


def test_context_allocate_del():
    bytes_allocated = global_context.bytes_allocated
    cudabuf = global_context.new_buffer(128)
    assert global_context.bytes_allocated == bytes_allocated + 128
    del cudabuf
    assert global_context.bytes_allocated == bytes_allocated


def make_random_buffer(size, target='host'):
    """Return a host or device buffer with random data.
    """
    if target == 'host':
        assert size >= 0
        buf = pa.allocate_buffer(size)
        assert buf.size == size
        arr = np.frombuffer(buf, dtype=np.uint8)
        assert arr.size == size
        arr[:] = np.random.randint(low=1, high=255, size=size, dtype=np.uint8)
        assert arr.sum() > 0 or size == 0
        arr_ = np.frombuffer(buf, dtype=np.uint8)
        np.testing.assert_equal(arr, arr_)
        return arr, buf
    elif target == 'device':
        arr, buf = make_random_buffer(size, target='host')
        dbuf = global_context.new_buffer(size)
        assert dbuf.size == size
        dbuf.copy_from_host(buf, position=0, nbytes=size)
        return arr, dbuf
    raise ValueError('invalid target value')
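

# A minimal sketch of the host -> device -> host round trip that the helper
# above builds on; the function name is illustrative only.
def sketch_device_round_trip(size=16):
    arr = np.arange(size, dtype=np.uint8)
    dbuf = global_context.new_buffer(size)
    dbuf.copy_from_host(pa.py_buffer(arr.tobytes()), position=0, nbytes=size)
    out = np.frombuffer(dbuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr, out)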


@pytest.mark.parametrize("size", [0, 1, 1000])
def test_context_device_buffer(size):
    # Creating device buffer from host buffer;
    arr, buf = make_random_buffer(size)
    cudabuf = global_context.buffer_from_data(buf)
    assert cudabuf.size == size
    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr, arr2)

    # CudaBuffer does not support buffer protocol
    with pytest.raises(BufferError):
        memoryview(cudabuf)

    # Creating device buffer from array:
    cudabuf = global_context.buffer_from_data(arr)
    assert cudabuf.size == size
    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr, arr2)

    # Creating device buffer from bytes:
    cudabuf = global_context.buffer_from_data(arr.tobytes())
    assert cudabuf.size == size
    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr, arr2)

    # Creating a device buffer from another device buffer, view:
    cudabuf2 = cudabuf.slice(0, cudabuf.size)
    assert cudabuf2.size == size
    arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr, arr2)

    if size > 1:
        cudabuf2.copy_from_host(arr[size//2:])
        arr3 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
        np.testing.assert_equal(np.concatenate((arr[size//2:], arr[size//2:])),
                                arr3)
        cudabuf2.copy_from_host(arr[:size//2])  # restoring arr

    # Creating a device buffer from another device buffer, copy:
    cudabuf2 = global_context.buffer_from_data(cudabuf)
    assert cudabuf2.size == size
    arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr, arr2)

    cudabuf2.copy_from_host(arr[size//2:])
    arr3 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr, arr3)

    # Slice of a device buffer
    cudabuf2 = cudabuf.slice(0, cudabuf.size+10)
    assert cudabuf2.size == size
    arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr, arr2)

    cudabuf2 = cudabuf.slice(size//4, size+10)
    assert cudabuf2.size == size - size//4
    arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr[size//4:], arr2)

    # Creating a device buffer from a slice of host buffer
    soffset = size//4
    ssize = 2*size//4
    cudabuf = global_context.buffer_from_data(buf, offset=soffset,
                                              size=ssize)
    assert cudabuf.size == ssize
    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)

    cudabuf = global_context.buffer_from_data(buf.slice(offset=soffset,
                                                        length=ssize))
    assert cudabuf.size == ssize
    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)

    # Creating a device buffer from a slice of an array
    cudabuf = global_context.buffer_from_data(arr, offset=soffset, size=ssize)
    assert cudabuf.size == ssize
    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)

    cudabuf = global_context.buffer_from_data(arr[soffset:soffset+ssize])
    assert cudabuf.size == ssize
    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)

    # Creating a device buffer from a slice of bytes
    cudabuf = global_context.buffer_from_data(arr.tobytes(),
                                              offset=soffset,
                                              size=ssize)
    assert cudabuf.size == ssize
    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr[soffset:soffset + ssize], arr2)

    # Creating a device buffer from size
    cudabuf = global_context.new_buffer(size)
    assert cudabuf.size == size

    # Creating device buffer from a slice of another device buffer:
    cudabuf = global_context.buffer_from_data(arr)
    cudabuf2 = cudabuf.slice(soffset, ssize)
    assert cudabuf2.size == ssize
    arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr[soffset:soffset+ssize], arr2)

    # Creating device buffer from HostBuffer

    buf = cuda.new_host_buffer(size)
    arr_ = np.frombuffer(buf, dtype=np.uint8)
    arr_[:] = arr
    cudabuf = global_context.buffer_from_data(buf)
    assert cudabuf.size == size
    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr, arr2)

    # Creating device buffer from HostBuffer slice

    cudabuf = global_context.buffer_from_data(buf, offset=soffset, size=ssize)
    assert cudabuf.size == ssize
    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr[soffset:soffset+ssize], arr2)

    cudabuf = global_context.buffer_from_data(
        buf.slice(offset=soffset, length=ssize))
    assert cudabuf.size == ssize
    arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8)
    np.testing.assert_equal(arr[soffset:soffset+ssize], arr2)


@pytest.mark.parametrize("size", [0, 1, 1000])
def test_context_from_object(size):
    ctx = global_context
    arr, cbuf = make_random_buffer(size, target='device')
    dtype = arr.dtype

    # Creating device buffer from a CUDA host buffer
    hbuf = cuda.new_host_buffer(size * arr.dtype.itemsize)
    np.frombuffer(hbuf, dtype=dtype)[:] = arr
    cbuf2 = ctx.buffer_from_object(hbuf)
    assert cbuf2.size == cbuf.size
    arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
    np.testing.assert_equal(arr, arr2)

    # Creating device buffer from a device buffer
    cbuf2 = ctx.buffer_from_object(cbuf2)
    assert cbuf2.size == cbuf.size
    arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
    np.testing.assert_equal(arr, arr2)

    # Trying to create a device buffer from a Buffer
    with pytest.raises(pa.ArrowTypeError,
                       match=('buffer is not backed by a CudaBuffer')):
        ctx.buffer_from_object(pa.py_buffer(b"123"))

    # Trying to create a device buffer from numpy.array
    with pytest.raises(pa.ArrowTypeError,
                       match=("cannot create device buffer view from "
                              ".* \'numpy.ndarray\'")):
        ctx.buffer_from_object(np.array([1, 2, 3]))


def test_foreign_buffer():
    ctx = global_context
    dtype = np.dtype(np.uint8)
    size = 10
    hbuf = cuda.new_host_buffer(size * dtype.itemsize)

    # test host buffer memory reference counting
    rc = sys.getrefcount(hbuf)
    fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size, hbuf)
    assert sys.getrefcount(hbuf) == rc + 1
    del fbuf
    assert sys.getrefcount(hbuf) == rc

    # test postponed deallocation of host buffer memory
    fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size, hbuf)
    del hbuf
    fbuf.copy_to_host()

    # test deallocating the host buffer memory making it inaccessible
    hbuf = cuda.new_host_buffer(size * dtype.itemsize)
    fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size)
    del hbuf
    with pytest.raises(pa.ArrowIOError,
                       match=('Cuda error ')):
        fbuf.copy_to_host()


@pytest.mark.parametrize("size", [0, 1, 1000])
def test_CudaBuffer(size):
    arr, buf = make_random_buffer(size)
    assert arr.tobytes() == buf.to_pybytes()
    cbuf = global_context.buffer_from_data(buf)
    assert cbuf.size == size
    assert not cbuf.is_cpu
    assert arr.tobytes() == cbuf.to_pybytes()
    if size > 0:
        assert cbuf.address > 0

    for i in range(size):
        assert cbuf[i] == arr[i]

    for s in [
        slice(None),
        slice(size//4, size//2),
    ]:
        assert cbuf[s].to_pybytes() == arr[s].tobytes()

    sbuf = cbuf.slice(size//4, size//2)
    assert sbuf.parent == cbuf

    with pytest.raises(TypeError,
                       match="Do not call CudaBuffer's constructor directly"):
        cuda.CudaBuffer()


@pytest.mark.parametrize("size", [0, 1, 1000])
def test_HostBuffer(size):
    arr, buf = make_random_buffer(size)
    assert arr.tobytes() == buf.to_pybytes()
    hbuf = cuda.new_host_buffer(size)
    np.frombuffer(hbuf, dtype=np.uint8)[:] = arr
    assert hbuf.size == size
    assert hbuf.is_cpu
    assert arr.tobytes() == hbuf.to_pybytes()
    for i in range(size):
        assert hbuf[i] == arr[i]
    for s in [
        slice(None),
        slice(size//4, size//2),
    ]:
        assert hbuf[s].to_pybytes() == arr[s].tobytes()

    sbuf = hbuf.slice(size//4, size//2)
    assert sbuf.parent == hbuf

    del hbuf

    with pytest.raises(TypeError,
                       match="Do not call HostBuffer's constructor directly"):
        cuda.HostBuffer()


@pytest.mark.parametrize("size", [0, 1, 1000])
def test_copy_from_to_host(size):
    # Create a buffer in host containing range(size)
    dt = np.dtype('uint16')
    nbytes = size * dt.itemsize
    buf = pa.allocate_buffer(nbytes, resizable=True)  # in host
    assert isinstance(buf, pa.Buffer)
    assert not isinstance(buf, cuda.CudaBuffer)
    arr = np.frombuffer(buf, dtype=dt)
    assert arr.size == size
    arr[:] = range(size)
    arr_ = np.frombuffer(buf, dtype=dt)
    np.testing.assert_equal(arr, arr_)

    # Create a device buffer of the same size and copy from host
    device_buffer = global_context.new_buffer(nbytes)
    assert isinstance(device_buffer, cuda.CudaBuffer)
    assert isinstance(device_buffer, pa.Buffer)
    assert device_buffer.size == nbytes
    assert not device_buffer.is_cpu
    device_buffer.copy_from_host(buf, position=0, nbytes=nbytes)

    # Copy back to host and compare contents
    buf2 = device_buffer.copy_to_host(position=0, nbytes=nbytes)
    arr2 = np.frombuffer(buf2, dtype=dt)
    np.testing.assert_equal(arr, arr2)


@pytest.mark.parametrize("size", [0, 1, 1000])
def test_copy_to_host(size):
    arr, dbuf = make_random_buffer(size, target='device')

    buf = dbuf.copy_to_host()
    assert buf.is_cpu
    np.testing.assert_equal(arr, np.frombuffer(buf, dtype=np.uint8))

    buf = dbuf.copy_to_host(position=size//4)
    assert buf.is_cpu
    np.testing.assert_equal(arr[size//4:], np.frombuffer(buf, dtype=np.uint8))

    buf = dbuf.copy_to_host(position=size//4, nbytes=size//8)
    assert buf.is_cpu
    np.testing.assert_equal(arr[size//4:size//4+size//8],
                            np.frombuffer(buf, dtype=np.uint8))

    buf = dbuf.copy_to_host(position=size//4, nbytes=0)
    assert buf.is_cpu
    assert buf.size == 0

    for (position, nbytes) in [
        (size+2, -1), (-2, -1), (size+1, 0), (-3, 0),
    ]:
        with pytest.raises(ValueError,
                           match='position argument is out-of-range'):
            dbuf.copy_to_host(position=position, nbytes=nbytes)

    for (position, nbytes) in [
        (0, size+1), (size//2, (size+1)//2+1), (size, 1)
    ]:
        with pytest.raises(ValueError,
                           match=('requested more to copy than'
                                  ' available from device buffer')):
            dbuf.copy_to_host(position=position, nbytes=nbytes)

    buf = pa.allocate_buffer(size//4)
    dbuf.copy_to_host(buf=buf)
    np.testing.assert_equal(arr[:size//4], np.frombuffer(buf, dtype=np.uint8))

    if size < 12:
        return

    dbuf.copy_to_host(buf=buf, position=12)
    np.testing.assert_equal(arr[12:12+size//4],
                            np.frombuffer(buf, dtype=np.uint8))

    dbuf.copy_to_host(buf=buf, nbytes=12)
    np.testing.assert_equal(arr[:12], np.frombuffer(buf, dtype=np.uint8)[:12])

    dbuf.copy_to_host(buf=buf, nbytes=12, position=6)
    np.testing.assert_equal(arr[6:6+12],
                            np.frombuffer(buf, dtype=np.uint8)[:12])

    for (position, nbytes) in [
        (0, size+10), (10, size-5),
        (0, size//2), (size//4, size//4+1)
    ]:
        with pytest.raises(ValueError,
                           match=('requested copy does not '
                                  'fit into host buffer')):
            dbuf.copy_to_host(buf=buf, position=position, nbytes=nbytes)


@pytest.mark.parametrize("dest_ctx", ['same', 'another'])
@pytest.mark.parametrize("size", [0, 1, 1000])
def test_copy_from_device(dest_ctx, size):
    arr, buf = make_random_buffer(size=size, target='device')
    lst = arr.tolist()
    if dest_ctx == 'another':
        dest_ctx = global_context1
        if buf.context.device_number == dest_ctx.device_number:
            pytest.skip("not a multi-GPU system")
    else:
        dest_ctx = buf.context
    dbuf = dest_ctx.new_buffer(size)

    def put(*args, **kwargs):
        dbuf.copy_from_device(buf, *args, **kwargs)
        rbuf = dbuf.copy_to_host()
        return np.frombuffer(rbuf, dtype=np.uint8).tolist()
    assert put() == lst
    if size > 4:
        assert put(position=size//4) == lst[:size//4]+lst[:-size//4]
        assert put() == lst
        assert put(position=1, nbytes=size//2) == \
            lst[:1] + lst[:size//2] + lst[-(size-size//2-1):]

    for (position, nbytes) in [
        (size+2, -1), (-2, -1), (size+1, 0), (-3, 0),
    ]:
        with pytest.raises(ValueError,
                           match='position argument is out-of-range'):
            put(position=position, nbytes=nbytes)

    for (position, nbytes) in [
        (0, size+1),
    ]:
        with pytest.raises(ValueError,
                           match=('requested more to copy than'
                                  ' available from device buffer')):
            put(position=position, nbytes=nbytes)

    if size < 4:
        return

    for (position, nbytes) in [
        (size//2, (size+1)//2+1)
    ]:
        with pytest.raises(ValueError,
                           match=('requested more to copy than'
                                  ' available in device buffer')):
            put(position=position, nbytes=nbytes)


@pytest.mark.parametrize("size", [0, 1, 1000])
def test_copy_from_host(size):
    arr, buf = make_random_buffer(size=size, target='host')
    lst = arr.tolist()
    dbuf = global_context.new_buffer(size)

    def put(*args, **kwargs):
        dbuf.copy_from_host(buf, *args, **kwargs)
        rbuf = dbuf.copy_to_host()
        return np.frombuffer(rbuf, dtype=np.uint8).tolist()
    assert put() == lst
    if size > 4:
        assert put(position=size//4) == lst[:size//4]+lst[:-size//4]
        assert put() == lst
        assert put(position=1, nbytes=size//2) == \
            lst[:1] + lst[:size//2] + lst[-(size-size//2-1):]

    for (position, nbytes) in [
        (size+2, -1), (-2, -1), (size+1, 0), (-3, 0),
    ]:
        with pytest.raises(ValueError,
                           match='position argument is out-of-range'):
            put(position=position, nbytes=nbytes)

    for (position, nbytes) in [
        (0, size+1),
    ]:
        with pytest.raises(ValueError,
                           match=('requested more to copy than'
                                  ' available from host buffer')):
            put(position=position, nbytes=nbytes)

    if size < 4:
        return

    for (position, nbytes) in [
        (size//2, (size+1)//2+1)
    ]:
        with pytest.raises(ValueError,
                           match=('requested more to copy than'
                                  ' available in device buffer')):
            put(position=position, nbytes=nbytes)


def test_BufferWriter():
    def allocate(size):
        cbuf = global_context.new_buffer(size)
        writer = cuda.BufferWriter(cbuf)
        return cbuf, writer

    def test_writes(total_size, chunksize, buffer_size=0):
        cbuf, writer = allocate(total_size)
        arr, buf = make_random_buffer(size=total_size, target='host')

        if buffer_size > 0:
            writer.buffer_size = buffer_size

        position = writer.tell()
        assert position == 0
        writer.write(buf.slice(length=chunksize))
        assert writer.tell() == chunksize
        writer.seek(0)
        position = writer.tell()
        assert position == 0

        while position < total_size:
            bytes_to_write = min(chunksize, total_size - position)
            writer.write(buf.slice(offset=position, length=bytes_to_write))
            position += bytes_to_write

        writer.flush()
        assert cbuf.size == total_size
        cbuf.context.synchronize()
        buf2 = cbuf.copy_to_host()
        cbuf.context.synchronize()
        assert buf2.size == total_size
        arr2 = np.frombuffer(buf2, dtype=np.uint8)
        np.testing.assert_equal(arr, arr2)

    total_size, chunk_size = 1 << 16, 1000
    test_writes(total_size, chunk_size)
    test_writes(total_size, chunk_size, total_size // 16)

    cbuf, writer = allocate(100)
    writer.write(np.arange(100, dtype=np.uint8))
    writer.writeat(50, np.arange(25, dtype=np.uint8))
    writer.write(np.arange(25, dtype=np.uint8))
    writer.flush()

    arr = np.frombuffer(cbuf.copy_to_host(), np.uint8)
    np.testing.assert_equal(arr[:50], np.arange(50, dtype=np.uint8))
    np.testing.assert_equal(arr[50:75], np.arange(25, dtype=np.uint8))
    np.testing.assert_equal(arr[75:], np.arange(25, dtype=np.uint8))
|
586 |
+
|
587 |
+
|
588 |
+
def test_BufferWriter_edge_cases():
|
589 |
+
# edge cases, see cuda-test.cc for more information:
|
590 |
+
size = 1000
|
591 |
+
cbuf = global_context.new_buffer(size)
|
592 |
+
writer = cuda.BufferWriter(cbuf)
|
593 |
+
arr, buf = make_random_buffer(size=size, target='host')
|
594 |
+
|
595 |
+
assert writer.buffer_size == 0
|
596 |
+
writer.buffer_size = 100
|
597 |
+
assert writer.buffer_size == 100
|
598 |
+
|
599 |
+
writer.write(buf.slice(length=0))
|
600 |
+
assert writer.tell() == 0
|
601 |
+
|
602 |
+
writer.write(buf.slice(length=10))
|
603 |
+
writer.buffer_size = 200
|
604 |
+
assert writer.buffer_size == 200
|
605 |
+
assert writer.num_bytes_buffered == 0
|
606 |
+
|
607 |
+
writer.write(buf.slice(offset=10, length=300))
|
608 |
+
assert writer.num_bytes_buffered == 0
|
609 |
+
|
610 |
+
writer.write(buf.slice(offset=310, length=200))
|
611 |
+
assert writer.num_bytes_buffered == 0
|
612 |
+
|
613 |
+
writer.write(buf.slice(offset=510, length=390))
|
614 |
+
writer.write(buf.slice(offset=900, length=100))
|
615 |
+
|
616 |
+
writer.flush()
|
617 |
+
|
618 |
+
buf2 = cbuf.copy_to_host()
|
619 |
+
assert buf2.size == size
|
620 |
+
arr2 = np.frombuffer(buf2, dtype=np.uint8)
|
621 |
+
np.testing.assert_equal(arr, arr2)
|
622 |
+
|
623 |
+
|
624 |
+
def test_BufferReader():
|
625 |
+
size = 1000
|
626 |
+
arr, cbuf = make_random_buffer(size=size, target='device')
|
627 |
+
|
628 |
+
reader = cuda.BufferReader(cbuf)
|
629 |
+
reader.seek(950)
|
630 |
+
assert reader.tell() == 950
|
631 |
+
|
632 |
+
data = reader.read(100)
|
633 |
+
assert len(data) == 50
|
634 |
+
assert reader.tell() == 1000
|
635 |
+
|
636 |
+
reader.seek(925)
|
637 |
+
arr2 = np.zeros(100, dtype=np.uint8)
|
638 |
+
n = reader.readinto(arr2)
|
639 |
+
assert n == 75
|
640 |
+
assert reader.tell() == 1000
|
641 |
+
np.testing.assert_equal(arr[925:], arr2[:75])
|
642 |
+
|
643 |
+
reader.seek(0)
|
644 |
+
assert reader.tell() == 0
|
645 |
+
buf2 = reader.read_buffer()
|
646 |
+
arr2 = np.frombuffer(buf2.copy_to_host(), dtype=np.uint8)
|
647 |
+
np.testing.assert_equal(arr, arr2)
|
648 |
+
|
649 |
+
|
650 |
+
def test_BufferReader_zero_size():
|
651 |
+
arr, cbuf = make_random_buffer(size=0, target='device')
|
652 |
+
reader = cuda.BufferReader(cbuf)
|
653 |
+
reader.seek(0)
|
654 |
+
data = reader.read()
|
655 |
+
assert len(data) == 0
|
656 |
+
assert reader.tell() == 0
|
657 |
+
buf2 = reader.read_buffer()
|
658 |
+
arr2 = np.frombuffer(buf2.copy_to_host(), dtype=np.uint8)
|
659 |
+
np.testing.assert_equal(arr, arr2)
|
660 |
+
|
661 |
+
|
662 |
+
def make_recordbatch(length):
|
663 |
+
schema = pa.schema([pa.field('f0', pa.int16()),
|
664 |
+
pa.field('f1', pa.int16())])
|
665 |
+
a0 = pa.array(np.random.randint(0, 255, size=length, dtype=np.int16))
|
666 |
+
a1 = pa.array(np.random.randint(0, 255, size=length, dtype=np.int16))
|
667 |
+
batch = pa.record_batch([a0, a1], schema=schema)
|
668 |
+
return batch
|
669 |
+
|
670 |
+
|
671 |
+
def test_batch_serialize():
|
672 |
+
batch = make_recordbatch(10)
|
673 |
+
hbuf = batch.serialize()
|
674 |
+
cbuf = cuda.serialize_record_batch(batch, global_context)
|
675 |
+
|
676 |
+
# Test that read_record_batch works properly
|
677 |
+
cbatch = cuda.read_record_batch(cbuf, batch.schema)
|
678 |
+
assert isinstance(cbatch, pa.RecordBatch)
|
679 |
+
assert batch.schema == cbatch.schema
|
680 |
+
assert batch.num_columns == cbatch.num_columns
|
681 |
+
assert batch.num_rows == cbatch.num_rows
|
682 |
+
|
683 |
+
# Deserialize CUDA-serialized batch on host
|
684 |
+
buf = cbuf.copy_to_host()
|
685 |
+
assert hbuf.equals(buf)
|
686 |
+
batch2 = pa.ipc.read_record_batch(buf, batch.schema)
|
687 |
+
assert hbuf.equals(batch2.serialize())
|
688 |
+
|
689 |
+
assert batch.num_columns == batch2.num_columns
|
690 |
+
assert batch.num_rows == batch2.num_rows
|
691 |
+
assert batch.column(0).equals(batch2.column(0))
|
692 |
+
assert batch.equals(batch2)
|
693 |
+
|
694 |
+
|
695 |
+
def make_table():
|
696 |
+
a0 = pa.array([0, 1, 42, None], type=pa.int16())
|
697 |
+
a1 = pa.array([[0, 1], [2], [], None], type=pa.list_(pa.int32()))
|
698 |
+
a2 = pa.array([("ab", True), ("cde", False), (None, None), None],
|
699 |
+
type=pa.struct([("strs", pa.utf8()),
|
700 |
+
("bools", pa.bool_())]))
|
701 |
+
# Dictionaries are validated on the IPC read path, but that can produce
|
702 |
+
# issues for GPU-located dictionaries. Check that they work fine.
|
703 |
+
a3 = pa.DictionaryArray.from_arrays(
|
704 |
+
indices=[0, 1, 1, None],
|
705 |
+
dictionary=pa.array(['foo', 'bar']))
|
706 |
+
a4 = pa.DictionaryArray.from_arrays(
|
707 |
+
indices=[2, 1, 2, None],
|
708 |
+
dictionary=a1)
|
709 |
+
a5 = pa.DictionaryArray.from_arrays(
|
710 |
+
indices=[2, 1, 0, None],
|
711 |
+
dictionary=a2)
|
712 |
+
|
713 |
+
arrays = [a0, a1, a2, a3, a4, a5]
|
714 |
+
schema = pa.schema([('f{}'.format(i), arr.type)
|
715 |
+
for i, arr in enumerate(arrays)])
|
716 |
+
batch = pa.record_batch(arrays, schema=schema)
|
717 |
+
table = pa.Table.from_batches([batch])
|
718 |
+
return table
|
719 |
+
|
720 |
+
|
721 |
+
def make_table_cuda():
|
722 |
+
htable = make_table()
|
723 |
+
# Serialize the host table to bytes
|
724 |
+
sink = pa.BufferOutputStream()
|
725 |
+
with pa.ipc.new_stream(sink, htable.schema) as out:
|
726 |
+
out.write_table(htable)
|
727 |
+
hbuf = pa.py_buffer(sink.getvalue().to_pybytes())
|
728 |
+
|
729 |
+
# Copy the host bytes to a device buffer
|
730 |
+
dbuf = global_context.new_buffer(len(hbuf))
|
731 |
+
dbuf.copy_from_host(hbuf, nbytes=len(hbuf))
|
732 |
+
# Deserialize the device buffer into a Table
|
733 |
+
dtable = pa.ipc.open_stream(cuda.BufferReader(dbuf)).read_all()
|
734 |
+
return hbuf, htable, dbuf, dtable
|
735 |
+
|
736 |
+
|
737 |
+
def test_table_deserialize():
|
738 |
+
# ARROW-9659: make sure that we can deserialize a GPU-located table
|
739 |
+
# without crashing when initializing or validating the underlying arrays.
|
740 |
+
hbuf, htable, dbuf, dtable = make_table_cuda()
|
741 |
+
# Assert basic fields the same between host and device tables
|
742 |
+
assert htable.schema == dtable.schema
|
743 |
+
assert htable.num_rows == dtable.num_rows
|
744 |
+
assert htable.num_columns == dtable.num_columns
|
745 |
+
# Assert byte-level equality
|
746 |
+
assert hbuf.equals(dbuf.copy_to_host())
|
747 |
+
# Copy DtoH and assert the tables are still equivalent
|
748 |
+
assert htable.equals(pa.ipc.open_stream(
|
749 |
+
dbuf.copy_to_host()
|
750 |
+
).read_all())
|
751 |
+
|
752 |
+
|
753 |
+
def test_create_table_with_device_buffers():
|
754 |
+
# ARROW-11872: make sure that we can create an Arrow Table from
|
755 |
+
# GPU-located Arrays without crashing.
|
756 |
+
hbuf, htable, dbuf, dtable = make_table_cuda()
|
757 |
+
# Construct a new Table from the device Table
|
758 |
+
dtable2 = pa.Table.from_arrays(dtable.columns, dtable.column_names)
|
759 |
+
# Assert basic fields the same between host and device tables
|
760 |
+
assert htable.schema == dtable2.schema
|
761 |
+
assert htable.num_rows == dtable2.num_rows
|
762 |
+
assert htable.num_columns == dtable2.num_columns
|
763 |
+
# Assert byte-level equality
|
764 |
+
assert hbuf.equals(dbuf.copy_to_host())
|
765 |
+
# Copy DtoH and assert the tables are still equivalent
|
766 |
+
assert htable.equals(pa.ipc.open_stream(
|
767 |
+
dbuf.copy_to_host()
|
768 |
+
).read_all())
|
769 |
+
|
770 |
+
|
771 |
+
def other_process_for_test_IPC(handle_buffer, expected_arr):
|
772 |
+
other_context = pa.cuda.Context(0)
|
773 |
+
ipc_handle = pa.cuda.IpcMemHandle.from_buffer(handle_buffer)
|
774 |
+
ipc_buf = other_context.open_ipc_buffer(ipc_handle)
|
775 |
+
ipc_buf.context.synchronize()
|
776 |
+
buf = ipc_buf.copy_to_host()
|
777 |
+
assert buf.size == expected_arr.size, repr((buf.size, expected_arr.size))
|
778 |
+
arr = np.frombuffer(buf, dtype=expected_arr.dtype)
|
779 |
+
np.testing.assert_equal(arr, expected_arr)
|
780 |
+
|
781 |
+
|
782 |
+
@cuda_ipc
|
783 |
+
@pytest.mark.parametrize("size", [0, 1, 1000])
|
784 |
+
def test_IPC(size):
|
785 |
+
import multiprocessing
|
786 |
+
ctx = multiprocessing.get_context('spawn')
|
787 |
+
arr, cbuf = make_random_buffer(size=size, target='device')
|
788 |
+
ipc_handle = cbuf.export_for_ipc()
|
789 |
+
handle_buffer = ipc_handle.serialize()
|
790 |
+
p = ctx.Process(target=other_process_for_test_IPC,
|
791 |
+
args=(handle_buffer, arr))
|
792 |
+
p.start()
|
793 |
+
p.join()
|
794 |
+
assert p.exitcode == 0
|
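The copy tests above exercise CudaBuffer.copy_from_host / copy_to_host with explicit positions and byte counts. As a quick orientation, here is a minimal sketch of the same round trip outside the test harness; it assumes a CUDA-capable GPU and a pyarrow build with CUDA enabled, and the 64-byte payload is an arbitrary choice.

# Minimal sketch: host -> device -> host round trip with pyarrow.cuda.
# Assumes a CUDA-capable GPU and a pyarrow build with CUDA support.
import numpy as np
import pyarrow as pa
from pyarrow import cuda

ctx = cuda.Context(0)                    # context on GPU device 0
host = np.arange(64, dtype=np.uint8)     # arbitrary example payload
dbuf = ctx.new_buffer(host.nbytes)       # allocate device memory
dbuf.copy_from_host(pa.py_buffer(host), nbytes=host.nbytes)
back = np.frombuffer(dbuf.copy_to_host(), dtype=np.uint8)
assert (back == host).all()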
venv/lib/python3.10/site-packages/pyarrow/tests/test_cuda_numba_interop.py
ADDED
@@ -0,0 +1,235 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest
import pyarrow as pa
import numpy as np

dtypes = ['uint8', 'int16', 'float32']
cuda = pytest.importorskip("pyarrow.cuda")
nb_cuda = pytest.importorskip("numba.cuda")

from numba.cuda.cudadrv.devicearray import DeviceNDArray  # noqa: E402


context_choices = None
context_choice_ids = ['pyarrow.cuda', 'numba.cuda']


def setup_module(module):
    np.random.seed(1234)
    ctx1 = cuda.Context()
    nb_ctx1 = ctx1.to_numba()
    nb_ctx2 = nb_cuda.current_context()
    ctx2 = cuda.Context.from_numba(nb_ctx2)
    module.context_choices = [(ctx1, nb_ctx1), (ctx2, nb_ctx2)]


def teardown_module(module):
    del module.context_choices


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
def test_context(c):
    ctx, nb_ctx = context_choices[c]
    assert ctx.handle == nb_ctx.handle.value
    assert ctx.handle == ctx.to_numba().handle.value
    ctx2 = cuda.Context.from_numba(nb_ctx)
    assert ctx.handle == ctx2.handle
    size = 10
    buf = ctx.new_buffer(size)
    assert ctx.handle == buf.context.handle


def make_random_buffer(size, target='host', dtype='uint8', ctx=None):
    """Return a host or device buffer with random data.
    """
    dtype = np.dtype(dtype)
    if target == 'host':
        assert size >= 0
        buf = pa.allocate_buffer(size*dtype.itemsize)
        arr = np.frombuffer(buf, dtype=dtype)
        arr[:] = np.random.randint(low=0, high=255, size=size,
                                   dtype=np.uint8)
        return arr, buf
    elif target == 'device':
        arr, buf = make_random_buffer(size, target='host', dtype=dtype)
        dbuf = ctx.new_buffer(size * dtype.itemsize)
        dbuf.copy_from_host(buf, position=0, nbytes=buf.size)
        return arr, dbuf
    raise ValueError('invalid target value')


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
@pytest.mark.parametrize("size", [0, 1, 8, 1000])
def test_from_object(c, dtype, size):
    ctx, nb_ctx = context_choices[c]
    arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)

    # Creating device buffer from numba DeviceNDArray:
    darr = nb_cuda.to_device(arr)
    cbuf2 = ctx.buffer_from_object(darr)
    assert cbuf2.size == cbuf.size
    arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
    np.testing.assert_equal(arr, arr2)

    # Creating device buffer from a slice of numba DeviceNDArray:
    if size >= 8:
        # 1-D arrays
        for s in [slice(size//4, None, None),
                  slice(size//4, -(size//4), None)]:
            cbuf2 = ctx.buffer_from_object(darr[s])
            arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
            np.testing.assert_equal(arr[s], arr2)

        # cannot test negative strides due to numba bug, see its issue 3705
        if 0:
            rdarr = darr[::-1]
            cbuf2 = ctx.buffer_from_object(rdarr)
            assert cbuf2.size == cbuf.size
            arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
            np.testing.assert_equal(arr, arr2)

        with pytest.raises(ValueError,
                           match=('array data is non-contiguous')):
            ctx.buffer_from_object(darr[::2])

        # a rectangular 2-D array
        s1 = size//4
        s2 = size//s1
        assert s1 * s2 == size
        cbuf2 = ctx.buffer_from_object(darr.reshape(s1, s2))
        assert cbuf2.size == cbuf.size
        arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
        np.testing.assert_equal(arr, arr2)

        with pytest.raises(ValueError,
                           match=('array data is non-contiguous')):
            ctx.buffer_from_object(darr.reshape(s1, s2)[:, ::2])

        # a 3-D array
        s1 = 4
        s2 = size//8
        s3 = size//(s1*s2)
        assert s1 * s2 * s3 == size
        cbuf2 = ctx.buffer_from_object(darr.reshape(s1, s2, s3))
        assert cbuf2.size == cbuf.size
        arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
        np.testing.assert_equal(arr, arr2)

        with pytest.raises(ValueError,
                           match=('array data is non-contiguous')):
            ctx.buffer_from_object(darr.reshape(s1, s2, s3)[::2])

    # Creating device buffer from an object implementing the CUDA array
    # interface:
    class MyObj:
        def __init__(self, darr):
            self.darr = darr

        @property
        def __cuda_array_interface__(self):
            return self.darr.__cuda_array_interface__

    cbuf2 = ctx.buffer_from_object(MyObj(darr))
    assert cbuf2.size == cbuf.size
    arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype)
    np.testing.assert_equal(arr, arr2)


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_numba_memalloc(c, dtype):
    ctx, nb_ctx = context_choices[c]
    dtype = np.dtype(dtype)
    # Allocate memory using the numba context.
    # Warning: this will not be reflected in the pyarrow context manager
    # (e.g. bytes_allocated does not change)
    size = 10
    mem = nb_ctx.memalloc(size * dtype.itemsize)
    darr = DeviceNDArray((size,), (dtype.itemsize,), dtype, gpu_data=mem)
    darr[:5] = 99
    darr[5:] = 88
    np.testing.assert_equal(darr.copy_to_host()[:5], 99)
    np.testing.assert_equal(darr.copy_to_host()[5:], 88)

    # wrap numba-allocated memory with CudaBuffer
    cbuf = cuda.CudaBuffer.from_numba(mem)
    arr2 = np.frombuffer(cbuf.copy_to_host(), dtype=dtype)
    np.testing.assert_equal(arr2, darr.copy_to_host())


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_pyarrow_memalloc(c, dtype):
    ctx, nb_ctx = context_choices[c]
    size = 10
    arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)

    # wrap CudaBuffer with numba device array
    mem = cbuf.to_numba()
    darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
    np.testing.assert_equal(darr.copy_to_host(), arr)


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_numba_context(c, dtype):
    ctx, nb_ctx = context_choices[c]
    size = 10
    with nb_cuda.gpus[0]:
        arr, cbuf = make_random_buffer(size, target='device',
                                       dtype=dtype, ctx=ctx)
        assert cbuf.context.handle == nb_ctx.handle.value
        mem = cbuf.to_numba()
        darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
        np.testing.assert_equal(darr.copy_to_host(), arr)
        darr[0] = 99
        cbuf.context.synchronize()
        arr2 = np.frombuffer(cbuf.copy_to_host(), dtype=dtype)
        assert arr2[0] == 99


@pytest.mark.parametrize("c", range(len(context_choice_ids)),
                         ids=context_choice_ids)
@pytest.mark.parametrize("dtype", dtypes, ids=dtypes)
def test_pyarrow_jit(c, dtype):
    ctx, nb_ctx = context_choices[c]

    @nb_cuda.jit
    def increment_by_one(an_array):
        pos = nb_cuda.grid(1)
        if pos < an_array.size:
            an_array[pos] += 1

    # applying a numba.cuda kernel to memory held by a CudaBuffer
    size = 10
    arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx)
    threadsperblock = 32
    blockspergrid = (arr.size + (threadsperblock - 1)) // threadsperblock
    mem = cbuf.to_numba()
    darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
    increment_by_one[blockspergrid, threadsperblock](darr)
    cbuf.context.synchronize()
    arr1 = np.frombuffer(cbuf.copy_to_host(), dtype=arr.dtype)
    np.testing.assert_equal(arr1, arr + 1)
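test_cuda_numba_interop.py above checks that pyarrow and numba can address the same device memory from either side. A condensed sketch of that zero-copy pattern, assuming a CUDA GPU with both pyarrow.cuda and numba.cuda available (the buffer contents are arbitrary):

# Minimal sketch: share one GPU allocation between pyarrow and numba with
# no copy.  Assumes a CUDA GPU plus working pyarrow.cuda and numba.cuda.
import numpy as np
from pyarrow import cuda
from numba.cuda.cudadrv.devicearray import DeviceNDArray

ctx = cuda.Context(0)
arr = np.arange(16, dtype=np.float32)
cbuf = ctx.buffer_from_data(arr)         # HtoD copy into a CudaBuffer
mem = cbuf.to_numba()                    # wrap the same device memory
darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem)
# Mutations through the numba view are visible through the pyarrow buffer.
darr[0] = 99.0
ctx.synchronize()
out = np.frombuffer(cbuf.copy_to_host(), dtype=np.float32)
assert out[0] == 99.0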
venv/lib/python3.10/site-packages/pyarrow/tests/test_cython.py
ADDED
@@ -0,0 +1,200 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import os
import shutil
import subprocess
import sys

import pytest

import pyarrow as pa
import pyarrow.tests.util as test_util

here = os.path.dirname(os.path.abspath(__file__))
test_ld_path = os.environ.get('PYARROW_TEST_LD_PATH', '')

if os.name == 'posix':
    compiler_opts = ['-std=c++17']
elif os.name == 'nt':
    compiler_opts = ['-D_ENABLE_EXTENDED_ALIGNED_STORAGE', '/std:c++17']
else:
    compiler_opts = []

setup_template = """if 1:
    from setuptools import setup
    from Cython.Build import cythonize

    import numpy as np

    import pyarrow as pa

    ext_modules = cythonize({pyx_file!r})
    compiler_opts = {compiler_opts!r}
    custom_ld_path = {test_ld_path!r}

    for ext in ext_modules:
        # XXX required for numpy/numpyconfig.h,
        # included from arrow/python/api.h
        ext.include_dirs.append(np.get_include())
        ext.include_dirs.append(pa.get_include())
        ext.libraries.extend(pa.get_libraries())
        ext.library_dirs.extend(pa.get_library_dirs())
        if custom_ld_path:
            ext.library_dirs.append(custom_ld_path)
        ext.extra_compile_args.extend(compiler_opts)
        print("Extension module:",
              ext, ext.include_dirs, ext.libraries, ext.library_dirs)

    setup(
        ext_modules=ext_modules,
    )
"""


def check_cython_example_module(mod):
    arr = pa.array([1, 2, 3])
    assert mod.get_array_length(arr) == 3
    with pytest.raises(TypeError, match="not an array"):
        mod.get_array_length(None)

    scal = pa.scalar(123)
    cast_scal = mod.cast_scalar(scal, pa.utf8())
    assert cast_scal == pa.scalar("123")
    with pytest.raises(NotImplementedError,
                       match="Unsupported cast from int64 to list using function "
                             "cast_list"):
        mod.cast_scalar(scal, pa.list_(pa.int64()))


@pytest.mark.cython
def test_cython_api(tmpdir):
    """
    Basic test for the Cython API.
    """
    # Fail early if cython is not found
    import cython  # noqa

    with tmpdir.as_cwd():
        # Set up temporary workspace
        pyx_file = 'pyarrow_cython_example.pyx'
        shutil.copyfile(os.path.join(here, pyx_file),
                        os.path.join(str(tmpdir), pyx_file))
        # Create setup.py file
        setup_code = setup_template.format(pyx_file=pyx_file,
                                           compiler_opts=compiler_opts,
                                           test_ld_path=test_ld_path)
        with open('setup.py', 'w') as f:
            f.write(setup_code)

        # ARROW-2263: Make environment with this pyarrow/ package first on the
        # PYTHONPATH, for local dev environments
        subprocess_env = test_util.get_modified_env_with_pythonpath()

        # Compile extension module
        subprocess.check_call([sys.executable, 'setup.py',
                               'build_ext', '--inplace'],
                              env=subprocess_env)

        # Check basic functionality
        orig_path = sys.path[:]
        sys.path.insert(0, str(tmpdir))
        try:
            mod = __import__('pyarrow_cython_example')
            check_cython_example_module(mod)
        finally:
            sys.path = orig_path

        # Check the extension module is loadable from a subprocess without
        # pyarrow imported first.
        code = """if 1:
            import sys
            import os

            try:
                # os.add_dll_directory was added in Python 3.8 and is
                # required to find extra DLLs (win32 only)
                for dir in {library_dirs}:
                    os.add_dll_directory(dir)
            except AttributeError:
                pass

            mod = __import__({mod_name!r})
            arr = mod.make_null_array(5)
            assert mod.get_array_length(arr) == 5
            assert arr.null_count == 5
        """.format(mod_name='pyarrow_cython_example',
                   library_dirs=pa.get_library_dirs())

        path_var = None
        if sys.platform == 'win32':
            if not hasattr(os, 'add_dll_directory'):
                # Python 3.8 onwards doesn't check the PATH for extension
                # module DLLs; we have to use os.add_dll_directory instead.
                delim, path_var = ';', 'PATH'
        elif sys.platform == 'darwin':
            delim, path_var = ':', 'DYLD_LIBRARY_PATH'
        else:
            delim, path_var = ':', 'LD_LIBRARY_PATH'

        if path_var:
            paths = sys.path
            paths += pa.get_library_dirs()
            paths += [subprocess_env.get(path_var, '')]
            paths = [path for path in paths if path]
            subprocess_env[path_var] = delim.join(paths)
        subprocess.check_call([sys.executable, '-c', code],
                              stdout=subprocess.PIPE,
                              env=subprocess_env)


@pytest.mark.cython
def test_visit_strings(tmpdir):
    with tmpdir.as_cwd():
        # Set up temporary workspace
        pyx_file = 'bound_function_visit_strings.pyx'
        shutil.copyfile(os.path.join(here, pyx_file),
                        os.path.join(str(tmpdir), pyx_file))
        # Create setup.py file
        setup_code = setup_template.format(pyx_file=pyx_file,
                                           compiler_opts=compiler_opts,
                                           test_ld_path=test_ld_path)
        with open('setup.py', 'w') as f:
            f.write(setup_code)

        subprocess_env = test_util.get_modified_env_with_pythonpath()

        # Compile extension module
        subprocess.check_call([sys.executable, 'setup.py',
                               'build_ext', '--inplace'],
                              env=subprocess_env)

    sys.path.insert(0, str(tmpdir))
    mod = __import__('bound_function_visit_strings')

    strings = ['a', 'b', 'c']
    visited = []
    mod._visit_strings(strings, visited.append)

    assert visited == strings

    with pytest.raises(ValueError, match="wtf"):
        def raise_on_b(s):
            if s == 'b':
                raise ValueError('wtf')

        mod._visit_strings(strings, raise_on_b)
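The setup_template in test_cython.py is the general recipe for building any Cython extension against pyarrow's C++ API; the test merely interpolates a .pyx file name into it. Extracted as a standalone sketch ('my_module.pyx' is a placeholder file name):

# Minimal sketch of a standalone setup.py for a Cython module that uses
# pyarrow's C++ API; 'my_module.pyx' is a placeholder file name.
from setuptools import setup
from Cython.Build import cythonize

import numpy as np
import pyarrow as pa

ext_modules = cythonize('my_module.pyx')
for ext in ext_modules:
    ext.include_dirs.append(np.get_include())      # numpy headers
    ext.include_dirs.append(pa.get_include())      # arrow/python headers
    ext.libraries.extend(pa.get_libraries())       # link against arrow libs
    ext.library_dirs.extend(pa.get_library_dirs())

setup(ext_modules=ext_modules)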
venv/lib/python3.10/site-packages/pyarrow/tests/test_dataset.py
ADDED
The diff for this file is too large to render.
See raw diff
venv/lib/python3.10/site-packages/pyarrow/tests/test_dataset_encryption.py
ADDED
@@ -0,0 +1,217 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import base64
from datetime import timedelta
import numpy as np
import pyarrow.fs as fs
import pyarrow as pa

import pytest

encryption_unavailable = False

try:
    import pyarrow.parquet as pq
    import pyarrow.dataset as ds
except ImportError:
    pq = None
    ds = None

try:
    from pyarrow.tests.parquet.encryption import InMemoryKmsClient
    import pyarrow.parquet.encryption as pe
except ImportError:
    encryption_unavailable = True


# Marks all of the tests in this module
pytestmark = pytest.mark.dataset


FOOTER_KEY = b"0123456789112345"
FOOTER_KEY_NAME = "footer_key"
COL_KEY = b"1234567890123450"
COL_KEY_NAME = "col_key"


def create_sample_table():
    return pa.table(
        {
            "year": [2020, 2022, 2021, 2022, 2019, 2021],
            "n_legs": [2, 2, 4, 4, 5, 100],
            "animal": [
                "Flamingo",
                "Parrot",
                "Dog",
                "Horse",
                "Brittle stars",
                "Centipede",
            ],
        }
    )


def create_encryption_config():
    return pe.EncryptionConfiguration(
        footer_key=FOOTER_KEY_NAME,
        plaintext_footer=False,
        column_keys={COL_KEY_NAME: ["n_legs", "animal"]},
        encryption_algorithm="AES_GCM_V1",
        # requires timedelta or an assertion is raised
        cache_lifetime=timedelta(minutes=5.0),
        data_key_length_bits=256,
    )


def create_decryption_config():
    return pe.DecryptionConfiguration(cache_lifetime=300)


def create_kms_connection_config():
    return pe.KmsConnectionConfig(
        custom_kms_conf={
            FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"),
            COL_KEY_NAME: COL_KEY.decode("UTF-8"),
        }
    )


def kms_factory(kms_connection_configuration):
    return InMemoryKmsClient(kms_connection_configuration)


@pytest.mark.skipif(
    encryption_unavailable, reason="Parquet Encryption is not currently enabled"
)
def test_dataset_encryption_decryption():
    table = create_sample_table()

    encryption_config = create_encryption_config()
    decryption_config = create_decryption_config()
    kms_connection_config = create_kms_connection_config()

    crypto_factory = pe.CryptoFactory(kms_factory)
    parquet_encryption_cfg = ds.ParquetEncryptionConfig(
        crypto_factory, kms_connection_config, encryption_config
    )
    parquet_decryption_cfg = ds.ParquetDecryptionConfig(
        crypto_factory, kms_connection_config, decryption_config
    )

    # create write_options with dataset encryption config
    pformat = pa.dataset.ParquetFileFormat()
    write_options = pformat.make_write_options(encryption_config=parquet_encryption_cfg)

    mockfs = fs._MockFileSystem()
    mockfs.create_dir("/")

    ds.write_dataset(
        data=table,
        base_dir="sample_dataset",
        format=pformat,
        file_options=write_options,
        filesystem=mockfs,
    )

    # read without decryption config -> should error if the dataset was
    # properly encrypted
    pformat = pa.dataset.ParquetFileFormat()
    with pytest.raises(IOError, match=r"no decryption"):
        ds.dataset("sample_dataset", format=pformat, filesystem=mockfs)

    # set decryption config for parquet fragment scan options
    pq_scan_opts = ds.ParquetFragmentScanOptions(
        decryption_config=parquet_decryption_cfg
    )
    pformat = pa.dataset.ParquetFileFormat(default_fragment_scan_options=pq_scan_opts)
    dataset = ds.dataset("sample_dataset", format=pformat, filesystem=mockfs)

    assert table.equals(dataset.to_table())


@pytest.mark.skipif(
    not encryption_unavailable, reason="Parquet Encryption is currently enabled"
)
def test_write_dataset_parquet_without_encryption():
    """Test write_dataset with ParquetFileFormat and check that an exception
    is raised if you try to set encryption_config using make_write_options."""

    # Set the encryption configuration using ParquetFileFormat
    # and make_write_options
    pformat = pa.dataset.ParquetFileFormat()

    with pytest.raises(NotImplementedError):
        _ = pformat.make_write_options(encryption_config="some value")


@pytest.mark.skipif(
    encryption_unavailable, reason="Parquet Encryption is not currently enabled"
)
def test_large_row_encryption_decryption():
    """Test encryption and decryption of a large number of rows."""

    class NoOpKmsClient(pe.KmsClient):
        def wrap_key(self, key_bytes: bytes, _: str) -> bytes:
            b = base64.b64encode(key_bytes)
            return b

        def unwrap_key(self, wrapped_key: bytes, _: str) -> bytes:
            b = base64.b64decode(wrapped_key)
            return b

    row_count = 2**15 + 1
    table = pa.Table.from_arrays(
        [pa.array(np.random.rand(row_count), type=pa.float32())], names=["foo"]
    )

    kms_config = pe.KmsConnectionConfig()
    crypto_factory = pe.CryptoFactory(lambda _: NoOpKmsClient())
    encryption_config = pe.EncryptionConfiguration(
        footer_key="UNIMPORTANT_KEY",
        column_keys={"UNIMPORTANT_KEY": ["foo"]},
        double_wrapping=True,
        plaintext_footer=False,
        data_key_length_bits=128,
    )
    pqe_config = ds.ParquetEncryptionConfig(
        crypto_factory, kms_config, encryption_config
    )
    pqd_config = ds.ParquetDecryptionConfig(
        crypto_factory, kms_config, pe.DecryptionConfiguration()
    )
    scan_options = ds.ParquetFragmentScanOptions(decryption_config=pqd_config)
    file_format = ds.ParquetFileFormat(default_fragment_scan_options=scan_options)
    write_options = file_format.make_write_options(encryption_config=pqe_config)
    file_decryption_properties = crypto_factory.file_decryption_properties(kms_config)

    mockfs = fs._MockFileSystem()
    mockfs.create_dir("/")

    path = "large-row-test-dataset"
    ds.write_dataset(table, path, format=file_format,
                     file_options=write_options, filesystem=mockfs)

    file_path = path + "/part-0.parquet"
    new_table = pq.ParquetFile(
        file_path, decryption_properties=file_decryption_properties,
        filesystem=mockfs
    ).read()
    assert table == new_table

    dataset = ds.dataset(path, format=file_format, filesystem=mockfs)
    new_table = dataset.to_table()
    assert table == new_table
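The dataset tests above drive encryption through ds.write_dataset; the same CryptoFactory machinery also covers a single Parquet file written with pyarrow.parquet. A sketch reusing kms_factory and create_sample_table from this file (the local output path is a placeholder):

# Minimal sketch: encrypt and decrypt one Parquet file with the same
# CryptoFactory flow.  Key names and 'sample.parquet' are placeholders.
import pyarrow.parquet as pq
import pyarrow.parquet.encryption as pe

kms_config = pe.KmsConnectionConfig(
    custom_kms_conf={"footer_key": "0123456789112345",
                     "col_key": "1234567890123450"})
crypto_factory = pe.CryptoFactory(kms_factory)   # kms_factory as defined above
enc_cfg = pe.EncryptionConfiguration(
    footer_key="footer_key",
    column_keys={"col_key": ["n_legs", "animal"]})

file_enc_props = crypto_factory.file_encryption_properties(kms_config, enc_cfg)
pq.write_table(create_sample_table(), "sample.parquet",
               encryption_properties=file_enc_props)

file_dec_props = crypto_factory.file_decryption_properties(kms_config)
table = pq.ParquetFile("sample.parquet",
                       decryption_properties=file_dec_props).read()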